@ part of the hostname. Unpublished ports are not displayed in Workbench and have a default access level of "private".
\ No newline at end of file
diff --git a/doc/_includes/_container_runtime_constraints.liquid b/doc/_includes/_container_runtime_constraints.liquid
index 1c62dbb239..378d82dc0d 100644
--- a/doc/_includes/_container_runtime_constraints.liquid
+++ b/doc/_includes/_container_runtime_constraints.liquid
@@ -15,9 +15,23 @@ table(table table-bordered table-condensed).
|keep_cache_disk|integer|When the container process accesses data from Keep via the filesystem, that data will be cached on disk, up to this amount in bytes.|Optional. If your cluster is configured to use a disk cache by default, the default size will match your @ram@ constraint, bounded between 2GiB and 32GiB.|
|keep_cache_ram|integer|When the container process accesses data from Keep via the filesystem, that data will be cached in memory, up to this amount in bytes.|Optional. If your cluster is configured to use a RAM cache by default, the administrator sets a default cache size.|
|API|boolean|When set, ARVADOS_API_HOST and ARVADOS_API_TOKEN will be set, and container will have networking enabled to access the Arvados API server.|Optional.|
-|cuda|object|Request CUDA GPU support, see below|Optional.|
+|gpu|object|Request GPU support, see below|Optional.|
+|cuda|object|Old way to request CUDA GPU support, included for backwards compatibility only. Use the @gpu@ field instead.|Deprecated.|
-h3. CUDA GPU support
+h3. GPU support
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
+|stack|string|One of @cuda@ or @rocm@ to request NVIDIA or AMD GPU support.||
+|device_count|int|Number of GPUs to request.|Count greater than 0 enables GPU support.|
+|driver_version|string|Minimum driver version, in "X.Y" format.|Required when @device_count@ > 0.|
+|hardware_target|array of strings|For CUDA: either a single item giving the minimum CUDA hardware capability in "X.Y" format, or multiple items listing specific CUDA hardware capability versions, one of which must exactly match the compute node where the container is scheduled.
+For ROCm: a list of one or more hardware targets (e.g. gfx1100) corresponding to the GPU architectures supported by the container. To be scheduled, at least one item in this list must match the @HardwareTarget@ of one of the cluster's @InstanceTypes@.|Required when @device_count@ > 0.|
+|vram|int|Amount of VRAM to request, in bytes.||
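+
+As an illustrative sketch (field values are examples, not recommendations), a container request asking for one NVIDIA GPU might set its @runtime_constraints@ like this:
+
+
+"runtime_constraints": {
+  "gpu": {
+    "stack": "cuda",
+    "device_count": 1,
+    "driver_version": "11.0",
+    "hardware_target": ["8.0", "8.6"],
+    "vram": 8589934592
+  }
+}
+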
+
+h3. CUDA support (deprecated)
+
+Note: This API is deprecated. Use the @gpu@ field instead.
table(table table-bordered table-condensed).
|_. Key|_. Type|_. Description|_. Notes|
diff --git a/doc/_includes/_create_superuser_token.liquid b/doc/_includes/_create_superuser_token.liquid
deleted file mode 100644
index ed085ea105..0000000000
--- a/doc/_includes/_create_superuser_token.liquid
+++ /dev/null
@@ -1,14 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-On the API server, use the following commands:
-
-
-~$ cd /var/www/arvados-api/current
-$ sudo -u webserver-user RAILS_ENV=production bin/bundle exec script/create_superuser_token.rb
-zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
-
-
diff --git a/doc/_includes/_gpg_key_fingerprint.liquid b/doc/_includes/_gpg_key_fingerprint.liquid
deleted file mode 100644
index a10fd8688d..0000000000
--- a/doc/_includes/_gpg_key_fingerprint.liquid
+++ /dev/null
@@ -1,15 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-The Arvados signing key fingerprint is
-
-
-pub rsa2048 2010-11-15 [SC]
- B2DA 2991 656E B4A5 0314 CA2B 5716 5911 1078 ECD7
-uid [ unknown] Arvados Automatic Signing Key
-sub rsa2048 2010-11-15 [E]
-
-
diff --git a/doc/_includes/_html_tags.liquid b/doc/_includes/_html_tags.liquid
new file mode 100644
index 0000000000..ed8a7b3448
--- /dev/null
+++ b/doc/_includes/_html_tags.liquid
@@ -0,0 +1,11 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The following HTML tags are permitted: *a*, *b*, *blockquote*, *br*, *code*, *del*, *dd*, *dl*, *dt*, *em*, *h1*, *h2*, *h3*, *h4*, *h5*, *h6*, *hr*, *i*, *img*, *kbd*, *li*, *ol*, *p*, *pre*, *s*, *section*, *span*, *strong*, *sub*, *sup*, and *ul*.
+
+The following HTML attributes are permitted: *src*, *width*, *height*, *href*, *alt*, *title*, and *style*.
+
+All styling must be applied inline using the *style* attribute. Disallowed tags and attributes will not be rendered.
\ No newline at end of file
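+
+For example, a description using only permitted tags and an inline style might look like this (purely illustrative):
+
+
+<p>See the <a href="https://example.com/report.html">full report</a> for details.</p>
+<p><strong>Note:</strong> intermediate results are <span style="color: #888;">preliminary</span>.</p>
+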
diff --git a/doc/_includes/_install_ansible.liquid b/doc/_includes/_install_ansible.liquid
new file mode 100644
index 0000000000..955722db43
--- /dev/null
+++ b/doc/_includes/_install_ansible.liquid
@@ -0,0 +1,104 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{{ header_level|default: 'h3' }}(#install-ansible-pipx). Option 1. Install Ansible with pipx
+
+The pipx tool is packaged in many of our supported distributions. You can install it on Debian/Ubuntu by running:
+
+
+# apt install pipx
+
+
+
+Or install it on Red Hat/AlmaLinux/Rocky Linux by running:
+
+
+# dnf install pipx
+
+
+
+{% include 'notebox_begin' %}
+If the pipx package is not found, it is not available for your distribution. Instead "install Ansible with virtualenv and pip":#install-ansible-venv.
+{% include 'notebox_end' %}
+
+After pipx is installed, install Ansible by running:
+
+
+$ cd arvados/tools/ansible
+arvados/tools/ansible $ pipx install "$(grep -E '^ansible-core[^-_[:alnum:]]' requirements.txt)"
+ installed package ansible-core 2.15.13, installed using Python 3.11.2
+ These apps are now globally available
+ - ansible
+ - ansible-config
+ - ansible-connection
+ - ansible-console
+ - ansible-doc
+ - ansible-galaxy
+ - ansible-inventory
+ - ansible-playbook
+ - ansible-pull
+ - ansible-test
+ - ansible-vault
+done! ✨ 🌟 ✨
+
+arvados/tools/ansible $ pipx runpip ansible-core install -r requirements.txt
+[…]
+Successfully installed argcomplete-3.6.2 certifi-2025.6.15 charset_normalizer-3.4.2 docker-7.1.0 [… and other packages…]
+
+arvados/tools/ansible $ ansible-galaxy install -r requirements.yml
+[…]
+ansible.posix:2.0.0 was installed successfully
+community.docker:4.6.1 was installed successfully
+community.general:10.7.1 was installed successfully
+community.postgresql:4.1.0 was installed successfully
+
+
+
+If you complete these steps successfully, skip the next section.
+
+{{ header_level|default: 'h3' }}(#install-ansible-venv). Option 2. Install Ansible in a virtualenv with pip
+
+This method works on all of our supported distributions, but requires you to configure a lot of paths manually. Install Python and virtualenv on Debian/Ubuntu by running:
+
+
+# apt install python3-venv
+
+
+
+Or install it on Red Hat/AlmaLinux/Rocky Linux by running:
+
+
+# dnf install python3
+
+
+
+Next, set up a virtualenv. If you want to install this somewhere other than @~/arvados-ansible@, you may change that path each time it appears.
+
+
+$ cd arvados/tools/ansible
+arvados/tools/ansible $ python3 -m venv ~/arvados-ansible
+
+arvados/tools/ansible $ ~/arvados-ansible/bin/pip install -r requirements.txt
+[…]
+Successfully installed MarkupSafe-3.0.2 PyYAML-6.0.2 ansible-core-2.15.13 [… and other packages…]
+
+arvados/tools/ansible $ ~/arvados-ansible/bin/ansible-galaxy install -r requirements.yml
+[…]
+ansible.posix:2.0.0 was installed successfully
+community.docker:4.6.1 was installed successfully
+community.general:10.7.1 was installed successfully
+community.postgresql:4.1.0 was installed successfully
+
+
+
+Finally, add all the Ansible tools to your executable path. If you keep personal executables somewhere other than @~/.local/bin@, you may change that path.
+
+
+$ ln -st ~/.local/bin ~/arvados-ansible/bin/ansible*
+
+
+
+Alternatively, you may reconfigure your shell to add @$HOME/arvados-ansible/bin@ to the end of your @$PATH@ variable.
diff --git a/doc/_includes/_install_ca_cert.liquid b/doc/_includes/_install_ca_cert.liquid
index a886a62dbd..186ad7e10b 100644
--- a/doc/_includes/_install_ca_cert.liquid
+++ b/doc/_includes/_install_ca_cert.liquid
@@ -48,7 +48,7 @@ h4. Debian/Ubuntu
-h4. Alma/CentOS/Red Hat/Rocky
+h4. Red Hat, AlmaLinux, and Rocky Linux
cp {{ca_cert_name}} /etc/pki/ca-trust/source/anchors/
diff --git a/doc/_includes/_install_compute_docker.liquid b/doc/_includes/_install_compute_docker.liquid
index 9c3e54c7c3..ed4964f071 100644
--- a/doc/_includes/_install_compute_docker.liquid
+++ b/doc/_includes/_install_compute_docker.liquid
@@ -15,7 +15,7 @@ If you are using a distribution in the compute nodes that ships with cgroups v2
After making changes, reboot the system to make these changes effective.
-h3. Alma/CentOS/Red Hat/Rocky
+h3. Red Hat, AlmaLinux, and Rocky Linux
~$ sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0'
diff --git a/doc/_includes/_install_cuda.liquid b/doc/_includes/_install_cuda.liquid
index cb1519a616..394a58c94c 100644
--- a/doc/_includes/_install_cuda.liquid
+++ b/doc/_includes/_install_cuda.liquid
@@ -6,16 +6,4 @@ SPDX-License-Identifier: CC-BY-SA-3.0
h2(#cuda). Install NVIDIA CUDA Toolkit (optional)
-If you want to use NVIDIA GPUs, "install the CUDA toolkit.":https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
-
-In addition, you also must install the NVIDIA Container Toolkit:
-
-
-DIST=$(. /etc/os-release; echo $ID$VERSION_ID)
-curl -s -L https://nvidia.github.io/libnvidia-container/gpgkey | \
- sudo apt-key add -
-curl -s -L https://nvidia.github.io/libnvidia-container/$DIST/libnvidia-container.list | \
- sudo tee /etc/apt/sources.list.d/libnvidia-container.list
-sudo apt-get update
-apt-get install libnvidia-container1 libnvidia-container-tools nvidia-container-toolkit
-
+If you want to use NVIDIA GPUs, "install the CUDA toolkit":https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html and the "NVIDIA Container Toolkit":https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html.
diff --git a/doc/_includes/_install_debian_key.liquid b/doc/_includes/_install_debian_key.liquid
index 91b24a8a8d..84937e8398 100644
--- a/doc/_includes/_install_debian_key.liquid
+++ b/doc/_includes/_install_debian_key.liquid
@@ -5,13 +5,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-# apt-get --no-install-recommends install curl gnupg2 ca-certificates
-# curl https://apt.arvados.org/pubkey.gpg -o /etc/apt/trusted.gpg.d/arvados.asc
+# install -d /etc/apt/keyrings
+# curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
-
-The Arvados package signing GPG key is also available via the keyservers, though they can be unreliable. To retrieve the signing key via keyserver.ubuntu.com:
-
-
-# /usr/bin/apt-key adv --keyserver keyserver.ubuntu.com --recv 1078ECD7
-
diff --git a/doc/_includes/_install_packages.liquid b/doc/_includes/_install_packages.liquid
index 595b0a8b71..681d68e98f 100644
--- a/doc/_includes/_install_packages.liquid
+++ b/doc/_includes/_install_packages.liquid
@@ -13,7 +13,7 @@ fallback on arvados_component if not defined
h2(#install-packages). Install {{packages_to_install | join: " and " }}
-h3. Alma/CentOS/Red Hat/Rocky
+h3. Red Hat, AlmaLinux, and Rocky Linux
# dnf install {{packages_to_install | join: " "}}
@@ -23,6 +23,6 @@ h3. Alma/CentOS/Red Hat/Rocky
h3. Debian and Ubuntu
-# apt-get install {{packages_to_install | join " "}}
+# apt install {{packages_to_install | join " "}}
diff --git a/doc/_includes/_install_ruby_and_bundler.liquid b/doc/_includes/_install_ruby_and_bundler.liquid
index 5d5bc9e9d7..7cf87f97cf 100644
--- a/doc/_includes/_install_ruby_and_bundler.liquid
+++ b/doc/_includes/_install_ruby_and_bundler.liquid
@@ -6,14 +6,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Ruby 2.7 or newer is required.
-* "Option 1: Install from packages":#packages
-* "Option 2: Install with RVM":#rvm
-
-h2(#packages). Option 1: Install from packages
-
-h3. Alma/CentOS/Red Hat/Rocky
-
-Version 7 of these distributions does not provide a new enough Ruby version. Use "RVM":#rvm to install Ruby 2.7 or newer.
+h2. Red Hat, AlmaLinux, and Rocky Linux
Version 8 of these distributions provides Ruby 2.7. You can install it by running:
@@ -22,63 +15,10 @@ Version 8 of these distributions provides Ruby 2.7. You can install it by runnin
# dnf install --enablerepo=devel ruby ruby-devel
-h3. Debian and Ubuntu
-
-Debian 10 (buster) and Ubuntu 18.04 (bionic) ship with Ruby 2.5, which is too old for Arvados. Use "RVM":#rvm to install Ruby 2.7 or newer.
+h2. Debian and Ubuntu
-Debian 11 (bullseye) and Ubuntu 20.04 (focal) and later ship with Ruby 2.7 or newer, which is sufficient for Arvados.
+All supported versions of Debian and Ubuntu include a version of Ruby you can use with Arvados.
-# apt-get --no-install-recommends install ruby ruby-dev
+# apt --no-install-recommends install ruby ruby-dev
-
-h2(#rvm). Option 2: Install with RVM
-
-{% include 'notebox_begin_warning' %}
-We do not recommend using RVM unless the Ruby version provided by your OS distribution is older than 2.7.
-{% include 'notebox_end' %}
-
-h3. Install gpg and curl
-
-h4. CentOS/Red Hat 7
-
-
-yum install gpg curl which findutils procps
-
-
-{% comment %}
-To build ruby 3.2.2 on CentOS 7, add: "yum --enablerepo=powertools install libyaml-devel"
-{% endcomment %}
-
-h4. Alma/CentOS/Red Hat/Rocky 8+
-
-
-dnf install gpg curl which findutils procps
-
-
-h4. Debian and Ubuntu
-
-
-apt-get --no-install-recommends install gpg curl ca-certificates dirmngr procps
-
-
-h3. Install RVM, Ruby and Bundler
-
-
-gpg --keyserver pgp.mit.edu --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
-\curl -sSL https://get.rvm.io | bash -s stable --ruby=2.7.7
-
-
-This command installs the Ruby 2.7.7 release, as well as the @gem@ and @bundle@ commands.
-
-To use Ruby installed from RVM, load it in an open shell like this:
-
-
-source /usr/local/rvm/scripts/rvm
-
-
-Alternately you can use @rvm-exec@ (the first parameter is the ruby version to use, or "default"), for example:
-
-
-rvm-exec default ruby -v
-
diff --git a/doc/_includes/_mount_types.liquid b/doc/_includes/_mount_types.liquid
index 86e05be866..f22f7d3551 100644
--- a/doc/_includes/_mount_types.liquid
+++ b/doc/_includes/_mount_types.liquid
@@ -24,15 +24,6 @@ At container startup, the target path will have the same directory structure as
"kind":"collection",
"uuid":"..."
}
|
-|Git tree|@git_tree@|@"uuid"@ must be the UUID of an Arvados-hosted git repository.
-@"commit"@ must be a full 40-character commit hash.
-@"path"@, if provided, must be "/".
-At container startup, the target path will have the source tree indicated by the given commit. The @.git@ metadata directory _will not_ be available.|{
- "kind":"git_tree",
- "uuid":"zzzzz-s0uqq-xxxxxxxxxxxxxxx",
- "commit":"f315c59f90934cccae6381e72bba59d27ba42099"
-}
-
|
|Temporary directory|@tmp@|@"capacity"@: capacity (in bytes) of the storage device.
@"device_type"@ (optional, default "network"): one of @{"ram", "ssd", "disk", "network"}@ indicating the acceptable level of performance. (*note: not yet implemented as of v1.5*)
At container startup, the target path will be empty. When the container finishes, the content will be discarded. This will be backed by a storage mechanism no slower than the specified type.|{
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index 2d8bbfc806..256e22eae6 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -12,6 +12,7 @@ To simplify certificate management, we recommend creating a single certificate f
xarv1.example.com
*.xarv1.example.com
*.collections.xarv1.example.com
+*.containers.xarv1.example.com
(Replacing @xarv1.example.com@ with your own @${DOMAIN}@)
@@ -22,7 +23,7 @@ The script expects cert/key files with these basenames (matching the role except
# @balancer@ -- Optional on multi-node installations
# @collections@ -- Part of keepweb, must be a wildcard for @*.collections.${DOMAIN}@
-# @controller@
+# @controller@ -- Must be valid for @${DOMAIN}@ and @*.containers.${DOMAIN}@
# @download@ -- Part of keepweb
# @grafana@ -- Service available by default on multi-node installations
# @keepproxy@ -- Corresponds to default domain @keep.${DOMAIN}@
diff --git a/doc/_includes/_note_python_sc.liquid b/doc/_includes/_note_python_sc.liquid
deleted file mode 100644
index 4b081770a7..0000000000
--- a/doc/_includes/_note_python_sc.liquid
+++ /dev/null
@@ -1,29 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin' %}
-
-{% if rh_version %} On CentOS {{rh_version}} and RHEL {{rh_version}},
-{% else %} On CentOS and RHEL,
-{% endif %} these packages require a more recent version from Software Collections. The Software Collection will be installed automatically as long as Software Collections are enabled on your system.
-
-To "enable Software Collections on CentOS":https://wiki.centos.org/AdditionalResources/Repositories/SCL, run:
-
-
-~$ sudo yum install centos-release-scl scl-utils
-
-
-
-To enable Software Collections on RHEL:
-
-
-~$ sudo yum-config-manager --enable rhel-server-rhscl-7-rpms
-
-
-
-"See also section 2.1 of Red Hat's Installation chapter":https://access.redhat.com/documentation/en-US/Red_Hat_Software_Collections/2/html/2.0_Release_Notes/chap-Installation.html .
-
-{% include 'notebox_end' %}
diff --git a/doc/_includes/_setup_debian_repo.liquid b/doc/_includes/_setup_debian_repo.liquid
new file mode 100644
index 0000000000..83f1b02291
--- /dev/null
+++ b/doc/_includes/_setup_debian_repo.liquid
@@ -0,0 +1,31 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+
+packages_to_install may be a space-separated string
+{% endcomment %}
+
+Set up the Arvados package repository
+{%- if packages_to_install == nil %}
+{%- elsif packages_to_install contains " " %} and install the packages
+{%- else %} and install @{{ packages_to_install }}@
+{%- endif %} by running these commands:
+
+
+# install -d /etc/apt/keyrings
+# curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
+# declare $(grep "^VERSION_CODENAME=" /etc/os-release || echo VERSION_CODENAME=MISSING)
+# tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF
+Types: deb
+URIs: https://apt.arvados.org/$VERSION_CODENAME
+Suites: $VERSION_CODENAME
+Components: main
+Signed-by: /etc/apt/keyrings/arvados.asc
+EOF
+# apt update
+{%- if packages_to_install != nil %}
+# apt install {{ packages_to_install }}
+{% endif -%}
+
+
diff --git a/doc/_includes/_setup_redhat_repo.liquid b/doc/_includes/_setup_redhat_repo.liquid
new file mode 100644
index 0000000000..6dd103a2a6
--- /dev/null
+++ b/doc/_includes/_setup_redhat_repo.liquid
@@ -0,0 +1,38 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+
+modules_to_enable and packages_to_install may be space-separated strings
+{% endcomment %}
+
+{%- if modules_to_enable != nil %}
+{% include 'notebox_begin_warning' %}
+
+Arvados tools require newer language runtimes than the default versions included with these distributions. These instructions will **upgrade language runtimes for the entire system**. Check that this won't interfere with any existing software before you proceed.
+
+{% include 'notebox_end' %}
+{% endif -%}
+
+Set up the Arvados package repository
+{%- if packages_to_install == nil %}
+{%- elsif packages_to_install contains " " %} and install the packages
+{%- else %} and install @{{ packages_to_install }}@
+{%- endif %} by running these commands:
+
+
+# tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'
+[arvados]
+name=Arvados
+baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/
+gpgcheck=1
+gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados
+EOF
+{%- if modules_to_enable != nil %}
+# dnf module enable {{ modules_to_enable }}
+{% endif -%}
+{%- if packages_to_install != nil -%}
+# dnf install {{ packages_to_install }}
+{% endif -%}
+
+
diff --git a/doc/_includes/_singularity_mksquashfs_configuration.liquid b/doc/_includes/_singularity_mksquashfs_configuration.liquid
index e31e801c17..149e0ad0bb 100644
--- a/doc/_includes/_singularity_mksquashfs_configuration.liquid
+++ b/doc/_includes/_singularity_mksquashfs_configuration.liquid
@@ -4,7 +4,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-h2(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration
+{{ mksquashfs_header|default: "h2" }}(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration
{% if show_docker_warning != nil %}
{% include 'notebox_begin_warning' %}
diff --git a/doc/_includes/_ssh_intro.liquid b/doc/_includes/_ssh_intro.liquid
index 8cb09f135f..4bf72c1b31 100644
--- a/doc/_includes/_ssh_intro.liquid
+++ b/doc/_includes/_ssh_intro.liquid
@@ -5,7 +5,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Arvados requires a public SSH key in order to securely log in to an Arvados VM instance, or to access an Arvados Git repository. The three sections below help you get started:
+Arvados requires a public SSH key in order to securely log in to an Arvados VM instance. The three sections below help you get started:
# "Getting your SSH key":#gettingkey
# "Adding your key to Arvados Workbench":#workbench
diff --git a/doc/_includes/_start_service.liquid b/doc/_includes/_start_service.liquid
index 27c42c94c9..9d29915fcf 100644
--- a/doc/_includes/_start_service.liquid
+++ b/doc/_includes/_start_service.liquid
@@ -1,3 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
h2(#start-service). Start the service
@@ -10,6 +16,6 @@ h2(#start-service). Start the service
If @systemctl status@ indicates it is not running, use @journalctl@ to check logs for errors:
-# journalctl -n12 --unit {{arvados_component}}
+# journalctl --since -5min -u {{ arvados_component | split: ' ' | join: ' -u ' }}
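+
+For example, if @arvados_component@ were @keepstore keep-web@ (hypothetical values), the template above would render as:
+
+
+# journalctl --since -5min -u keepstore -u keep-web
+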
diff --git a/doc/_includes/_supportedlinux.liquid b/doc/_includes/_supportedlinux.liquid
index a682909355..8072e3a4a0 100644
--- a/doc/_includes/_supportedlinux.liquid
+++ b/doc/_includes/_supportedlinux.liquid
@@ -6,14 +6,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
table(table table-bordered table-condensed).
|_. *Supported Linux Distributions*|
-|AlmaLinux 8|
-|CentOS 8|
-|CentOS 7|
-|Red Hat Enterprise Linux 8|
-|Rocky Linux 8|
+|AlmaLinux 9|
+|AlmaLinux 8 (since 8.4)|
+|Debian 12 ("bookworm")|
|Debian 11 ("bullseye")|
-|Debian 10 ("buster")|
+|Red Hat Enterprise Linux 9|
+|Red Hat Enterprise Linux 8 (since 8.4)|
+|Rocky Linux 9|
+|Rocky Linux 8 (since 8.4)|
+|Ubuntu 24.04 ("noble")|
+|Ubuntu 22.04 ("jammy")|
|Ubuntu 20.04 ("focal")|
-|Ubuntu 18.04 ("bionic")|
-
-Arvados packages are published for current Debian releases (until the EOL date), current Ubuntu LTS releases (until the end of standard support), and the latest version of CentOS.
diff --git a/doc/_includes/_tutorial_git_repo_expectations.liquid b/doc/_includes/_tutorial_git_repo_expectations.liquid
deleted file mode 100644
index 8a172de283..0000000000
--- a/doc/_includes/_tutorial_git_repo_expectations.liquid
+++ /dev/null
@@ -1,9 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin' %}
-This tutorial assumes that you have a working Arvados repository. If you do not have a repository created, you can follow the instructions in the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *$USER/tutorial* repository created in that page as the example.
-{% include 'notebox_end' %}
diff --git a/doc/admin/config-urls.html.textile.liquid b/doc/admin/config-urls.html.textile.liquid
index 3cf6e79722..57a7a05a1b 100644
--- a/doc/admin/config-urls.html.textile.liquid
+++ b/doc/admin/config-urls.html.textile.liquid
@@ -31,7 +31,7 @@ table(table table-bordered table-condensed).
|controller |yes |yes|yes ^2,4^|InternalURLs used by reverse proxy and container shell connections|
|arvados-dispatch-cloud|no |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|
|arvados-dispatch-lsf|no |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|
-|git-http |yes |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|container web services|yes |no |no |controller's InternalURLs are used by reverse proxy (e.g. Nginx)|
|git-ssh |yes |no |no ||
|keepproxy |yes |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
|keepstore |no |yes|yes |All clients connect to InternalURLs|
@@ -160,32 +160,15 @@ Consider this section for the @RailsAPI@ service:
There is no @ExternalURL@ defined because the @RailsAPI@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it.
-The @RailsAPI@ service is also a Rails application, and its listening host/port is defined in the Nginx configuration:
+The @RailsAPI@ service is also a Rails application, and its listening host and port are set in the @arvados-railsapi.service@ unit definition:
-
-server {
- # This configures the Arvados API server. It is written using Ruby
- # on Rails and uses the Passenger application server.
-
- listen localhost:8004;
- server_name localhost-api;
-
- root /var/www/arvados-api/current/public;
- index index.html index.htm index.php;
-
- passenger_enabled on;
-
- # If you are using RVM, uncomment the line below.
- # If you're using system ruby, leave it commented out.
- #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
-
- # This value effectively limits the size of API objects users can
- # create, especially collections. If you change this, you should
- # also ensure the following settings match it:
- # * `client_max_body_size` in the previous server section
- # * `API.MaxRequestSize` in config.yml
- client_max_body_size 128m;
-}
+
+# systemctl cat arvados-railsapi.service
+[...]
+[Service]
+Environment=PASSENGER_ADDRESS=localhost
+Environment=PASSENGER_PORT=8004
+[...]
So then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Arvados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint.
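
A minimal sketch of that configuration, assuming @arvados-railsapi@ listens on @localhost:8004@ as in the unit file above:

{% codeblock as yaml %}
    Services:
      RailsAPI:
        InternalURLs:
          "http://localhost:8004": {}
{% endcodeblock %}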
@@ -202,7 +185,7 @@ Consider this section for the @Controller@ service:
ExternalURL: "https://ClusterID.example.com"
{% endcodeblock %}
-The @ExternalURL@ advertised to clients is @https://ClusterID.example.com@. The @arvados-controller@ process will listen on @localhost@ port 8003. Other Arvados service processes in the cluster can connect to this specific controller instance, using the URL @https://ctrl-0.internal@. Nginx is configured to sit in front of the @Controller@ service and terminate TLS:
+The @ExternalURL@ advertised to clients is @https://ClusterID.example.com@. The @arvados-controller@ process will listen on @localhost@ port 8003. Other Arvados service processes in the cluster can connect to this specific controller instance, using the URL @https://ctrl-0.internal@. Container web service traffic at @https://*.containers.ClusterID.example.com@ is also handled by the same @arvados-controller@ process. Nginx is configured to sit in front of the @Controller@ service and terminate TLS:
# This is the port where nginx expects to contact arvados-controller.
@@ -215,7 +198,9 @@ server {
# the request is reverse proxied to the upstream 'controller'
listen 443 ssl;
- server_name ClusterID.example.com ctrl-0.internal;
+ server_name ClusterID.example.com
+ ctrl-0.internal
+ *.containers.ClusterID.example.com;
ssl_certificate /YOUR/PATH/TO/cert.pem;
ssl_certificate_key /YOUR/PATH/TO/cert.key;
diff --git a/doc/admin/health-checks.html.textile.liquid b/doc/admin/health-checks.html.textile.liquid
index fa273cd204..ea8bcc9628 100644
--- a/doc/admin/health-checks.html.textile.liquid
+++ b/doc/admin/health-checks.html.textile.liquid
@@ -46,26 +46,26 @@ If all checks pass, it writes @health check OK@ to stderr (unless the @-quiet@ f
{% codeblock as yaml %}
Checks:
"arvados-api-server+http://localhost:8004/_health/ping":
- ClockTime: "2022-11-16T16:08:57Z"
- ConfigSourceSHA256: e2c086ae3dd290cf029cb3fe79146529622279b6280cf6cd17dc8d8c30daa57f
- ConfigSourceTimestamp: "2022-11-07T18:08:24.539545Z"
+ ClockTime: "2024-12-13T14:38:25Z"
+ ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c
+ ConfigSourceTimestamp: "2024-12-12T11:14:06.487848-05:00"
HTTPStatusCode: 200
Health: OK
Response:
health: OK
- ResponseTime: 0.017159
- Server: nginx/1.14.0 + Phusion Passenger(R) 6.0.15
- Version: 2.5.0~dev20221116141533
+ ResponseTime: 0.051136
+ Server: nginx/1.26.1 + Phusion Passenger(R) 6.0.23
+ Version: 3.0.0
"arvados-controller+http://localhost:8003/_health/ping":
- ClockTime: "2022-11-16T16:08:57Z"
- ConfigSourceSHA256: e2c086ae3dd290cf029cb3fe79146529622279b6280cf6cd17dc8d8c30daa57f
- ConfigSourceTimestamp: "2022-11-07T18:08:24.539545Z"
+ ClockTime: "2024-12-13T14:38:25Z"
+ ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c
+ ConfigSourceTimestamp: "2024-12-12T11:14:06.487848-05:00"
HTTPStatusCode: 200
Health: OK
Response:
health: OK
- ResponseTime: 0.004748
+ ResponseTime: 0.014869
Server: ""
- Version: 2.5.0~dev20221116141533 (go1.18.8)
+ Version: 3.0.0 (go1.21.10)
# ...
{% endcodeblock %}
diff --git a/doc/admin/inspect.html.textile.liquid b/doc/admin/inspect.html.textile.liquid
index fff94cb55f..601d26c5cb 100644
--- a/doc/admin/inspect.html.textile.liquid
+++ b/doc/admin/inspect.html.textile.liquid
@@ -25,7 +25,6 @@ table(table table-bordered table-condensed table-hover){width:40em}.
|arvados-controller|✓|
|arvados-dispatch-cloud|✓|
|arvados-dispatch-lsf|✓|
-|arvados-git-httpd||
|arvados-ws|✓|
|composer||
|keepproxy|✓|
diff --git a/doc/admin/keep-measuring-deduplication.html.textile.liquid b/doc/admin/keep-measuring-deduplication.html.textile.liquid
index 13838f61fe..2a45d643d3 100644
--- a/doc/admin/keep-measuring-deduplication.html.textile.liquid
+++ b/doc/admin/keep-measuring-deduplication.html.textile.liquid
@@ -10,7 +10,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-The @arvados-client@ tool can be used to generate a deduplication report across an arbitrary number of collections. It can be installed from packages (@apt install arvados-client@ or @yum install arvados-client@).
+The @arvados-client@ tool can be used to generate a deduplication report across an arbitrary number of collections. It can be installed from packages (@apt install arvados-client@ or @dnf install arvados-client@).
h2(#syntax). Syntax
diff --git a/doc/admin/maintenance-and-upgrading.html.textile.liquid b/doc/admin/maintenance-and-upgrading.html.textile.liquid
index 2ec1700fc9..7b73da82ed 100644
--- a/doc/admin/maintenance-and-upgrading.html.textile.liquid
+++ b/doc/admin/maintenance-and-upgrading.html.textile.liquid
@@ -65,7 +65,7 @@ Upgrading Arvados typically involves the following steps:
# Update compute nodes
## (cloud) Rebuild and deploy the "compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html
## (slurm/LSF) Upgrade the @python3-arvados-fuse@ package used on your compute nodes
-# Install new packages using @apt-get upgrade@ or @yum upgrade@.
+# Install new packages using @apt upgrade@ or @dnf upgrade@.
# Wait for package installation scripts as they perform any necessary data migrations.
# Run @arvados-server config-check@ to detect configuration errors or deprecated entries.
# Verify that the Arvados services were restarted as part of the package upgrades.
diff --git a/doc/admin/management-token.html.textile.liquid b/doc/admin/management-token.html.textile.liquid
index a4939b740c..5650c5038d 100644
--- a/doc/admin/management-token.html.textile.liquid
+++ b/doc/admin/management-token.html.textile.liquid
@@ -21,7 +21,6 @@ h2. API server and other services
The following services also support monitoring.
* API server
-* arvados-git-httpd
* controller
* keep-balance
* keepproxy
diff --git a/doc/admin/metrics.html.textile.liquid b/doc/admin/metrics.html.textile.liquid
index ed9fbbd7ae..113536ff58 100644
--- a/doc/admin/metrics.html.textile.liquid
+++ b/doc/admin/metrics.html.textile.liquid
@@ -35,9 +35,7 @@ table(table table-bordered table-condensed table-hover).
|arvados-controller|✓|
|arvados-dispatch-cloud|✓|
|arvados-dispatch-lsf|✓|
-|arvados-git-httpd||
|arvados-ws|✓|
-|composer||
|keepproxy|✓|
|keepstore|✓|
|keep-balance|✓|
diff --git a/doc/admin/restricting-upload-download.html.textile.liquid b/doc/admin/restricting-upload-download.html.textile.liquid
index add99bbadb..19db7bbf29 100644
--- a/doc/admin/restricting-upload-download.html.textile.liquid
+++ b/doc/admin/restricting-upload-download.html.textile.liquid
@@ -35,7 +35,7 @@ The default policy allows anyone to upload or download.
h2. WebDAV and S3 API Permissions
-Permitting @WebDAV@ makes it possible to use WebDAV, S3 API, and upload/download with Workbench 2. It works in terms of individual files. It prints a log each time a user uploads or downloads a file. When @WebDAVLogEvents@ (default true) is enabled, it also adds an entry into the API server @logs@ table.
+Permitting @WebDAV@ makes it possible to use WebDAV, S3 API, and upload/download with Workbench 2. It works in terms of individual files. It prints a log each time a user uploads or downloads a file ("subject to throttling discussed below":#throttling). When @WebDAVLogEvents@ (default true) is enabled, it also adds an entry into the API server @logs@ table.
When a user attempts to upload or download from a service without permission, they will receive a @403 Forbidden@ response. This only applies to file content.
@@ -55,7 +55,8 @@ The default policy allows anyone to upload or download.
Download: true
Upload: true
WebDAVLogEvents: true
-
+ WebDAVLogDownloadInterval: 30s
+
When a user or admin creates a sharing link, a custom scoped token is embedded in that link. This effectively allows anonymous user access to the associated data via that link. These custom scoped tokens are always treated as user tokens for the purposes of restricting download access, even when created by an admin user. In other words, these custom scoped tokens, when used in a sharing link, are always subject to the value of the @WebDAVPermission/User/Download@ configuration setting.
@@ -73,7 +74,7 @@ You set separate permissions for @WebDAV@ and @Keepproxy@, with separate policie
These policies apply to only access from outside the cluster, using Workbench or Arvados CLI tools.
-The @WebDAVLogEvents@ option should be enabled if you intend to the run the "User Activity Report":user-activity.html . If you don't need audits, or you are running a site that is mostly serving public data to anonymous downloaders, you can disable in to avoid the extra API server request.
+The @WebDAVLogEvents@ option should be enabled if you intend to run the "User Activity Report":user-activity.html. If you don't need audits, or you are running a site that is mostly serving public data to anonymous downloaders, you can disable it to avoid the extra API server request.
h3. Audited downloads
@@ -167,3 +168,7 @@ Jul 20 15:03:38 keep.xxxx1.arvadosapi.com keepproxy[63828]: {"level":"info","loc
It is possible to do a reverse lookup from the locator to find all matching collections: the @manifest_text@ field of a collection lists all the block locators that are part of the collection. The @manifest_text@ field also provides the relevant filename in the collection. Because this lookup is rather involved and there is no automated tool to do it, we recommend disabling @KeepproxyPermission.User.Download@ and @KeepproxyPermission.User.Upload@ for sites where the audit log is important and @arv-get@ and @arv-put@ are not essential.
+
+h3(#throttling). WebDAV download log throttling
+
+If a client requests partial content past the start of a file, and a request from the same client for the same file was logged within the last time interval configured by @WebDAVLogDownloadInterval@, @keep-web@ will not write a new log. This throttling applies to both printed and API server logs. The default value of 30 seconds reduces log output when clients like @aws s3 cp@ download one file in small chunks in parallel. Administrators can set this setting to @0@ to disable log throttling. This setting lets administrators choose how they want to balance full auditability against logging overhead: a shorter interval means more download requests are logged, with all the overhead that entails.
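+
+A sketch of the relevant settings, assuming (as in the example earlier on this page) they live under the @Collections@ section of @/etc/arvados/config.yml@:
+
+
+Collections:
+  WebDAVLogEvents: true
+  WebDAVLogDownloadInterval: 0s  # 0 disables throttling; the default is 30s
+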
diff --git a/doc/admin/scoped-tokens.html.textile.liquid b/doc/admin/scoped-tokens.html.textile.liquid
index 415f635dcd..b36c1921c4 100644
--- a/doc/admin/scoped-tokens.html.textile.liquid
+++ b/doc/admin/scoped-tokens.html.textile.liquid
@@ -45,20 +45,15 @@ A scoped token can be created at the command line:
$ arv api_client_authorization create --api-client-authorization '{"scopes": [["GET", "/arvados/v1/collections"], ["GET", "/arvados/v1/collections/"]]}'
{
- "href":"/api_client_authorizations/zzzzz-gj3su-bizbsw0mx5pju3w",
"kind":"arvados#apiClientAuthorization",
"etag":"9yk144t0v6cvyp0342exoh2vq",
"uuid":"zzzzz-gj3su-bizbsw0mx5pju3w",
"owner_uuid":"zzzzz-tpzed-fr97h9t4m5jffxs",
"created_at":"2020-03-12T20:36:12.517375422Z",
- "modified_by_client_uuid":null,
"modified_by_user_uuid":null,
"modified_at":null,
- "user_id":3,
- "api_client_id":7,
"api_token":"5a74htnoqwkhtfo2upekpfbsg04hv7cy5v4nowf7dtpxer086m",
"created_by_ip_address":null,
- "default_owner_uuid":null,
"expires_at":null,
"last_used_at":null,
"last_used_by_ip_address":null,
diff --git a/doc/admin/token-expiration-policy.html.textile.liquid b/doc/admin/token-expiration-policy.html.textile.liquid
index 5efbccbc19..f78c2256f7 100644
--- a/doc/admin/token-expiration-policy.html.textile.liquid
+++ b/doc/admin/token-expiration-policy.html.textile.liquid
@@ -56,6 +56,8 @@ Clusters:
This is independent of @Workbench.IdleTimeout@. Even if Workbench auto-logout is disabled, this option will ensure that the user is always required to log in again after the configured amount of time.
+The default value of @Login.TokenLifetime@ is zero, meaning login tokens do not expire (unless @API.MaxTokenLifetime@ is set).
+
h2. Untrusted login tokens
@@ -63,13 +65,11 @@ Clusters:
zzzzz:
...
Login:
- TrustLoginTokens: false
+ IssueTrustedTokens: false
...
-When `TrustLoginTokens` is `false`, tokens issued through login will be "untrusted" by default. Untrusted tokens cannot be used to list other tokens issued to the user, and cannot be used to grant new tokens. This stops an attacker from leveraging a leaked token to aquire other tokens, but also interferes with some Workbench features that create new tokens on behalf of the user.
-
-The default value @Login.TokenLifetime@ is zero, meaning login tokens do not expire (unless @API.MaxTokenLifetime@ is set).
+When @IssueTrustedTokens@ is @false@, tokens are "untrusted" and cannot be used to list other tokens issued to the same user, nor to grant new tokens. This prevents an attacker from leveraging a leaked token to acquire other tokens, but also interferes with some Workbench features that create new tokens on behalf of the user.
h2. Automatic expiration of all tokens
@@ -98,9 +98,9 @@ h2. Choosing a policy
@Workbench.IdleTimeout@ only affects browser behavior. It is strongly recommended that automatic browser logout be used together with @Login.TokenLifetime@, which is enforced on API side.
-@TrustLoginTokens: true@ (default value) is less restrictive. Be aware that an unrestricted token can be "refreshed" to gain access for an indefinite period. This means, during the window that the token is valid, the user is permitted to create a new token, which will have a new expiration further in the future (of course, once the token has expired, this is no longer possible). Unrestricted tokens are required for some Workbench features, as well as ease of use in other contexts, such as the Arvados command line. This option is recommended if many users will interact with the system through the command line.
+@IssueTrustedTokens: true@ (default value) is less restrictive. Be aware that an unrestricted token can be "refreshed" to gain access for an indefinite period. This means, during the window that the token is valid, the user is permitted to create a new token, which will have a new expiration further in the future (of course, once the token has expired, this is no longer possible). Unrestricted tokens are required for some Workbench features, as well as ease of use in other contexts, such as the Arvados command line. This option is recommended if many users will interact with the system through the command line.
-@TrustLoginTokens: false@ is more restrictive. A token obtained by logging into Workbench cannot be "refreshed" to gain access for an indefinite period. However, it interferes with some Workbench features, as well as ease of use in other contexts, such as the Arvados command line. This option is recommended only if most users will only ever interact with the system through Workbench or WebShell. For users or service accounts that need to tokens with fewer restrictions, the admin can "create a token at the command line":user-management-cli.html#create-token using the @SystemRootToken@.
+@IssueTrustedTokens: false@ is more restrictive. A token obtained by logging into Workbench cannot be "refreshed" to gain access for an indefinite period. However, it interferes with some Workbench features, as well as ease of use in other contexts, such as the Arvados command line. This option is recommended only if most users will only ever interact with the system through Workbench or WebShell. With this configuration, it is still possible to "create a token at the command line":user-management-cli.html#create-token using the @SystemRootToken@.
In every case, admin users may always create tokens with expiration dates far in the future.
diff --git a/doc/admin/upgrade-crunch2.html.textile.liquid b/doc/admin/upgrade-crunch2.html.textile.liquid
deleted file mode 100644
index 98baf3ba6a..0000000000
--- a/doc/admin/upgrade-crunch2.html.textile.liquid
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: default
-navsection: admin
-title: Upgrading to Containers API
-...
-
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-The "containers" API is the recommended way to submit compute work to Arvados. It supersedes the "jobs" API, which is end-of-life in Arvados 2.0.
-
-h2. Benefits over the "jobs" API
-
-* Simpler and more robust execution with fewer points of failure
-* Automatic retry for containers that fail to run to completion due to infrastructure errors
-* Scales to thousands of simultaneous containers
-* Able to support alternate schedulers/dispatchers in addition to slurm
-* Improved logging, different streams logs/metrics stored in different files in the log collection
-* Records more upfront detail about the compute node, and additional metrics (such as available disk space over the course of the container run)
-* Better behavior when deciding whether to reuse past work -- pick the oldest container that matches the criteria
-* Can reuse running containers between workflows, cancelling a workflow will not cancel containers that are shared with other workflows
-* Supports setting time-to-live on intermediate output collections for automatic cleanup
-* Supports "secret" inputs, suitable for passwords or access tokens, which are hidden from the API responses and logs, and forgotten after use
-* Does not require "git" for dispatching work
-
-h2. Differences from the "jobs" API
-
-Containers cannot reuse jobs (but can reuse other containers)
-
-Uses the service "crunch-dispatch-slurm":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html instead of @crunch-dispatch.rb@
-
-Non-CWL Arvados "pipeline templates" are not supported with containers. Pipeline templates should be rewritten in CWL and registered as "Workflows".
-
-The containers APIs is incompatible with the jobs API, code which integrates with the "jobs" API must be updated to work with containers
-
-Containers have network access disabled by default
-
-The keep mount only exposes collections which are explicitly listed as inputs
-
-h2. Migrating to "containers" API
-
-Run your workflows using @arvados-cwl-runner --api=containers@ (only necessary if both the jobs and containers APIs are enabled, if the jobs API is disabled, it will use the containers API automatically)
-
-Register your workflows so they can be run from workbench using @arvados-cwl-runner --api=containers --create-workflow@
-
-Read "Migrating running CWL on jobs API to containers API":{{site.baseurl}}/user/cwl/cwl-style.html#migrate
-
-Use @arv:APIRequirement: {}@ in the @requirements@ section of your CWL file to enable network access for the container (see "Arvados CWL Extensions":{{site.baseurl}}/user/cwl/cwl-extensions.html)
-
-For examples on how to manage container requests with the Python SDK, see "Python cookbook":{{site.baseurl}}/sdk/python/cookbook.html
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 64a113b6f8..667bc2997a 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -30,7 +30,175 @@ TODO: extract this information based on git commit messages and generate changel
h2(#main). development main
-"previous: Upgrading to 2.7.1":#v2_7_1
+"previous: Upgrading to 3.1.2":#v3_1_2
+
+
+h3. New GPG key URL for Red Hat, AlmaLinux, and Rocky Linux
+
+As part of adding support for the RHEL 9 family of distributions, we have started using a new signing key for packages. For these distributions, the key corresponding to your distribution is now available at a URL that includes the release version. Before you upgrade, on each system where you have the Arvados package repository installed, edit the file with that repository configuration, usually @/etc/yum.repos.d/arvados.repo@. Find the line that defines @gpgkey@:
+
+
+[arvados]
+…
+gpgkey=https://rpm.arvados.org/RHEL/RPM-GPG-KEY-arvados
+
+
+
+Edit this line to add @$releasever/@ after @RHEL/@, so it looks like this:
+
+
+gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados
+
+
+
+Then save and close the file. The old key URL still works, so this step is not required to upgrade Arvados itself. However, doing it now will help ensure you retain access to the Arvados repositories next time you upgrade your distribution.
+
+h3. @Users.SendUserSetupNotificationEmail@ is disabled by default
+
+If you want to preserve the old default behavior of sending an email to each user when their account has been set up, update your configuration file accordingly.
+
+
+Users:
+ SendUserSetupNotificationEmail: true
+
+
+h3. Admin container shell access is enabled by default
+
+"Container shell access":{{ site.baseurl }}/user/debugging/container-shell-access.html by admin users is now enabled by default to make it easier to diagnose workflow issues on new deployments. If you prefer to leave it disabled, update your configuration file accordingly.
+
+
+Containers:
+ ShellAccess:
+ Admin: false
+
+
+Container shell access for non-admin users is still disabled by default.
+
+h3. Configure ExternalURL, DNS, and TLS for container web services
+
+Arvados now allows external clients to connect to HTTP services running in containers. To enable this feature:
+* Add a @Services.ContainerWebServices.ExternalURL@ entry to @/etc/arvados/config.yml@ with a wildcard URL, e.g., @https://*.containers.ClusterID.example.com/@ (see the example below)
+* Add the wildcard name to the @server_name@ directive in the controller section of your Nginx configuration, e.g., @server_name ClusterID.example.com *.containers.ClusterID.example.com;@
+* Add wildcard DNS records so @*.containers.ClusterID.example.com@ names resolve to the same address(es) as your controller's external URL
+* Update the TLS certificate used by Nginx for @ClusterID.example.com@ so it also validates for @*.containers.ClusterID.example.com@
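+
+For example, the @config.yml@ entry from the first step might look like this (cluster and domain names are placeholders):
+
+
+Services:
+  ContainerWebServices:
+    ExternalURL: "https://*.containers.ClusterID.example.com/"
+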
+
+h2(#v3_1_2). v3.1.2 (2025-05-27)
+
+"previous: Upgrading to 3.1.1":#v3_1_1
+
+There are no changes that require administrator attention in this release.
+
+h2(#v3_1_1). v3.1.1 (2025-04-14)
+
+"previous: Upgrading to 3.1.0":#v3_1_0
+
+h3. Clusters using cloud dispatch should rebuild a compute node image
+
+Arvados 3.1.1 fixes a handful of bugs in installation tools, particularly for deployments on Ubuntu. If you have already successfully upgraded to 3.1.0, the only thing in this release that affects you is a bug fix in the compute node image builder for cloud deployments. If your cluster uses @arvados-dispatch-cloud@, you should "build a new compute node image following our install guide":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html and configure your cluster to use it. You do not need to upgrade any cluster services; there are no changes to them since 3.1.0.
+
+h2(#v3_1_0). v3.1.0 (2025-03-20)
+
+"previous: Upgrading to 3.0.0":#v3_0_0
+
+h3. Rails API server now runs standalone
+
+The Arvados Rails API server now runs from a standalone Passenger server to simplify deployment. Before upgrading, existing deployments should remove the Rails API server from their nginx configuration. e.g., remove the entire @server@ block with @root /var/www/arvados-api/current/public@ from @/etc/nginx/conf.d/arvados-api-and-controller.conf@. If you customized this deployment at all, the "updated install instructions":{{ site.baseurl }}/install/install-api-server.html#railsapi-config explain how to customize the standalone Passenger server. Finally, you'll need to enable the new service by running:
+
+
+# systemctl enable --now arvados-railsapi.service
+
+
+h3. Rails API server needs PowerTools on Red Hat, AlmaLinux, and Rocky Linux
+
+The Arvados Rails API server now needs to be able to link against @libyaml@ development headers. On Red Hat, AlmaLinux, and Rocky Linux, these are provided by the @libyaml-devel@ package in the PowerTools repository. Before you upgrade, make sure you have this repository enabled on the host where you run the Rails API server by running:
+
+
+# dnf config-manager --set-enabled powertools
+
+
+h3. "cuda" runtime constraint is deprecated in favor of "gpu"
+
+Arvados 3.1.0 adds support for containers that use AMD ROCm alongside our existing support for NVIDIA CUDA. As part of this, the @cuda@ runtime constraint has been deprecated and replaced with a more general @gpu@ constraint. The requested type of GPU is named in the @stack@ field of this object. Other fields have been carried over from @cuda@ and work the same way. Refer to the "runtime constraints reference":{{ site.baseurl }}/api/methods/container_requests.html#runtime_constraints for details.
+
+If client software creates or updates a container request with a @cuda@ runtime constraint, the Arvados API server will automatically translate that to a @gpu@ constraint. This client software should still be updated to specify a @gpu@ runtime constraint, but you can safely upgrade to Arvados 3.1.0 and do these updates opportunistically.
+
+Client software that reads and reports runtime constraints (like Workbench does) must be updated to read the new @gpu@ constraint. The @cuda@ constraint will no longer appear in API responses.
+
+h3. Generalized configuration for GPU compute nodes
+
+As part of adding support for AMD GPUs in Arvados 3.1, the @CUDA@ section of @InstanceType@ definitions is now deprecated in favor of a new @GPU@ section that is generalized for both CUDA and ROCm.
+
+Where previously there would be a @CUDA@ section:
+
+
+ InstanceTypes:
+ gpuInstance:
+ CUDA:
+ DriverVersion: "11.0"
+ HardwareCapability: "9.0"
+ DeviceCount: 1
+
+
+The configuration file should now be updated to use a @GPU@ section:
+
+
+ InstanceTypes:
+ gpuInstance:
+ GPU:
+ Stack: "cuda"
+ DriverVersion: "11.0"
+ HardwareTarget: "9.0"
+ DeviceCount: 1
+ VRAM: 8GiB
+
+
+To minimize disruption, the config loader will continue to accept the deprecated @CUDA@ field and emit a warning. Admins are advised to update the configuration file, as the legacy field will be removed in a future version.
+
+h3. BsubCUDAArguments renamed to BsubGPUArguments
+
+The configuration item @Containers.LSF.BsubCUDAArguments@ has been renamed to @Containers.LSF.BsubGPUArguments@. There is no change in content. To minimize disruption, the config loader will continue to accept the deprecated @BsubCUDAArguments@ field and emit a warning. Admins are advised to update the configuration file, as the legacy field will be removed in a future version.
+
+h2(#v3_0_0). v3.0.0 (2024-11-12)
+
+"previous: Upgrading to 2.7.4":#v2_7_4
+
+h3. Debian 10 and Ubuntu 18.04 are no longer supported
+
+Arvados 3.0 no longer supports some of the older distributions supported by Arvados 2.7: Debian 10 "buster" and Ubuntu 18.04 "bionic." If you are running Arvados on any of these distributions, you must first upgrade to a supported distribution before you upgrade to Arvados 3.0.
+
+Arvados 2.7 supports Debian 11 "bullseye" and Ubuntu 20.04 "focal." You can upgrade your Arvados cluster to one of those releases, then proceed to upgrade Arvados to 3.0.
+
+The list of distributions supported by Arvados 3.0 can be found on "Planning and prerequisites.":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux
+
+h3. Red Hat 8 package dependency on package streams
+
+The Red Hat 8 package of the Rails API server now depends on the Ruby 3.1 stream, and the various Python packages now depend on the Python 3.9 stream. Plan for these streams to be activated and installed automatically during your upgrade.
+
+h3. RVM is no longer supported
+
+Some Arvados packages, most notably the Rails API server package @arvados-api-server@, would check whether RVM is installed on the system, and invoke Ruby commands through it if so. Arvados 3.0 no longer specially supports RVM. Instead, Arvados 3.0 supports all the different versions of Ruby that are packaged in our supported distributions, mitigating the need for separate Ruby installations. Package scripts run plain @ruby@ and @gem@ commands and expect them to come from a supported version.
+
+If you have a custom install that requires a different version of Ruby than the one included with your distribution, you must configure your system to ensure package scripts find that version of @ruby@ before any others. For example, you might do this on Debian-based distributions by customizing apt's @DPkg::Path@ setting.
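+
+A hypothetical sketch of that approach, assuming your preferred Ruby lives under @/opt/ruby/bin@ (the file name and paths are illustrative):
+
+
+# cat /etc/apt/apt.conf.d/99local-ruby
+DPkg::Path "/opt/ruby/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin";
+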
+
+h3. Keep-web requires PostgreSQL database access
+
+The keep-web service now connects directly to the PostgreSQL database. Make sure these connections are supported by your network firewall rules, PostgreSQL connection settings, and PostgreSQL server configuration (in @pg_hba.conf@) as shown in the "PostgreSQL install instructions":{{site.baseurl}}/install/install-postgresql.html.
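+
+As an illustrative sketch only (the address, database, and user names are assumptions; consult the linked instructions for your real values), a @pg_hba.conf@ entry permitting a connection from a keep-web host might look like:
+
+
+host    arvados    arvados    192.0.2.10/32    md5
+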
+
+h3. Slow migration on upgrade
+
+This upgrade includes a database schema update to rebuild full text search indexes to remove UUID and portable data hash column data. This will provide better search results to users and take less space on the database, but plan for the @arvados-api-server@ package upgrade to take longer than usual.
+
+h3. WebDAV service uses @/var/cache@ for file content
+
+When running as root, @keep-web@ now stores copies of recently accessed data blocks in @/var/cache/arvados/keep@ instead of in memory. This directory is created automatically. The default cache size is 10% of the filesystem size. Use the new @Collections.WebDAVCache.DiskCacheSize@ config to specify a different percentage or an absolute size. If @keep-web@ is not running as root, it will store the cache in @$HOME/.cache/arvados/keep@.
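+
+For example, an illustrative @config.yml@ excerpt reserving 20% of the filesystem for the cache:
+
+
+Collections:
+  WebDAVCache:
+    DiskCacheSize: 20%
+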
+
+If the previously supported @MaxBlockEntries@ config is present, remove it to avoid warning messages at startup.
+
+h3. Python SDK reorganization of internal classes and modules
+
+We have reorganized the Python SDK to make it clearer which APIs are intended to be public, and make it easier to find documentation for them. As part of this work, some modules that only included internal support code have been moved, most notably @arvados.diskcache@, @arvados.http_to_keep@, and @arvados.timer@.
+
+If you need immediate access to these modules, you can find them under @arvados._internal@, but we do not intend to support them as part of our public SDK API, so they may change or be removed entirely in future versions. If you've written client software that relies on these modules, please "file an issue":https://dev.arvados.org/projects/arvados/issues/new to let us know so we can figure out how best to support you.
h3. Virtual environments inside distribution Python packages have moved
@@ -56,20 +224,102 @@ You must update it to:
#!/usr/lib/python3-arvados-python-client/bin/python
-h3. WebDAV service uses @/var/cache@ for file content
+h3. costanalyzer subcommand replaced by Arvados cluster activity tool
-@keep-web@ now stores copies of recently accessed data blocks in @/var/cache/arvados/keep@ instead of in memory. That directory will be created automatically. The default cache size is 10% of the filesystem size. Use the new @Collections.WebDAVCache.DiskCacheSize@ config to specify a different percentage or an absolute size.
+The functionality of @arvados-client costanalyzer@ has been replaced by a new @arvados-cluster-activity@ tool. More information can be found at "Analyzing workflow cost":{{site.baseurl}}/user/cwl/costanalyzer.html .
-If the previously supported @MaxBlockEntries@ config is present, remove it to avoid warning messages at startup.
+h3. @arv-migrate-docker19@ tool removed
+
+The @arv-migrate-docker19@ tool that updates images from Docker 1.9 to be used with Docker 1.10+ (released February 2016) has been removed. In the unlikely event you still need to "run this migration":https://doc.arvados.org/v2.7/install/migrate-docker19.html, please do so before you upgrade to Arvados 3.0.
+
+h3. Legacy APIs and response fields have been removed
+
+The following APIs have been removed:
+* "api_clients":https://doc.arvados.org/v2.7/api/methods/api_clients.html
+* "humans":https://doc.arvados.org/v2.7/api/methods/humans.html
+* "jobs":https://doc.arvados.org/v2.7/api/methods/jobs.html
+* "job_tasks":https://doc.arvados.org/v2.7/api/methods/job_tasks.html
+* "nodes":https://doc.arvados.org/v2.7/api/methods/nodes.html
+* "pipeline_instances":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html
+* "pipeline_templates":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html
+* "repositories":https://doc.arvados.org/v2.7/api/methods/repositories.html, and "keep_disks":https://doc.arvados.org/v2.7/api/methods/keep_disks.html
+* "specimens":https://doc.arvados.org/v2.7/api/methods/specimens.html
+* "traits":https://doc.arvados.org/v2.7/api/methods/traits.html
+
+The following fields are no longer returned in API responses:
+* @api_client_id@, @user_id@, @default_owner_uuid@ ("api_client_authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html API)
+* @modified_by_client_uuid@ (all APIs)
+
+h3. Configuration entries have been removed or renamed
+
+The following configuration keys have been renamed or removed. Renamed keys will still be loaded if they appear with their old names, but you should update your @/etc/arvados/config.yml@ file to avoid warnings when services start up.
+* @API.LogCreateRequestFraction@ has been removed
+* @Containers.JobsAPI.Enable@ has been removed
+* @Mail.EmailFrom@ has been removed
+* @Mail.IssueReporterEmailFrom@ has been removed
+* @Mail.IssueReporterEmailTo@ has been removed
+* @Mail.MailchimpAPIKey@ has been removed
+* @Mail.MailchimpListID@ has been removed
+* @Mail.SendUserSetupNotificationEmail@ has moved to @Users.SendUserSetupNotificationEmail@
+* @Mail.SupportEmailAddress@ has moved to @Users.SupportEmailAddress@
+
+h3. S3 volume IAMRole configuration entry has been removed
+
+The @Volumes.*.DriverParameters.IAMRole@ configuration entry for S3 volumes has been removed. You should remove it from your @/etc/arvados/config.yml@ file to avoid warnings when services start up. As before, if @AccessKeyID@ and @SecretAccessKey@ are blank, keepstore will retrieve IAM role credentials from instance metadata. Previously, documentation indicated that keepstore would refuse to use the IAM credentials if @IAMRole@ was specified and did not match the instance metadata, but that check has not been working for some time.
+
+h3. Legacy container logging system has been removed
+
+The following configuration keys are no longer supported. Remove them from your @/etc/arvados/config.yml@ file to avoid warnings when services start up.
+* @Containers.Logging.LimitLogBytesPerJob@
+* @Containers.Logging.LogBytesPerEvent@
+* @Containers.Logging.LogPartialLineThrottlePeriod@
+* @Containers.Logging.LogSecondsBetweenEvents@
+* @Containers.Logging.LogThrottleBytes@
+* @Containers.Logging.LogThrottleLines@
+* @Containers.Logging.LogThrottlePeriod@
+* @Containers.Logging.MaxAge@
+* @Containers.Logging.SweepInterval@
+
+Any container logging content remaining in the database from the legacy system will be deleted.
+
+h2(#v2_7_4). v2.7.4 (2024-07-08)
+
+"previous: Upgrading to 2.7.3":#v2_7_3
+
+Starting from 2.7.4, Arvados no longer supports CentOS. CentOS users should migrate to an Arvados-supported version of Red Hat Enterprise Linux (RHEL), Rocky Linux, or AlmaLinux.
+
+There are no other configuration changes requiring administrator attention in this release.
+
+h2(#v2_7_3). v2.7.3 (2024-05-24)
+
+"previous: Upgrading to 2.7.2":#v2_7_2
+
+There are no configuration changes requiring administrator attention in this release.
+
+h2(#v2_7_2). v2.7.2 (2024-04-09)
+
+"previous: Upgrading to 2.7.1":#v2_7_1
h3. Check MaxGatewayTunnels config
If you use the LSF or Slurm dispatcher, ensure the new @API.MaxGatewayTunnels@ config entry is high enough to support the size of your cluster. See "LSF docs":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html#MaxGatewayTunnels or "Slurm docs":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html#MaxGatewayTunnels for details.
-h2(#2_7_1). v2.7.1 (2023-12-12)
+h3. New LSF dispatcher config items MaxRunTimeOverhead and MaxRunTimeDefault
+
+The LSF dispatcher now supports the configuration parameter @Containers.LSF.MaxRunTimeDefault@, which provides a default value of @max_run_time@ for containers that do not specify a time limit (using CWL @ToolTimeLimit@).
+
+It also supports the configuration parameter @Containers.LSF.MaxRunTimeOverhead@: when @scheduling_constraints.max_run_time@ or @MaxRunTimeDefault@ is non-zero, this value is added to account for crunch-run startup/shutdown overhead.
+
+h2(#v2_7_1). v2.7.1 (2023-12-12)
"previous: Upgrading to 2.7.0":#v2_7_0
+h3. Separate configs for MaxConcurrentRequests and MaxConcurrentRailsRequests
+
+The default configuration value @API.MaxConcurrentRequests@ (the number of concurrent requests that will be processed by a single instance of an Arvados service process) is raised from 8 to 64.
+
+A new configuration key @API.MaxConcurrentRailsRequests@ (default 8) limits the number of concurrent requests processed by a RailsAPI service process.
+
h3. Remove Workbench1 packages after upgrading the salt installer
If you installed a previous version of Arvados with the Salt installer, and you upgrade your installer to upgrade the cluster, you should uninstall the @arvados-workbench@ package from the workbench instance afterwards.
@@ -142,7 +392,7 @@ We have introduced a small exception to the previous behavior of "Arvados API to
h3. Deprecated/legacy APIs slated for removal
-The legacy APIs "humans":../api/methods/humans.html, "specimens":../api/methods/specimens.html, "traits":../api/methods/traits.html, "jobs":../api/methods/jobs.html, "job_tasks":../api/methods/job_tasks.html, "pipeline_instances":../api/methods/pipeline_instances.html, "pipeline_templates":../api/methods/pipeline_templates.html, "nodes":../api/methods/nodes.html, "repositories":../api/methods/repositories.html, and "keep_disks":../api/methods/keep_disks.html are deprecated and will be removed in a future major version of Arvados.
+The legacy APIs "humans":https://doc.arvados.org/v2.7/api/methods/humans.html, "specimens":https://doc.arvados.org/v2.7/api/methods/specimens.html, "traits":https://doc.arvados.org/v2.7/api/methods/traits.html, "jobs":https://doc.arvados.org/v2.7/api/methods/jobs.html, "job_tasks":https://doc.arvados.org/v2.7/api/methods/job_tasks.html, "pipeline_instances":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html, "pipeline_templates":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html, "nodes":https://doc.arvados.org/v2.7/api/methods/nodes.html, "repositories":https://doc.arvados.org/v2.7/api/methods/repositories.html, and "keep_disks":https://doc.arvados.org/v2.7/api/methods/keep_disks.html are deprecated and will be removed in a future major version of Arvados.
In addition, the @default_owner_uuid@, @api_client_id@, and @user_id@ fields of "api_client_authorizations":../api/methods/api_client_authorizations.html are deprecated and will be removed from @api_client_authorization@ responses in a future major version of Arvados. This should not affect clients as @default_owner_uuid@ was never implemented, and @api_client_id@ and @user_id@ returned internal ids that were not meaningful or usable with any other API call.
@@ -798,7 +1048,7 @@ This release includes several database migrations, which will be executed automa
The @arvados-controller@ component now requires the /etc/arvados/config.yml file to be present.
-Support for the deprecated "jobs" API is broken in this release. Users who rely on it should not upgrade. This will be fixed in an upcoming 1.3.1 patch release, however users are "encouraged to migrate":upgrade-crunch2.html as support for the "jobs" API will be dropped in an upcoming release. Users who are already using the "containers" API are not affected.
+Support for the deprecated "jobs" API is broken in this release. Users who rely on it should not upgrade. This will be fixed in an upcoming 1.3.1 patch release, however users are encouraged to migrate as support for the "jobs" API will be dropped in an upcoming release. Users who are already using the "containers" API are not affected.
h2(#v1_2_1). v1.2.1 (2018-11-26)
diff --git a/doc/admin/user-activity.html.textile.liquid b/doc/admin/user-activity.html.textile.liquid
index 01715ff6e3..0044f2c61d 100644
--- a/doc/admin/user-activity.html.textile.liquid
+++ b/doc/admin/user-activity.html.textile.liquid
@@ -35,7 +35,7 @@ Note: depends on the "Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-pyth
h2. Usage
-Set ARVADOS_API_HOST to the api server of the cluster for which the report should be generated. ARVADOS_API_TOKEN needs to be a "v2 token":../admin/scoped-tokens.html for an admin user, or a superuser token (e.g. generated with @script/create_superuser_token.rb@). Please note that in a login cluster federation, the token needs to be issued by the login cluster, but the report should be generated against the API server of the cluster for which it is desired. In other words, ARVADOS_API_HOST would point at the satellite cluster for which the report is desired, but ARVADOS_API_TOKEN would be a token that belongs to a login cluster user.
+Set ARVADOS_API_HOST to the api server of the cluster for which the report should be generated. ARVADOS_API_TOKEN needs to be a "v2 token":../admin/scoped-tokens.html for an admin user, or the system root token. Please note that in a login cluster federation, the token needs to be issued by the login cluster, but the report should be generated against the API server of the cluster for which it is desired. In other words, ARVADOS_API_HOST would point at the satellite cluster for which the report is desired, but ARVADOS_API_TOKEN would be a token that belongs to a login cluster user, or the login cluster's system root token.
Run the tool with the option @--days@ giving the number of days to report on. It will request activity logs from the API and generate a summary report on standard output.
diff --git a/doc/admin/user-management-cli.html.textile.liquid b/doc/admin/user-management-cli.html.textile.liquid
index c2d4743ddf..46ccc38cb3 100644
--- a/doc/admin/user-management-cli.html.textile.liquid
+++ b/doc/admin/user-management-cli.html.textile.liquid
@@ -56,20 +56,15 @@ As an admin, you can create tokens for other users.
$ arv api_client_authorization create --api-client-authorization '{"owner_uuid": "zzzzz-tpzed-fr97h9t4m5jffxs"}'
{
- "href":"/api_client_authorizations/zzzzz-gj3su-yyyyyyyyyyyyyyy",
"kind":"arvados#apiClientAuthorization",
"etag":"9yk144t0v6cvyp0342exoh2vq",
"uuid":"zzzzz-gj3su-yyyyyyyyyyyyyyy",
"owner_uuid":"zzzzz-tpzed-fr97h9t4m5jffxs",
"created_at":"2020-03-12T20:36:12.517375422Z",
- "modified_by_client_uuid":null,
"modified_by_user_uuid":null,
"modified_at":null,
- "user_id":3,
- "api_client_id":7,
"api_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"created_by_ip_address":null,
- "default_owner_uuid":null,
"expires_at":null,
"last_used_at":null,
"last_used_by_ip_address":null,
@@ -144,23 +139,3 @@ read -rd $'\000' newlink <
-
-h3. Git repository
-
-Give @$user_uuid@ permission to commit to @$repo_uuid@ as @$repo_username@
-
-
-user_uuid=xxxxxxxchangeme
-repo_uuid=xxxxxxxchangeme
-repo_username=xxxxxxxchangeme
-
-read -rd $'\000' newlink <
diff --git a/doc/admin/user-management.html.textile.liquid b/doc/admin/user-management.html.textile.liquid
index 7d30ee88d1..994081901c 100644
--- a/doc/admin/user-management.html.textile.liquid
+++ b/doc/admin/user-management.html.textile.liquid
@@ -60,7 +60,6 @@ notextile.
# A new user record is not set up, and not active. An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read (such as publicly available items readable by the "anonymous" user).
# Using Workbench or the "command line":{{site.baseurl}}/admin/user-management-cli.html , the admin invokes @setup@ on the user. The setup method adds the user to the "All users" group.
- If "Users.AutoSetupNewUsers":config.html is true, this happens automatically during user creation, so in that case new users start at step (3).
-- If "Users.AutoSetupNewUsersWithRepository":config.html is true, a new git repo is created for the user.
- If "Users.AutoSetupNewUsersWithVmUUID":config.html is set, the user is given login permission to the specified shell node
# User is set up, but still not yet active. The browser presents "user agreements":#user_agreements (if any) and then invokes the user @activate@ method on the user's behalf.
# The user @activate@ method checks that all "user agreements":#user_agreements are signed. If so, or there are no user agreements, the user is activated.
diff --git a/doc/api/crunch-scripts.html.textile.liquid b/doc/api/crunch-scripts.html.textile.liquid
deleted file mode 100644
index a0d244d9bc..0000000000
--- a/doc/api/crunch-scripts.html.textile.liquid
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: Concepts
-title: Crunch scripts
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "container requests.":methods/container_requests.html
-{% include 'notebox_end' %}
-
-h2. Crunch scripts
-
-A crunch script is responsible for completing a single JobTask. In doing so, it will:
-
-* (optionally) read some input from Keep
-* (optionally) store some output in Keep
-* (optionally) create some new JobTasks and add them to the current Job
-* (optionally) update the current JobTask record with the "output" attribute set to a Keep locator or a fragment of a manifest
-* update the current JobTask record with the "success" attribute set to True
-
-A task's context is provided in environment variables.
-
-table(table table-bordered table-condensed).
-|Environment variable|Description|
-|@JOB_UUID@|UUID of the current "Job":methods/jobs.html|
-|@TASK_UUID@|UUID of the current "JobTask":methods/job_tasks.html|
-|@ARVADOS_API_HOST@|Hostname and port number of API server|
-|@ARVADOS_API_TOKEN@|Authentication token to use with API calls made by the current task|
-
-The crunch script typically uses the Python SDK (or another suitable client library / SDK) to connect to the Arvados service and retrieve the rest of the details about the current job and task.
-
-The Python SDK has some shortcuts for common operations.
-
-In general, a crunch script can access information about the current job and task like this:
-
-
-import arvados
-import os
-
-job = arvados.api().jobs().get(uuid=os.environ['JOB_UUID']).execute()
-$sys.stderr.write("script_parameters['foo'] == %s"
- % job['script_parameters']['foo'])
-
-task = arvados.api().job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
-$sys.stderr.write("current task sequence number is %d"
- % task['sequence'])
-
diff --git a/doc/api/dispatch.html.textile.liquid b/doc/api/dispatch.html.textile.liquid
index cfe57640c4..a31bfcfbe7 100644
--- a/doc/api/dispatch.html.textile.liquid
+++ b/doc/api/dispatch.html.textile.liquid
@@ -58,7 +58,7 @@ Example response:
"Price": 0.146,
"Preemptible": false
},
- "scheduling_status": "waiting for new instance to be ready"
+ "scheduling_status": "Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work."
},
...
]
@@ -79,7 +79,7 @@ Example response:
"instance_type": {
...
},
- "scheduling_status": "waiting for new instance to be ready"
+ "scheduling_status": "Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work."
}
h3. Terminate a container
diff --git a/doc/api/keep-webdav.html.textile.liquid b/doc/api/keep-webdav.html.textile.liquid
index e95d523b9d..3704d3a961 100644
--- a/doc/api/keep-webdav.html.textile.liquid
+++ b/doc/api/keep-webdav.html.textile.liquid
@@ -41,6 +41,71 @@ It is possible for a project or a "filter group":methods/groups.html#filter to a
* @/by_id/uuid_of_f/p@ will show the parent project's contents, including @f@.
* @/by_id/uuid_of_f/p/f@ will appear as an empty directory.
+h3(#zip). Downloading ZIP archives
+
+Keep-web can produce an uncompressed ZIP archive of a collection, or a subset of a collection.
+
+To request a ZIP archive:
+* The request must include an @Accept: application/zip@ header _or_ @?accept=application/zip&disposition=attachment@ in the query.
+* The request URI must specify the root directory of a collection, e.g., @/by_id/<collection-id>/@. See "Keep-web URLs":keep-web-urls.html for more examples.
+
+To download a subset of a collection, the request can specify one or more pathnames relative to the collection directory:
+* A @files@ parameter in the query of a @GET@ request, e.g., @https://<collection-id>.collections.example.com/?files=file1&files=file2@,
+* A @files@ parameter in the body of a @POST@ request with a @Content-Type: application/x-www-form-urlencoded@ header, or
+* The value of a @files@ key in a JSON object in the body of a @POST@ request with a @Content-Type: application/json@ header, e.g., @{"files":["file1","file2"]}@.
+
+Keep-web returns an error if one of the specified paths does not exist in the requested collection.
+
+The ZIP archive comment will include a download URL with the collection UUID or portable data hash, e.g., "Downloaded from https://collections.example.com/by_id/zzzzz-4zz18-0pg114rezrbz46u/".
+
+If the request sets an @include_collection_metadata@ parameter, e.g., @https://<collection-id>.collections.example.com/?include_collection_metadata=true@, the ZIP archive will also include a file named @collection.json@ containing the collection's metadata (UUID, name, description, portable data hash, properties, creation time, modification time) and information about the user who last modified it (UUID, full name, username, and email). If the collection is specified by portable data hash rather than name or UUID, @collection.json@ will contain only the portable data hash.
+
+Example @collection.json@ content:
+
+
+{
+ "created_at":"2025-04-28T19:50:49.046969000Z",
+ "description":"Description of test collection\n",
+ "modified_at":"2025-04-28T19:50:49.093166000Z",
+ "modified_by_user":{
+ "email":"example@example.com",
+ "full_name":"Example Name",
+ "username":"example",
+ "uuid":"zzzzz-tpzed-xurymjxw79nv3jz"
+ },
+ "name":"collection name",
+ "portable_data_hash":"6acf043b102afcf04e3be2443e7ea2ba+223",
+ "properties":{
+ "key":"value"
+ },
+ "uuid":"zzzzz-4zz18-0pg114rezrbz46u"
+}
+
+
+The request can also include a @download_filename@ parameter with a desired name for the downloaded zip file. This filename will be included in the @Content-Disposition@ response header. If this parameter is not provided, the filename suggested in the response header will be based on the collection name or portable data hash:
+* @{collection name}.zip@ if downloading an entire collection
+* @{collection name} - {file name}.zip@ if a single file was specified in the request
+* @{collection name} - 3 files.zip@ if a directory or multiple files were specified in the request
+* @{portable data hash}.zip@, @{portable data hash} - {file name}.zip@, etc., if the source collection was specified by portable data hash rather than name or UUID
+
+Example request:
+
+
+GET /by_id/zzzzz-4zz18-0pg114rezrbz46u
+Accept: application/zip
+Content-Type: application/json
+
+{
+ "download_filename": "odd-numbered files and directories.zip",
+ "files": [
+ "file1.txt",
+ "file3.bin",
+ "dir5"
+ ],
+ "include_collection_metadata": true
+}
+
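+As a sketch, the same ZIP request can be made from Python with the third-party @requests@ library. The host, collection UUID, and token below are placeholders:
+
+import requests
+
+resp = requests.post(
+    "https://collections.example.com/by_id/zzzzz-4zz18-0pg114rezrbz46u",
+    headers={
+        "Authorization": "Bearer YOUR_ARVADOS_TOKEN",
+        "Accept": "application/zip",  # request a ZIP archive
+    },
+    # Sent as application/json, per the POST form described above.
+    json={
+        "files": ["file1.txt", "dir5"],
+        "include_collection_metadata": True,
+        "download_filename": "subset.zip",
+    },
+)
+resp.raise_for_status()
+with open("subset.zip", "wb") as f:
+    f.write(resp.content)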
+
h3(#auth). Authentication mechanisms
A token can be provided in an Authorization header as a @Bearer@ token:
diff --git a/doc/api/methods/api_client_authorizations.html.textile.liquid b/doc/api/methods/api_client_authorizations.html.textile.liquid
index 5bfeca8bc6..545988056d 100644
--- a/doc/api/methods/api_client_authorizations.html.textile.liquid
+++ b/doc/api/methods/api_client_authorizations.html.textile.liquid
@@ -60,7 +60,6 @@ Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|api_client_id|integer||query||
|scopes|array||query||
h3(#current). current
diff --git a/doc/api/methods/api_clients.html.textile.liquid b/doc/api/methods/api_clients.html.textile.liquid
deleted file mode 100644
index 3f7abd4113..0000000000
--- a/doc/api/methods/api_clients.html.textile.liquid
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "api_clients"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_clients@
-
-Object type: @ozdt8@
-
-Example UUID: @zzzzz-ozdt8-0123456789abcde@
-
-h2. Resource
-
-The "api_clients" resource determines if web applications that have gone through the browser login flow may create or list API tokens.
-
-Each ApiClient has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|name|string|||
-|url_prefix|string|||
-|is_trusted|boolean|Trusted by users to handle their API tokens (ApiClientAuthorizations).||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new ApiClient.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|api_client|object||query||
-
-h3. delete
-
-Delete an existing ApiClient.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
-
-h3. get
-
-Gets a ApiClient's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
-
-h3. list
-
-List api_clients.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing ApiClient.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
-|api_client|object||query||
diff --git a/doc/api/methods/collections.html.textile.liquid b/doc/api/methods/collections.html.textile.liquid
index 29d28d42a2..d145a69fb7 100644
--- a/doc/api/methods/collections.html.textile.liquid
+++ b/doc/api/methods/collections.html.textile.liquid
@@ -26,10 +26,10 @@ Each collection has, in addition to the "Common resource fields":{{site.baseurl}
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
-|description|text|||
+|description|text|Free text description of the collection. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions||
|properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
|portable_data_hash|string|The MD5 sum of the manifest text stripped of block hints other than the size hint.||
-|manifest_text|text|||
+|manifest_text|text|The manifest describing how to assemble blocks into files, in the "Arvados manifest format":{{site.baseurl}}/architecture/manifest-format.html||
|replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|
|replication_confirmed|number|Replication level most recently confirmed by the storage system. This field is null when a collection is first created, and is reset to null when the manifest_text changes in a way that introduces a new data block. An integer value indicates the replication level of the _least replicated_ data block in the collection.|@2@, null|
|replication_confirmed_at|datetime|When @replication_confirmed@ was confirmed. If @replication_confirmed@ is null, this field is also null.||
@@ -55,6 +55,178 @@ Referenced blocks are protected from garbage collection in Keep.
Data can be shared with other users via the Arvados permission model.
+h3(#trashing). Trashing collections
+
+Collections can be trashed by updating the record and setting the @trash_at@ field, or with the "delete":#delete method. The delete method sets @trash_at@ to "now".
+
+The value of @trash_at@ can be set to a future time, which provides a way to automatically expire collections.
+
+When @trash_at@ is set, @delete_at@ will also be set. Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@. When the @trash_at@ time has passed but @delete_at@ is still in the future, the trashed collection is invisible to most API calls unless the @include_trash@ parameter is true. Collections in the trashed state can be "untrashed":#untrash so long as @delete_at@ has not passed. Collections are also trashed if they are contained in a "trashed group":groups.html#trashing .
+
+Once @delete_at@ is past, the collection and all of its previous versions will be deleted permanently and can no longer be untrashed.
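+
+As a sketch, a collection can be scheduled to expire with the Python SDK by setting @trash_at@ to a future time (the UUID below is a placeholder):
+
+import datetime
+import arvados
+
+api = arvados.api('v1')
+# Trash the collection one week from now; it can be untrashed until delete_at.
+trash_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
+api.collections().update(
+    uuid="zzzzz-4zz18-0123456789abcde",
+    body={"collection": {"trash_at": trash_time.isoformat()}},
+).execute()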
+
+h3(#replace_files). Using "replace_files" to create or update a collection
+
+The @replace_files@ option can be used with the "create":#create and "update":#update APIs to efficiently and atomically copy individual files and directory trees from other collections, copy/rename/delete items within an existing collection, and add new items to a collection.
+
+@replace_files@ keys indicate target paths in the new collection, and values specify sources that should be copied to the target paths.
+* Each target path must be an absolute canonical path beginning with @/@. It must not contain @.@ or @..@ components, consecutive @/@ characters, or a trailing @/@ after the final component.
+* Each source must be one of the following:
+** an empty string (signifying that the target path is to be deleted),
+** @<pdh>/<path>@ where @<pdh>@ is the portable data hash of a collection on the cluster and @<path>@ is a file or directory in that collection,
+** @manifest_text/<path>@ where @<path>@ is an existing file or directory in a collection supplied in the @manifest_text@ attribute in the request, or
+** @current/<path>@ where @<path>@ is an existing file or directory in the collection being updated.
+
+In an @update@ request, sources may reference the current portable data hash of the collection being updated. However, in many cases it is more appropriate to use a @current/<path>@ source instead, to ensure the latest content is used even if the collection has been updated since the PDH was last retrieved.
+
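+As a sketch, the JSON examples below might be submitted with the Python SDK by passing @replace_files@ alongside the @collection@ object in the request body (the UUID is a placeholder):
+
+import arvados
+
+api = arvados.api('v1')
+# Atomically rename foo.txt to bar.txt within an existing collection.
+api.collections().update(
+    uuid="zzzzz-4zz18-0123456789abcde",
+    body={
+        "replace_files": {
+            "/foo.txt": "",
+            "/bar.txt": "current/foo.txt",
+        }
+    },
+).execute()
+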
+h4(#replace_files-delete). Delete a file
+
+Delete @foo.txt@.
+
+
+"replace_files": {
+ "/foo.txt": ""
+}
+
+
+h4(#replace_files-rename). Rename a file
+
+Rename @foo.txt@ to @bar.txt@.
+
+
+"replace_files": {
+ "/foo.txt": "",
+ "/bar.txt": "current/foo.txt"
+}
+
+
+h4(#replace_files-swap). Swap files
+
+Swap contents of files @foo@ and @bar@.
+
+
+"replace_files": {
+ "/foo": "current/bar",
+ "/bar": "current/foo"
+}
+
+
+h4(#replace_files-add). Add a file
+
+
+"replace_files": {
+ "/new_directory/new_file.txt": "manifest_text/new_file.txt"
+},
+"collection": {
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
+}
+
+
+h4(#replace_files-replace). Replace all content with new content
+
+Note this is equivalent to omitting the @replace_files@ argument.
+
+
+"replace_files": {
+ "/": "manifest_text/"
+},
+"collection": {
+ "manifest_text": "./new_directory acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
+}
+
+
+h4(#replace_files-rename-and-replace). Atomic rename and replace
+
+Rename @current_file.txt@ to @old_file.txt@ and replace @current_file.txt@ with new content, all in a single atomic operation.
+
+
+"replace_files": {
+ "/current_file.txt": "manifest_text/new_file.txt",
+ "/old_file.txt": "current/current_file.txt"
+},
+"collection": {
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
+}
+
+
+h4(#replace_files-combine). Combine collections
+
+Delete all current content, then copy content from other collections into new subdirectories.
+
+
+"replace_files": {
+ "/": "",
+ "/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
+ "/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/"
+}
+
+
+h4(#replace_files-extract-subdirectory). Extract a subdirectory
+
+Replace all current content with a copy of a subdirectory from another collection.
+
+
+"replace_files": {
+ "/": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/subdir"
+}
+
+
+h4(#replace_files-usage-restrictions). Usage restrictions
+
+A target path with a non-empty source cannot be the ancestor of another target path in the same request. For example, the following request is invalid:
+
+
+"replace_files": {
+ "/foo": "fa7aeb5140e2848d39b416daeef4ffc5+45/",
+ "/foo/this_will_return_an_error": ""
+}
+
+
+It is an error to supply a non-empty @manifest_text@ that is unused, i.e., the @replace_files@ argument does not contain any values beginning with @"manifest_text/"@. For example, the following request is invalid:
+
+
+"replace_files": {
+ "/foo": "current/bar"
+},
+"collection": {
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
+}
+
+
+Collections on other clusters in a federation cannot be used as sources. Each source must exist on the current cluster and be readable by the current user.
+
+Similarly, if @manifest_text@ is provided, it must only reference data blocks that are stored on the current cluster. This API does not copy data from other clusters in a federation.
+
+h3(#replace_segments). Using "replace_segments" to repack file data
+
+The @replace_segments@ option can be used with the "create":#create or "update":#update API to atomically apply a new file packing, typically with the goal of replacing a number of small blocks with one larger block. The repacking is specified in terms of _block segments_: a block segment is a portion of a stored block that is referenced by a file in a manifest.
+
+@replace_segments@ keys indicate existing block segments in the collection, and values specify replacement segments.
+* Each segment is specified as space-separated tokens: @"locator offset length"@ where @locator@ is a signed block locator and @offset@ and @length@ are decimal-encoded integers specifying a portion of the block that is referenced in the collection.
+* Each replacement block locator must be properly signed (just as if it appeared in a @manifest_text@).
+* Each existing block segment must correspond to an entire contiguous portion of a block referenced by a single file (splitting existing segments is not supported).
+* If a segment to be replaced does not match any existing block segment in the manifest, that segment _and all other @replace_segments@ entries referencing the same replacement block_ will be skipped. Other replacements will still be applied. Replacements that are skipped for this reason do not cause the request to fail. This rule ensures that when concurrent clients compute different repackings and request similar replacements such as @a,b,c,d,e → X@ and @a,b,c,d,e,f → Y@, the resulting manifest references @X@ or @Y@ but not both. Otherwise, the effect could be @a,b,c,d,e → X, f → Y@ where @Y@ is just an inefficient way to reference the same data as @f@.
+
+The @replace_files@ and @manifest_text@ options, if present, are applied before @replace_segments@. This means @replace_segments@ can apply to blocks from @manifest_text@ and/or other collections referenced by @replace_files@.
+
+In the following example, two files were originally saved by writing two small blocks (@c410@ and @693e@). After concatenating the two small blocks and writing a single larger block @ca9c@, the manifest is being updated to reference the larger block.
+
+
+"collection": {
+ "manifest_text": ". c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0:2:file1.txt 2:3:file2.txt\n"
+},
+"replace_segments": {
+ "c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 0 2": "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0 2",
+ "693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0 3": "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 2 3"
+}
+
+
+Resulting manifest:
+
+
+. ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0:2:file1.txt 2:3:file2.txt
+
+
h2. Methods
See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
@@ -63,7 +235,7 @@ Required arguments are displayed in %{background:#ccffcc}green%.
Supports federated @get@ only, which may be called with either a uuid or a portable data hash. When requesting a portable data hash which is not available on the home cluster, the query is forwarded to all the clusters listed in @RemoteClusters@ and returns the first successful result.
-h3. create
+h3(#create). create
Create a new Collection.
@@ -72,11 +244,14 @@ Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|collection|object||query||
-|replace_files|object|Initialize files and directories using content from other collections|query||
+|replace_files|object|Initialize files and directories with new content and/or content from other collections|query||
+|replace_segments|object|Repack the collection by substituting data blocks|query||
-The new collection's content can be initialized by providing a @manifest_text@ key in the provided @collection@ object, or by using the @replace_files@ option (see "replace_files":#replace_files below).
+The new collection's content can be initialized by providing a @manifest_text@ key in the provided @collection@ object, or by "using the @replace_files@ option":#replace_files.
-h3. delete
+An alternative file packing can be applied atomically "using the @replace_segments@ option":#replace_segments.
+
+h3(#delete). delete
Put a Collection in the trash. This sets the @trash_at@ field to @now@ and @delete_at@ field to @now@ + token TTL. A trashed collection is invisible to most API calls unless the @include_trash@ parameter is true.
@@ -139,7 +314,7 @@ As a workaround, you can search for both the directory path and file name separa
filters: [["file_names", "ilike", "%dir1/dir2/dir3%"], ["file_names", "ilike", "%sample1234.fastq%"]]
-h3. update
+h3(#update). update
Update attributes of an existing Collection.
@@ -149,11 +324,14 @@ table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
|collection|object||query||
-|replace_files|object|Delete and replace files and directories using content from other collections|query||
+|replace_files|object|Add, delete, and replace files and directories with new content and/or content from other collections|query||
+|replace_segments|object|Repack the collection by substituting data blocks|query||
+
+The collection's existing content can be replaced entirely by providing a @manifest_text@ key in the provided @collection@ object, or updated in place by "using the @replace_files@ option":#replace_files.
-The collection's content can be updated by providing a @manifest_text@ key in the provided @collection@ object, or by using the @replace_files@ option (see "replace_files":#replace_files below).
+An alternative file packing can be applied atomically "using the @replace_segments@ option":#replace_segments.
-h3. untrash
+h3(#untrash). untrash
Remove a Collection from the trash. This sets the @trash_at@ and @delete_at@ fields to @null@.
@@ -196,56 +374,3 @@ Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Collection to get usage.|path||
-
-h2(#replace_files). Using "replace_files" to create/update collections
-
-The @replace_files@ option can be used with the @create@ and @update@ APIs to efficiently copy individual files and directory trees from other collections, and copy/rename/delete items within an existing collection, without transferring any file data.
-
-@replace_files@ keys indicate target paths in the new collection, and values specify sources that should be copied to the target paths.
-* Each target path must be an absolute canonical path beginning with @/@. It must not contain @.@ or @..@ components, consecutive @/@ characters, or a trailing @/@ after the final component.
-* Each source must be either an empty string (signifying that the target path is to be deleted), or @PDH/path@ where @PDH@ is the portable data hash of a collection on the cluster and @/path@ is a file or directory in that collection.
-* In an @update@ request, sources may reference the current portable data hash of the collection being updated.
-
-Example: delete @foo.txt@ from a collection
-
-
-"replace_files": {
- "/foo.txt": ""
-}
-
-
-Example: rename @foo.txt@ to @bar.txt@ in a collection with portable data hash @fa7aeb5140e2848d39b416daeef4ffc5+45@
-
-
-"replace_files": {
- "/foo.txt": "",
- "/bar.txt": "fa7aeb5140e2848d39b416daeef4ffc5+45/foo.txt"
-}
-
-
-Example: delete current contents, then add content from multiple collections
-
-
-"replace_files": {
- "/": "",
- "/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
- "/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/"
-}
-
-
-Example: replace entire collection with a copy of a subdirectory from another collection
-
-
-"replace_files": {
- "/": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/subdir"
-}
-
-
-A target path with a non-empty source cannot be the ancestor of another target path in the same request. For example, the following request is invalid:
-
-
-"replace_files": {
- "/foo": "fa7aeb5140e2848d39b416daeef4ffc5+45/",
- "/foo/this_will_return_an_error": ""
-}
-
diff --git a/doc/api/methods/computed_permissions.html.textile.liquid b/doc/api/methods/computed_permissions.html.textile.liquid
new file mode 100644
index 0000000000..b18a8c4241
--- /dev/null
+++ b/doc/api/methods/computed_permissions.html.textile.liquid
@@ -0,0 +1,43 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "computed_permissions"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/computed_permissions@
+
+h2. Resource
+
+Computed permissions are entries from the internal cache of the highest permission level each user has on each permission target.
+
+Each entry has the following attributes:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|user_uuid|string|An individual user.|
+|target_uuid|string|An object (role group, project group, collection, etc.) on which the user has implicit or explicit permission.|
+|perm_level|string|@can_read@, @can_write@, or @can_manage@|
+
+There is only one row for a given (@user_uuid@, @target_uuid@) pair.
+
+Computed permissions cannot be created or updated directly. To change permissions, use the "groups":groups.html and "links":links.html APIs as described in the "permission model":../permission-model.html.
+
+h2. Method
+
+h3. list
+
+@GET /arvados/v1/computed_permissions@
+
+List computed permissions.
+
+The computed permissions API accepts the arguments described in the "common resource list method":{{site.baseurl}}/api/methods.html#index with the following exceptions:
+* It is an error to supply a non-zero @offset@ argument.
+* The default value for @order@ is @["user_uuid", "target_uuid"]@.
+* The default value for @count@ is @"none"@ and no other values are accepted.
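+
+As a sketch, one user's computed permissions might be listed with the Python SDK, assuming a client generated from a discovery document new enough to include this endpoint (the UUID is a placeholder):
+
+import arvados
+
+api = arvados.api('v1')
+page = api.computed_permissions().list(
+    filters=[["user_uuid", "=", "zzzzz-tpzed-0123456789abcde"]],
+    limit=100,
+).execute()
+for perm in page["items"]:
+    print(perm["target_uuid"], perm["perm_level"])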
diff --git a/doc/api/methods/container_requests.html.textile.liquid b/doc/api/methods/container_requests.html.textile.liquid
index 1c269fb3e6..21960eaa22 100644
--- a/doc/api/methods/container_requests.html.textile.liquid
+++ b/doc/api/methods/container_requests.html.textile.liquid
@@ -28,7 +28,7 @@ All attributes are optional, unless otherwise marked as required.
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Notes|
|name|string|The name of the container_request.||
-|description|string|The description of the container_request.||
+|description|string|The description of the container_request. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions ||
|properties|hash|User-defined metadata that does not affect how the container is run. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
|state|string|The allowed states are "Uncommitted", "Committed", and "Final".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the "Final" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).|
|requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container, if the parent container is cancelled, this container_request will be cancelled as well.|
@@ -49,21 +49,25 @@ table(table table-bordered table-condensed).
|cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Required.|
|command|array of strings|Command to execute in the container.|Required. e.g., @["echo","hello"]@|
|output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount. See "Pre-populate output using Mount points":#pre-populate-output for details regarding optional output pre-population using mount points and "Symlinks in output":#symlinks-in-output for additional details.|Required.|
+|output_glob|array of strings|Glob patterns determining which files (of those present in the output directory when the container finishes) will be included in the output collection. If multiple patterns are given, files that match any pattern are included. If null or empty, all files will be included.|e.g., @["**/*.vcf", "**/*.vcf.gz"]@
+See "Glob patterns":#glob_patterns for more details.|
|output_name|string|Desired name for the output collection. If null or empty, a name will be assigned automatically.||
|output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.||
-|priority|integer|Range 0-1000. Indicate scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed". See "below for more details":#priority .|
+|priority|integer|Range 0-1000. Indicate scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed". See "priority below for more details.":#priority |
|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
|use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true|
|log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet started running.
To retrieve logs in real time while the container is running, use the log API (see below).|
|output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.|
-|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.|
+|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.|This attribute is not implemented yet. The value should always be null.|
|runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc. |Not returned in API responses. Reset to null when state is "Complete" or "Cancelled".|
|runtime_user_uuid|string|The user permission that will be granted to this container.||
|runtime_auth_scopes|array of string|The scopes associated with the auth token used to run this container.||
|output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container request|default is ["default"]|
|output_properties|hash|User metadata properties to set on the output collection. The output collection will also have default properties "type" ("intermediate" or "output") and "container_request" (the uuid of container request that produced the collection).|
|cumulative_cost|number|Estimated cost of the cloud VMs used to satisfy the request, including retried attempts and completed subrequests, but not including reused containers.|0 if container was reused or VM price information was not available.|
+|service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job. Incompatible with @use_existing@.||
+|published_ports|hash|Web service ports that are published by this container. See "published ports":#published_ports below.||
h2(#lifecycle). Container request lifecycle
@@ -138,6 +142,10 @@ h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}
+h2(#glob_patterns). {% include 'container_glob_patterns' %}
+
+h2(#published_ports). {% include 'container_published_ports' %}
+
h2(#container_reuse). Container reuse
When a container request is "Committed", the system will try to find and reuse an existing Container with the same command, cwd, environment, output_path, container_image, mounts, secret_mounts, runtime_constraints, runtime_user_uuid, and runtime_auth_scopes being requested.
diff --git a/doc/api/methods/containers.html.textile.liquid b/doc/api/methods/containers.html.textile.liquid
index 1d2fed768c..e09864694a 100644
--- a/doc/api/methods/containers.html.textile.liquid
+++ b/doc/api/methods/containers.html.textile.liquid
@@ -30,9 +30,10 @@ table(table table-bordered table-condensed).
|finished_at|datetime|When this container finished.|Null if container has not yet finished.|
|log|string|Portable data hash of a collection containing the log messages produced when executing the container.|Null if container has not yet started. The Crunch system will periodically update this field for a running container.|
|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|Must be equal to a ContainerRequest's environment in order to satisfy the ContainerRequest.|
-|cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest|
+|cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest.|
|command|array of strings|Command to execute.| Must be equal to a ContainerRequest's command in order to satisfy the ContainerRequest.|
|output_path|string|Path to a directory or file inside the container that should be preserved as this container's output when it finishes.|Must be equal to a ContainerRequest's output_path in order to satisfy the ContainerRequest.|
+|output_glob|array of strings|Glob patterns determining which files will be included in the output collection. See corresponding attribute in the "container_requests resource":container_requests.html.|Must be equal to a ContainerRequest's output_glob in order to satisfy the ContainerRequest. See "Glob patterns":#glob_patterns for more details.|
|mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|See "Mount types":#mount_types for more details.|
|secret_mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|Not returned in API responses. Reset to empty when state is "Complete" or "Cancelled".|
|runtime_constraints|hash|Compute resources, and access to the outside world, that are / were available to the container.
@@ -63,6 +64,8 @@ Generally this will contain additional keys that are not present in any correspo
|output_properties|hash|User metadata properties to set on the output collection.|
|cost|number|Estimated cost of the cloud VM used to run the container.|0 if not available.|
|subrequests_cost|number|Total estimated cumulative cost of container requests submitted by this container.|0 if not available.|
+|service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job. Incompatible with @use_existing@.||
+|published_ports|hash|Web service ports that are published by this container. See "published ports":#published_ports below.||
h2(#container_states). Container states
@@ -97,6 +100,10 @@ table(table table-bordered table-condensed).
h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}
+h2(#glob_patterns). {% include 'container_glob_patterns' %}
+
+h2(#published_ports). {% include 'container_published_ports' %}
+
h2. Methods
See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
diff --git a/doc/api/methods/credentials.html.textile.liquid b/doc/api/methods/credentials.html.textile.liquid
new file mode 100644
index 0000000000..5df054d734
--- /dev/null
+++ b/doc/api/methods/credentials.html.textile.liquid
@@ -0,0 +1,123 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "credentials"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/credentials@
+
+Object type: @oss07@
+
+Example UUID: @zzzzz-oss07-0123456789abcde@
+
+h2. Resource
+
+Stores a credential, such as a username/password or API token, for use by running containers to access an external resource on the user's behalf.
+
+Each Credential offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|name|string|Name for the credential, unique by owner.|
+|description|string|Free text description of this credential.|
+|credential_class|string|The type of credential stored in this record. See below for more information.|
+|scopes|array of string|(optional) One or more specific resources this credential applies to.|
+|external_id|string|The non-secret part of the credential.|
+|secret|string|The secret part of the credential that should be kept hidden where possible.|
+|expires_at|timestamp|Time after which the @secret@ field is no longer valid and can no longer be accessed (and may be scrubbed from the database). If @expires_at@ has passed, any attempt to access the @secret@ endpoint (see below) also returns an error.|
+
+The @secret@ field can be set when the record is created or updated by users with @can_write@ permission; however, the value of @secret@ is not returned by the regular @get@ or @list@ API calls and cannot be used in queries.
+
+The secret part of a credential can be read with the @secret@ API call (see below), using an Arvados token issued to a container running on behalf of a user who has @can_read@ permission on the credential. Calling the @secret@ API with a regular Arvados token (i.e., one not associated with a running container) returns a permission denied error.
+
+This design is intended to minimize accidental exposure of the secret material, but it does not inherently protect the secret from users who have been given @can_read@ access, since code running on those users' behalf must access the secret in order to make use of it.
+
+As of Arvados 3.2, all credentials are owned by the system user and the @name@ field must be unique on a given Arvados instance. Credentials are shared using normal permission links.
+
+h2. Credential classes
+
+The @credential_class@ field is used to identify what kind of credential is stored and how to interpret the other fields of the record.
+
+h3. aws_access_key
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Description|
+|credential_class|String "aws_access_key"|
+|scopes|(optional, not yet implemented in Arvados 3.2) A list of S3 buckets (in the form "s3://bucketname") to which these credentials grant access.|
+|external_id|The value of "aws_access_key_id" from @~/.aws/credentials@|
+|secret|The value of "aws_secret_access_key" from @~/.aws/credentials@|
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Credential.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|credential|object|Credential resource|request body||
+
+h3. delete
+
+Delete an existing Credential.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||
+
+h3. get
+
+Get a credential by UUID. The @secret@ field is not returned in @get@ API calls. To get the value of @secret@, use the @secret@ API call.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||
+
+h3. list
+
+List credentials. The @secret@ field is not returned in @list@ API calls, and cannot be used in queries. To get the value of @secret@, use the @secret@ API call.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing credential. May be used to update the value of @secret@.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||
+|credential|object|Credential resource with the attributes to update.|query||
+
+h3. secret
+
+Get the value of @secret@. Returns a JSON object in the form @{"external_id": "...", "secret": "..."}@.
+
+Only permitted when called with an Arvados token issued to a container running on behalf of a user who has @can_read@ permission to the credential. Calling this API with a regular Arvados token (i.e. one not associated with a running container) will return a permission denied error.
+
+If @expires_at@ has passed, this endpoint will return an error.
+
+Calls to the @secret@ API endpoint are logged as @event_type: secret_access@ in the audit log table.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||
diff --git a/doc/api/methods/groups.html.textile.liquid b/doc/api/methods/groups.html.textile.liquid
index 05d3fb1c7b..6eb1c0f39d 100644
--- a/doc/api/methods/groups.html.textile.liquid
+++ b/doc/api/methods/groups.html.textile.liquid
@@ -28,27 +28,28 @@ table(table table-bordered table-condensed).
|group_class|string|Type of group. @project@ and @filter@ indicate that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects. @role@ is used as part of the "permission system":{{site.baseurl}}/api/permission-model.html. |@"filter"@
@"project"@
@"role"@|
-|description|text|||
+|description|text|Free text description of the group. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions ||
|properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
-|writable_by|array|(Deprecated) List of UUID strings identifying Users and other Groups that have write permission for this Group. Users who are allowed to administer the Group will receive a list of user/group UUIDs that have permission via explicit permission links; permissions via parent/ancestor groups are not taken into account. Other users will receive a partial list including only the Group's owner_uuid and (if applicable) their own user UUID.||
|can_write|boolean|True if the current user has write permission on this group.||
|can_manage|boolean|True if the current user has manage permission on this group.||
-|trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls. May be untrashed.||
+|trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls. May be untrashed as long as @delete_at@ is in the future.||
|delete_at|datetime|If @delete_at@ is non-null and in the past, the group and all objects directly or indirectly owned by the group may be permanently deleted.||
|is_trashed|boolean|True if @trash_at@ is in the past, false if not.||
|frozen_by_uuid|string|For a frozen project, indicates the user who froze the project; null in all other cases. When a project is frozen, no further changes can be made to the project or its contents, even by admins. Attempting to add new items or modify, rename, move, trash, or delete the project or its contents, including any subprojects, will return an error.||
-h3(#frozen). Frozen projects
+h2. Group types and states
-A user with @manage@ permission can set the @frozen_by_uuid@ attribute of a @project@ group to their own user UUID. Once this is done, no further changes can be made to the project or its contents, including subprojects.
+h3(#project). Project groups
-The @frozen_by_uuid@ attribute can be cleared by an admin user. It can also be cleared by a user with @manage@ permission, unless the @API.UnfreezeProjectRequiresAdmin@ configuration setting is active.
+Groups with @group_class: project@ are used to organize objects and subprojects through ownership. When "trashed or deleted":#trashing, all items owned by the project (including subprojects, collections, or container requests) as well as permissions (permission links) granted to the project are also trashed or deleted.
-The optional @API.FreezeProjectRequiresDescription@ and @API.FreezeProjectRequiresProperties@ configuration settings can be used to prevent users from freezing projects that have empty @description@ and/or specified @properties@ entries.
+h3(#role). Role groups
+
+Groups with @group_class: role@ are used to grant permissions to users (or other groups) through permission links. Role groups can confer "can_manage" permission but cannot directly own objects. When "trashed or deleted":#trashing, group membership and permission grants (expressed as permission links) are deleted as well.
h3(#filter). Filter groups
-@filter@ groups are virtual groups; they can not own other objects. Filter groups have a special @properties@ field named @filters@, which must be an array of filter conditions. See "list method filters":{{site.baseurl}}/api/methods.html#filters for details on the syntax of valid filters, but keep in mind that the attributes must include the object type (@collections@, @container_requests@, @groups@, @workflows@), separated with a dot from the field to be filtered on.
+Groups with @group_class: filter@ are virtual groups; they cannot own other objects, but instead their contents (as returned by the "contents":#contents API method) are defined by a query. Filter groups have a special @properties@ field named @filters@, which must be an array of filter conditions. See "list method filters":{{site.baseurl}}/api/methods.html#filters for details on the syntax of valid filters, but keep in mind that the attributes must include the object type (@collections@, @container_requests@, @groups@, @workflows@), separated with a dot from the field to be filtered on.
Filters are applied with an implied *and* between them, but each filter only applies to the object type specified. The results are subject to the usual access controls - they are a subset of all objects the user can see. Here is an example:
@@ -93,6 +94,28 @@ The 'is_a' filter operator is of particular interest to limit the @filter@ group
},
+"Trashed or deleting":#trashing a filter group causes the group itself to be hidden or deleted, but has no effect on the items returned in "contents", i.e. the database objects in "contents" are not hidden or deleted and may be accessed by other means.
+
+h3(#trashing). Trashing groups
+
+Groups can be trashed by updating the record and setting the @trash_at@ field, or with the "delete":#delete method. The delete method sets @trash_at@ to "now".
+
+The value of @trash_at@ can be set to a future time in order to schedule automatic trashing of a group.
+
+When @trash_at@ is set, @delete_at@ will also be set. Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@ for projects and filter groups, and @delete_at = trash_at@ for role groups. When the @trash_at@ time has passed but @delete_at@ is in the future, the trashed group is invisible to most API calls unless the @include_trash@ parameter is true. All objects directly or indirectly owned by the group (including subprojects, collections, or container requests) are considered trashed as well. Groups in the trashed state can be "untrashed":#untrash so long as @delete_at@ has not passed.
+
+Once @delete_at@ is past, the group will be deleted permanently and can no longer be untrashed. Different group types have different behavior when deleted, described above.
+
+Note: like other groups, "role" groups may have @trash_at@ set to a date in the future; however, role groups are required to have @delete_at = trash_at@, so the trash time and delete time are the same. This means that once @trash_at@ passes, the role group is deleted immediately. Role groups with @trash_at@ set can only be "untrashed":#untrash before that time arrives.
+
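+A minimal Python SDK sketch of the trash cycle described above, using a hypothetical project UUID:
+
+
+import arvados
+
+api = arvados.api('v1')
+project_uuid = 'zzzzz-j7d0g-0123456789abcde'  # hypothetical project group
+
+# Trash now: the delete method sets trash_at to the current time
+# (delete_at is set automatically, as described above).
+api.groups().delete(uuid=project_uuid).execute()
+
+# Untrash: only valid while delete_at is still in the future.
+api.groups().untrash(uuid=project_uuid).execute()
+
+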
+h3(#frozen). Frozen projects
+
+A user with @manage@ permission can set the @frozen_by_uuid@ attribute of a @project@ group to their own user UUID. Once this is done, no further changes can be made to the project or its contents, including subprojects.
+
+The @frozen_by_uuid@ attribute can be cleared by an admin user. It can also be cleared by a user with @manage@ permission, unless the @API.UnfreezeProjectRequiresAdmin@ configuration setting is active.
+
+The optional @API.FreezeProjectRequiresDescription@ and @API.FreezeProjectRequiresProperties@ configuration settings can be used to prevent users from freezing projects that have empty @description@ and/or empty @properties@ entries.
+
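+A minimal Python SDK sketch of freezing and unfreezing a hypothetical project, assuming the caller has @manage@ permission:
+
+
+import arvados
+
+api = arvados.api('v1')
+project_uuid = 'zzzzz-j7d0g-0123456789abcde'  # hypothetical
+
+# Freeze: set frozen_by_uuid to your own user UUID.
+me = api.users().current().execute()
+api.groups().update(uuid=project_uuid,
+                    body={'group': {'frozen_by_uuid': me['uuid']}}).execute()
+
+# Unfreeze: clear the attribute (clearing may require an admin, depending on
+# the API.UnfreezeProjectRequiresAdmin configuration).
+api.groups().update(uuid=project_uuid,
+                    body={'group': {'frozen_by_uuid': None}}).execute()
+
+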
h2. Methods
See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
@@ -113,7 +136,8 @@ table(table table-bordered table-condensed).
|filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@|
|recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project. Use this to get a list of items that are shared with the user. Uses the logic described under the "shared" endpoint.|query|@true@|
-|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query||
+|include|array|Look up objects referenced by the indicated fields and include them in the response. Only "owner_uuid", "container_uuid" and "collection_uuid" are supported. If "owner_uuid" is given, the parent project or user will be returned. If "container_uuid" is given and container requests are returned in the response, the corresponding container records will also be returned. If "collection_uuid" is given and workflows are returned in the response, the collection records will also be returned. These referenced objects will be returned in the "included" field of the response. For compatibility, a string @"owner_uuid"@ is accepted as equivalent to @["owner_uuid"]@.|query|@"owner_uuid"@
+@["owner_uuid","container_uuid"]@|
|include_trash|boolean (default false)|Include trashed objects.|query|@true@|
|include_old_versions|boolean (default false)|Include past versions of the collections being listed.|query|@true@|
|select|array|Attributes of each object to return in the response. Specify an unqualified name like @uuid@ to select that attribute on all object types, or a qualified name like @collections.name@ to select that attribute on objects of the specified type. By default, all available attributes are returned, except on collections, where @manifest_text@ is not returned and cannot be selected due to an implementation limitation. This limitation may be removed in the future.|query|@["uuid", "collections.name"]@|
@@ -126,6 +150,8 @@ Use filters with the attribute format @- .@ to filter items
When called with @include=owner_uuid@, the @included@ field of the response is populated with users, projects, or other groups that own the objects returned in @items@. This can be used to fetch an object and its parent with a single API call.
+When called with @include=container_uuid@, the @included@ field of the response is populated with the container associated with each container request in the response.
+
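+For example, here is a hedged Python SDK sketch of a contents call using @include@ (the exact parameter encoding may vary by SDK version; the project UUID is hypothetical):
+
+
+import arvados
+
+api = arvados.api('v1')
+resp = api.groups().contents(uuid='zzzzz-j7d0g-0123456789abcde',
+                             include=['owner_uuid', 'container_uuid']).execute()
+items = resp['items']        # the objects themselves
+included = resp['included']  # owners and containers referenced by the items
+
+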
h3. create
@@ -138,9 +164,9 @@ table(table table-bordered table-condensed).
|group|object||query||
|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|
-h3. delete
+h3(#delete). delete
-Put a Group in the trash. This sets the @trash_at@ field to @now@ and @delete_at@ field to @now@ + token TTL. A trashed group is invisible to most API calls unless the @include_trash@ parameter is true. All objects directly or indirectly owned by the Group are considered trashed as well.
+Put a Group in the trash. See "Trashing groups":#trashing for details.
Arguments:
@@ -186,9 +212,9 @@ table(table table-bordered table-condensed).
|group|object||query||
|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|
-h3. untrash
+h3(#untrash). untrash
-Remove a Group from the trash. This sets the @trash_at@ and @delete_at@ fields to @null@.
+Remove a Group from the trash. Only valid when @delete_at@ is in the future. This sets the @trash_at@ and @delete_at@ fields to @null@.
Arguments:
diff --git a/doc/api/methods/humans.html.textile.liquid b/doc/api/methods/humans.html.textile.liquid
deleted file mode 100644
index 1c338217eb..0000000000
--- a/doc/api/methods/humans.html.textile.liquid
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "humans"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and is slated to be removed entirely in a future major release of Arvados. The recommended way to store metadata is with "'properties' field on collections and projects.":../properties.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/humans@
-
-Object type: @7a9it@
-
-Example UUID: @zzzzz-7a9it-0123456789abcde@
-
-h2. Resource
-
-A metadata record that may be used to represent a human subject.
-
-Each Human has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|properties|hash|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new Human.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|human|object||query||
-
-h3. delete
-
-Delete an existing Human.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
-
-h3. get
-
-Gets a Human's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
-
-h3. list
-
-List humans.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing Human.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
-|human|object||query||
diff --git a/doc/api/methods/job_tasks.html.textile.liquid b/doc/api/methods/job_tasks.html.textile.liquid
deleted file mode 100644
index 880fe56219..0000000000
--- a/doc/api/methods/job_tasks.html.textile.liquid
+++ /dev/null
@@ -1,101 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "job_tasks"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "container requests.":container_requests.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/job_tasks@
-
-Object type: @ot0gb@
-
-Example UUID: @zzzzz-ot0gb-0123456789abcde@
-
-h2. Resource
-
-Deprecated.
-
-A job task is a individually scheduled unit of work executed as part of an overall job.
-
-Each JobTask has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|sequence|integer|Execution sequence.
-A step cannot be run until all steps with lower sequence numbers have completed.
-Job steps with the same sequence number can be run in any order.||
-|parameters|hash|||
-|output|text|||
-|progress|float|||
-|success|boolean|Is null if the task has neither completed successfully nor failed permanently.||
-
-The following attributes should not be updated by anyone other than the job manager:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Notes|
-|qsequence|integer|Order of arrival|0-based|
-|job_uuid|string|||
-|created_by_job_task_uuid|string|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new JobTask.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|job_task|object||query||
-
-h3. delete
-
-Delete an existing JobTask.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
-
-h3. get
-
-Gets a JobTask's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
-
-h3. list
-
-List job_tasks.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing JobTask.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
-|job_task|object||query||
diff --git a/doc/api/methods/jobs.html.textile.liquid b/doc/api/methods/jobs.html.textile.liquid
deleted file mode 100644
index 75d7368c8e..0000000000
--- a/doc/api/methods/jobs.html.textile.liquid
+++ /dev/null
@@ -1,290 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "jobs"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "container requests.":container_requests.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/jobs@
-
-Object type: @8i9sb@
-
-Example UUID: @zzzzz-8i9sb-0123456789abcde@
-
-h2. Resource
-
-A job describes a work order to be executed by the Arvados cluster.
-
-Each job has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Notes|
-|script|string|The filename of the job script.|This program will be invoked by Crunch for each job task. It is given as a path to an executable file, relative to the @/crunch_scripts@ directory in the Git tree specified by the _repository_ and _script_version_ attributes.|
-|script_parameters|hash|The input parameters for the job.|Conventionally, one of the parameters is called @"input"@. Typically, some parameter values are collection UUIDs. Ultimately, though, the significance of parameters is left entirely up to the script itself.|
-|repository|string|Git repository name or URL.|Source of the repository where the given script_version is to be found. This can be given as the name of a locally hosted repository, or as a publicly accessible URL starting with @git://@, @http://@, or @https://@.
-Examples:
-@yourusername/yourrepo@
-@https://github.com/arvados/arvados.git@|
-|script_version|string|Git commit|During a **create** transaction, this is the Git branch, tag, or hash supplied by the client. Before the job starts, Arvados updates it to the full 40-character SHA-1 hash of the commit used by the job.
-See "Specifying Git versions":#script_version below for more detail about acceptable ways to specify a commit.|
-|cancelled_by_client_uuid|string|API client ID|Is null if job has not been cancelled|
-|cancelled_by_user_uuid|string|Authenticated user ID|Is null if job has not been cancelled|
-|cancelled_at|datetime|When job was cancelled|Is null if job has not been cancelled|
-|started_at|datetime|When job started running|Is null if job has not [yet] started|
-|finished_at|datetime|When job finished running|Is null if job has not [yet] finished|
-|running|boolean|Whether the job is running||
-|success|boolean|Whether the job indicated successful completion|Is null if job has not finished|
-|is_locked_by_uuid|string|UUID of the user who has locked this job|Is null if job is not locked. The system user locks the job when starting the job, in order to prevent job attributes from being altered.|
-|node_uuids|array|List of UUID strings for node objects that have been assigned to this job||
-|log|string|Collection UUID|Is null if the job has not finished. After the job runs, the given collection contains a text file with log messages provided by the @arv-crunch-job@ task scheduler as well as the standard error streams provided by the task processes.|
-|tasks_summary|hash|Summary of task completion states.|Example: @{"done":0,"running":4,"todo":2,"failed":0}@|
-|output|string|Collection UUID|Is null if the job has not finished.|
-|nondeterministic|boolean|The job is expected to produce different results if run more than once.|If true, this job will not be considered as a candidate for automatic re-use when submitting subsequent identical jobs.|
-|submit_id|string|Unique ID provided by client when job was submitted|Optional. This can be used by a client to make the "jobs.create":{{site.baseurl}}/api/methods/jobs.html#create method idempotent.|
-|priority|string|||
-|arvados_sdk_version|string|Git commit hash that specifies the SDK version to use from the Arvados repository|This is set by searching the Arvados repository for a match for the arvados_sdk_version runtime constraint.|
-|docker_image_locator|string|Portable data hash of the collection that contains the Docker image to use|This is set by searching readable collections for a match for the docker_image runtime constraint.|
-|runtime_constraints|hash|Constraints that must be satisfied by the job/task scheduler in order to run the job.|See below.|
-|components|hash|Name and uuid pairs representing the child work units of this job. The uuids can be of different object types.|Example components hash: @{"name1": "zzzzz-8i9sb-xyz...", "name2": "zzzzz-d1hrv-xyz...",}@|
-
-h3(#script_version). Specifying Git versions
-
-The script_version attribute and arvados_sdk_version runtime constraint are typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts Git versions in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example,
HEAD@{1}
refers to the local reflog, and @origin/main@ typically refers to a remote branch: neither is likely to work as desired if given as a Git version.
-
-h3. Runtime constraints
-
-table(table table-bordered table-condensed).
-|_. Key|_. Type|_. Description|_. Implemented|
-|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository. See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit. If you use this, you must also specify a @docker_image@ constraint (see below). In order to install the Python SDK successfully, Crunch must be able to find and run virtualenv inside the container.|✓|
-|docker_image|string|The Docker image that this Job needs to run. If specified, Crunch will create a Docker container from this image, and run the Job's script inside that. The Keep mount and work directories will be available as volumes inside this container. The image must be uploaded to Arvados using @arv keep docker@. You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id. Alternatively, you may specify the portable data hash of the image Collection.|✓|
-|min_nodes|integer||✓|
-|max_nodes|integer|||
-|min_cores_per_node|integer|Require that each node assigned to this Job have the specified number of CPU cores|✓|
-|min_ram_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of real memory (in MiB)|✓|
-|min_scratch_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of scratch storage available (in MiB)|✓|
-|max_tasks_per_node|integer|Maximum simultaneous tasks on a single node|✓|
-|keep_cache_mb_per_task|integer|Size of file data buffer for per-task Keep directory ($TASK_KEEPMOUNT), in MiB. Default is 256 MiB. Increase this to reduce cache thrashing in situtations such as accessing multiple large (64+ MiB) files at the same time, or accessing different parts of a large file at the same time.|✓|
-|min_ram_per_task|integer|Minimum real memory (KiB) per task||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. cancel
-
-Cancel a job that is queued or running.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
-h3(#create). create
-
-Create a new Job.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|job|object|Job resource|request body||
-|minimum_script_version |string |Git branch, tag, or commit hash specifying the minimum acceptable script version (earliest ancestor) to consider when deciding whether to re-use a past job.[1]|query|@"c3e86c9"@|
-|exclude_script_versions|array of strings|Git commit branches, tags, or hashes to exclude when deciding whether to re-use a past job.|query|@["8f03c71","8f03c71"]@
-@["badtag1","badtag2"]@|
-|filters|array of arrays|Conditions to find Jobs to reuse.|query||
-|find_or_create |boolean |Before creating, look for an existing job that has identical script, script_version, and script_parameters to those in the present job, has nondeterministic=false, and did not fail (it could be queued, running, or completed). If such a job exists, respond with the existing job instead of submitting a new one.|query|@false@|
-
-When a job is submitted to the queue using the **create** method, the @script_version@ attribute is updated to a full 40-character Git commit hash based on the current content of the specified repository. If @script_version@ cannot be resolved, the job submission is rejected.
-
-fn1. See the "note about specifying Git commits":#script_version for more detail.
-
-h4. Specialized filters
-
-Special filter operations are available for specific Job columns.
-
-* @script_version@ @in git@ @REFSPEC@, @arvados_sdk_version@ @in git@ @REFSPEC@
Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ in that list. When creating a job and filtering @script_version@, the search will find commits between @REFSPEC@ and the submitted job's @script_version@; all other searches will find commits between @REFSPEC@ and HEAD. This list may include parallel branches if there is more than one path between @REFSPEC@ and the end commit in the graph. Use @not in@ or @not in git@ filters (below) to blacklist specific commits.
-
-* @script_version@ @not in git@ @REFSPEC@, @arvados_sdk_version@ @not in git@ @REFSPEC@
Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ not in that list.
-
-* @docker_image_locator@ @in docker@ @SEARCH@
@SEARCH@ can be a Docker image hash, a repository name, or a repository name and tag separated by a colon (@:@). The server will find collections that contain a Docker image that match that search criteria, then match jobs with a @docker_image_locator@ in that list.
-
-* @docker_image_locator@ @not in docker@ @SEARCH@
Negate the @in docker@ filter.
-
-h4. Reusing jobs
-
-Because Arvados records the exact version of the script, input parameters, and runtime environment that was used to run the job, if the script is deterministic (meaning that the same code version is guaranteed to produce the same outputs from the same inputs) then it is possible to re-use the results of past jobs, and avoid re-running the computation to save time. Arvados uses the following algorithm to determine if a past job can be re-used:
-
-notextile.
-
-# If @find_or_create@ is false or omitted, create a new job and skip the rest of these steps.
-# If @filters@ are specified, find jobs that match those filters. If any filters are given, there must be at least one filter on the @repository@ attribute and one on the @script@ attribute: otherwise an error is returned.
-# If @filters@ are not specified, find jobs with the same @repository@ and @script@, with a @script_version@ between @minimum_script_version@ and @script_version@ inclusively (excluding @excluded_script_versions@), and a @docker_image_locator@ with the latest Collection that matches the submitted job's @docker_image@ constraint. If the submitted job includes an @arvados_sdk_version@ constraint, jobs must have an @arvados_sdk_version@ between that refspec and HEAD to be found. *This form is deprecated: use filters instead.*
-# If the found jobs include a completed job, and all found completed jobs have consistent output, return one of them. Which specific job is returned is undefined.
-# If the found jobs only include incomplete jobs, return one of them. Which specific job is returned is undefined.
-# If no job has been returned so far, create and return a new job.
-
-
-
-h4. Examples
-
-Run the script "crunch_scripts/hash.py" in the repository "you" using the "main" commit. Arvados should re-use a previous job if the script_version of the previous job is the same as the current "main" commit. This works irrespective of whether the previous job was submitted using the name "main", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
-
-
-{
- "job": {
- "script": "hash.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
- },
- "find_or_create": true
-}
-
-
-Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvados should re-use a previous job if the "script_version" of that job is also "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5".
-
-
-{
- "job": {
- "script": "hash.py",
- "repository": "you/you",
- "script_version": "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
- },
- "find_or_create": true
-}
-
-
-Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "main" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "main" commit.
-
-
-{
- "job": {
- "script": "hash.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
- },
- "minimum_script_version": "earlier_version_tag",
- "exclude_script_versions": ["blacklisted_version_tag"],
- "find_or_create": true
-}
-
-
-The same behavior, using filters:
-
-
-{
- "job": {
- "script": "hash.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
- },
- "filters": [["script", "=", "hash.py"],
- ["repository", "=", "you/you"],
- ["script_version", "in git", "earlier_version_tag"],
- ["script_version", "not in git", "blacklisted_version_tag"]],
- "find_or_create": true
-}
-
-
-Run the script "crunch_scripts/monte-carlo.py" in the repository "you/you" using the current "main" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
-
-
-{
- "job": {
- "script": "monte-carlo.py",
- "repository": "you/you",
- "script_version": "main",
- "nondeterministic": true,
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
- }
-}
-
-
-h3. delete
-
-Delete an existing Job.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
-
-h3. get
-
-Gets a Job's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
-
-h3. list
-
-List jobs.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-See the create method documentation for more information about Job-specific filters.
-
-h3. log_tail_follow
-
-log_tail_follow jobs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-|buffer_size|integer (default 8192)||query||
-
-h3. queue
-
-Get the current job queue.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|filters|array||query||
-
-This method is equivalent to the "list method":#list, except that the results are restricted to queued jobs (i.e., jobs that have not yet been started or cancelled) and order defaults to queue priority.
-
-h3. update
-
-Update attributes of an existing Job.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
-|job|object||query||
diff --git a/doc/api/methods/keep_disks.html.textile.liquid b/doc/api/methods/keep_disks.html.textile.liquid
deleted file mode 100644
index 9a82a3e7ce..0000000000
--- a/doc/api/methods/keep_disks.html.textile.liquid
+++ /dev/null
@@ -1,111 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "keep_disks"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "keep services.":keep_services.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_disks@
-
-Object type: @penuu@
-
-Example UUID: @zzzzz-penuu-0123456789abcde@
-
-h2. Resource
-
-Obsoleted by "keep_services":{{site.baseurl}}/api/methods/keep_services.html
-
-Each KeepDisk has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|ping_secret|string|||
-|node_uuid|string|||
-|filesystem_uuid|string|||
-|bytes_total|integer|||
-|bytes_free|integer|||
-|is_readable|boolean|||
-|is_writable|boolean|||
-|last_read_at|datetime|||
-|last_write_at|datetime|||
-|last_ping_at|datetime|||
-|keep_service_uuid|string|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new KeepDisk.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|keep_disk|object||query||
-
-h3. delete
-
-Delete an existing KeepDisk.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
-
-h3. get
-
-Gets a KeepDisk's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
-
-h3. list
-
-List keep_disks.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. ping
-
-ping keep_disks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|ping_secret|string||query||
-{background:#ccffcc}.|service_port|string||query||
-{background:#ccffcc}.|service_ssl_flag|string||query||
-|filesystem_uuid|string||query||
-|node_uuid|string||query||
-|service_host|string||query||
-|uuid|string||query||
-
-h3. update
-
-Update attributes of an existing KeepDisk.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
-|keep_disk|object||query||
diff --git a/doc/api/methods/links.html.textile.liquid b/doc/api/methods/links.html.textile.liquid
index 53001c088b..4c8ac22996 100644
--- a/doc/api/methods/links.html.textile.liquid
+++ b/doc/api/methods/links.html.textile.liquid
@@ -91,6 +91,33 @@ table(table table-bordered table-condensed).
|→Collection | _tag name_ → _collection uuid_|
|→Job | _tag name_ → _job uuid_|
+h3. published_port
+
+A **published_port** link enables external access to container ports via user-defined domain names.
+
+If the cluster is configured as follows to forward HTTP requests from external clients to container ports:
+
+
+Services:
+ ContainerWebServices:
+ ExternalURL: https://*.containers.zzzzz.example.com/
+
+
+A user can create the following link to route HTTP requests like @https://servicename.containers.zzzzz.example.com/@ to port 12345 in the container running for container request @zzzzz-xvhdp-012340123401234@:
+
+
+{
+ "link_class" "published_port",
+ "head_uuid": "zzzzz-xvhdp-012340123401234",
+ "name": "servicename",
+ "properties": {
+ "port": 12345
+ }
+}
+
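+A minimal Python SDK sketch that creates the same link; the values match the hypothetical example above:
+
+
+import arvados
+
+api = arvados.api('v1')
+api.links().create(body={'link': {
+    'link_class': 'published_port',
+    'head_uuid': 'zzzzz-xvhdp-012340123401234',  # container request UUID
+    'name': 'servicename',                       # becomes the hostname prefix
+    'properties': {'port': 12345},               # container port to expose
+}}).execute()
+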
+
+Refer to the "documentation about published ports":container_requests.html#published_ports for additional information.
+
h2. Methods
See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
diff --git a/doc/api/methods/nodes.html.textile.liquid b/doc/api/methods/nodes.html.textile.liquid
deleted file mode 100644
index b29527ceeb..0000000000
--- a/doc/api/methods/nodes.html.textile.liquid
+++ /dev/null
@@ -1,106 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "nodes"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "cloud dispatcher API.":../dispatch.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/nodes@
-
-Object type: @7ekkf@
-
-Example UUID: @zzzzz-7ekkf-0123456789abcde@
-
-h2. Resource
-
-Node resources list compute nodes on which Crunch may schedule work.
-
-Each Node has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|slot_number|integer|||
-|hostname|string|||
-|domain|string|||
-|ip_address|string|||
-|job_uuid|string|The UUID of the job that this node is assigned to work on. If you do not have permission to read the job, this will be null.||
-|first_ping_at|datetime|||
-|last_ping_at|datetime|||
-|info|hash|Sensitive information about the node (only visible to admin) such as 'ping_secret' and 'ec2_instance_id'. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
-|properties|hash|Public information about the node, such as 'total_cpu_cores', 'total_ram_mb', and 'total_scratch_mb'. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new Node.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|node|object||query||
-
-h3. delete
-
-Delete an existing Node.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
-
-h3. get
-
-Gets a Node's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
-
-h3. list
-
-List nodes.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. ping
-
-Process a ping from a compute node.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|ping_secret|string||query||
-{background:#ccffcc}.|uuid|string||path||
-
-h3. update
-
-Update attributes of an existing Node.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
-|node|object||query||
-
-To remove a node's job assignment, update the node object's @job_uuid@ to null.
diff --git a/doc/api/methods/pipeline_instances.html.textile.liquid b/doc/api/methods/pipeline_instances.html.textile.liquid
deleted file mode 100644
index e19dfba02a..0000000000
--- a/doc/api/methods/pipeline_instances.html.textile.liquid
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "pipeline_instances"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "container requests.":container_requests.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_instances@
-
-Object type: @d1hrv@
-
-Example UUID: @zzzzz-d1hrv-0123456789abcde@
-
-h2. Resource
-
-Deprecated. A pipeline instance is a collection of jobs managed by @arvados-run-pipeline-instance@.
-
-Each PipelineInstance has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|pipeline_template_uuid|string|The "pipeline template":pipeline_templates.html that this instance was created from.||
-|name|string|||
-|components|hash|||
-|success|boolean|||
-|active|boolean|||
-|properties|Hash|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new PipelineInstance.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|pipeline_instance|object||query||
-
-h3. delete
-
-Delete an existing PipelineInstance.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
-
-h3. get
-
-Gets a PipelineInstance's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
-
-h3. list
-
-List pipeline_instances.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing PipelineInstance.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
-|pipeline_instance|object||query||
diff --git a/doc/api/methods/pipeline_templates.html.textile.liquid b/doc/api/methods/pipeline_templates.html.textile.liquid
deleted file mode 100644
index ddbe8ad389..0000000000
--- a/doc/api/methods/pipeline_templates.html.textile.liquid
+++ /dev/null
@@ -1,228 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "pipeline_templates"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "registered workflows.":workflows.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_templates@
-
-Object type: @p5p6p@
-
-Example UUID: @zzzzz-p5p6p-0123456789abcde@
-
-h2. Resource
-
-Deprecated. A pipeline template is a collection of jobs that can be instantiated as a pipeline_instance.
-
-Each PipelineTemplate has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|name|string|||
-|components|hash|||
-
-The pipeline template consists of "name" and "components".
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type |_. Accepted values |_. Required|_. Description|
-|name |string |any |yes |The human-readable name of the pipeline template.|
-|components |object |JSON object containing job submission objects|yes |The component jobs that make up the pipeline, with the component name as the key. |
-
-h3. Components
-
-The components field of the pipeline template is a JSON object which describes the individual steps that make up the pipeline. Each component is an Arvados job submission. "Parameters for job submissions are described on the job method page.":{{site.baseurl}}/api/methods/jobs.html#create In addition, a component can have the following parameters:
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type |_. Accepted values |_. Required|_. Description|
-|output_name |string or boolean|string or false |no |If a string is provided, use this name for the output collection of this component. If the value is false, do not create a permanent output collection (an temporary intermediate collection will still be created). If not provided, a default name will be assigned to the output.|
-
-h3. Script parameters
-
-When used in a pipeline, each parameter in the 'script_parameters' attribute of a component job can specify that the input parameter must be supplied by the user, or the input parameter should be linked to the output of another component. To do this, the value of the parameter should be JSON object containing one of the following attributes:
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type |_. Accepted values |_. Description|
-|default |any |any |The default value for this parameter.|
-|required |boolean |true or false |Specifies whether the parameter is required to have a value or not.|
-|dataclass |string |One of 'Collection', 'File' [1], 'number', or 'text' |Data type of this parameter.|
-|search_for |string |any string |Substring to use as a default search string when choosing inputs.|
-|output_of |string |the name of another component in the pipeline |Specifies that the value of this parameter should be set to the 'output' attribute of the job that corresponds to the specified component.|
-|title |string |any string |User friendly title to display when choosing parameter values|
-|description |string |any string |Extended text description for describing expected/valid values for the script parameter|
-|link_name |string |any string |User friendly name to display for the parameter value instead of the actual parameter value|
-
-The 'output_of' parameter is especially important, as this is how components are actually linked together to form a pipeline. Component jobs that depend on the output of other components do not run until the parent job completes and has produced output. If the parent job fails, the entire pipeline fails.
-
-fn1. The 'File' type refers to a specific file within a Keep collection in the form 'collection_hash/filename', for example '887cd41e9c613463eab2f0d885c6dd96+83/bob.txt'.
-
-The 'search_for' parameter is meaningful only when input dataclass of type Collection or File is used. If a value is provided, this will be preloaded into the input data chooser dialog in Workbench. For example, if your input dataclass is a File and you are interested in a certain filename extention, you can preconfigure it in this attribute.
-
-h3. Examples
-
-This is a pipeline named "Filter MD5 hash values" with two components, "do_hash" and "filter". The "input" script parameter of the "do_hash" component is required to be filled in by the user, and the expected data type is "Collection". This also specifies that the "input" script parameter of the "filter" component is the output of "do_hash", so "filter" will not run until "do_hash" completes successfully. When the pipeline runs, past jobs that meet the criteria described above may be substituted for either or both components to avoid redundant computation.
-
-
-{
- "name": "Filter MD5 hash values",
- "components": {
- "do_hash": {
- "script": "hash.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": {
- "required": true,
- "dataclass": "Collection",
- "search_for": ".fastq.gz",
- "title":"Please select a fastq file"
- }
- },
- },
- "filter": {
- "script": "0-filter.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": {
- "output_of": "do_hash"
- }
- },
- }
- }
-}
-
-
-This pipeline consists of three components. The components "thing1" and "thing2" both depend on "cat_in_the_hat". Once the "cat_in_the_hat" job is complete, both "thing1" and "thing2" can run in parallel, because they do not depend on each other.
-
-
-{
- "name": "Wreck the house",
- "components": {
- "cat_in_the_hat": {
- "script": "cat.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": { }
- },
- "thing1": {
- "script": "thing1.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": {
- "output_of": "cat_in_the_hat"
- }
- },
- },
- "thing2": {
- "script": "thing2.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "input": {
- "output_of": "cat_in_the_hat"
- }
- },
- },
- }
-}
-
-
-This pipeline consists of three components. The component "cleanup" depends on "thing1" and "thing2". Both "thing1" and "thing2" are started immediately and can run in parallel, because they do not depend on each other, but "cleanup" cannot begin until both "thing1" and "thing2" have completed.
-
-
-{
- "name": "Clean the house",
- "components": {
- "thing1": {
- "script": "thing1.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": { }
- },
- "thing2": {
- "script": "thing2.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": { }
- },
- "cleanup": {
- "script": "cleanup.py",
- "repository": "you/you",
- "script_version": "main",
- "script_parameters": {
- "mess1": {
- "output_of": "thing1"
- },
- "mess2": {
- "output_of": "thing2"
- }
- }
- }
- }
-}
-
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new PipelineTemplate.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|pipeline_template|object||query||
-
-h3. delete
-
-Delete an existing PipelineTemplate.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
-
-h3. get
-
-Gets a PipelineTemplate's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
-
-h3. list
-
-List pipeline_templates.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing PipelineTemplate.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
-|pipeline_template|object||query||
diff --git a/doc/api/methods/repositories.html.textile.liquid b/doc/api/methods/repositories.html.textile.liquid
deleted file mode 100644
index b2b2cab7d5..0000000000
--- a/doc/api/methods/repositories.html.textile.liquid
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "repositories"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and slated to be removed entirely in a future major release of Arvados. It is replaced by "collection versioning.":collections.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/repositories@
-
-Object type: @s0uqq@
-
-Example UUID: @zzzzz-s0uqq-0123456789abcde@
-
-h2. Resource
-
-The repositories resource lists git repositories managed by Arvados.
-
-Each Repository has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|name|string|The name of the repository on disk. Repository names must begin with a letter and contain only alphanumerics. Unless the repository is owned by the system user, the name must begin with the owner's username, then be separated from the base repository name with @/@. You may not create a repository that is owned by a user without a username.|@username/project1@|
-|clone_urls|array|URLs from which the repository can be cloned. Read-only.|@["git@git.zzzzz.arvadosapi.com:foo/bar.git",
- "https://git.zzzzz.arvadosapi.com/foo/bar.git"]@|
-|fetch_url|string|URL suggested as a fetch-url in git config. Deprecated. Read-only.||
-|push_url|string|URL suggested as a push-url in git config. Deprecated. Read-only.||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new Repository.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|repository|object||query||
-
-h3. delete
-
-Delete an existing Repository.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
-
-h3. get
-
-Gets a Repository's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
-
-h3. get_all_permissions
-
-get_all_permissions repositories
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-
-h3. list
-
-List repositories.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing Repository.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
-|repository|object||query||
diff --git a/doc/api/methods/specimens.html.textile.liquid b/doc/api/methods/specimens.html.textile.liquid
deleted file mode 100644
index 3820eeb242..0000000000
--- a/doc/api/methods/specimens.html.textile.liquid
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "specimens"
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and is slated to be removed entirely in a future major release of Arvados. The recommended way to store metadata is with "'properties' field on collections and projects.":../properties.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/specimens@
-
-Object type: @j58dm@
-
-Example UUID: @zzzzz-j58dm-0123456789abcde@
-
-h2. Resource
-
-A metadata record that may be used to represent a biological specimen.
-
-Each Specimen has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|material|string|||
-|properties|hash|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new Specimen.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|specimen|object||query||
-
-h3. delete
-
-Delete an existing Specimen.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
-
-h3. get
-
-Gets a Specimen's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
-
-h3. list
-
-List specimens.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing Specimen.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
-|specimen|object||query||
diff --git a/doc/api/methods/traits.html.textile.liquid b/doc/api/methods/traits.html.textile.liquid
deleted file mode 100644
index 4e356b9523..0000000000
--- a/doc/api/methods/traits.html.textile.liquid
+++ /dev/null
@@ -1,86 +0,0 @@
----
-layout: default
-navsection: api
-navmenu: API Methods
-title: "traits"
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This is a legacy API. This endpoint is deprecated, disabled by default in new installations, and is slated to be removed entirely in a future major release of Arvados. The recommended way to store metadata is with "'properties' field on collections and projects.":../properties.html
-{% include 'notebox_end' %}
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/traits@
-
-Object type: @q1cn2@
-
-Example UUID: @zzzzz-q1cn2-0123456789abcde@
-
-h2. Resource
-
-A metadata record that may be used to represent a genotype or phenotype trait.
-
-Each Trait has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|name|string|||
-|properties|hash|||
-
-h2. Methods
-
-See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
-
-Required arguments are displayed in %{background:#ccffcc}green%.
-
-h3. create
-
-Create a new Trait.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|trait|object||query||
-
-h3. delete
-
-Delete an existing Trait.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
-
-h3. get
-
-Gets a Trait's metadata by UUID.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
-
-h3. list
-
-List traits.
-
-See "common resource list method.":{{site.baseurl}}/api/methods.html#index
-
-h3. update
-
-Update attributes of an existing Trait.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
-|trait|object||query||
diff --git a/doc/api/methods/users.html.textile.liquid b/doc/api/methods/users.html.textile.liquid
index cd61bfa36b..a6fcd33321 100644
--- a/doc/api/methods/users.html.textile.liquid
+++ b/doc/api/methods/users.html.textile.liquid
@@ -31,9 +31,7 @@ table(table table-bordered table-condensed).
|identity_url|string|||
|is_admin|boolean|||
|prefs|hash|||
-|default_owner_uuid|string|||
|is_active|boolean|||
-|writable_by|array|List of UUID strings identifying Groups and other Users that can modify this User object. This will include the user's owner_uuid and, for administrators and users requesting their own User object, the requesting user's UUID.||
h2. Methods
diff --git a/doc/api/methods/workflows.html.textile.liquid b/doc/api/methods/workflows.html.textile.liquid
index 77ed6f3597..49700eb917 100644
--- a/doc/api/methods/workflows.html.textile.liquid
+++ b/doc/api/methods/workflows.html.textile.liquid
@@ -27,6 +27,26 @@ table(table table-bordered table-condensed).
|name|string|If not specified, will be set to any "name" from the "definition" attribute.||
|description|string|If not specified, will be set to any "description" from the "definition" attribute.||
|definition|string|A "Common Workflow Language" document.|Visit "Common Workflow Language":http://www.commonwl.org/ for details.|
+|collection_uuid|string|If non-null, the UUID of a Collection that stores the workflow definition. See below.||
+
+h2. Workflows linked to Collections
+
+If @collection_uuid@ is set, this significantly changes the behavior of the workflow record.
+
+The linked Collection must have the following properties. They are extracted from, and must be kept synchronized with, the workflow file named by @arv:workflowMain@. They are copied into the workflow collection's @properties@ so that client tools such as Workbench can process them easily.
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|type|string|Value must be 'workflow'|
+|arv:workflowMain|string|The file path within the collection that is the top-level workflow that will be launched.|
+|arv:cwl_inputs|array of object|Array of "workflow input parameters":https://www.commonwl.org/v1.2/Workflow.html#WorkflowInputParameter in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing |
+|arv:cwl_outputs|array of object|Array of "workflow output parameters":https://www.commonwl.org/v1.2/Workflow.html#WorkflowOutputParameter in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing |
+|arv:cwl_requirements|array of object|Array of "workflow process requirements":https://www.commonwl.org/v1.2/Workflow.html#Workflow in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list requirements that affect initial launching of the workflow such as "WorkflowRunnerResources":{{site.baseurl}}/user/cwl/cwl-extensions.html ).|
+|arv:cwl_hints|array of object|Array of "workflow process hints":https://www.commonwl.org/v1.2/Workflow.html#Workflow in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list hints that affect initial launching of the workflow such as "WorkflowRunnerResources":{{site.baseurl}}/user/cwl/cwl-extensions.html ).|
+
+When @collection_uuid@ is set, the workflow record's @name@, @description@, @definition@, and @owner_uuid@ are all set from the linked collection. The workflow record can no longer be updated directly, but changes to the linked collection will be reflected in the workflow record. Trashing the linked collection will cause the workflow record to become trashed and eventually deleted as well. The workflow record cannot be unlinked from a collection, only deleted and re-created.
+
+When a workflow is linked to a collection, the collection can be queried and fetched together with the workflow. The @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[["collection.properties.category", "=", "WGS"]]@ will match workflow definitions linked to collections that have a "category" property with the value "WGS". When using the "group contents":groups.html#contents API to fetch workflow records, in addition to the previously described filters, you can use @include=["collection_uuid"]@ to include the collection records corresponding to the @collection_uuid@ of the workflow records in the response.
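+
+For illustration, here is a sketch of such a filtered list request using @curl@, in the style of the "API examples":{{site.baseurl}}/api/requests.html (the host and token are placeholders):
+
+
+$ curl -G https://{{ site.arvados_api_host }}/arvados/v1/workflows \
+    -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
+    --data-urlencode 'filters=[["collection.properties.category","=","WGS"]]'
+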
h2. Methods
@@ -72,6 +92,8 @@ List workflows.
See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+The @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[["collection.properties.category", "=", "WGS"]]@ will match workflow definitions linked to collections that have a "category" property with the value "WGS".
+
h3. update
Update attributes of an existing Workflow.
diff --git a/doc/api/properties.html.textile.liquid b/doc/api/properties.html.textile.liquid
index 175c59b8c4..d1378833cb 100644
--- a/doc/api/properties.html.textile.liquid
+++ b/doc/api/properties.html.textile.liquid
@@ -13,6 +13,14 @@ Arvados allows you to attach arbitrary properties to "collection":methods/collec
Searching for records using properties is described in "Filtering on subproperties":methods.html#subpropertyfilters .
+h2. Controlling user-supplied properties
+
+Arvados can be configured with a vocabulary file that lists valid properties and the range of valid values for those properties. This is described in "Metadata vocabulary":{{site.baseurl}}/admin/metadata-vocabulary.html .
+
+Arvados offers options to set properties automatically and/or prevent certain properties, once set, from being changed by non-admin users. This is described in "Configuring collection's managed properties":{{site.baseurl}}/admin/collection-managed-properties.html .
+
+The admin can require that certain properties be non-empty before "freezing a project":methods/groups.html#frozen .
+
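+As a small sketch of how properties are set in practice, the @arv@ CLI can update a collection's properties (the UUID and property values here are placeholders); if a vocabulary is configured, the request is rejected unless the property key and value are valid:
+
+
+$ arv collection update --uuid x9999-4zz18-xxxxxxxxxxxxxxx \
+    --collection '{"properties": {"category": "WGS"}}'
+
+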
h2. Reserved properties
Components that ship with Arvados may automatically set properties on objects. These usually help track provenance or provide additional link metadata. These properties usually have a key that starts with @arv:@, and can always be set even when the system is configured with a strict vocabulary.
@@ -31,6 +39,7 @@ The arv:git* container properties, and the associated Git commands, primarily co
|arv:gitPath|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the absolute path of the checkout on the filesystem|
|arv:gitStatus|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with a machine-readable summary of files modified in the checkout since the most recent commit (the output of @git status --untracked-files=no --porcelain@)|
|arv:workflowMain|collection of type=workflow|string|Set on a collection containing a workflow created by @arvados-cwl-runner --create-workflow@, this is a relative reference inside the collection to the entry point of the workflow.|
+|arv:failed_container_resubmitted|container request|uuid|Set on container requests that were automatically resubmitted by the workflow runner with modified run options, such as when using the @PreemptionBehavior@ or @OutOfMemoryRetry@ CWL extensions. Set to the uuid of the new, resubmitted container request.|
The following system properties predate the @arv:@ key prefix, but are still reserved and can always be set.
@@ -40,9 +49,11 @@ table(table table-bordered table-condensed).
|container_request|collection|string|The UUID of the container request that produced an output or log collection.|
|docker-image-repo-tag|collection|string|For collections containing a Docker image, the repo/name:tag identifier|
|container_uuid|collection|string|The UUID of the container that produced a collection (set on collections with type=log)|
+|container|collection|string|(legacy) The UUID of the container that produced a collection. Set on intermediate collections created by arvados-cwl-runner. Starting with Arvados 2.6.0, arvados-cwl-runner uses @container_uuid@ instead, but older versions may still set the @container@ property.|
|cwl_input|container_request|object|On an intermediate container request, the CWL workflow-level input parameters used to generate the container request|
|cwl_output|container_request|object|On an intermediate container request, the CWL workflow-level output parameters collected from the container request|
|template_uuid|container_request|string|For a workflow runner container request, the workflow record that was used to launch it.|
+|workflowName|container_request|string|For a workflow runner container request, the "name" of the workflow record in @template_uuid@ at the time of launch (used for display only).|
|username|link|string|For a "can_login":permission-model.html#links permission link, the unix username on the VM that the user will have.|
|groups|link|array of string|For a "can_login":permission-model.html#links permission link, the unix groups on the VM that the user will be added to.|
|image_timestamp|link|string|When resolving a Docker image name and multiple links are found with @link_class=docker_image_repo+tag@ and same @link_name@, the @image_timestamp@ is used to determine precedence (most recent wins).|
@@ -58,11 +69,3 @@ table(table table-bordered table-condensed).
|output|The collection contains the output of a top-level container run (this is a container request where @requesting_container_uuid@ is null).|
|intermediate|The collection contains the output of a child container run (this is a container request where @requesting_container_uuid@ is non-empty).|
|workflow|A collection created by @arvados-cwl-runner --create-workflow@ containing a workflow definition.|
-
-h2. Controlling user-supplied properties
-
-Arvados can be configured with a vocabulary file that lists valid properties and the range of valid values for those properties. This is described in "Metadata vocabulary":{{site.baseurl}}/admin/metadata-vocabulary.html .
-
-Arvados offers options to set properties automatically and/or prevent certain properties, once set, from being changed by non-admin users. This is described in "Configuring collection's managed properties":{{site.baseurl}}/admin/collection-managed-properties.html .
-
-The admin can require that certain properties must be non-empty before "freezing a project":methods/groups.html#frozen .
diff --git a/doc/api/requests.html.textile.liquid b/doc/api/requests.html.textile.liquid
index fc5957af5f..55bb7cb22b 100644
--- a/doc/api/requests.html.textile.liquid
+++ b/doc/api/requests.html.textile.liquid
@@ -102,13 +102,11 @@ $ curl -v -X POST --data-urlencode 'collection={"name":"empty collection"}' -H "
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
- "href": "/collections/962eh-4zz18-m1ma0mxxfg3mbcc",
"kind": "arvados#collection",
"etag": "c5ifrv1ox2tu6alb559ymtkb7",
"uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-10-28T19:20:09.320771531Z",
- "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-10-28T19:20:09.319661000Z",
"name": "empty collection",
@@ -153,13 +151,11 @@ $ curl -X DELETE -v -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
- "href": "/collections/962eh-4zz18-m1ma0mxxfg3mbcc",
"kind": "arvados#collection",
"etag": "c5ifrv1ox2tu6alb559ymtkb7",
"uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-10-28T19:20:09.320771000Z",
- "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-10-28T19:20:09.319661000Z",
"name": "empty collection",
@@ -203,13 +199,11 @@ $ curl -v -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
- "href": "/collections/962eh-4zz18-xi32mpz2621o8km",
"kind": "arvados#collection",
"etag": "3mmn0s9e1z5s5opfofmtb9k8p",
"uuid": "962eh-4zz18-xi32mpz2621o8km",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-10-27T14:47:43.792587000Z",
- "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-10-27T14:47:43.792166000Z",
"name": "Saved at 2016-10-27 14:47:43 UTC by peter@debian",
@@ -262,13 +256,11 @@ $ curl -v -G --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.1
"limit": 100,
"items": [
{
- "href": "/collections/962eh-4zz18-ybggo9im899vv60",
"kind": "arvados#collection",
"etag": "bvgrrsg63zsenb9wnpnp0nsgl",
"uuid": "962eh-4zz18-ybggo9im899vv60",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-11-08T21:47:36.937106000Z",
- "modified_by_client_uuid": null,
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-11-08T21:47:36.936625000Z",
"name": "Log from cwl-runner job 962eh-8i9sb-45jww0k15fi5ldd",
@@ -282,13 +274,11 @@ $ curl -v -G --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.1
},
...
{
- "href": "/collections/962eh-4zz18-37i1tfl5de5ild9",
"kind": "arvados#collection",
"etag": "2fa07dx52lux8wa1loehwyrc5",
"uuid": "962eh-4zz18-37i1tfl5de5ild9",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-11-08T21:38:46.717798000Z",
- "modified_by_client_uuid": null,
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-11-08T21:38:46.717409000Z",
"name": null,
@@ -338,13 +328,11 @@ $ curl -v -X PUT --data-urlencode 'collection={"name":"rna.SRR948778.bam"}' -H "
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
- "href": "/collections/962eh-4zz18-xi32mpz2621o8km",
"kind": "arvados#collection",
"etag": "51509hhxo9qqjxqewnoz1b7og",
"uuid": "962eh-4zz18-xi32mpz2621o8km",
"owner_uuid": "962eh-tpzed-000000000000000",
"created_at": "2016-10-27T14:47:43.792587000Z",
- "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
"modified_by_user_uuid": "962eh-tpzed-000000000000000",
"modified_at": "2016-10-28T19:15:16.137814000Z",
"name": "rna.SRR948778.bam",
diff --git a/doc/api/resources.html.textile.liquid b/doc/api/resources.html.textile.liquid
index 2c4491f621..12ef14264f 100644
--- a/doc/api/resources.html.textile.liquid
+++ b/doc/api/resources.html.textile.liquid
@@ -11,19 +11,19 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This page describes the common attributes of Arvados resources.
+This page describes the common attributes shared by most or all Arvados resources.
h2(#resource). Resource
table(table table-bordered table-condensed).
|_. Attribute |_. Type |_. Description |_. Example|
-|uuid|string|universally unique object identifier, set on @create@|@mk2qn-4zz18-w3anr2hk2wgfpuo@|
-|owner_uuid|string|UUID of owner (must be a User or Group), set on @create@, controls who may access the resource, ownership may be changed explicitly with @update@, see "permission model":{{site.baseurl}}/api/permission-model.html for details.|@mk2qn-tpzed-a4lcehql0dv2u25@|
-|created_at|datetime|When resource was created, set on @create@|@2013-01-21T22:17:39Z@|
-|modified_by_client_uuid|string|API client software which most recently modified the resource, set on @create@ and @update@|@mk2qn-ozdt8-vq8l5qkzj7pr7h7@|
-|modified_by_user_uuid|string|Authenticated user, on whose behalf the client was acting when modifying the resource, set on @create@ and @update@|@mk2qn-tpzed-a4lcehql0dv2u25@|
-|modified_at|datetime|When resource was last modified, set on @create@ and @update@|@2013-01-25T22:29:32Z@|
-|href|string|a URL that can be used to address this resource||
+|uuid|string|universally unique object identifier. Set on @create@.|@mk2qn-4zz18-w3anr2hk2wgfpuo@|
+|owner_uuid|string|UUID of owner (must be a User or Group), set on @create@. Controls who may access the resource. Ownership may be changed explicitly with @update@, see "permission model":{{site.baseurl}}/api/permission-model.html for details.|@mk2qn-tpzed-a4lcehql0dv2u25@|
+|name|string|Human-assigned name. Not present on all object types; check the individual API page. The uniqueness constraint varies by object type.||
+|description|string|Free-text description of the object. Not present on all object types; check the individual API page. May be HTML formatted, "see below for valid HTML tags and attributes":#descriptions .||
+|created_at|datetime|When resource was created. Set on @create@.|@2013-01-21T22:17:39Z@|
+|modified_at|datetime|When resource was last modified. Set on @create@ and @update@.|@2013-01-25T22:29:32Z@|
+|modified_by_user_uuid|string|The owner of the API token used to authenticate the @create@ or @update@ request.|@mk2qn-tpzed-a4lcehql0dv2u25@|
|kind|string|@arvados#{resource_type}@|@arvados#collection@|
|etag|string|The ETag[1] of the resource|@1xlmizzjq7wro3dlb2dirf505@|
@@ -35,6 +35,10 @@ Each object is assigned a UUID. This has the format @aaaaa-bbbbb-cccccccccccccc
# The second field (@bbbbb@ in the example) is the object type.
# The third field (@ccccccccccccccc@ in the example) uniquely identifies the object.
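+
+As a small illustration (not an Arvados tool, just standard shell utilities), the object type can be read out of a UUID by splitting on @-@:
+
+
+$ echo "mk2qn-4zz18-w3anr2hk2wgfpuo" | cut -d- -f2
+4zz18
+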
+h2(#descriptions). Descriptions
+
+{% include 'html_tags' %}
+
h2. Timestamps
All Arvados timestamps follow ISO 8601 datetime format with fractional seconds (microsecond precision). All timestamps are UTC. Date format: @YYYY-mm-ddTHH:MM:SS.SSSSZ@ example date: @2016-11-08T21:38:24.124834000Z@.
diff --git a/doc/api/tokens.html.textile.liquid b/doc/api/tokens.html.textile.liquid
index 99c5f58a21..edcc8d5c86 100644
--- a/doc/api/tokens.html.textile.liquid
+++ b/doc/api/tokens.html.textile.liquid
@@ -53,18 +53,6 @@ h2. Creating tokens via the API
The browser login method above issues a new token. Using that token, it is possible to make API calls to create additional tokens. To do so, use the @create@ method of the "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.
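+
+A hedged sketch of such a request with @curl@, in the style of the "API examples":{{site.baseurl}}/api/requests.html (the optional @scopes@ field shown here is described below):
+
+
+$ curl -X POST https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations \
+    -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
+    --data-urlencode 'api_client_authorization={"scopes": ["GET /arvados/v1/collections"]}'
+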
-h2. Trusted API clients
-
-The "api_clients":{{site.baseurl}}/api/methods/api_clients.html resource determines if web applications that have gone through the browser login flow may create or list API tokens.
-
-After the user has authenticated, but before an authorization token is issued and browser redirect sent (sending the browser back to the @return_to@ login page bearing @api_token@), the server strips the path and query portion from @return_to@ to get @url_prefix@. The @url_prefix@ is used to find or create an ApiClient object. The newly issued API client authorization (API token) is associated with this ApiClient object.
-
-API clients may be marked as "trusted" by making an API call to create or update an "api_clients":{{site.baseurl}}/api/methods/api_clients.html resource and set the @is_trusted@ flag to @true@. An authorization token associated with a "trusted" client is permitted to list authorization tokens on "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html .
-
-A authorization token which is not associated with a trusted client may only use the @current@ method to query its own api_client_authorization object. The "untrusted" token is forbidden performing any other operations on API client authorizations, such as listing other authorizations or creating new authorizations.
-
-Authorization tokens which are not issued via the browser login flow (created directly via the API) inherit the api client of the token used to create them. They will always be "trusted" because untrusted API clients cannot create tokens.
-
h2(#scopes). Scopes
Scopes can restrict a token so it may only access certain resources. This is in addition to normal permission checks for the user associated with the token.
diff --git a/doc/architecture/Arvados_arch.odg b/doc/architecture/Arvados_arch.odg
index 03b9f3d353..f6eeb09ea8 100644
Binary files a/doc/architecture/Arvados_arch.odg and b/doc/architecture/Arvados_arch.odg differ
diff --git a/doc/architecture/index.html.textile.liquid b/doc/architecture/index.html.textile.liquid
index f5405c16e1..2112c0f92a 100644
--- a/doc/architecture/index.html.textile.liquid
+++ b/doc/architecture/index.html.textile.liquid
@@ -10,34 +10,59 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
+# "Services":#Services
+# "Arvados-server":#Arvados-server
+# "SDK":#SDK
+# "Tools":#Tools
+# "Arvados-client":#Arvados-client
+
!(full-width){{site.baseurl}}/images/Arvados_arch.svg!
-h3. Services
+h3(#Services). Services
-Located in @arvados/services@.
+Located in @arvados/services@. Many services have been incorporated into @arvados-server@; see below.
table(table table-bordered table-condensed).
|_. Component|_. Description|
-|api|The API server is the core of Arvados. It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|
-|arvados-git-httpd|Provides a git+http interface to Arvados-managed git repositories, with permissions and authentication based on an Arvados API token.|
-|arvados-dispatch-cloud|Provide elastic computing by creating and destroying cloud based virtual machines on compute demand.|
+|api|Along with Controller, the API server is the core of Arvados. It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|
|crunch-dispatch-local|Get compute requests submitted to the API server and execute them locally.|
-|crunch-dispatch-slurm|Get compute requests submitted to the API server and submit them to slurm.|
-|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|
|dockercleaner|Daemon for cleaning up Docker containers and images.|
-|fuse|Filesystem in USErspace (FUSE) filesystem driver for Keep.|
-|health|Health check proxy, contacts configured Arvados services at their health check endpoints and reports results.|
+|fuse|Filesystem in Userspace (FUSE) driver that lets users mount Keep collections as a filesystem.|
+|login-sync|Synchronize virtual machine users with Arvados users and permissions.|
+|workbench2|Web application providing user interface to Arvados services.|
+
+h3(#Arvados-server). Arvados-server
+
+Located in @cmd/arvados-server@. It consists of a single @arvados-server@ binary with a number of different subcommands. Although the binary itself is monolithic, each subcommand is a standalone service and only handles requests for that specific service; i.e., an @arvados-server controller@ process will not respond to requests intended for @arvados-server keep-web@. A usage sketch follows the table below.
+
+table(table table-bordered table-condensed).
+|_. Subcommand|_. Description |
+|boot|Boot an Arvados cluster from source, used by automated testing.|
+|check|Contact the health check endpoint on each service and print a report.|
+|cloudtest|Diagnostic tool which attempts to start a cloud instance using the current settings in the config file.|
+|config-check|Check that the config file is valid.|
+|config-defaults|Dump the default config options.|
+|config-dump|Dump the active config options that would be used by the other @arvados-server@ commands.|
+|controller|Controller works with the API server to make up the core of Arvados. It intercepts requests and implements additional features such as federation.|
+|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|
+|crunchstat|Run a program and collect resource usage stats using cgroups.|
+|dispatch-cloud|Get compute requests submitted to the API server and schedule them on elastic cloud compute, creating and destroying cloud based virtual machines on demand.|
+|dispatch-lsf|Get compute requests submitted to the API server and submit them to the LSF HPC scheduler.|
+|dispatch-slurm|Get compute requests submitted to the API server and submit them to the Slurm HPC scheduler.|
+|health|Service that aggregates the other health check results to provide a single cluster-wide health status.|
+|install|Install development dependencies to be able to build and run Arvados from source.|
+|init|Create an initial configuration file for a new cluster and perform database setup.|
|keep-balance|Perform storage utilization reporting, optimization and garbage collection. Moves data blocks to their optimum location, ensures correct replication and storage class, and trashes unreferenced blocks.|
+|keep-web|Provides high-level access to files in collections, as either a WebDAV or S3-compatible API endpoint.|
|keepproxy|Provides low-level access to keepstore services (block-level data access) for clients outside the internal (private) network.|
|keepstore|Provides access to underlying storage (filesystem or object storage such as Amazon S3 or Azure Blob) with Arvados permissions.|
-|keep-web|Provides high-level WebDAV access to collections (file-level data access).|
-|login-sync|Synchronize virtual machine users with Arvados users and permissions.|
-|arvados-ws|Publishes API server change events over websockets.|
-|workbench|Web application providing user interface to Arvados services.|
+|recover-collection|Recovers deleted collections. Recovery is possible when the collection's manifest is still available and all of its data blocks are still available or recoverable.|
+|workbench2|Serve the HTML/Javascript for the single-page Workbench application.|
+|ws|Publishes API server change events over websockets.|
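+
+Each subcommand is invoked as an argument to the single binary. A brief sketch using subcommands from the table above:
+
+
+$ arvados-server config-check
+$ arvados-server config-dump | less
+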
-h3. Tools
+h3(#SDK). SDK
-The @arv@ command is located in @arvados/sdk/ruby@, the @arv-*@ tools are located in @arvados/sdk/python@, the rest are located in @arvados/tools@.
+The @arv@ command is located in @arvados/sdk/ruby@; the @arv-*@ tools are located in @arvados/sdk/python@.
table(table table-bordered table-condensed).
|_. Component|_. Description |
@@ -46,14 +71,36 @@ table(table table-bordered table-condensed).
|arv-get|Get files from a collection.|
|arv-keepdocker|Upload Docker images from local Docker daemon to Keep.|
|arv-ls|List files in a collection|
-|arv-migrate-docker19|Migrate Docker images in Keep from v1 format (Docker 1.9 or earlier) to v2 format (Docker 1.10 or later)|
-|arv-normalize|Read manifest text on stdin and produce normalized manifest text on stdout.|
|arv-put|Upload files to a collection.|
|arv-ws|Print events from Arvados websocket event source.|
+
+h3(#Tools). Tools
+
+Located in @arvados/tools@.
+
+table(table table-bordered table-condensed).
+|_. Component|_. Description |
|arvbash|Helpful @bash@ macros for using Arvados at the command line.|
|arvbox|Dockerized Arvados environment for development and testing.|
+|cluster-activity|Generate an HTML and/or CSV report of cluster activity over a time period.|
|crunchstat-summary|Read execution metrics (cpu %, ram, network, etc) collected from a compute container and produce a report.|
|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|
|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|
|keep-rsync|Get lists of blocks from two clusters, copy blocks which exist on source cluster but are missing from destination cluster.|
-|sync-groups|Take a CSV file listing with (group, user, permission) records and synchronize membership in Arvados groups.|
+|sync-groups|Takes a CSV file with rows in the form (group, user, permission) and synchronizes membership in Arvados groups.|
+|sync-users|Takes a CSV file with rows in the form (email, first name, last name, active, admin) and synchronizes Arvados users.|
+|user-activity|Generate a text report of user activity over a time period.|
+
+h3(#Arvados-client). Arvados-client
+
+Located in @cmd/arvados-client@. It consists of a single @arvados-client@ binary with a number of different subcommands.
+
+table(table table-bordered table-condensed).
+|_. Subcommand|_. Description |
+|connect-ssh|Connects stdin/stdout to a container's gateway server. It is intended to be invoked with the OpenSSH client's ProxyCommand config.|
+|deduplication-report|Analyzes the overlap in blocks used by 2 or more collections. It prints a deduplication report that shows the nominal space used by the collections, as well as the actual size and the amount of space that is saved by Keep's deduplication.|
+|diagnostics|Perform cluster diagnostics to check that all the services are available and responding normally to requests.|
+|logs|Prints live streaming logs for a container.|
+|mount|Alternate Keep FUSE mount written in Go.|
+|shell|Connects the terminal to an interactive shell on a running container.|
+|sudo|Runs another command using API connection info and SystemRootToken from the system config file instead of the caller's environment variables.|
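+
+As with @arvados-server@, each subcommand is an argument to the binary. A brief sketch using subcommands from the table above (the container request UUID is a placeholder):
+
+
+$ arvados-client diagnostics
+$ arvados-client logs x9999-xvhdp-xxxxxxxxxxxxxxx
+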
diff --git a/doc/images/Arvados_arch.svg b/doc/images/Arvados_arch.svg
index 00a4e07d37..a490f0439e 100644
--- a/doc/images/Arvados_arch.svg
+++ b/doc/images/Arvados_arch.svg
[SVG markup not preserved in this extraction. The updated diagram replaces the old "An Arvados cluster From 30000 feet" drawing: labels for arvados-git-httpd, arv-ws, and git repos are removed, and the new version is titled "High level overview of Arvados components and how they interact", with labels for the Websockets service, Cloud or HPC dispatcher, keep-web & keepproxy, API Server and Controller, keepstore content-addressed object storage, elastic compute nodes, storage backends (filesystem, S3), Postgres DB, and a note that arrows represent outgoing connections between components, with bidirectional arrows meaning either component may initiate a connection.]
diff --git a/doc/images/add-new-repository.png b/doc/images/add-new-repository.png
deleted file mode 100644
index d62a9869a2..0000000000
Binary files a/doc/images/add-new-repository.png and /dev/null differ
diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid
index 20e1c48eee..8a43160c5a 100644
--- a/doc/install/arvbox.html.textile.liquid
+++ b/doc/install/arvbox.html.textile.liquid
@@ -28,7 +28,7 @@ $ ./arvbox start localdemo
Arvados-in-a-box starting
-Waiting for workbench2 websockets workbench webshell keep-web controller keepproxy api keepstore1 arv-git-httpd keepstore0 sdk vm ...
+Waiting for workbench2 websockets workbench webshell keep-web controller keepproxy api keepstore1 keepstore0 sdk vm ...
...
@@ -152,11 +152,6 @@ h3. ARVADOS_ROOT
The root directory of the Arvados source tree
default: $ARVBOX_DATA/arvados
-h3. ARVADOS_DEV_ROOT
-
-The root directory of the Arvados-dev source tree
-default: $ARVBOX_DATA/arvados-dev
-
h3. ARVBOX_PUBLISH_IP
The IP address on which to publish services when running in public configuration. Overrides default detection of the host's IP address.
diff --git a/doc/install/automatic.html.textile.liquid b/doc/install/automatic.html.textile.liquid
index 398ebc20e0..01d2b161d7 100644
--- a/doc/install/automatic.html.textile.liquid
+++ b/doc/install/automatic.html.textile.liquid
@@ -20,7 +20,7 @@ A single-node installation supports all Arvados functionality at small scale. Su
h2. Prerequisites
You will need:
-* a server host running Debian 10 (buster) or Debian 11 (bullseye).
+* a server host running Debian 11 (bullseye) or Debian 12 (bookworm).
* a unique 5-character ID like @x9999@ for your cluster (first character should be @[a-w]@ for a long-lived / production cluster; all characters are @[a-z0-9]@).
* a DNS name like @x9999.example.com@ that resolves to your server host (or a load balancer / proxy that passes HTTP requests on port 80[1] and HTTPS requests on ports 443 and 4440-4460 through to the same port on your server host).
* a firewall setup that allows incoming connections to ports 80[1], 443, and 4440-4460.
@@ -49,26 +49,31 @@ Arvados needs a login backend. To get started quickly, add a user account on you
h2. Initialize the cluster
-
-# echo > /etc/apt/sources.list.d/arvados.list "deb http://apt.arvados.org/$(lsb_release -sc) $(lsb_release -sc) main"
-# apt update
-# apt install arvados-server-easy
-# arvados-server init -cluster-id x9999 -domain x9999.example.com -tls acme -login pam
-
+{% assign packages_to_install = "arvados-server-easy" %}
+{% include 'setup_debian_repo' %}
+
+Then initialize your cluster:
+
+
+# arvados-server init -cluster-id x9999 -domain x9999.example.com -tls acme -login pam
+
+
When the "init" command is finished, navigate to the link shown in the terminal (e.g., @https://x9999.example.com/@) and log in with the account you created above.
Activate your new Arvados user account. Copy the UUID (looks like @x9999-tpzed-xxxxxxxxxxxxxxx@) from your browser's location bar and run:
-
-# arv sudo user setup --uuid x9999-tpzed-xxxxxxxxxxxxxxx
-
+
+# arv sudo user setup --uuid x9999-tpzed-xxxxxxxxxxxxxxx
+
+
Run the diagnostics tool to ensure everything is working.
-
-# arv sudo diagnostics
-
+
+# arv sudo diagnostics
+
+
h2. Customize the cluster
diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
index 31ad994f0b..d602029045 100644
--- a/doc/install/configure-s3-object-storage.html.textile.liquid
+++ b/doc/install/configure-s3-object-storage.html.textile.liquid
@@ -38,16 +38,9 @@ h2(#example). Configuration example
# Bucket name.
Bucket: example-bucket-name
- # IAM role name to use when retrieving credentials from
- # instance metadata. It can be omitted, in which case the
- # role name itself will be retrieved from instance metadata
- # -- but setting it explicitly may protect you from using
- # the wrong credentials in the event of an
- # installation/configuration error.
- IAMRole: ""
-
- # If you are not using an IAM role for authentication,
- # specify access credentials here instead.
+ # Optionally, you can specify S3 access credentials here.
+ # If these are left blank, IAM role credentials will be
+ # retrieved from instance metadata (IMDSv2).
AccessKeyID: ""
SecretAccessKey: ""
@@ -70,6 +63,13 @@ h2(#example). Configuration example
# might be needed for other S3-compatible services.
V2Signature: false
+ # Use path-style requests instead of the default
+ # virtual-hosted-style requests. This might be needed for
+ # S3-compatible services other than AWS. If using AWS, see
+ # https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access
+ # for deprecation information.
+ UsePathStyle: false
+
# By default keepstore stores data using the MD5 checksum
# (32 hexadecimal characters) as the object name, e.g.,
# "0123456abc...". Setting PrefixLength to 3 changes this
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index c20e4855ad..559b34b50a 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -13,249 +13,247 @@ SPDX-License-Identifier: CC-BY-SA-3.0
@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
{% include 'notebox_end' %}
-# "Introduction":#introduction
-# "Install Packer":#install-packer
-# "Create an SSH keypair":#sshkeypair
-# "Compute image requirements":#requirements
-# "The build script":#building
-# "DNS resolution":#dns-resolution
-# "NVIDIA GPU support":#nvidia
-# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration
-# "Build an AWS image":#aws
-## "Autoscaling compute node scratch space":#aws-ebs-autoscaler
-# "Build an Azure image":#azure
+p(#introduction). This page describes how to build a compute node image that can be used to run containers dispatched by Arvados in the cloud.
-h2(#introduction). Introduction
+# "Prerequisites":#prerequisites
+## "Check your distribution":#check-distro
+## "Create and configure an SSH keypair":#sshkeypair
+## "Get the Arvados source":#git-clone
+## "Install Ansible":#install-ansible
+## "Install Packer and the Ansible plugin":#install-packer
+# "Fully automated build with Packer and Ansible":#building
+## "Write Ansible settings for the compute node":#ansible-variables
+## "Set up Packer for your cloud":#packer-variables
+### "AWS":#aws-variables
+### "Azure":#azure-variables
+## "Run Packer":#run-packer
+# "Partially automated build with Ansible":#ansible-build
+## "Write Ansible settings for the compute node":#ansible-variables-standalone
+## "Write an Ansible inventory":#ansible-inventory
+## "Run Ansible":#run-ansible
+# "Manual build":#requirements
-This page describes how to build a compute node image that can be used to run containers dispatched by Arvados in the cloud.
+h2(#prerequisites). Prerequisites
-Packer templates for AWS and Azure are provided with Arvados. To use them, the following are needed:
+h3(#check-distro). Check your distribution
-* "Packer":https://www.packer.io/
-* credentials for your cloud account
-* configuration details for your cloud account
+These instructions work on all of our supported distributions *except* Ubuntu 20.04 "focal." Ubuntu 20.04 includes Python 3.8, which is too old to run Ansible 8 and our Ansible playbooks. If your cluster runs Ubuntu 20.04, you will need to use a system with a newer version of Python to build the compute node image. The system where you build the compute node image only needs to be able to communicate with your cloud provider. It does not need to be part of the Arvados cluster or have any Arvados client tools installed. Your Arvados cluster, and the compute node image you build, can all still be based on Ubuntu 20.04.
-h2(#install-packer). Install Packer
+h3(#sshkeypair). Create and configure an SSH keypair
-"Download Packer here":https://developer.hashicorp.com/packer/downloads
-
-h2(#sshkeypair). Create a SSH keypair
-
-@arvados-dispatch-cloud@ communicates with the compute nodes via SSH. To do this securely, a SSH keypair is needed.
-
-Generate a SSH keypair with no passphrase. The private key needs to be stored in the cluster configuration file (see @Containers/DispatchPrivateKey@) for use by @arvados-dispatch-cloud@, as described in the "next section":install-dispatch-cloud.html#update-config. The public key will be baked into the compute node images, see the cloud-specific documentation below.
+@arvados-dispatch-cloud@ communicates with the compute nodes via SSH. To do this securely, an SSH keypair is needed. The key type must be RSA or ED25519 to work with Amazon EC2. Generate an ED25519 keypair with no passphrase:
-~$ ssh-keygen -N '' -f ~/.ssh/id_dispatcher
-Generating public/private rsa key pair.
+~$ ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_dispatcher
+Generating public/private ed25519 key pair.
Your identification has been saved in /home/user/.ssh/id_dispatcher.
Your public key has been saved in /home/user/.ssh/id_dispatcher.pub.
The key fingerprint is:
[...]
-~$ cat ~/.ssh/id_dispatcher
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
-ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
-...
-oFyAjVoexx0RBcH6BveTfQtJKbktP1qBO4mXo2dP0cacuZEtlAqW9Eb06Pvaw/D9
-foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
------END RSA PRIVATE KEY-----
-h2(#requirements). Compute image requirements
-
-Arvados comes with a build script to automate the creation of a suitable compute node image (see "The build script":#building below). It is provided as a convenience. It is also possible to create a compute node image via other means. These are the requirements:
-
-* for AWS: the SSH public key for @arvados-dispatch-cloud@ (the one that corresponds with @Containers.DispatchPrivateKey@ in the Arvados config file) needs to go into ~/.ssh/authorized_keys for the SSH user you want @arvados-dispatch-cloud@ to use (cf. @CloudVMs.DriverParameters.AdminUsername@ in the Arvados config file) and that user needs to be able to sudo without password prompt, unless you use `root` in which case sudo is not used.
-* for Azure: @arvados-dispatch-cloud@ automatically extracts the SSH public key from the value of @Containers.DispatchPrivateKey@ and uses an API call to create the user specified in @CloudVMs.DriverParameters.AdminUsername@ with that SSH public key and password-less sudo enabled.
-* SSH needs to be running and reachable by @arvados-dispatch-cloud@ on port 22 (or a custom port, see @CloudVMS.SSHPort@ to in the Arvados config file)
-* the @python3-arvados-fuse@ package needs to be installed
-* @Docker@ or @Singularity@ needs to be installed (cf. @Containers.RuntimeEngine@ in the Arvados config file).
-* all available scratch space should be made available under `/tmp`.
-
-h2(#building). The build script
-
-The necessary files are located in the @arvados/tools/compute-images@ directory in the source tree. A build script is provided to generate the image. The @--help@ argument lists all available options:
-
-~$ ./build.sh --help
-build.sh: Build cloud images for arvados-dispatch-cloud
-
-Syntax:
- build.sh [options]
-
-Options:
-
- --json-file <path>
- Path to the packer json file (required)
- --arvados-cluster-id <xxxxx>
- The ID of the Arvados cluster, e.g. zzzzz(required)
- --aws-profile <profile>
- AWS profile to use (valid profile from ~/.aws/config (optional)
- --aws-secrets-file <path>
- AWS secrets file which will be sourced from this script (optional)
- When building for AWS, either an AWS profile or an AWS secrets file
- must be provided.
- --aws-source-ami <ami-xxxxxxxxxxxxxxxxx>
- The AMI to use as base for building the images (required if building for AWS)
- --aws-region <region> (default: us-east-1)
- The AWS region to use for building the images
- --aws-vpc-id <vpc-id>
- VPC id for AWS, if not specified packer will derive from the subnet id or pick the default one.
- --aws-subnet-id <subnet-xxxxxxxxxxxxxxxxx>
- Subnet id for AWS, if not specified packer will pick the default one for the VPC.
- --aws-ebs-autoscale
- Install the AWS EBS autoscaler daemon (default: do not install the AWS EBS autoscaler).
- --aws-associate-public-ip <true|false>
- Associate a public IP address with the node used for building the compute image.
- Required when the machine running packer can not reach the node used for building
- the compute image via its private IP. (default: true if building for AWS)
- Note: if the subnet has "Auto-assign public IPv4 address" enabled, disabling this
- flag will have no effect.
- --aws-ena-support <true|false>
- Enable enhanced networking (default: true if building for AWS)
- --gcp-project-id <project-id>
- GCP project id (required if building for GCP)
- --gcp-account-file <path>
- GCP account file (required if building for GCP)
- --gcp-zone <zone> (default: us-central1-f)
- GCP zone
- --azure-secrets-file <patch>
- Azure secrets file which will be sourced from this script (required if building for Azure)
- --azure-resource-group <resouce-group>
- Azure resource group (required if building for Azure)
- --azure-location <location>
- Azure location, e.g. centralus, eastus, westeurope (required if building for Azure)
- --azure-sku <sku> (required if building for Azure, e.g. 16.04-LTS)
- Azure SKU image to use
- --ssh_user <user> (default: packer)
- The user packer will use to log into the image
- --resolver <resolver_IP>
- The dns resolver for the machine (default: host's network provided)
- --reposuffix <suffix>
- Set this to "-dev" to track the unstable/dev Arvados repositories
- --public-key-file <path>
- Path to the public key file that a-d-c will use to log into the compute node (required)
- --mksquashfs-mem (default: 256M)
- Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
- --nvidia-gpu-support
- Install all the necessary tooling for Nvidia GPU support (default: do not install Nvidia GPU support)
- --debug
- Output debug information (default: no debug output is printed)
-
+After you do this, the contents of the private key in @~/.ssh/id_dispatcher@ need to be stored in your "cluster configuration file":{{ site.baseurl }}/admin/config.html under @Containers.DispatchPrivateKey@.
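+
+A minimal sketch of how that section of the cluster configuration might look (the cluster ID and key contents are placeholders):
+
+
+Clusters:
+  x9999:
+    Containers:
+      DispatchPrivateKey: |
+        -----BEGIN OPENSSH PRIVATE KEY-----
+        [...]
+        -----END OPENSSH PRIVATE KEY-----
+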
-h2(#dns-resolution). DNS resolution
+The public key at @~/.ssh/id_dispatcher.pub@ will need to be authorized to access instances booted from the image. Keep this file; our Ansible playbook will read it to set this up for you.
-Compute nodes must be able to resolve the hostnames of the API server and any keepstore servers to your internal IP addresses. If you are on AWS and using Route 53 for your DNS, the default resolver configuration can be used with no extra options.
+h3(#git-clone). Get the Arvados source
-You can also run your own internal DNS resolver. In that case, the IP address of the resolver should be passed as the value for the @--resolver@ argument to "the build script":#building.
+Compute node templates are only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using in a directory convenient for you:
-As a third option, the services could be hardcoded into an @/etc/hosts@ file. For example:
+{% include 'branchname' %}
+
+~$ git clone --depth=1 --branch={{ branchname }} git://git.arvados.org/arvados.git ~/arvados
+
+
-10.20.30.40 ClusterID.example.com
-10.20.30.41 keep1.ClusterID.example.com
-10.20.30.42 keep2.ClusterID.example.com
-
+h3(#install-ansible). Install Ansible
-Adding these lines to the @/etc/hosts@ file in the compute node image could be done with a small change to the Packer template and the @scripts/base.sh@ script, which will be left as an exercise for the reader.
+{% include 'install_ansible' header_level: 'h4' %}
-h2(#nvidia). NVIDIA GPU support
+h3(#install-packer). Install Packer and the Ansible plugin
-If you plan on using instance types with NVIDIA GPUs, add @--nvidia-gpu-support@ to the build command line. Arvados uses the same compute image for both GPU and non-GPU instance types. The GPU tooling is ignored when using the image with a non-GPU instance type.
+We provide Packer templates that can automatically create a compute instance, configure it with Ansible, shut it down, and create a cloud image from the result. "Install Packer following their instructions.":https://developer.hashicorp.com/packer/docs/install After you do, install Packer's Ansible provisioner by running:
-{% assign show_docker_warning = true %}
+
+~$ packer plugins install github.com/hashicorp/ansible
+
+
-{% include 'singularity_mksquashfs_configuration' %}
+h2(#building). Fully automated build with Packer and Ansible
-The desired amount of memory to make available for @mksquashfs@ can be configured in an argument to "the build script":#building. It defaults to @256M@.
+After you have both tools installed, you can configure them with information about your Arvados cluster and cloud environment, and then run a fully automated build.
-h2(#aws). Build an AWS image
+h3(#ansible-variables). Write Ansible settings for the compute node
-For @ClusterID@, fill in your cluster ID.
+In the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up following the instructions in the comments.
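+As an illustrative sketch only (@arvados_compute_nvidia@, shown here, enables the NVIDIA GPU tooling; the full set of variables is documented in the comments of @host_config.example.yml@):
+
+
+# host_config.yml -- illustrative excerpt
+# Install NVIDIA CUDA tooling in the compute image:
+arvados_compute_nvidia: true
+
+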
-@AWSProfile@ is the name of an AWS profile in your "credentials file":https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file (@~/.aws/credentials@) listing the @aws_access_key_id@ and @aws_secret_access_key@ to use.
+h3(#packer-variables). Set up Packer for your cloud
-The @AMI@ is the identifier for the base image to be used. Current AMIs are maintained by "Debian":https://wiki.debian.org/Cloud/AmazonEC2Image/Buster and "Ubuntu":https://cloud-images.ubuntu.com/locator/ec2/.
+You need to provide different configuration to Packer depending on which cloud you're deploying Arvados in.
-The @VPC@ and @Subnet@ should be configured for where you want the compute image to be generated and stored.
+h4(#aws-variables). AWS
-@ArvadosDispatchCloudPublicKeyPath@ should be replaced with the path to the ssh *public* key file generated in "Create an SSH keypair":#sshkeypair, above.
+Install Packer's AWS builder by running:
-~$ ./build.sh --json-file arvados-images-aws.json \
- --arvados-cluster-id ClusterID \
- --aws-profile AWSProfile \
- --aws-source-ami AMI \
- --aws-vpc-id VPC \
- --aws-subnet-id Subnet \
- --ssh_user admin \
- --public-key-file ArvadosDispatchCloudPublicKeyPath
-
-
+
+~$ packer plugins install github.com/hashicorp/amazon
+
+
+In the @tools/compute-images@ directory of your Arvados source checkout, copy @aws_config.example.json@ to @aws_config.json@. Fill in values for the configuration settings as follows:
-h3(#aws-ebs-autoscaler). Autoscaling compute node scratch space
-
-Arvados supports "AWS EBS autoscaler":https://github.com/awslabs/amazon-ebs-autoscale. This feature automatically expands the scratch space on the compute node on demand by 200 GB at a time, up to 5 TB.
-
-If you want to add the daemon in your images, add the @--aws-ebs-autoscale@ flag to the "the build script":#building.
-
-The AWS EBS autoscaler daemon will be installed with this configuration:
-
-{
- "mountpoint": "/tmp",
- "filesystem": "lvm.ext4",
- "lvm": {
- "volume_group": "autoscale_vg",
- "logical_volume": "autoscale_lv"
- },
- "volume": {
- "type": "gp3",
- "iops": 3000,
- "encrypted": 1
- },
- "detection_interval": 2,
- "limits": {
- "max_ebs_volume_size": 1500,
- "max_logical_volume_size": 8000,
- "max_ebs_volume_count": 16
- },
- "logging": {
- "log_file": "/var/log/ebs-autoscale.log",
- "log_interval": 300
- }
-}
-
+* If you already have AWS credentials configured that Packer can use to create and manage an EC2 instance, set @aws_profile@ to the name of those credentials in your configuration. Otherwise, set @aws_access_key@ and @aws_secret_key@ to credentials that grant those permissions.
+* Set @aws_region@, @vpc_id@, and @subnet_id@ to the identifiers of the network where Packer should create the EC2 instance.
+* Set @aws_source_ami@ to the AMI that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of the administrator account used on that image.
+* Set @aws_volume_gb@ to the size of the image you want to create, in GB. The default of 20 should be sufficient for most installs. You may increase this if you're using a custom source AMI with more software pre-installed.
+* Set @arvados_cluster@ to the same five-character alphanumeric cluster identifier used under @Clusters@ in your Arvados cluster configuration.
+* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}"{{env `HOME`}}/ansible/bin/ansible-playbook"{% endraw %}.
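+Putting it together, an illustrative @aws_config.json@ might look like this; every value below is a placeholder:
+
+
+{
+  "aws_profile": "default",
+  "aws_region": "us-east-1",
+  "vpc_id": "vpc-0123456789abcdef0",
+  "subnet_id": "subnet-0123456789abcdef0",
+  "aws_source_ami": "ami-012345abcdef56789",
+  "ssh_user": "admin",
+  "aws_volume_gb": "20",
+  "arvados_cluster": "xxxxx"
+}
+
+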
-Changing the ebs-autoscale configuration is left as an exercise for the reader.
+When you finish writing your configuration, "run Packer":#run-packer.
-This feature also requires a few Arvados configuration changes, described in "EBS Autoscale configuration":install-dispatch-cloud.html#aws-ebs-autoscaler.
+h4(#azure-variables). Azure
-h2(#azure). Build an Azure image
+{% comment %}
+FIXME: Incomplete
+{% endcomment %}
-~$ ./build.sh --json-file arvados-images-azure.json \
- --arvados-cluster-id ClusterID \
- --azure-resource-group ResourceGroup \
- --azure-location AzureRegion \
- --azure-sku AzureSKU \
- --azure-secrets-file AzureSecretsFilePath \
- --resolver ResolverIP \
- --public-key-file ArvadosDispatchCloudPublicKeyPath
-
-
+Install Packer's Azure builder by running:
-For @ClusterID@, fill in your cluster ID. The @ResourceGroup@ and @AzureRegion@ (e.g. 'eastus2') should be configured for where you want the compute image to be generated and stored. The @AzureSKU@ is the SKU of the base image to be used, e.g. '18.04-LTS' for Ubuntu 18.04.
+
+~$ packer plugins install github.com/hashicorp/azure
+
+
-@AzureSecretsFilePath@ should be replaced with the path to a shell script that loads the Azure secrets with sufficient permissions to create the image. The file would look like this:
+In the @tools/compute-images@ directory of your Arvados source checkout, copy @azure_config.example.json@ to @azure_config.json@. Fill in values for the configuration settings as follows:
-export ARM_CLIENT_ID=...
-export ARM_CLIENT_SECRET=...
-export ARM_SUBSCRIPTION_ID=...
-export ARM_TENANT_ID=...
+* The settings load credentials from Azure's standard environment variables (@ARM_CLIENT_ID@, @ARM_CLIENT_SECRET@, @ARM_SUBSCRIPTION_ID@, and @ARM_TENANT_ID@). As long as you have these environment variables set in the shell before you run Packer, they will be loaded as normal. Alternatively, you can set them directly in the configuration file. These secrets can be generated from the Azure portal, or with the CLI using a command like:
+
+~$ az ad sp create-for-rbac --name Packer --password ...
+
+* Set @location@ and @resource_group@ to identify where Packer should create the cloud instance.
+* Set @image_sku@ to the identifier of the image that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of the administrator account you want to use on that image.
+* Set @ssh_private_key_file@ to the path of the private key you generated earlier for the dispatcher. For example, {% raw %}"{{env `HOME`}}/.ssh/id_dispatcher"{% endraw %}.
+* Set @arvados_cluster@ to the same five-character alphanumeric cluster identifier used under @Clusters@ in your Arvados cluster configuration.
+* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}"{{env `HOME`}}/ansible/bin/ansible-playbook"{% endraw %}.
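+Putting it together, an illustrative @azure_config.json@ might look like this; every value below is a placeholder:
+
+{% raw %}
+
+{
+  "location": "centralus",
+  "resource_group": "ArvadosImages",
+  "image_sku": "22_04-lts",
+  "ssh_user": "packer",
+  "ssh_private_key_file": "{{env `HOME`}}/.ssh/id_dispatcher",
+  "arvados_cluster": "xxxxx"
+}
+
+{% endraw %}
+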
-These secrets can be generated from the Azure portal, or with the cli using a command like this:
+When you finish writing your configuration, "run Packer":#run-packer.
-~$ az ad sp create-for-rbac --name Packer --password ...
-
+h3(#run-packer). Run Packer
+
+In the @tools/compute-images@ directory of your Arvados source checkout, run Packer with your configuration and the template appropriate for your cloud. For example, to build an image on AWS, run:
+
+
+arvados/tools/compute-images$ packer build -var-file=aws_config.json aws_template.json
+
+
+
+To build an image on Azure, replace both instances of *@aws@* with *@azure@*, and run that command.
+
+{% include 'notebox_begin_warning' %}
+If @packer build@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:
+
+
+TASK [Gathering Facts] *********************************************************
+fatal: [default]: FAILED! => {"msg": "failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\n\n"}
+
+PLAY RECAP *********************************************************************
+default : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
+
+
+
+This might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:
+
+
+$ export ANSIBLE_SCP_EXTRA_ARGS="'-O'"
+
+
+
+Then rerun your full @packer build@ command from the same shell.
+{% include 'notebox_end' %}
+
+If the build succeeds, it will report the identifier of your image at the end of the process. For example, when you build an AWS image, it will look like this:
+
+
+==> Builds finished. The artifacts of successful builds are:
+--> amazon-ebs: AMIs were created:
+us-east-1: ami-012345abcdef56789
+
+
+
+That identifier can now be set as @CloudVMs.ImageID@ in your cluster configuration. You do not need to run any other compute node build process on this page; continue to "installing the cloud dispatcher":install-dispatch-cloud.html.
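+For example, an illustrative @config.yml@ excerpt using the AMI reported above:
+
+
+Clusters:
+  ClusterID:
+    Containers:
+      CloudVMs:
+        ImageID: ami-012345abcdef56789
+
+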
+
+h2(#ansible-build). Partially automated build with Ansible
+
+If Arvados does not include a template for your cloud, or you do not have permission to run Packer, you can run the Ansible playbook by itself. This can set up a base Debian or Ubuntu system with all the software and configuration necessary to do Arvados compute work. After it's done, you can manually snapshot the node and create a cloud image from it.
+
+h3(#ansible-variables-standalone). Write Ansible settings for the compute node
+
+In the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up following the instructions in the comments. Note that you *must set* @arvados_cluster_id@ in this file since you are not running Packer.
+
+h3(#ansible-inventory). Write an Ansible inventory
+
+The compute node playbook runs on a host named @default@. In the @tools/compute-images@ directory of your Arvados source checkout, write a file named @inventory.ini@ with information about how to connect to this node via SSH. It needs only one host entry, like this:
+
+
+# Example inventory.ini for an Arvados compute node
+default ansible_host=192.0.2.9 ansible_user=admin
+
+
+
+* @ansible_host@ can be the running node's hostname or IP address. You need to be able to reach this host from the system where you're running Ansible.
+* @ansible_user@ names the user account that Ansible should use for the SSH connection. It needs to have permission to use @sudo@ on the running node.
+
+You can add other Ansible configuration options like @ansible_port@ to your inventory if needed. Refer to the "Ansible inventory documentation":https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html for details.
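+For instance, a hypothetical inventory entry that connects on a nonstandard SSH port:
+
+
+default ansible_host=192.0.2.9 ansible_user=admin ansible_port=2222
+
+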
+
+h3(#run-ansible). Run Ansible
+
+If you installed Ansible inside a virtualenv, activate that virtualenv now. Then, in the @tools/compute-images@ directory of your Arvados source checkout, run @ansible-playbook@ with your inventory and configuration:
+
+
+arvados/tools/compute-images$ ansible-playbook --ask-become-pass --inventory=inventory.ini --extra-vars=@host_config.yml ../ansible/build-compute-image.yml
+
+
+
+You'll be prompted with @BECOME password:@. Enter the sudo password for the @ansible_user@ account you defined in the inventory.
+
+{% include 'notebox_begin_warning' %}
+If @ansible-playbook@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:
+
+
+TASK [Gathering Facts] *********************************************************
+fatal: [default]: FAILED! => {"msg": "failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\n\n"}
+
+PLAY RECAP *********************************************************************
+default : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
+
+
+
+This might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:
+
+
+$ export ANSIBLE_SCP_EXTRA_ARGS="'-O'"
+
+
+
+Then rerun your full @ansible-playbook@ command from the same shell.
+{% include 'notebox_end' %}
+
+If it succeeds, Ansible should report a "PLAY RECAP" with @failed=0@:
+
+
+PLAY RECAP *********************************************************************
+default : ok=41 changed=37 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0
+
+
+
+Your node is now ready to run Arvados compute work. You can snapshot the node, create an image from it, and set that image as @CloudVMs.ImageID@ in your Arvados cluster configuration. The details of that process are cloud-specific and out of scope for this documentation. You do not need to run any other compute node build process on this page; continue to "installing the cloud dispatcher":install-dispatch-cloud.html.
+
+h2(#requirements). Manual build
+
+If you cannot run Ansible, you can create a cloud instance, manually set it up to be a compute node, and then create an image from it. The details of this process depend on which distribution and cloud you use; those variations are out of scope for this documentation. These are the requirements (an illustrative sketch of the steps follows the list):
-@ArvadosDispatchCloudPublicKeyPath@ should be replaced with the path to the ssh *public* key file generated in "Create an SSH keypair":#sshkeypair, above.
+* Except on Azure, the SSH public key you generated previously must be an authorized key for the user that Crunch is configured to use. For example, if your cluster's @CloudVMs.DriverParameters.AdminUsername@ setting is *@crunch@*, then the dispatcher's public key should be listed in @~crunch/.ssh/authorized_keys@ in the image. This user must also be allowed to use sudo without a password unless the user is @root@. (On Azure, the dispatcher makes additional calls to automatically set up and authorize the user, making these steps unnecessary.)
+* SSH needs to be running and reachable by @arvados-dispatch-cloud@ on the port named by @CloudVMs.SSHPort@ in your cluster's configuration file (default 22).
+* Install the @python3-arvados-fuse@ package. Enable the @user_allow_other@ option in @/etc/fuse.conf@.
+* Install either "Docker":https://docs.docker.com/engine/install/ or "Singularity":https://docs.sylabs.io/guides/3.0/user-guide/installation.html as appropriate based on the @Containers.RuntimeEngine@ setting in your cluster's configuration file. If you install Docker, you may also want to install and set up the @arvados-docker-cleaner@ package to conserve space on long-running instances, but it's not strictly required.
+* All available scratch space should be made available under @/tmp@.
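+As an illustrative sketch only (assuming a Debian-based image, an @AdminUsername@ of *@crunch@*, and Docker as the runtime engine; package names and commands may differ on your distribution):
+
+
+# Authorize the dispatcher's public key and passwordless sudo for "crunch":
+install -d -m 700 -o crunch -g crunch ~crunch/.ssh
+cat id_dispatcher.pub >> ~crunch/.ssh/authorized_keys
+chown crunch:crunch ~crunch/.ssh/authorized_keys
+echo 'crunch ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/crunch
+chmod 440 /etc/sudoers.d/crunch
+# Install Keep FUSE support and allow shared mounts:
+apt install python3-arvados-fuse
+echo user_allow_other >> /etc/fuse.conf
+# Install the container runtime (docker.io is the Debian package name):
+apt install docker.io arvados-docker-cleaner
+
+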
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index 579ec6e1b3..1fac5f6c0c 100644
--- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
@@ -76,7 +76,7 @@ Add or update the following portions of your cluster configuration file, @config
h3(#GPUsupport). NVIDIA GPU support
-To specify instance types with NVIDIA GPUs, "the compute image must be built with CUDA support":install-compute-node.html#nvidia , and you must include an additional @CUDA@ section:
+To specify instance types with NVIDIA GPUs, the compute image must be built with CUDA support (this means setting @arvados_compute_nvidia: true@ in @host_config.yml@ when "building the compute image":install-compute-node.html). You must include an additional @GPU@ section for each instance type that includes GPUs:
InstanceTypes:
@@ -86,14 +86,51 @@ To specify instance types with NVIDIA GPUs, "the compute image must be built wit
RAM: 16GiB
IncludedScratch: 125GB
Price: 0.56
- CUDA:
+ GPU:
+ Stack: "cuda"
DriverVersion: "11.4"
- HardwareCapability: "7.5"
+ HardwareTarget: "7.5"
DeviceCount: 1
+ VRAM: 16GiB
-The @DriverVersion@ is the version of the CUDA toolkit installed in your compute image (in X.Y format, do not include the patchlevel). The @HardwareCapability@ is the "CUDA compute capability of the GPUs available for this instance type":https://developer.nvidia.com/cuda-gpus. The @DeviceCount@ is the number of GPU cores available for this instance type.
+The @DriverVersion@ is the version of the CUDA toolkit installed in your compute image (in "X.Y" format, do not include the patchlevel).
+
+The @HardwareTarget@ is the "CUDA compute capability of the GPUs available for this instance type":https://developer.nvidia.com/cuda-gpus in "X.Y" format.
+
+The @DeviceCount@ is the number of GPU cores available for this instance type.
+
+@VRAM@ is the amount of VRAM available per GPU device.
+
+h3(#ROCmGPUsupport). AMD GPU support
+
+To specify instance types with AMD GPUs, the compute image must be built with ROCm support. (Currently the Arvados compute image Ansible playbook cannot install ROCm automatically, but you can install it manually after the playbook runs.) You must include an additional @GPU@ section for each instance type that includes GPUs:
+
+
+ InstanceTypes:
+ g4ad:
+ ProviderType: g4ad.xlarge
+ VCPUs: 4
+ RAM: 16GiB
+ IncludedScratch: 125GB
+ Price: 0.56
+ GPU:
+ Stack: "rocm"
+ DriverVersion: "6.2"
+ HardwareTarget: "gfx1100"
+ DeviceCount: 1
+ VRAM: 16GiB
+
+
+
+@DriverVersion@ is the version of the ROCm toolkit installed in your compute image (in "X.Y" format, do not include the patchlevel).
+
+@HardwareTarget@ (e.g. gfx1100) corresponds to the GPU architecture of the device. Use @rocminfo@ to determine your hardware target. See also "Accelerator and GPU hardware specifications":https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html (use the column "LLVM target name") and "LLVM AMDGPU backend documentation":https://llvm.org/docs/AMDGPUUsage.html .
+
+@DeviceCount@ is the number of GPU cores available for this instance type.
+
+@VRAM@ is the amount of VRAM available per GPU device.
h3(#aws-ebs-autoscaler). EBS Autoscale configuration
@@ -148,11 +185,9 @@ When @Containers.LocalKeepBlobBuffersPerVCPU@ is non-zero, the compute node will
If the AWS credentials for S3 access are configured in @config.yml@ (i.e. @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@), these credentials will be made available to the local Keepstore on the compute node to access S3 directly and no further configuration is necessary.
-Alternatively, if an IAM role is configured in @config.yml@ (i.e. @Volumes.DriverParameters.IAMRole@), the name of an instance profile that corresponds to this role ("often identical to the name of the IAM role":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile) must be configured in the @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter.
-
-*If you are also using EBS Autoscale feature, the role in IAMInstanceProfile must have both ec2 and s3 permissions.*
+If @config.yml@ does not have @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@ defined, Keepstore uses instance metadata to retrieve IAM role credentials. The @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter must be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). With this setup, @arvados-dispatch-cloud@ will attach the IAM role to the compute node as it is created. The instance profile name is "often identical to the name of the IAM role":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile.
-Finally, if @config.yml@ does not have @Volumes.DriverParameters.AccessKeyID@, @Volumes.DriverParameters.SecretAccessKey@ or @Volumes.DriverParameters.IAMRole@ defined, Keepstore uses the IAM role attached to the node, whatever it may be called. The @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter must then still be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). That way, @arvados-dispatch-cloud@ can attach the IAM role to the compute node as it is created.
+*If you are also using the EBS Autoscale feature, the role in @IAMInstanceProfile@ must have both EC2 and S3 permissions.*
h3. Minimal configuration example for Amazon EC2
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
index 16af80d127..85d555991b 100644
--- a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
@@ -22,7 +22,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
h2(#introduction). Introduction
-This assumes you already have a Slurm cluster, and have set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html. Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
+This assumes you already have a Slurm cluster, and have set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html. Slurm packages are available on all distributions supported by Arvados. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@). It is not resource-intensive, so you can run it on the API server node.
diff --git a/doc/install/diagnostics.html.textile.liquid b/doc/install/diagnostics.html.textile.liquid
index 7d03e8b0d2..cf3e960f41 100644
--- a/doc/install/diagnostics.html.textile.liquid
+++ b/doc/install/diagnostics.html.textile.liquid
@@ -19,11 +19,8 @@ Depending on where you are running the installer, you need to provide @-internal
Here is an example of it in action:
-
-root@api:~$ apt-get install arvados-client
-root@api:~$ export ARVADOS_API_HOST=ClusterID.example.com
-root@api:~$ export ARVADOS_API_TOKEN=YourSytemRootTokenHere
-root@api:~$ arvados-client diagnostics -external-client
+
+$ ARVADOS_API_HOST=ClusterID.example.com ARVADOS_API_TOKEN=YourSystemTokenHere arvados-client diagnostics -external-client
INFO 10: getting discovery document from https://ClusterID.example.com/discovery/v1/apis/arvados/v1/rest
INFO 20: getting exported config from https://ClusterID.example.com/arvados/v1/config
INFO 30: getting current user record
@@ -55,4 +52,5 @@ INFO 160: running a container
INFO ... container request submitted, waiting up to 10m for container to run
INFO 9990: deleting temporary collection
INFO --- no errors ---
-
+
+
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
index 9b7a533407..7f287441b9 100644
--- a/doc/install/index.html.textile.liquid
+++ b/doc/install/index.html.textile.liquid
@@ -23,8 +23,8 @@ Arvados components can be installed and configured in a number of different ways
table(table table-bordered table-condensed).
||_. Setup difficulty|_. Arvados Evaluation|_. Development|_. Production Data Management|_. Production Workflows|
|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|yes|limited|no|no|
-|"Arados Installer":salt-single-host.html (single host)|Easy|yes|limited|limited|limited|
-|"Arados Installer":salt-multi-host.html (multi host)|Moderate|yes|yes|yes|yes|
+|"Single-host install":salt-single-host.html|Easy|yes|limited|limited|limited|
+|"Multi-host install":salt-multi-host.html|Moderate|yes|yes|yes|yes|
|"Manual installation":install-manual-prerequisites.html|Difficult|yes|yes|yes|yes|
|"Cluster Operation Subscription supported by Curii":https://curii.com|N/A ^1^|yes|yes|yes|yes|
diff --git a/doc/install/install-api-server.html.textile.liquid b/doc/install/install-api-server.html.textile.liquid
index 06f94a8a5f..a27aba12e8 100644
--- a/doc/install/install-api-server.html.textile.liquid
+++ b/doc/install/install-api-server.html.textile.liquid
@@ -28,9 +28,7 @@ Here is a simplified diagram showing the relationship between the core services.
h2(#dependencies). Install dependencies
# "Install PostgreSQL":install-postgresql.html
-# "Install Ruby and Bundler":ruby.html
# "Install nginx":nginx.html
-# "Install Phusion Passenger":https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/install_passenger_main.html
h2(#database-setup). Set up database
@@ -77,7 +75,7 @@ h3. PostgreSQL.Connection
-Replace the @$postgres_password@ placeholder with the password you generated during "database setup":#database-setup .
+Replace the @$postgres_password@ placeholder with the password you generated during "database setup":#database-setup.
h3. Services
@@ -91,6 +89,9 @@ h3. Services
# Does not have an ExternalURL
InternalURLs:
"http://localhost:8004": {}
+ ContainerWebServices:
+ # Does not have InternalURLs
+ ExternalURL: "https://*.containers.ClusterID.example.com"
@@ -100,7 +101,7 @@ The @Services@ section of the configuration helps Arvados components contact one
h2(#update-nginx). Update nginx configuration
-Use a text editor to create a new file @/etc/nginx/conf.d/arvados-api-and-controller.conf@ with the following configuration. Options that need attention are marked in red.
+Use a text editor to create a new file @/etc/nginx/conf.d/arvados-controller.conf@ with the following configuration. Options that need attention are marked in red.
proxy_http_version 1.1;
@@ -138,7 +139,28 @@ server {
# the request is reverse proxied to the upstream 'controller'
listen 443 ssl;
- server_name ClusterID.example.com;
+ server_name ClusterID.example.com
+ *.containers.ClusterID.example.com;
+
+ ## If a wildcard name like *.containers.ClusterID.example.com is not
+ ## available, and Services.ContainerWebServices.ExternalPortMin and
+ ## ExternalPortMax are configured instead, then the "listen" and
+ ## "server_name" directives should be adjusted accordingly. Example:
+ #
+ # listen 443 ssl;
+ # listen 2000-2999 ssl;
+ # server_name ClusterID.example.com
+ # containers.ClusterID.example.com;
+ #
+ ## The number of ports in the range (1000 in this example) should be
+ ## added to the worker_connections setting in the events section of
+ ## your Nginx configuration (default 512). If the system-supplied
+ ## RLIMIT_NOFILE value is low (some systems default to 1024), the
+ ## worker_rlimit_nofile setting in the main section should also be
+ ## increased by the same amount.
+ #
+ # events { worker_connections 1512; }
+ # worker_rlimit_nofile 2024;
ssl_certificate /YOUR/PATH/TO/cert.pem;
ssl_certificate_key /YOUR/PATH/TO/cert.key;
@@ -166,38 +188,42 @@ server {
proxy_set_header X-Real-IP $remote_addr;
}
}
+
+
-server {
- # This configures the Arvados API server. It is written using Ruby
- # on Rails and uses the Passenger application server.
+h2. Enable development repository
+
+Skip to the next section if you are installing on Debian or Ubuntu.
- listen localhost:8004;
- server_name localhost-api;
+On Red Hat, AlmaLinux, and Rocky Linux, the API server package depends on development headers available from a separate repository. The repository you need depends on which version of the distribution you're running. Run the command given for your distribution below:
- root /var/www/arvados-api/current/public;
- index index.html index.htm index.php;
+|_. Distribution and version|_. Command to enable repository|
+|Red Hat/AlmaLinux/Rocky Linux 9|@# dnf config-manager --set-enabled devel@|
+|Red Hat/AlmaLinux/Rocky Linux 8|@# dnf config-manager --set-enabled powertools@|
- passenger_enabled on;
+{% assign arvados_component = 'arvados-api-server arvados-controller' %}
- # If you are using RVM, uncomment the line below.
- # If you're using system ruby, leave it commented out.
- #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+{% include 'install_packages' %}
- # This value effectively limits the size of API objects users can
- # create, especially collections. If you change this, you should
- # also ensure the following settings match it:
- # * `client_max_body_size` in the previous server section
- # * `API.MaxRequestSize` in config.yml
- client_max_body_size 128m;
-}
+h3(#railsapi-config). Configure Rails API server
+
+By default, the Rails API server is configured to listen on @localhost:8004@, matching the example cluster configuration above. If you need to change this, edit the @arvados-railsapi.service@ definition to redefine the @PASSENGER_ADDRESS@ and @PASSENGER_PORT@ environment variables, like this:
+
+
+# systemctl edit arvados-railsapi.service
+### Editing /etc/systemd/system/arvados-railsapi.service.d/override.conf
+### Anything between here and the comment below will become the new contents of the file
+[Service]
+Environment=PASSENGER_ADDRESS=0.0.0.0
+Environment=PASSENGER_PORT=8040
+### Lines below this comment will be discarded
+[...]
-{% assign arvados_component = 'arvados-api-server arvados-controller' %}
+You can similarly define other Passenger settings if desired. The "Passenger Standalone reference":https://www.phusionpassenger.com/library/config/standalone/reference/ documents all the available settings.
-{% include 'install_packages' %}
-
-{% assign arvados_component = 'arvados-controller' %}
+{% assign arvados_component = 'arvados-railsapi arvados-controller' %}
{% include 'start_service' %}
@@ -224,8 +250,6 @@ h3. Confirm that you can use the system root token to act as the system root use
h3. Troubleshooting
-If you are getting TLS errors, make sure the @ssl_certificate@ directive in your nginx configuration has the "full certificate chain":http://nginx.org/en/docs/http/configuring_https_servers.html#chains
-
-Logs can be found in @/var/www/arvados-api/current/log/production.log@ and using @journalctl -u arvados-controller@.
+If you are getting TLS errors, make sure the @ssl_certificate@ directive in your nginx configuration has the "full certificate chain":http://nginx.org/en/docs/http/configuring_https_servers.html#chains.
-See also the admin page on "Logging":{{site.baseurl}}/admin/logging.html .
+Logs can be found in @/var/www/arvados-api/current/log/production.log@ and using @journalctl -u arvados-controller@. See also the admin page on "Logging":{{site.baseurl}}/admin/logging.html.
diff --git a/doc/install/install-arv-git-httpd.html.textile.liquid b/doc/install/install-arv-git-httpd.html.textile.liquid
deleted file mode 100644
index 476c89005f..0000000000
--- a/doc/install/install-arv-git-httpd.html.textile.liquid
+++ /dev/null
@@ -1,298 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Install the Git server
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-# "Introduction":#introduction
-# "Install dependencies":#dependencies
-# "Create "git" user and storage directory":#create
-# "Install gitolite":#gitolite
-# "Configure gitolite":#config-gitolite
-# "Configure git synchronization":#sync
-# "Update config.yml":#update-config
-# "Update nginx configuration":#update-nginx
-# "Install arvados-git-httpd package":#install-packages
-# "Restart the API server and controller":#restart-api
-# "Confirm working installation":#confirm-working
-
-h2(#introduction). Introduction
-
-Arvados support for git repository management enables using Arvados permissions to control access to git repositories. Users can create their own private and public git repositories and share them with others.
-
-The git hosting setup involves three components.
-* The "arvados-git-sync.rb" script polls the API server for the current list of repositories, creates bare repositories, and updates the local permission cache used by gitolite.
-* Gitolite provides SSH access. Users authenticate by SSH keys.
-* arvados-git-http provides HTTPS access. Users authenticate by Arvados tokens.
-
-Git services must be installed on the same host as the Arvados Rails API server.
-
-h2(#dependencies). Install dependencies
-
-h3. Alma/CentOS/Red Hat/Rocky
-
-
-# dnf install git perl-Data-Dumper openssh-server
-
-
-
-h3. Debian and Ubuntu
-
-
-# apt-get --no-install-recommends install git openssh-server
-
-
-
-h2(#create). Create "git" user and storage directory
-
-Gitolite and some additional scripts will be installed in @/var/lib/arvados/git@, which means hosted repository data will be stored in @/var/lib/arvados/git/repositories@. If you choose to install gitolite in a different location, make sure to update the @git_repositories_dir@ entry in your API server's @application.yml@ file accordingly: for example, if you install gitolite at @/data/gitolite@ then your @git_repositories_dir@ will be @/data/gitolite/repositories@.
-
-A new UNIX account called "git" will own the files. This makes git URLs look familiar to users (git@[...]:username/reponame.git).
-
-On Debian- or Red Hat-based systems:
-
-
-gitserver:~$ sudo mkdir -p /var/lib/arvados/git
-gitserver:~$ sudo useradd --comment git --home-dir /var/lib/arvados/git git
-gitserver:~$ sudo chown -R git:git ~git
-
-
-
-The git user needs its own SSH key. (It must be able to run ssh git@localhost from scripts.)
-
-
-gitserver:~$ sudo -u git -i bash
-git@gitserver:~$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
-git@gitserver:~$ cp .ssh/id_rsa.pub .ssh/authorized_keys
-git@gitserver:~$ ssh -o stricthostkeychecking=no localhost cat .ssh/id_rsa.pub
-Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7aBIDAAgMQN16Pg6eHmvc+D+6TljwCGr4YGUBphSdVb25UyBCeAEgzqRiqy0IjQR2BLtSirXr+1SJAcQfBgI/jwR7FG+YIzJ4ND9JFEfcpq20FvWnMMQ6XD3y3xrZ1/h/RdBNwy4QCqjiXuxDpDB7VNP9/oeAzoATPZGhqjPfNS+RRVEQpC6BzZdsR+S838E53URguBOf9yrPwdHvosZn7VC0akeWQerHqaBIpSfDMtaM4+9s1Gdsz0iP85rtj/6U/K/XOuv2CZsuVZZ52nu3soHnEX2nx2IaXMS3L8Z+lfOXB2T6EaJgXF7Z9ME5K1tx9TSNTRcYCiKztXLNLSbp git@gitserver
-git@gitserver:~$ rm .ssh/authorized_keys
-
-
-
-h2(#gitolite). Install gitolite
-
-Check "https://github.com/sitaramc/gitolite/tags":https://github.com/sitaramc/gitolite/tags for the latest stable version. This guide was tested with @v3.6.11@. _Versions below 3.0 are missing some features needed by Arvados, and should not be used._
-
-Download and install the version you selected.
-
-
-$ sudo -u git -i bash
-git@gitserver:~$ echo 'PATH=$HOME/bin:$PATH' >.profile
-git@gitserver:~$ . .profile
-git@gitserver:~$ git clone --branch v3.6.11 https://github.com/sitaramc/gitolite
-...
-Note: checking out '5d24ae666bfd2fa9093d67c840eb8d686992083f'.
-...
-git@gitserver:~$ mkdir bin
-git@gitserver:~$ gitolite/install -ln ~git/bin
-git@gitserver:~$ bin/gitolite setup -pk .ssh/id_rsa.pub
-Initialized empty Git repository in /var/lib/arvados/git/repositories/gitolite-admin.git/
-Initialized empty Git repository in /var/lib/arvados/git/repositories/testing.git/
-WARNING: /var/lib/arvados/git/.ssh/authorized_keys missing; creating a new one
- (this is normal on a brand new install)
-
-
-
-_If this didn't go well, more detail about installing gitolite, and information about how it works, can be found on the "gitolite home page":http://gitolite.com/._
-
-Clone the gitolite-admin repository. The arvados-git-sync.rb script works by editing the files in this working directory and pushing them to gitolite. Here we make sure "git push" won't produce any errors or warnings.
-
-
-git@gitserver:~$ git clone git@localhost:gitolite-admin
-Cloning into 'gitolite-admin'...
-remote: Counting objects: 6, done.
-remote: Compressing objects: 100% (4/4), done.
-remote: Total 6 (delta 0), reused 0 (delta 0)
-Receiving objects: 100% (6/6), done.
-Checking connectivity... done.
-git@gitserver:~$ cd gitolite-admin
-git@gitserver:~/gitolite-admin$ git config user.email arvados
-git@gitserver:~/gitolite-admin$ git config user.name arvados
-git@gitserver:~/gitolite-admin$ git config push.default simple
-git@gitserver:~/gitolite-admin$ git push
-Everything up-to-date
-
-
-
-h2(#config-gitolite). Configure gitolite
-
-Configure gitolite to look up a repository name like @username/reponame.git@ and find the appropriate bare repository storage directory.
-
-Add the following lines to the top of @~git/.gitolite.rc@:
-
-
-my $repo_aliases;
-my $aliases_src = "$ENV{HOME}/.gitolite/arvadosaliases.pl";
-if ($ENV{HOME} && (-e $aliases_src)) {
- $repo_aliases = do $aliases_src;
-}
-$repo_aliases ||= {};
-
-
-
-Add the following lines inside the section that begins @%RC = (@:
-
-
- REPO_ALIASES => $repo_aliases,
-
-
-
-Inside that section, adjust the 'UMASK' setting to @022@, to ensure the API server has permission to read repositories:
-
-
- UMASK => 022,
-
-
-
-Uncomment the 'Alias' line in the section that begins @ENABLE => [@:
-
-
- # access a repo by another (possibly legacy) name
- 'Alias',
-
-
-
-h2(#sync). Configure git synchronization
-
-Create a configuration file @/var/www/arvados-api/current/config/arvados-clients.yml@ using the following template, filling in the appropriate values for your system.
-* For @arvados_api_token@, use @SystemRootToken@
-* For @gitolite_arvados_git_user_key@, provide the public key you generated above, i.e., the contents of @~git/.ssh/id_rsa.pub@.
-
-
-production:
- gitolite_url: /var/lib/arvados/git/repositories/gitolite-admin.git
- gitolite_tmp: /var/lib/arvados/git
- arvados_api_host: ClusterID.example.com
- arvados_api_token: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
- arvados_api_host_insecure: false
- gitolite_arvados_git_user_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7aBIDAAgMQN16Pg6eHmvc+D+6TljwCGr4YGUBphSdVb25UyBCeAEgzqRiqy0IjQR2BLtSirXr+1SJAcQfBgI/jwR7FG+YIzJ4ND9JFEfcpq20FvWnMMQ6XD3y3xrZ1/h/RdBNwy4QCqjiXuxDpDB7VNP9/oeAzoATPZGhqjPfNS+RRVEQpC6BzZdsR+S838E53URguBOf9yrPwdHvosZn7VC0akeWQerHqaBIpSfDMtaM4+9s1Gdsz0iP85rtj/6U/K/XOuv2CZsuVZZ52nu3soHnEX2nx2IaXMS3L8Z+lfOXB2T6EaJgXF7Z9ME5K1tx9TSNTRcYCiKztXLNLSbp git@gitserver"
-
-
-
-
-$ sudo chown git:git /var/www/arvados-api/current/config/arvados-clients.yml
-$ sudo chmod og-rwx /var/www/arvados-api/current/config/arvados-clients.yml
-
-
-h3. Test configuration
-
-notextile. $ sudo -u git -i bash -c 'cd /var/www/arvados-api/current && bin/bundle exec script/arvados-git-sync.rb production'
-
-h3. Enable the synchronization script
-
-The API server package includes a script that retrieves the current set of repository names and permissions from the API, writes them to @arvadosaliases.pl@ in a format usable by gitolite, and triggers gitolite hooks which create new empty repositories if needed. This script should run every 2 to 5 minutes.
-
-Create @/etc/cron.d/arvados-git-sync@ with the following content:
-
-
-*/5 * * * * git cd /var/www/arvados-api/current && bin/bundle exec script/arvados-git-sync.rb production
-
-
-
-h2(#update-config). Update config.yml
-
-Edit the cluster config at @config.yml@ .
-
-
- Services:
- GitSSH:
- ExternalURL: "ssh://git@git.ClusterID.example.com"
- GitHTTP:
- ExternalURL: https://git.ClusterID.example.com/
- InternalURLs:
- "http://localhost:9001": {}
- Git:
- GitCommand: /var/lib/arvados/git/gitolite/src/gitolite-shell
- GitoliteHome: /var/lib/arvados/git
- Repositories: /var/lib/arvados/git/repositories
-
-
-
-h2(#update-nginx). Update nginx configuration
-
-Use a text editor to create a new file @/etc/nginx/conf.d/arvados-git.conf@ with the following configuration. Options that need attention are marked in red.
-
-
-upstream arvados-git-httpd {
- server 127.0.0.1:9001;
-}
-server {
- listen 443 ssl;
- server_name git.ClusterID.example.com;
- proxy_connect_timeout 90s;
- proxy_read_timeout 300s;
-
- ssl_certificate /YOUR/PATH/TO/cert.pem;
- ssl_certificate_key /YOUR/PATH/TO/cert.key;
-
- # The server needs to accept potentially large refpacks from push clients.
- client_max_body_size 128m;
-
- location / {
- proxy_pass http://arvados-git-httpd;
- }
-}
-
-
-
-h2(#install-packages). Install the arvados-git-httpd package
-
-The arvados-git-httpd package provides HTTP access, using Arvados authentication tokens instead of passwords. It must be installed on the system where your git repositories are stored.
-
-h3. Alma/CentOS/Red Hat/Rocky
-
-
-# dnf install arvados-git-httpd
-
-
-
-h3. Debian and Ubuntu
-
-
-# apt-get --no-install-recommends install arvados-git-httpd
-
-
-
-h2(#restart-api). Restart the API server and controller
-
-After adding Workbench to the Services section, make sure the cluster config file is up to date on the API server host, and restart the API server and controller processes to ensure the changes are applied.
-
-
-# systemctl restart nginx arvados-controller
-
-
-
-h2(#confirm-working). Confirm working installation
-
-Create 'testrepo' in the Arvados database.
-
-
-~$ arv --format=uuid repository create --repository '{"name":"myusername/testrepo"}'
-
-
-The arvados-git-sync cron job will notice the new repository record and create a repository on disk. Because it is on a timer (default 5 minutes) you may have to wait a minute or two for it to show up.
-
-h3. SSH
-
-Before you do this, go to Workbench and choose *SSH Keys* from the menu, and upload your public key. Arvados uses the public key to identify you when you access the git repo.
-
-
-~$ git clone git@git.ClusterID.example.com:username/testrepo.git
-
-
-
-h3. HTTP
-
-Set up git credential helpers as described in "install shell server":install-shell-server.html#config-git for the git command to use your API token instead of prompting you for a username and password.
-
-
-~$ git clone https://git.ClusterID.example.com/username/testrepo.git
-
-
diff --git a/doc/install/install-composer.html.textile.liquid b/doc/install/install-composer.html.textile.liquid
deleted file mode 100644
index 58ba5d03a0..0000000000
--- a/doc/install/install-composer.html.textile.liquid
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Install Composer
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-Arvados Composer is a web-based javascript application for building Common Workflow Languge (CWL) Workflows.
-
-# "Install dependencies":#dependencies
-# "Update config.yml":#update-config
-# "Update Nginx configuration":#update-nginx
-# "Install arvados-composer":#install-packages
-# "Restart the API server and controller":#restart-api
-# "Confirm working installation":#confirm-working
-
-h2(#dependencies). Install dependencies
-
-In addition to Arvados core services, Composer requires "Arvados hosted git repositories":install-arv-git-httpd.html which are used for storing workflow files.
-
-h2(#configure). Update config.yml
-
-Edit @config.yml@ and set @Services.Composer.ExternalURL@ to the location from which it is served:
-
-
- Services:
- Composer:
- ExternalURL: https://workbench.CusterID.example.com/composer
-
-
-h2(#update-nginx). Update nginx configuration
-
-Composer may be served from the same host as Workbench. Composer communicates directly with the Arvados API server. It does not require its own backend and should be served as a static file.
-
-Add the following @location@ sections to @/etc/nginx/conf.d/arvados-workbench.conf@ .
-
-
-server {
- [...]
-
- location /composer {
- root /var/www/arvados-composer;
- index index.html;
- }
-
- location /composer/composer.yml {
- return 200 '{ "API_HOST": "ClusterID.example.com" }';
- }
-}
-
-
-
-{% assign arvados_component = 'arvados-composer' %}
-
-{% include 'install_packages' %}
-
-{% include 'restart_api' %}
-
-h2(#confirm-working). Confirm working installation
-
-Visit @https://workbench.ClusterID.example.com/composer@ in a browser. You should be able to log in using the login method you configured previously.
diff --git a/doc/install/install-jobs-image.html.textile.liquid b/doc/install/install-jobs-image.html.textile.liquid
deleted file mode 100644
index efd8c9649f..0000000000
--- a/doc/install/install-jobs-image.html.textile.liquid
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Install arvados/jobs image
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-h2. Create a project for Docker images
-
-Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
-
-
-~$ uuid_prefix=$(arv --format=uuid user current | cut -d- -f1)
-~$ project_uuid=$(arv --format=uuid group create --group '{"owner_uuid":"'$uuid_prefix'-tpzed-000000000000000", "group_class":"project", "name":"Arvados Standard Docker Images"}')
-~$ echo "Arvados project uuid is '$project_uuid'"
-~$ read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
-{
- "tail_uuid":"${uuid_prefix}-j7d0g-fffffffffffffff",
- "head_uuid":"$project_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF
-
-
-h2. Import the arvados/jobs docker image
-
-In order to start workflows from workbench, there needs to be Docker image @arvados/jobs@ tagged with the version of Arvados you are installing. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep. In this example @$project_uuid@ should be the UUID of the "Arvados Standard Docker Images" project.
-
-
-~$ arv-keepdocker --pull arvados/jobs latest --project-uuid $project_uuid
-
-
-If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
diff --git a/doc/install/install-keep-web.html.textile.liquid b/doc/install/install-keep-web.html.textile.liquid
index 0b051e715d..f69239a62b 100644
--- a/doc/install/install-keep-web.html.textile.liquid
+++ b/doc/install/install-keep-web.html.textile.liquid
@@ -20,7 +20,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
h2(#introduction). Introduction
-The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols. This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":https://pkg.go.dev/git.arvados.org/arvados.git/services/keep-web for more detail.
+The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols. This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, controller, and the PostgreSQL server. It is typically installed behind a web proxy that provides TLS support. See the "godoc page":https://pkg.go.dev/git.arvados.org/arvados.git/services/keep-web for more detail.
h2(#dns). Configure DNS
@@ -133,7 +133,7 @@ server {
server_name download.ClusterID.example.com
collections.ClusterID.example.com
*.collections.ClusterID.example.com
- ~.*--collections.ClusterID.example.com;
+ ~.*--collections\.ClusterID\.example\.com;
proxy_connect_timeout 90s;
proxy_read_timeout 300s;
diff --git a/doc/install/install-manual-prerequisites.html.textile.liquid b/doc/install/install-manual-prerequisites.html.textile.liquid
index 8819b0210f..dc2f095d9e 100644
--- a/doc/install/install-manual-prerequisites.html.textile.liquid
+++ b/doc/install/install-manual-prerequisites.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Before attempting installation, you should begin by reviewing supported platforms, choosing backends for identity, storage, and scheduling, and decide how you will distribute Arvados services onto machines. You should also choose an Arvados Cluster ID, choose your hostnames, and aquire TLS certificates. It may be helpful to make notes as you go along using one of these worksheets: "New cluster checklist for AWS":new_cluster_checklist_AWS.xlsx - "New cluster checklist for Azure":new_cluster_checklist_Azure.xlsx - "New cluster checklist for on premises Slurm":new_cluster_checklist_slurm.xlsx
-The installation guide describes how to set up a basic standalone Arvados instance. Additional configuration for features including "federation,":{{site.baseurl}}/admin/federation.html "collection versioning,":{{site.baseurl}}/admin/collection-versioning.html "managed properties,":{{site.baseurl}}/admin/collection-managed-properties.html and "storage classes":{{site.baseurl}}/admin/collection-managed-properties.html are described in the "Admin guide.":{{site.baseurl}}/admin
+The installation guide describes how to set up a basic standalone Arvados instance. Additional configuration for features including "federation,":{{site.baseurl}}/admin/federation.html "collection versioning,":{{site.baseurl}}/admin/collection-versioning.html "managed properties,":{{site.baseurl}}/admin/collection-managed-properties.html and "storage classes":{{site.baseurl}}/admin/collection-managed-properties.html is described in the "Admin guide.":{{site.baseurl}}/admin/
The Arvados storage subsystem is called "keep". The compute subsystem is called "crunch".
@@ -31,7 +31,7 @@ h2(#supportedlinux). Supported GNU/Linux distributions
h2(#components). Choosing which components to install
-Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality.) It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture to understand how these components interact.
+Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality). It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture/ to understand how these components interact.
table(table table-bordered table-condensed).
|\3=. *Core*|
@@ -47,7 +47,6 @@ table(table table-bordered table-condensed).
|\3=. *Additional services*|
|"Websockets server":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|
|"Shell server":install-shell-server.html |Grant Arvados users access to Unix shell accounts on dedicated shell nodes.|Optional.|
-|"Git server":install-arv-git-httpd.html |Arvados-hosted git repositories, with Arvados-token based authentication.|Optional|
|\3=. *Crunch (running containers)*|
|"arvados-dispatch-cloud":crunch2-cloud/install-dispatch-cloud.html |Run analysis workflows on cloud by allocating and freeing cloud VM instances on demand.|Optional|
|"crunch-dispatch-slurm":crunch2-slurm/install-dispatch.html |Run analysis workflows distributed across a Slurm cluster.|Optional|
@@ -96,7 +95,7 @@ For a production installation, this is a reasonable starting point:
table(table table-bordered table-condensed).
|_. Function|_. Number of nodes|_. Recommended specs|
-|PostgreSQL database, Arvados API server, Arvados controller, Git, Websockets, Container dispatcher|1|16+ GiB RAM, 4+ cores, fast disk for database|
+|PostgreSQL database, Arvados API server, Arvados controller, Websockets, Container dispatcher|1|16+ GiB RAM, 4+ cores, fast disk for database|
|Workbench, Keepproxy, Keep-web, Keep-balance|1|8 GiB RAM, 2+ cores|
|Keepstore servers ^1^|2+|4 GiB RAM|
|Compute worker nodes ^1^|0+ |Depends on workload; scaled dynamically in the cloud|
@@ -138,7 +137,6 @@ It is possible to use custom DNS names for the Arvados services.
table(table table-bordered table-condensed).
|_. Function|_. DNS name|
|Arvados API|@ClusterID.example.com@|
-|Arvados Git server|git.@ClusterID.example.com@|
|Arvados Webshell|webshell.@ClusterID.example.com@|
|Arvados Websockets endpoint|ws.@ClusterID.example.com@|
|Arvados Workbench|workbench.@ClusterID.example.com@|
@@ -149,9 +147,13 @@ _and_
*.collections.@ClusterID.example.com@ _or_
*--collections.@ClusterID.example.com@ _or_
collections.@ClusterID.example.com@ (see the "keep-web install docs":install-keep-web.html)|
+|Container web services|*.containers.@ClusterID.example.com@ _or_
+*--containers.@ClusterID.example.com@|
-Setting up Arvados is easiest when Wildcard TLS and wildcard DNS are available. It is also possible to set up Arvados without wildcard TLS and DNS, but not having a wildcard for @keep-web@ (i.e. not having *.collections.@ClusterID.example.com@) comes with a tradeoff: it will disable some features that allow users to view Arvados-hosted data in their browsers. More information on this tradeoff caused by the CORS rules applied by modern browsers is available in the "keep-web URL pattern guide":../api/keep-web-urls.html.
+Setting up Arvados is easiest when wildcard TLS and wildcard DNS are available. It is also possible to set up Arvados without wildcard TLS and DNS, but some functionality will be unavailable:
+* A wildcard for @keep-web@ (e.g., *.collections.@ClusterID.example.com@) is needed to allow users to view Arvados-hosted data in their browsers. More information on this tradeoff caused by the CORS rules applied by modern browsers is available in the "keep-web URL pattern guide":../api/keep-web-urls.html.
+* A wildcard for @controller@ (e.g., *.containers.@ClusterID.example.com@) is needed to allow users to connect to Arvados-hosted services in their browsers.
The table below lists the required TLS certificates and DNS names in each scenario.
@@ -160,7 +162,8 @@ table(table table-bordered table-condensed).
||_. Wildcard TLS and DNS available|_. Wildcard TLS available|_. Other|
|TLS|@ClusterID.example.com@
*.@ClusterID.example.com@
-*.collections.@ClusterID.example.com@|*.@ClusterID.example.com@
+*.collections.@ClusterID.example.com@
+*.containers.@ClusterID.example.com@|*.@ClusterID.example.com@
@ClusterID.example.com@|@ClusterID.example.com@
git.@ClusterID.example.com@
webshell.@ClusterID.example.com@
@@ -178,7 +181,8 @@ workbench.@ClusterID.example.com@
workbench2.@ClusterID.example.com@
keep.@ClusterID.example.com@
download.@ClusterID.example.com@
-*.collections.@ClusterID.example.com@|@ClusterID.example.com@
+*.collections.@ClusterID.example.com@
+*.containers.@ClusterID.example.com@|@ClusterID.example.com@
git.@ClusterID.example.com@
webshell.@ClusterID.example.com@
ws.@ClusterID.example.com@
diff --git a/doc/install/install-postgresql.html.textile.liquid b/doc/install/install-postgresql.html.textile.liquid
index 56ad95635c..ef952dd67f 100644
--- a/doc/install/install-postgresql.html.textile.liquid
+++ b/doc/install/install-postgresql.html.textile.liquid
@@ -12,31 +12,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Arvados requires at least version *9.4* of PostgreSQL. We recommend using version 10 or newer.
* "AWS":#aws
-* "CentOS 7":#centos7
-* "Alma/CentOS/Red Hat/Rocky 8":#rh8
+* "Red Hat, AlmaLinux, and Rocky Linux":#rh8
* "Debian or Ubuntu":#debian
h3(#aws). AWS
When deploying on AWS, Arvados can use an Aurora RDS PostgreSQL database. Aurora Serverless is not recommended.
-h3(#centos7). CentOS 7
-{% assign rh_version = "7" %}
-{% include 'note_python_sc' %}
-
-# Install PostgreSQL
- # yum install rh-postgresql12 rh-postgresql12-postgresql-contrib
-~$ scl enable rh-postgresql12 bash
-# Initialize the database
- # postgresql-setup initdb
-# Configure the database to accept password connections from localhost
- # sed -ri -e 's/^(host +all +all +(127\.0\.0\.1\/32|::1\/128) +)ident$/\1md5/' /var/lib/pgsql/data/pg_hba.conf
-# Configure the database to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)
- # echo 'host all all 10.9.8.0/24 md5' | tee -a /var/lib/pgsql/data/pg_hba.conf
-# Configure the database to launch at boot and start now
- # systemctl enable --now rh-postgresql12-postgresql
-
-h3(#rh8). Alma/CentOS/Red Hat/Rocky 8
+h3(#rh8). Red Hat, AlmaLinux, and Rocky Linux
{% comment %}
The default version on RH8 is PostgreSQL 10. You can install up to PostgreSQL 13.
@@ -55,10 +38,10 @@ The default version on RH8 is PostgreSQL 10. You can install up to PostgreSQL 13
h3(#debian). Debian or Ubuntu
-Debian 10 (Buster) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of Postgres.
+All supported versions of Debian and Ubuntu include a version of PostgreSQL you can use with Arvados.
# Install PostgreSQL
-# apt-get --no-install-recommends install postgresql postgresql-contrib
+# apt --no-install-recommends install postgresql postgresql-contrib
# Configure PostgreSQL to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)
# echo 'host all all 10.9.8.0/24 md5' | tee -a /etc/postgresql/*/main/pg_hba.conf
# Configure the database to launch at boot and start now
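If you want a quick check that the server is up before you continue, query it over the local socket (a minimal smoke test; it assumes Debian's default peer authentication for the @postgres@ system user):

# sudo -u postgres psql -c 'SELECT version();'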
diff --git a/doc/install/install-shell-server.html.textile.liquid b/doc/install/install-shell-server.html.textile.liquid
index f864f37563..9520c08397 100644
--- a/doc/install/install-shell-server.html.textile.liquid
+++ b/doc/install/install-shell-server.html.textile.liquid
@@ -12,7 +12,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
# "Introduction":#introduction
# "Install Dependencies and SDKs":#dependencies
# "Install git and curl":#install-packages
-# "Update Git Config":#config-git
# "Create record for VM":#vm-record
# "Install arvados-login-sync":#arvados-login-sync
# "Confirm working installation":#confirm-working
@@ -44,17 +43,6 @@ h2(#dependencies). Install Dependencies and SDKs
{% include 'install_packages' %}
-h2(#config-git). Update Git Config
-
-Configure git to use the ARVADOS_API_TOKEN environment variable to authenticate to arvados-git-httpd. We use the @--system@ flag so it takes effect for all current and future user accounts. It does not affect git's behavior when connecting to other git servers.
-
-
-
-# git config --system 'credential.https://git.ClusterID.example.com/.username' none
-# git config --system 'credential.https://git.ClusterID.example.com/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'
-
-
-
h2(#vm-record). Create record for VM
As an admin, create an Arvados virtual_machine object representing this shell server. This will return a uuid.
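For example, you can create the record with the @arv@ CLI (the hostname shown is illustrative; use your shell node's hostname):

~$ arv virtual_machine create --virtual-machine '{"hostname":"shell.ClusterID.example.com"}'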
diff --git a/doc/install/install-webshell.html.textile.liquid b/doc/install/install-webshell.html.textile.liquid
index 12b413d5d3..5f2eee2325 100644
--- a/doc/install/install-webshell.html.textile.liquid
+++ b/doc/install/install-webshell.html.textile.liquid
@@ -105,7 +105,7 @@ For additional shell nodes with @shell-in-a-box@, add @location@ and @upstream@
h2(#config-shellinabox). Configure shellinabox
-h3. Alma/CentOS/Red Hat/Rocky
+h3. Red Hat, AlmaLinux, and Rocky Linux
Edit @/etc/sysconfig/shellinaboxd@:
diff --git a/doc/install/install-workbench2-app.html.textile.liquid b/doc/install/install-workbench2-app.html.textile.liquid
index bbcbd7ef1d..41afd248d8 100644
--- a/doc/install/install-workbench2-app.html.textile.liquid
+++ b/doc/install/install-workbench2-app.html.textile.liquid
@@ -86,20 +86,3 @@ Workbench2 will load, if available, a vocabulary definition which lists availabl
h2(#confirm-working). Confirm working installation
Visit @https://workbench2.ClusterID.example.com@ in a browser. You should be able to log in using the login method you configured in the previous step. If @Users.AutoAdminFirstUser@ is true, you will be an admin user.
-
-h2(#trusted_client). Trusted client flag
-
-Log in to Workbench once to ensure that the Arvados API server has a record of the Workbench client. (It's OK if Workbench says your account hasn't been activated yet. We'll deal with that next.)
-
-In the API server project root, start the Rails console. {% include 'install_rails_command' %}
-
-At the console, enter the following commands to locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
-
-irb(main):001:0> wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]
-=> ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
-irb(main):002:0> include CurrentApiClient
-=> true
-irb(main):003:0> act_as_system_user do wb.update!(is_trusted: true) end
-=> true
-
-
diff --git a/doc/install/migrate-docker19.html.textile.liquid b/doc/install/migrate-docker19.html.textile.liquid
deleted file mode 100644
index 7b7e2a83cf..0000000000
--- a/doc/install/migrate-docker19.html.textile.liquid
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: default
-navsection: admin
-title: Migrating from Docker 1.9
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-If you have an existing Arvados installation using Docker 1.9 and wish to update to Docker 1.10+, you must migrate the Docker images stored in Keep.
-
-The @arv-migrate-docker19@ tool converts Docker images stored in Arvados from image format v1 (Docker <= 1.9) to image format v2 (Docker >= 1.10).
-
-Requires Docker running on the local host (can be either 1.9 or 1.10+). Linux kernel >= 3.18-rc6 to support overlayfs.
-
-Usage:
-
-# Install arvados/migrate-docker19 image: @docker pull arvados/migrate-docker19:1.0@. If you're unable to do this, you can run @arvados/docker/migrate-docker19/build.sh@ to create @arvados/migrate-docker19@ Docker image.
-# Make sure you have the right modules installed: @sudo modprobe overlayfs bridge br_netfilter nf_nat@
-# Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
-# Your temporary directory should have the size of all layers of the biggest image in the cluster, this is hard to estimate, but you can start with five times that size. You can set up a different directory by using the @--tempdir@ switch. Make sure that the user running the docker daemon has permissions to write in that directory.
-# Run @arv-migrate-docker19 --dry-run@ from the Arvados Python SDK on the host (not in a container). This will print out some information useful for the migration.
-# Finally to make the migration run @arv-migrate-docker19@ from the Arvados Python SDK on the host (not in a container).
-
-This will query Arvados for v1 format Docker images. For each image that does not already have a corresponding v2 format image (as indicated by a docker_image_migration tag) it will perform the following process:
-
-i) download the image from Arvados
-ii) load it into Docker
-iii) update the Docker version, which updates the image
-iv) save the v2 format image and upload to Arvados
-v) create a migration link
-
-Once the Docker images in Keep have been migrated, upgrade the version of Docker used across the cluster. Finally, update the API server configuration from "v1" to "v2" to reflect the supported Docker image version:
-
-
-docker_image_formats: ["v2"]
-
diff --git a/doc/install/nginx.html.textile.liquid b/doc/install/nginx.html.textile.liquid
index 7d97c3e383..62f31b1647 100644
--- a/doc/install/nginx.html.textile.liquid
+++ b/doc/install/nginx.html.textile.liquid
@@ -9,14 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-h3. CentOS 7
-
-
-# yum install epel-release
-# yum install nginx
-
-
-h3. Alma/CentOS/Red Hat/Rocky 8
+h3. Red Hat, AlmaLinux, and Rocky Linux
# dnf install nginx
@@ -25,5 +18,5 @@ h3. Alma/CentOS/Red Hat/Rocky 8
h3. Debian and Ubuntu
-# apt-get --no-install-recommends install nginx
+# apt --no-install-recommends install nginx
diff --git a/doc/install/packages.html.textile.liquid b/doc/install/packages.html.textile.liquid
index f867381cff..dcf83d71ab 100644
--- a/doc/install/packages.html.textile.liquid
+++ b/doc/install/packages.html.textile.liquid
@@ -11,65 +11,34 @@ SPDX-License-Identifier: CC-BY-SA-3.0
On any host where you install Arvados software, you'll need to add the Arvados package repository. They're available for several popular distributions.
-* "AlmaLinux, CentOS, RHEL, and Rocky Linux":#redhat
+* "Red Hat, AlmaLinux, and Rocky Linux":#redhat
* "Debian and Ubuntu":#debian
-h3(#redhat). AlmaLinux, CentOS, RHEL, and Rocky Linux
+h3(#redhat). Red Hat, AlmaLinux, and Rocky Linux
Packages are available for the following Red Hat-based distributions:
-* AlmaLinux 8
-* CentOS 7
-* CentOS 8
-* RHEL 8
-* Rocky Linux 8
+* AlmaLinux 9
+* AlmaLinux 8 (since 8.4)
+* RHEL 9
+* RHEL 8 (since 8.4)
+* Rocky Linux 9
+* Rocky Linux 8 (since 8.4)
-To install them with dnf or yum, save this configuration block in @/etc/yum.repos.d/arvados.repo@:
-
-
-[arvados]
-name=Arvados
-baseurl=http://rpm.arvados.org/CentOS/$releasever/os/$basearch/
-gpgcheck=1
-gpgkey=http://rpm.arvados.org/CentOS/RPM-GPG-KEY-arvados
-
-
-
-{% include 'gpg_key_fingerprint' %}
+{% include 'setup_redhat_repo' %}
h3(#debian). Debian and Ubuntu
-Packages are available for recent versions of Debian and Ubuntu.
-
-First, register the Arvados signing key in apt's database:
-
-{% include 'install_debian_key' %}
-
-{% include 'gpg_key_fingerprint' %}
-
-As root, add the Arvados package repository to your sources. This command depends on your OS vendor and version:
+Packages are available for the following Debian-based distributions:
-table(table table-bordered table-condensed).
-|_. OS version|_. Command|
-|Debian 11 ("bullseye")|echo "deb http://apt.arvados.org/bullseye bullseye main" | tee /etc/apt/sources.list.d/arvados.list
|
-|Debian 10 ("buster")|echo "deb http://apt.arvados.org/buster buster main" | tee /etc/apt/sources.list.d/arvados.list
|
-|Ubuntu 20.04 ("focal")[1]|echo "deb http://apt.arvados.org/focal focal main" | tee /etc/apt/sources.list.d/arvados.list
|
-|Ubuntu 18.04 ("bionic")[1]|echo "deb http://apt.arvados.org/bionic bionic main" | tee /etc/apt/sources.list.d/arvados.list
|
+* Debian 12 ("bookworm")
+* Debian 11 ("bullseye")
+* Ubuntu 24.04 ("noble")
+* Ubuntu 22.04 ("jammy")
+* Ubuntu 20.04 ("focal")
-
-{% include 'notebox_begin' %}
-
-fn1. Arvados packages for Ubuntu may depend on third-party packages in Ubuntu's "universe" repository. If you're installing on Ubuntu, make sure you have the universe sources uncommented in @/etc/apt/sources.list@.
-
-{% include 'notebox_end' %}
-
-Retrieve the package list:
-
-
-# apt-get update
-
-
+{% include 'setup_debian_repo' %}
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index a3cdd03300..029991940f 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -36,7 +36,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
h2(#introduction). Introduction
-This multi host installer is the recommendend way to set up a production Arvados cluster. These instructions include specific details for installing on Amazon Web Services (AWS), which are marked as "AWS specific". However with additional customization the installer can be used as a template for deployment on other cloud provider or HPC systems.
+This multi host installer is the recommended way to set up a production Arvados cluster. These instructions include specific details for installing on Amazon Web Services (AWS), which are marked as "AWS specific". However, with additional customization, the installer can be used as a template for deployment on other cloud providers or HPC systems.
h2(#prerequisites). Prerequisites and planning
@@ -60,7 +60,8 @@ In the default configuration these are:
# @keep1.${DOMAIN}@
# @keep.${DOMAIN}@
# @download.${DOMAIN}@
-# @*.collections.${DOMAIN}@ -- important note, this must be a wildcard DNS, resolving to the @keepweb@ service
+# @*.collections.${DOMAIN}@ -- a wildcard DNS record resolving to the @keepweb@ service
+# @*.containers.${DOMAIN}@ -- a wildcard DNS record resolving to the @controller@ service
# @workbench.${DOMAIN}@
# @workbench2.${DOMAIN}@
# @webshell.${DOMAIN}@
@@ -137,6 +138,7 @@ compute_subnet_id = "subnet-abcdef12345"
deploy_user = "admin"
domain_name = "xarv1.example.com"
letsencrypt_iam_access_key_id = "AKAA43MAAAWAKAADAASD"
+loki_iam_access_key_id = "AKAABCDEFGJKLMNOP1234"
private_ip = {
"controller" = "10.1.1.1"
"keep0" = "10.1.1.3"
@@ -163,6 +165,8 @@ route53_dns_ns = tolist([
ssl_password_secret_name = "xarv1-arvados-ssl-privkey-password"
vpc_id = "vpc-0999994998399923a"
letsencrypt_iam_secret_access_key = "XXXXXSECRETACCESSKEYXXXX"
+database_password =
+loki_iam_secret_access_key = "YYYYYYSECRETACCESSKEYYYYYYY"
@@ -229,18 +233,18 @@ The installer will set up the Arvados services on your machines. Here is the de
# API node
## postgresql server
## arvados api server
-## arvados controller (recommendend hostname @controller.${DOMAIN}@)
+## arvados controller (recommended hostnames @controller.${DOMAIN}@ and @*.containers.${DOMAIN}@)
# KEEPSTORE nodes (at least 1 if using S3 as a Keep backend, else 2)
-## arvados keepstore (recommendend hostnames @keep0.${DOMAIN}@ and @keep1.${DOMAIN}@)
+## arvados keepstore (recommended hostnames @keep0.${DOMAIN}@ and @keep1.${DOMAIN}@)
# WORKBENCH node
-## arvados legacy workbench URLs (recommendend hostname @workbench.${DOMAIN}@)
-## arvados workbench2 (recommendend hostname @workbench2.${DOMAIN}@)
-## arvados webshell (recommendend hostname @webshell.${DOMAIN}@)
-## arvados websocket (recommendend hostname @ws.${DOMAIN}@)
+## arvados legacy workbench URLs (recommended hostname @workbench.${DOMAIN}@)
+## arvados workbench2 (recommended hostname @workbench2.${DOMAIN}@)
+## arvados webshell (recommended hostname @webshell.${DOMAIN}@)
+## arvados websocket (recommended hostname @ws.${DOMAIN}@)
## arvados cloud dispatcher
## arvados keepbalance
-## arvados keepproxy (recommendend hostname @keep.${DOMAIN}@)
-## arvados keepweb (recommendend hostname @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)
+## arvados keepproxy (recommended hostname @keep.${DOMAIN}@)
+## arvados keepweb (recommended hostnames @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)
# SHELL node (optional)
## arvados shell (recommended hostname @shell.${DOMAIN}@)
@@ -291,6 +295,8 @@ DATABASE_PASSWORD=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
For example, if the password is @Lq&MZ…@, you would set @DATABASE_PASSWORD="Lq\&MZ\…"@, backslash-quoting each special character.
+# Set @LE_AWS_*@ credentials to allow Let's Encrypt do authentication through Route53
+# Set @LOKI_AWS_*@ credentials to enable the Loki service to store centralized logs on its dedicated S3 bucket.
# Set @DISPATCHER_SSH_PRIVKEY@ to a SSH private key that @arvados-dispatch-cloud@ will use to connect to the compute nodes:
DISPATCHER_SSH_PRIVKEY="-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
@@ -313,9 +319,9 @@ The @multi_host/aws@ template uses S3 for storage. Arvados also supports "files
h3. Object storage in S3 (AWS Specific)
-If you "followed the recommendend naming scheme":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.
+If you "followed the recommended naming scheme":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.
-If you did not follow the recommendend naming scheme for either the bucket or role, you'll need to update these parameters in @local.params@:
+If you did not follow the recommended naming scheme for either the bucket or role, you'll need to update these parameters in @local.params@:
# Set @KEEP_AWS_S3_BUCKET@ to the value of "keepstore bucket you created earlier":#keep-bucket
# Set @KEEP_AWS_IAM_ROLE@ to "keepstore role you created earlier":#keep-bucket
@@ -392,16 +398,22 @@ This will install and configure Arvados on all the nodes. It will take a while
h2(#test-install). Confirm the cluster is working
-When everything has finished, you can run the diagnostics.
+When everything has finished, you can run the diagnostics. There are a couple of ways to do this, described below.
-Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
+h3. Running diagnostics from the same system as the installer
-If you are running the diagnostics from one of the Arvados machines inside the private network, you want @-internal-client@ .
+Running the diagnostics this way requires @arvados-client@ and @docker@ to be installed. If this is not possible, you can run the diagnostics on your Arvados shell node as explained in the next section.
-You are an "external client" if you running the diagnostics from your workstation outside of the private network.
+Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@. If you are running the installer from a host connected to the Arvados private network, use @-internal-client@. Otherwise, use @-external-client@.
./installer.sh diagnostics (-internal-client|-external-client)
+h3. Running diagnostics from a cluster node
+
+You can run the diagnostics from the cluster's shell node. This has the advantage that you don't need to manage any software on your local system, but it is not an option if your Arvados cluster doesn't include a shell node.
+
+./installer.sh diagnostics-internal
+
h3(#debugging). Debugging issues
The installer records log files for each deployment.
diff --git a/doc/install/salt-single-host.html.textile.liquid b/doc/install/salt-single-host.html.textile.liquid
index 92c1aa2645..604a8eeb45 100644
--- a/doc/install/salt-single-host.html.textile.liquid
+++ b/doc/install/salt-single-host.html.textile.liquid
@@ -12,18 +12,13 @@ SPDX-License-Identifier: CC-BY-SA-3.0
# "Limitations of the single host install":#limitations
# "Prerequisites and planning":#prerequisites
# "Download the installer":#download
-# "Edit local.params* files":#localparams
-# "Choose the SSL configuration":#certificates
-## "Using a self-signed certificate":#self-signed
-## "Using a Let's Encrypt certificate":#lets-encrypt
-## "Bring your own certificate":#bring-your-own
-# "Configure your authentication provider":#authentication
-# "Further customization of the installation":#further_customization
-# "Begin installation":#installation
-# "Install the CA root certificate":#ca_root_certificate
-# "Confirm the cluster is working":#test-install
-# "Initial user and login":#initial_user
-# "After the installation":#post_install
+# "Install Ansible":#install-ansible
+# "Set up cluster configuration":#localparams
+# "Set up cluster inventory":#inventory
+# "Run the installer playbook":#run-playbook
+# "Test the cluster":#test-install
+# "Changing your configuration":#further_customization
+# "Upgrading your Arvados cluster":#post_install
h2(#limitations). Limitations of the single host install
@@ -31,231 +26,185 @@ h2(#limitations). Limitations of the single host install
Using the default configuration, the single host install has scaling limitations compared to a production multi-host install:
-* It uses the local disk for Keep storage (under the @/var/lib/arvados@ directory).
+* It uses the local @/var@ partition to store all user data and logs.
* It uses the @crunch-dispatch-local@ dispatcher, which has a limit of eight concurrent jobs.
* Because jobs and Arvados services all run on the same machine, they will compete for CPU/RAM resources.
h2(#prerequisites). Prerequisites and planning
-h3. Cluster ID and base domain
+h3. Cluster ID
-Choose a 5-character cluster identifier that will represent the cluster. Here are "guidelines on choosing a cluster identifier":../architecture/federation.html#cluster_id . Only lowercase letters and digits 0-9 are allowed. Examples will use @xarv1@ or @${CLUSTER}@, you should substitute the cluster id you have selected.
+Choose a 5-character cluster identifier that will represent the cluster. Refer to "our guidelines on choosing a cluster identifier":../architecture/federation.html#cluster_id. Only lowercase letters and digits 0-9 are allowed. Our documentation uses @xurid@ throughout; replace it with your chosen cluster identifier wherever it appears.
-Determine if you will use a single hostname, or multiple hostnames.
-
-* Single hostname is simpler to set up and can even be used without a hostname at all, just a bare IP address.
-* Multiple hostnames is more similar to the recommended production configuration may make it easier to migrate to a multi-host production configuration in the future, but is more complicated as it requires adding a number of DNS entries.
-
-If you are using multiple hostnames, determine the base domain for the cluster. This will be referred to as @${DOMAIN}@.
-
-For example, if CLUSTER is @xarv1@ and DOMAIN is @example.com@, then @controller.${CLUSTER}.${DOMAIN}@ means @controller.xarv1.example.com@.
-
-h3. Machine specification
+h3. Cluster host
You will need a dedicated (virtual) machine for your Arvados server with at least 2 cores and 8 GiB of RAM (4+ cores / 16+ GiB recommended if you are running workflows) running a supported Linux distribution:
{% include 'supportedlinux' %}
-Note: if you want to try out Arvados inside a Docker container, use "Arvbox":arvbox.html. The package-based install method uses @systemd@ to manage services; lightweight container images generally lack an init system and other tools that the installer requires.
-
-The single host install stores user data in a PostgreSQL database (usually found under @/var/lib/postgresql@) and as Keep blocks that are stored as files under @/var/lib/arvados/@.
-Arvados logs are also kept in @/var/log@ and @/var/www/arvados-api/shared/log@. Accordingly, you should ensure that the disk partition containing @/var@ has adequate storage for your planned usage. We suggest starting with at least 50GiB of free space.
-
-h3(#DNS). DNS hostnames for each service (multi-hostname only)
-
-If you are using a single hostname for all services (they will be distingushed by listening port), you can skip this section.
-
-If you are using the multi-hostname configuration, you will need a DNS entry for each service. If you are using "bring-your-own" TLS certificates, your certificate will need to include all of these hostnames.
-
-In the default configuration these are:
-
-# @controller.${CLUSTER}.${DOMAIN}@
-# @ws.${CLUSTER}.${DOMAIN}@
-# @keep0.${CLUSTER}.${DOMAIN}@
-# @keep1.${CLUSTER}.${DOMAIN}@
-# @keep.${CLUSTER}.${DOMAIN}@
-# @download.${CLUSTER}.${DOMAIN}@
-# @*.collections.${CLUSTER}.${DOMAIN}@ -- important note, this must be a wildcard DNS, resolving to the @keepweb@ service
-# @workbench.${CLUSTER}.${DOMAIN}@
-# @workbench2.${CLUSTER}.${DOMAIN}@
-# @webshell.${CLUSTER}.${DOMAIN}@
-# @shell.${CLUSTER}.${DOMAIN}@
-# @prometheus.${CLUSTER}.${DOMAIN}@
-# @grafana.${CLUSTER}.${DOMAIN}@
+The single host install stores all user data and logs under @/var@. You should ensure that this partition has adequate storage for your planned usage. We suggest starting with at least 50GiB of free space.
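+You can confirm how much free space that partition currently has with standard tools:
+
+$ df -h /var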
-This is described in more detail in "DNS entries and TLS certificates":install-manual-prerequisites.html#dnstls.
-
-h3. Additional prerequisites
-
-# root or passwordless @sudo@ access on the account where you are doing the install
-this usually means adding the account to the @sudo@ group and having a rule like this in @/etc/sudoers.d/arvados_passwordless@ that allows members of group @sudo@ to execute any command without entering a password.
-%sudo ALL=(ALL:ALL) NOPASSWD:ALL
-# @git@ installed on the machine
-# Port 443 reachable by clients
-# For the single-host install, ports 8800-8805 also need to be reachable from your client (configurable in @local.params@, see below)
-# When using "Let's Encrypt":#lets-encrypt port 80 needs to be reachable from everywhere on the internet
-# When using "bring your own certificate":#bring-your-own you need TLS certificate(s) covering the hostname(s) used by Arvados
+You must be able to connect to this host via SSH. Your account must have permission to run arbitrary commands with @sudo@.
h2(#download). Download the installer
-{% assign local_params_src = 'single_host_single_hostname' %}
-{% assign config_examples_src = 'single_host/single_hostname' %}
-{% include 'download_installer' %}
-
-If you are using multiple hostname configuration, substitute 'multiple_hostnames' where it says 'single_hostname' in the command above.
+The Ansible installer is only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using into a convenient directory:
-h2(#localparams). Edit @local.params*@ files
-
-The cluster configuration parameters are included in two files: @local.params@ and @local.params.secrets@. These files can be found wherever you choose to initialize the installation files (e.g., @~/setup-arvados-xarv1@ in these examples).
+{% include 'branchname' %}
+
+~$ git clone --depth=1 --branch={{ branchname }} git://git.arvados.org/arvados.git ~/arvados
+
+
-The @local.params.secrets@ file is intended to store security-sensitive data such as passwords, private keys, tokens, etc. Depending on the security requirements of the cluster deployment, you may wish to store this file in a secrets store like AWS Secrets Manager or Jenkins credentials.
+h2(#install-ansible). Install Ansible
-h3. Parameters from @local.params@:
+{% include 'install_ansible' header_level: 'h3' %}
-# Set @CLUSTER@ to the 5-character cluster identifier (e.g "xarv1")
-# Set @DOMAIN@ to the base DNS domain of the environment, e.g. "example.com"
-# Single hostname only: set @IP_INT@ to the host's IP address.
-# Single hostname only: set @HOSTNAME_EXT@ to the hostname that users will use to connect.
-# Set @INITIAL_USER_EMAIL@ to your email address, as you will be the first admin user of the system.
+h2(#localparams). Set up cluster configuration
-h3. Parameters from @local.params.secrets@:
+Copy the example cluster configuration from the Arvados source tree to a location outside it. We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:
-# Set each @KEY@ / @TOKEN@ to a random string
- Here's an easy way to create five random tokens:
-for i in 1 2 3 4 5; do
- tr -dc A-Za-z0-9
+$ cp arvados/tools/ansible/examples/simple-cluster-config.yml ~/xurid-config.yml
-# Set @DATABASE_PASSWORD@ to a random string
- Important! If this contains any non-alphanumeric characters, in particular ampersand ('&'), it is necessary to add backslash quoting.
- For example, if the password is @Lq&MZDATABASE_PASSWORD="Lq\&MZ\
-# Set @DISPATCHER_SSH_PRIVKEY@ to @"no"@, as it isn't needed.
-{% include 'ssl_config_single' %}
+
-h2(#authentication). Configure your authentication provider (optional, recommended)
+Open the copy you created in your editor, and make changes following the instructions at the top of the file.
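+For orientation, the file uses the standard Arvados cluster configuration layout. A fragment might look like this (illustrative only, not a complete configuration; the cluster ID and hostname are placeholders):
+
+Clusters:
+  xurid:
+    Services:
+      Controller:
+        ExternalURL: "https://hostname.example:8443"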
-By default, the installer will use the "Test" provider, which is a list of usernames and cleartext passwords stored in the Arvados config file. *This is low security configuration and you are strongly advised to configure one of the other "supported authentication methods":setup-login.html* .
+h2(#inventory). Set up cluster inventory
-h2(#further_customization). Further customization of the installation (optional)
+Copy the example cluster inventory from the Arvados source tree to a location outside it. We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:
-If you want to customize the behavior of Arvados, this may require editing the Saltstack pillars and states files found in @local_config_dir@. In particular, @local_config_dir/pillars/arvados.sls@ contains the template (in the @arvados.cluster@ section) used to produce the Arvados configuration file. Consult the "Configuration reference":config.html for a comprehensive list of configuration keys.
-
-Any extra Salt "state" files you add under @local_config_dir/states@ will be added to the Salt run and applied to the hosts.
-
-h2(#installation). Begin installation
+
+$ cp arvados/tools/ansible/examples/simple-cluster-inventory.yml ~/xurid-inventory.yml
+
+
-At this point, you are ready to run the installer script in deploy mode that will conduct all of the Arvados installation.
+Open the copy you created in your editor and make the following changes, which are noted in comments:
-Run this in the @~/arvados-setup-xarv1@ directory:
+* Under @hosts:@, change @hostname.example@ to the hostname or address of your cluster node.
+* Change @arvados_config_file@ to the path of the cluster configuration you created in the previous step.
+* Change @arvados_cluster_id@ to your chosen cluster ID.
-
-./installer.sh deploy
-
+You may make other changes noted in comments, but the changes listed above are required.
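+After these edits, the inventory has this general shape (an illustrative sketch rather than the complete example file; the configuration path is a placeholder):
+
+all:
+  hosts:
+    hostname.example:
+  vars:
+    arvados_config_file: /home/you/xurid-config.yml
+    arvados_cluster_id: xurid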
-h2(#ca_root_certificate). Install the CA root certificate (SSL_MODE=self-signed only)
+h2(#run-playbook). Run the installer playbook
-*If you are not using self-signed certificates (you selected SSL_MODE=lets-encrypt or SSL_MODE=bring-your-own), skip this section.*
+With your cluster configuration and inventory complete, you can use them to run the installer playbook:
-Arvados uses SSL to encrypt communications. The web interface uses AJAX which will silently fail if the certificate is not valid or signed by an unknown Certification Authority.
+
+$ cd arvados/tools/ansible
+arvados/tools/ansible $ ansible-playbook -Ki ~/xurid-inventory.yml install-arvados-cluster.yml
+
+
-For this reason, the installer has the option to create its own a root certificate to authorize Arvados services. The installer script will leave a copy of the generated CA's certificate (something like @xarv1.example.com-arvados-snakeoil-ca.crt@) in the script's directory so you can add it to your workstation.
+This will prompt you for a @BECOME password:@. Enter the sudo password for your account on the cluster node; Ansible will use it to perform privileged system configuration. You will see it start to log tasks like:
-{% assign ca_cert_name = 'xarv1.example.com-arvados-snakeoil-ca.crt' %}
+
+PLAY [Bootstrap nodes] *********************************************************
-{% include 'install_ca_cert' %}
+TASK [Load Arvados configuration file] *****************************************
+ok: [hostname.example -> localhost]
-h2(#test-install). Confirm the cluster is working
+TASK [Load Arvados cluster configuration] **************************************
+ok: [hostname.example]
-When everything has finished, you can run the diagnostics. This requires the `arvados-client` package:
+TASK [ansible.builtin.include_role : distro_bootstrap] *************************
-
-apt-get install arvados-client
+TASK [distro_bootstrap : Get distribution IDs] *********************************
+changed: [hostname.example]
+
-Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
-
-If you are running the diagnostics on the same machine where you installed Arvados, you want @-internal-client@ .
-
-You are an "external client" if you running the diagnostics from your workstation outside of the private network.
+If all goes well, it will finish with a @PLAY RECAP@ reporting @failed=0@, indicating that all tasks succeeded:
-
-./installer.sh diagnostics (-internal-client|-external-client)
+
+PLAY RECAP *********************************************************************
+hostname.example : ok=161 changed=34 unreachable=0 failed=0 skipped=23 rescued=0 ignored=0
+
-h3(#debugging). Debugging issues
-
-The installer records log files for each deployment.
-
-Most service logs go to @/var/log/syslog@.
-
-The logs for Rails API server can be found in @/var/www/arvados-api/current/log/production.log@ on the appropriate instance.
-
-Workbench 2 is a client-side Javascript application. If you are having trouble loading Workbench 2, check the browser's developer console (this can be found in "Tools → Developer Tools").
-
-h3(#iterating). Iterating on config changes
-
-You can iterate on the config and maintain the cluster by making changes to @local.params@ and @local_config_dir@ and running @installer.sh deploy@ again.
+h3(#playbook-problems). Diagnosing problems with the playbook run
-h3(#common-problems). Common problems and solutions
+If the @PLAY RECAP@ indicates that a task failed, that will typically be logged with a message like this:
-h4. PG::UndefinedTable: ERROR: relation \"api_clients\" does not exist
-
-The arvados-api-server package sets up the database as a post-install script. If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.
-
-This will manifest as an error like this:
-
-
-#
+TASK [arvados_controller : Start and enable arvados-controller.service] ********
+fatal: [hostname.example]: FAILED! => {"changed": false, "msg": "Unable to restart service arvados-controller.service: Job for arvados-controller.service failed because the control process exited with error code.\nSee \"systemctl status arvados-controller.service\" and \"journalctl -xeu arvados-controller.service\" for details.\n"}
+
-If this happens, you need to
+The @TASK@ line gives you some context for what failed. The first part (@arvados_controller@ in this example) describes generally what Arvados service it was configuring. The rest of the line describes the specific step it was taking (starting @arvados-controller.service@ in this example). This context can suggest where you might check your configuration for problems or look on the cluster node for additional information. This example problem was caused by the Controller service in the cluster configuration trying to use an already-claimed port in one of the @InternalURLs@.
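+When a task fails this way, the commands quoted in the error message are the fastest way to find the root cause. Run them on the cluster node (for a port conflict like this example, @ss -tlnp@ will also show which process already holds the port):
+
+# systemctl status arvados-controller.service
+# journalctl -xeu arvados-controller.service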
-# correct the database information
-# run @./installer.sh deploy@ to update the configuration
-# Log in to the server, then run this command to re-run the post-install script, which will set up the database:
-dpkg-reconfigure arvados-api-server
-# Re-run @./installer.sh deploy@ again to synchronize everything, and so that the install steps that need to contact the API server are run successfully.
+h2(#test-install). Test the cluster
-h2(#initial_user). Initial user and login
+h3. Run diagnostics
-At this point you should be able to log into the Arvados cluster. The initial URL for the single hostname install will use the hostname or IP address you put in @HOSTNAME_EXT@:
+The @arvados-client diagnostics@ command can check all services on a cluster to identify problems with inconsistent configuration. *On your cluster node*, install and run it like this:
-https://${HOSTNAME_EXT}
+
+$ sudo apt install arvados-client
+$ sudo arvados-client sudo diagnostics -internal-client
+INFO 5: running health check (same as `arvados-server check`)
+INFO 10: getting discovery document from https://hostname.example:8443/discovery/v1/apis/arvados/v1/rest
+INFO 20: getting exported config from https://hostname.example:8443/arvados/v1/config
+[…]
+INFO 160: running a container
+INFO ... container request uuid = xurid-xvhdp-12345abcde67890
+INFO ... container request submitted, waiting up to 10m for container to run
+INFO 9990: deleting temporary collection
+INFO --- no errors ---
+
+
-For the multi-hostname install, it will be:
+h3. Access Workbench
-https://workbench.@${CLUSTER}.${DOMAIN}@
+The default Ansible inventory deploys Arvados with a self-signed certificate. If you deployed this way, you will have the best Workbench experience if you configure your browser to trust that certificate for Workbench and its supporting services. Follow the instructions for your browser below.
-If you did *not* "configure a different authentication provider":#authentication you will be using the "Test" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster. It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ the @local.params@ file.
+If you configured the inventory with a different certificate that is already trusted by your browser, you can skip these steps. You should be able to open the @Services.Workbench2.ExternalURL@ address from your cluster configuration in your browser.
-If you *did* configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.
+h4. Trusting self-signed certificates in Chrome
-h2(#monitoring). Monitoring and Metrics
+{% comment %}
+Last updated for Chrome v138
+{% endcomment %}
+
+# Find the @arvados_tls.Default@ setting in your Ansible inventory.
+# If that setting specifies @remote: true@, copy the file at the @cert@ path from your cluster host to the host where you're running the browser (see the example command after this list). Note you _only_ need the @cert@ file, not the @key@ file.
+# In the URL bar, enter @chrome://certificate-manager/@ and open that.
+# Under the "Custom" header, open "Installed by you."
+# Next to "Trusted Certificates," press the "Import" button.
+# In the file picker dialog, open your copy of the @arvados_tls.Default.cert@ file.
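+If step 2 applies to you, a plain @scp@ is enough to fetch the certificate (the remote path here is a placeholder for the @cert@ path in your inventory):
+
+$ scp you@hostname.example:/path/to/arvados-tls-cert.pem .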
-You can monitor the health and performance of the system using the admin dashboard.
+Now you should be able to open the @Services.Workbench2.ExternalURL@ address from your cluster configuration in your browser. You can skip the next section unless you also want to set up Firefox.
-For the multi-hostname install, it will be:
+h4. Trusting self-signed certificates in Firefox
-https://grafana.@${CLUSTER}.${DOMAIN}@
+{% comment %}
+Last updated for Firefox 140
+{% endcomment %}
-To log in, use username "admin" and @${INITIAL_USER_PASSWORD}@ from @local.conf@.
+# Open the "Edit" menu and select "Settings."
+# Find and press the "View Certificates…" button to open the Certificate Manager.
+# Open the "Servers" tab.
+# Press the "Add Exceptionâ¦" button.
+# Enter the @ExternalURL@ in your cluster configuration for the @Workbench2@ service.
+# Press the "Get Certificate" button.
+# Press the "Confirm Security Exception" button.
+# Repeat the process from step 4 with your configured URLs for the @Controller@, @Keepproxy@, @WebDAV@, and @WebDAVDownload@ services.
-Once logged in, you will want to add the dashboards to the front page.
+Now you should be able to open the @Services.Workbench2.ExternalURL@ address from your cluster configuration in your browser.
-# On the left icon bar, click on "Browse"
-# If the check box next to "Starred" is selected, click on it to de-select it
-# You should see a folder with "Arvados cluster overview", "Node exporter" and "Postgres exporter"
-# You can visit each dashboard and click on the star next to the title to "Mark as favorite"
-# They should now be linked on the front page.
+h2(#further_customization). Changing your configuration
-h2(#post_install). After the installation
+In the future, if you want to make changes to your Arvados cluster or Ansible inventory configuration, simply edit those files and "run the playbook again":#run-playbook. The playbook will deploy your changes to all the component services.
-As part of the operation of @installer.sh@, it automatically creates a @git@ repository with your configuration templates. You should retain this repository but be aware that it contains sensitive information (passwords and tokens used by the Arvados services).
+h2(#post_install). Upgrading your Arvados cluster
-As described in "Iterating on config changes":#iterating you may use @installer.sh deploy@ to re-run the Salt to deploy configuration changes and upgrades. However, be aware that the configuration templates created for you by @installer.sh@ are a snapshot which are not automatically kept up to date.
+When a new version of Arvados is released, the general process to upgrade the cluster is:
-When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@. To specify the version to upgrade to, set the @VERSION@ parameter in @local.params@.
+# In your Arvados checkout directory, @git fetch@ and then @git switch@ to the branch or tag that corresponds to the release you want to use (see the example after this list).
+# Consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if you need or want to make changes to your cluster configuration file.
+# "Run the playbook again":#run-playbook with your cluster inventory.
See also "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
diff --git a/doc/install/salt-vagrant.html.textile.liquid b/doc/install/salt-vagrant.html.textile.liquid
deleted file mode 100644
index 0e400759c0..0000000000
--- a/doc/install/salt-vagrant.html.textile.liquid
+++ /dev/null
@@ -1,129 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Arvados in a VM with Vagrant
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-# "Vagrant":#vagrant
-# "Final steps":#final_steps
-## "DNS configuration":#dns_configuration
-## "Install root certificate":#ca_root_certificate
-# "Initial user and login":#initial_user
-# "Test the installed cluster running a simple workflow":#test_install
-
-h2(#vagrant). Vagrant
-
-{% include 'branchname' %}
-
-This is a package-based installation method. Start by cloning the @{{ branchname }}@ branch from "https://git.arvados.org/arvados.git":https://git.arvados.org/arvados.git . The Salt scripts are available in the @tools/salt-install@ directory.
-
-A @Vagrantfile@ is provided to install Arvados in a virtual machine on your computer using "Vagrant":https://www.vagrantup.com/.
-
-To get it running, install Vagrant in your computer, edit the variables at the top of the @provision.sh@ script as needed, and run
-
-
-vagrant up
-
-
-
-If you want to reconfigure the running box, you can just:
-
-1. edit the pillars to suit your needs
-2. run
-
-
-vagrant reload --provision
-
-
-
-h2(#final_steps). Final configuration steps
-
-h3(#dns_configuration). DNS configuration
-
-After the setup is done, you need to set up your DNS to be able to access the cluster.
-
-The simplest way to do this is to edit your @/etc/hosts@ file (as root):
-
-
-export CLUSTER="arva2"
-export DOMAIN="arv.local"
-export HOST_IP="127.0.0.2" # This is valid either if installing in your computer directly
- # or in a Vagrant VM. If you're installing it on a remote host
- # just change the IP to match that of the host.
-echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
-
-
-
-h3(#ca_root_certificate). Install root certificate
-
-Arvados uses SSL to encrypt communications. Its UI uses AJAX which will silently fail if the certificate is not valid or signed by an unknown Certification Authority.
-
-For this reason, the @arvados-formula@ has a helper state to create a root certificate to authorize Arvados services. The @provision.sh@ script will leave a copy of the generated CA's certificate (@arvados-snakeoil-ca.pem@) in the script's directory so you can add it to your workstation.
-
-Installing the root certificate into your web browser will prevent security errors when accessing Arvados services with your web browser.
-
-# Go to the certificate manager in your browser.
-#* In Chrome, this can be found under "Settings → Advanced → Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
-#* In Firefox, this can be found under "Preferences → Privacy & Security" or entering @about:preferences#privacy@ in the URL bar and then choosing "View Certificates...".
-# Select the "Authorities" tab, then press the "Import" button. Choose @arvados-snakeoil-ca.pem@
-
-The certificate will be added under the "Arvados Formula".
-
-To access your Arvados instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into the OS certificate storage.
-
-* On Debian/Ubuntu:
-
-
-cp arvados-root-cert.pem /usr/local/share/ca-certificates/
-/usr/sbin/update-ca-certificates
-
-
-
-* On Alma/CentOS/Red Hat/Rocky:
-
-
-cp arvados-root-cert.pem /etc/pki/ca-trust/source/anchors/
-/usr/bin/update-ca-trust
-
-
-
-h2(#initial_user). Initial user and login
-
-At this point you should be able to log into the Arvados cluster.
-
-If you didn't change the defaults, the initial URL will be:
-
-* https://workbench.arva2.arv.local:8443
-
-or, in general, the url format will be:
-
-* https://workbench.@.:8443@
-
-By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
-
-Assuming you didn't change the defaults, the initial credentials are:
-
-* User: 'admin'
-* Password: 'password'
-* Email: 'admin@arva2.arv.local'
-
-h2(#test_install). Test the installed cluster running a simple workflow
-
-As documented in the Single Host installation page, You can run a test workflow to verify the installation finished correctly. To do so, you can follow these steps:
-
-
-vagrant ssh
-
-
-and once in the instance:
-
-
-cd /tmp/cluster_tests
-./run-test.sh
-
-
diff --git a/doc/install/setup-login.html.textile.liquid b/doc/install/setup-login.html.textile.liquid
index a9991f642e..86f8068582 100644
--- a/doc/install/setup-login.html.textile.liquid
+++ b/doc/install/setup-login.html.textile.liquid
@@ -23,17 +23,17 @@ With this configuration, users will sign in with their Google accounts.
Use the Google Developers Console to create a set of client credentials.
# Select or create a project.
# Click *+ Enable APIs and Services*.
-#* Search for *People API* and click *Enable API*.
+#* Search for *Google People API* and click *Enable API*.
#* Navigate back to the main "APIs & Services" page.
# On the sidebar, click *OAuth consent screen*.
#* On consent screen settings, enter your identifying details.
-#* Under *Authorized domains* add your domain (@example.com@).
+#* Under *Branding* → *Authorized domains* add your domain (@example.com@).
#* Click *Save*.
-# On the sidebar, click *Credentials*, then click *Create credentials*→*OAuth client ID*
+# On the sidebar, click *Clients*, then click *+ Create client*, arriving at the *OAuth client ID* setup page.
# Under *Application type* select *Web application*.
-# Add the JavaScript origin: @https://ClusterID.example.com/@
-# Add the Redirect URI: @https://ClusterID.example.com/login@
-# Copy the values of *Client ID* and *Client secret* to the @Login.Google@ section of @config.yml@.
+# Add the JavaScript origin: @https://workbench2.ClusterID.example.com@. This should match the Web origin where you will host Workbench. Note that it can only include the scheme, hostname, and port parts; the path, in particular a trailing @/@, is not allowed.
+# Add the Redirect URI: @https://ClusterID.example.com/login@. The host part of this URI should match the @ExternalURL@ of the Arvados controller service as specified in the configuration file @/etc/arvados/config.yml@, including the port if specified.
+# Copy the values of *Client ID* and *Client secret* to the @Login.Google@ section of @/etc/arvados/config.yml@.
{% codeblock as yaml %}
Login:
diff --git a/doc/install/workbench.html.textile.liquid b/doc/install/workbench.html.textile.liquid
index b3e5d6975c..0e23f5bfe1 100644
--- a/doc/install/workbench.html.textile.liquid
+++ b/doc/install/workbench.html.textile.liquid
@@ -66,13 +66,9 @@ You can have box pop up when users load Workbench to give information such as li
The banner appears when a user loads workbench and have not yet viewed the current banner text. Users can also view the banner after dismissing it by selecting the *Restore Banner* option from the *Notifications* menu.
-The banner text (HTML formatted) is loaded from the file @banner.html@ in the collection provided in @BannerUUID@.
+The banner text (HTML formatted) is loaded from the file @banner.html@ in the collection provided in @BannerUUID@. The banner does _not_ need to be wrapped by *html* or *body* tags (if present, they will be removed).
-The following HTML tags are allowed in banner.html: a, b, blockquote, br, code, del, dd, dl, dt, em, h1-h6, hr, i, img, kbd, li, ol, p, pre, s, del, section, span, strong, sub, sup, and ul.
-
-The following attributes are allowed: src, width, height, href, alt, title, and style.
-
-All styling must be made in-line with the style attribute. Disallowed tags and attributes will not render.
+{% include 'html_tags' %}
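+One way to publish a banner (a sketch; the file and collection names are up to you) is to upload @banner.html@ with @arv-put@, then set the printed collection UUID as @BannerUUID@:
+
+$ arv-put --name banner banner.html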
h3. Tooltips
diff --git a/doc/pysdk_pdoc.py b/doc/pysdk_pdoc.py
index b246a83fd6..be254b626d 100755
--- a/doc/pysdk_pdoc.py
+++ b/doc/pysdk_pdoc.py
@@ -32,6 +32,10 @@ else:
DEFAULT_ARGLIST = [
'--output-directory=sdk/python',
'../sdk/python/build/lib/arvados/',
+    # Because the module is private, pdoc does not build documentation for any
+ # of it. The exclusion below additionally prevents pdoc from hyperlinking
+ # references under arvados._internal that appear in method signatures, etc.
+ '!arvados._internal',
]
MD_EXTENSIONS = {
'admonitions': None,
diff --git a/doc/sdk/cli/index.html.textile.liquid b/doc/sdk/cli/index.html.textile.liquid
index ea10c830bc..827f1d0876 100644
--- a/doc/sdk/cli/index.html.textile.liquid
+++ b/doc/sdk/cli/index.html.textile.liquid
@@ -31,12 +31,12 @@ Available flags:
Use 'arv subcommand|resource --help' to get more information about a particular
command or resource.
-Available subcommands: copy, create, edit, keep, pipeline, run, tag, ws
+Available subcommands: copy, create, edit, keep, run, tag, ws
-Available resources: api_client_authorization, api_client, authorized_key,
-collection, user_agreement, group, job_task, link, log, keep_disk,
-pipeline_instance, node, repository, specimen, pipeline_template, user,
-virtual_machine, trait, human, job, keep_service
+Available resources: api_client_authorization, api_client,
+authorized_key, collection, container, container_request,
+user_agreement, group, keep_service, link, log, user, virtual_machine,
+workflow
Additional options:
-e, --version Print version and exit
diff --git a/doc/sdk/cli/install.html.textile.liquid b/doc/sdk/cli/install.html.textile.liquid
index e0d50b874b..8baa32c6ea 100644
--- a/doc/sdk/cli/install.html.textile.liquid
+++ b/doc/sdk/cli/install.html.textile.liquid
@@ -17,11 +17,13 @@ h2. Prerequisites
# "Install Ruby":../../install/ruby.html
# "Install the Python SDK":../python/sdk-python.html
-The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 10 this is:
+The SDK uses @curl@, which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On supported versions of Debian and Ubuntu, run:
-
-$ apt-get install build-essential libcurl4-openssl-dev
-
+
+
+# apt install build-essential libcurl4-openssl-dev
+
+
h2. Install from RubyGems
diff --git a/doc/sdk/cli/subcommands.html.textile.liquid b/doc/sdk/cli/subcommands.html.textile.liquid
index dadb1d56c7..cc11a4a5d6 100644
--- a/doc/sdk/cli/subcommands.html.textile.liquid
+++ b/doc/sdk/cli/subcommands.html.textile.liquid
@@ -64,48 +64,69 @@ h3(#arv-copy). arv copy
$ arv copy --help
-usage: arv_copy.py [-h] [-v] [--progress] [--no-progress] [-f] --src
- SOURCE_ARVADOS --dst DESTINATION_ARVADOS [--recursive]
- [--no-recursive] [--dst-git-repo DST_GIT_REPO]
- [--project-uuid PROJECT_UUID] [--retries RETRIES]
- object_uuid
+usage: arv-copy [-h] [--version] [-v] [--progress] [--no-progress] [-f]
+ [--src SOURCE_ARVADOS] [--dst DESTINATION_ARVADOS]
+ [--recursive] [--no-recursive] [--project-uuid PROJECT_UUID]
+ [--replication N] [--storage-classes STORAGE_CLASSES]
+ [--varying-url-params VARYING_URL_PARAMS]
+ [--prefer-cached-downloads] [--retries RETRIES]
+ object_uuid
-Copy a pipeline instance, template or collection from one Arvados instance to
-another.
+Copy a workflow, collection or project from one Arvados instance to another.
+On success, the uuid of the copied object is printed to stdout.
positional arguments:
object_uuid The UUID of the object to be copied.
optional arguments:
-h, --help show this help message and exit
+ --version Print version and exit.
-v, --verbose Verbose output.
--progress Report progress on copying collections. (default)
--no-progress Do not report progress on copying collections.
-f, --force Perform copy even if the object appears to exist at
the remote destination.
- --src SOURCE_ARVADOS The name of the source Arvados instance (required) -
- points at an Arvados config file. May be either a
- pathname to a config file, or (for example) "foo" as
- shorthand for $HOME/.config/arvados/foo.conf.
+ --src SOURCE_ARVADOS Client configuration location for the source Arvados
+ cluster. May be either a configuration file path, or a
+ plain identifier like `foo` to search for a
+ configuration file `foo.conf` under a systemd or XDG
+ configuration directory. If not provided, will search
+ for a configuration file named after the cluster ID of
+ the source object UUID.
--dst DESTINATION_ARVADOS
- The name of the destination Arvados instance
- (required) - points at an Arvados config file. May be
- either a pathname to a config file, or (for example)
- "foo" as shorthand for $HOME/.config/arvados/foo.conf.
- --recursive Recursively copy any dependencies for this object.
- (default)
- --no-recursive Do not copy any dependencies. NOTE: if this option is
- given, the copied object will need to be updated
- manually in order to be functional.
- --dst-git-repo DST_GIT_REPO
- The name of the destination git repository. Required
- when copying a pipeline recursively.
+ Client configuration location for the destination
+ Arvados cluster. May be either a configuration file
+ path, or a plain identifier like `foo` to search for a
+ configuration file `foo.conf` under a systemd or XDG
+ configuration directory. If not provided, will use the
+ default client configuration from the environment or
+ `settings.conf`.
+ --recursive Recursively copy any dependencies for this object, and
+ subprojects. (default)
+ --no-recursive Do not copy any dependencies or subprojects.
--project-uuid PROJECT_UUID
The UUID of the project at the destination to which
- the pipeline should be copied.
+ the collection or workflow should be copied.
+ --replication N
+ Number of replicas per storage class for the copied
+ collections at the destination. If not provided (or if
+ provided with invalid value), use the destination's
+ default replication-level setting (if found), or the
+ fallback value 2.
+ --storage-classes STORAGE_CLASSES
+ Comma separated list of storage classes to be used
+                        when saving data to the destination Arvados instance.
+ --varying-url-params VARYING_URL_PARAMS
+ A comma separated list of URL query parameters that
+ should be ignored when storing HTTP URLs in Keep.
+ --prefer-cached-downloads
+ If a HTTP URL is found in Keep, skip upstream URL
+ freshness check (will not notice if the upstream has
+ changed, but also not error if upstream is
+ unavailable).
--retries RETRIES Maximum number of times to retry server requests that
encounter temporary failures (e.g., server down).
- Default 3.
+ Default 10.
diff --git a/doc/sdk/fuse/options.html.textile.liquid b/doc/sdk/fuse/options.html.textile.liquid
index 1ebfa242a5..9cae2c5a5e 100644
--- a/doc/sdk/fuse/options.html.textile.liquid
+++ b/doc/sdk/fuse/options.html.textile.liquid
@@ -122,7 +122,7 @@ table(table table-bordered table-condensed).
|_. Option(s)|_. Description|
|@--disk-cache@|Cache data on the local filesystem (default)|
|@--ram-cache@|Cache data in memory|
-|@--disk-cache-dir DIRECTORY@|Filesystem cache location (default @~/.cache/arvados/keep@)|
+|@--disk-cache-dir DIRECTORY@|Set custom filesystem cache location|
|@--directory-cache BYTES@|Size of directory data cache in bytes (default 128 MiB)|
|@--file-cache BYTES@|Size of file data cache in bytes (default 8 GiB for filesystem cache, 256 MiB for memory cache)|
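For example, to use the filesystem cache with a larger file data cache (an illustrative invocation; the 16 GiB size and mount point are arbitrary choices):

$ arv-mount --disk-cache --file-cache 17179869184 ~/keep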
@@ -164,8 +164,21 @@ Documentation={{ site.baseurl }}/sdk/fuse/options.html
[Service]
Type=simple
-CacheDirectory=arvados/keep
-CacheDirectoryMode=0700
+
+# arv-mount will cache data under a `keep` subdirectory of CacheDirectory.
+# If this is a system service installed under /etc/systemd/system,
+# the cache will be at /var/cache/arvados/keep.
+# The default value of `arvados` lets arv-mount share the cache with other
+# tools.
+CacheDirectory=arvados
+
+# arv-mount will get Arvados API credentials from the `settings.conf` file
+# under ConfigurationDirectory.
+# If this is a system service installed under /etc/systemd/system,
+# the configuration will be read from /etc/arvados/settings.conf.
+# The default value of `arvados` lets arv-mount read configuration from the
+# same location as other tools.
+ConfigurationDirectory=arvados
# This unit makes the mount available as `Arvados` under the runtime directory root.
# If this is a system service installed under /etc/systemd/system,
@@ -175,19 +188,13 @@ CacheDirectoryMode=0700
# If you want to mount at another location on the filesystem, remove RuntimeDirectory
# and replace both instances of %t/Arvados with your desired path.
RuntimeDirectory=Arvados
+
# The arv-mount path must be the absolute path where you installed the command.
# If you installed from a distribution package, make this /usr/bin/arv-mount.
# If you installed from pip, replace ... with the path to your virtualenv.
# You can add options to select what gets mounted, access permissions,
# cache size, log level, etc.
-ExecStart=.../bin/arv-mount --foreground --disk-cache-dir %C/arvados/keep %t/Arvados
+ExecStart=.../bin/arv-mount --foreground %t/Arvados
ExecStop=/usr/bin/fusermount -u %t/Arvados
-
-# This unit assumes the running user has a ~/.config/arvados/settings.conf
-# with ARVADOS_API_HOST and ARVADOS_API_TOKEN defined.
-# If not, you can write those in a separate file
-# and set its path as EnvironmentFile.
-# Make sure that file is owned and only readable by the running user (mode 0600).
-#EnvironmentFile=...
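+
+For reference, a minimal sketch of creating @/etc/arvados/settings.conf@ for a system service (the host and token values are placeholders for your cluster's credentials):
+
+
+# mkdir -p /etc/arvados
+# cat >/etc/arvados/settings.conf <<EOF
+ARVADOS_API_HOST=zzzzz.arvadosapi.com
+ARVADOS_API_TOKEN=v2/zzzzz-gj3su-xxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxx
+EOF
+# chmod 0600 /etc/arvados/settings.conf
+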
diff --git a/doc/sdk/python/api-client.html.textile.liquid b/doc/sdk/python/api-client.html.textile.liquid
index dabd2d37f8..62275aa476 100644
--- a/doc/sdk/python/api-client.html.textile.liquid
+++ b/doc/sdk/python/api-client.html.textile.liquid
@@ -26,7 +26,14 @@ import arvados
arv_client = arvados.api('v1')
{% endcodeblock %}
-This will connect to the Arvados API server using the @ARVADOS_API_HOST@, @ARVADOS_API_TOKEN@, and @ARVADOS_API_HOST_INSECURE@ settings from environment variables or @~/.config/arvados/settings.conf@. You can alternatively pass these settings as arguments:
+When called this way, the SDK gets Arvados API credentials from the first source it finds in this list:
+
+# The environment variables @ARVADOS_API_HOST@, @ARVADOS_API_TOKEN@, and @ARVADOS_API_HOST_INSECURE@.
+# The @settings.conf@ file under the directories listed in systemd's @CONFIGURATION_DIRECTORY@ environment variable.
+# The @arvados/settings.conf@ file under the directory in the @XDG_CONFIG_HOME@ environment variable. This defaults to @~/.config/arvados/settings.conf@ if @XDG_CONFIG_HOME@ is not set.
+# The @arvados/settings.conf@ file under the directories in the @XDG_CONFIG_DIRS@ environment variable.
+
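+For example, one quick way to check which credentials the SDK picks up is to call a simple API method (this sketch assumes a valid token is present in one of the locations above):
+
+
+$ python3 -c 'import arvados; print(arvados.api("v1").users().current().execute()["uuid"])'
+
+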
+You can alternatively pass these settings as arguments:
{% codeblock as python %}
import arvados
diff --git a/doc/sdk/ruby/index.html.textile.liquid b/doc/sdk/ruby/index.html.textile.liquid
index b3b97244ba..ea0fc78797 100644
--- a/doc/sdk/ruby/index.html.textile.liquid
+++ b/doc/sdk/ruby/index.html.textile.liquid
@@ -22,11 +22,13 @@ h3. Prerequisites
# "Install Ruby":../../install/ruby.html
-The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 10 this is:
+The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On supported versions of Debian and Ubuntu, run:
-
-$ apt-get install build-essential libcurl4-openssl-dev
-
+
+
+# apt install build-essential libcurl4-openssl-dev
+
+
h3. Install with RubyGems
diff --git a/doc/user/cwl/costanalyzer.html.textile.liquid b/doc/user/cwl/costanalyzer.html.textile.liquid
index 1d20c85f54..ed65834e50 100644
--- a/doc/user/cwl/costanalyzer.html.textile.liquid
+++ b/doc/user/cwl/costanalyzer.html.textile.liquid
@@ -13,86 +13,101 @@ SPDX-License-Identifier: CC-BY-SA-3.0
{% include 'notebox_begin' %}
-This is only applicable when Arvados runs in a cloud environment and @arvados-dispatch-cloud@ is used to dispatch @crunch@ jobs. The per node-hour price for each defined InstanceType most be supplied in "config.yml":{{site.baseurl}}/admin/config.html.
+Cost information is generally only available when Arvados runs in a cloud environment and @arvados-dispatch-cloud@ is used to dispatch containers. The per node-hour price for each defined InstanceType must be supplied in "config.yml":{{site.baseurl}}/admin/config.html.
{% include 'notebox_end' %}
-The @arvados-client@ program can be used to analyze the cost of a workflow. It can be installed from packages (@apt install arvados-client@ or @yum install arvados-client@). The @arvados-client costanalyzer@ command analyzes the cost accounting information associated with Arvados container requests.
+The @arv-cluster-activity@ program can be used to analyze cluster usage and cost over a time period.
+
+h2. Installation
+
+The @arv-cluster-activity@ tool can be installed from a distribution package or PyPI.
+
+h2. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/packages.html.
+
+{% assign arvados_component = 'python3-arvados-cluster-activity' %}
+
+{% include 'install_packages' %}
+
+h2. Option 2: Install with pip
+
+Run @pip install arvados-cluster-activity[prometheus]@ in an appropriate installation environment, such as a virtualenv.
+
+Note:
+
+Support for fetching Prometheus metrics depends on Pandas and NumPy. If these dependencies pose a problem, you can install the cluster activity tool without Prometheus support by omitting the @[prometheus]@ extra from the @pip install@ command.
+
+The Cluster Activity report uses the Arvados Python SDK, which uses @pycurl@, which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian-based distributions you can install them by running:
+
+
+# apt install git build-essential python3-dev libcurl4-openssl-dev libssl-dev
+
+
h2(#syntax). Syntax
-The @arvados-client costanalyzer@ tool has a number of command line arguments:
+The @arv-cluster-activity@ tool has a number of command line arguments:
-~$ arvados-client costanalyzer -h
-Usage:
- ./arvados-client costanalyzer [options ...] [UUID ...]
-
- This program analyzes the cost of Arvados container requests and calculates
- the total cost across all requests. At least one UUID or a timestamp range
- must be specified.
-
- When the '-output' option is specified, a set of CSV files with cost details
- will be written to the provided directory. Each file is a CSV report that lists
- all the containers used to fulfill the container request, together with the
- machine type and cost of each container.
-
- When supplied with the UUID of a container request, it will calculate the
- cost of that container request and all its children.
-
- When supplied with the UUID of a collection, it will see if there is a
- container_request UUID in the properties of the collection, and if so, it
- will calculate the cost of that container request and all its children.
-
- When supplied with a project UUID or when supplied with multiple container
- request or collection UUIDs, it will calculate the total cost for all
- supplied UUIDs.
-
- When supplied with a 'begin' and 'end' timestamp (format:
- 2006-01-02T15:04:05), it will calculate the cost for all top-level container
- requests whose containers finished during the specified interval.
-
- The total cost calculation takes container reuse into account: if a container
- was reused between several container requests, its cost will only be counted
- once.
-
- Caveats:
-
- - This program uses the cost data from config.yml at the time of the
- execution of the container, stored in the 'node.json' file in its log
- collection. If the cost data was not correctly configured at the time the
- container was executed, the output from this program will be incorrect.
-
- - If a container was run on a preemptible ("spot") instance, the cost data
- reported by this program may be wildly inaccurate, because it does not have
- access to the spot pricing in effect for the node then the container ran. The
- UUID report file that is generated when the '-output' option is specified has
- a column that indicates the preemptible state of the instance that ran the
- container.
-
- - This program does not take into account overhead costs like the time spent
- starting and stopping compute nodes that run containers, the cost of the
- permanent cloud nodes that provide the Arvados services, the cost of data
- stored in Arvados, etc.
-
- - When provided with a project UUID, subprojects will not be considered.
-
- In order to get the data for the UUIDs supplied, the ARVADOS_API_HOST and
- ARVADOS_API_TOKEN environment variables must be set.
-
- This program prints the total dollar amount from the aggregate cost
- accounting across all provided UUIDs on stdout.
-
-Options:
- -begin begin
- timestamp begin for date range operation (format: 2006-01-02T15:04:05)
- -cache
- create and use a local disk cache of Arvados objects (default true)
- -end end
- timestamp end for date range operation (format: 2006-01-02T15:04:05)
- -log-level level
- logging level (debug, info, ...) (default "info")
- -output directory
- output directory for the CSV reports
+~$ arv-cluster-activity --help
+usage: arv-cluster-activity [-h] [--start START] [--end END] [--days DAYS] [--cost-report-file COST_REPORT_FILE] [--include-workflow-steps] [--columns COLUMNS] [--exclude EXCLUDE]
+ [--html-report-file HTML_REPORT_FILE] [--version] [--cluster CLUSTER] [--prometheus-auth PROMETHEUS_AUTH]
+
+options:
+ -h, --help show this help message and exit
+ --start START Start date for the report in YYYY-MM-DD format (UTC) (or use --days)
+ --end END End date for the report in YYYY-MM-DD format (UTC), default "now"
+ --days DAYS Number of days before "end" to start the report (or use --start)
+ --cost-report-file COST_REPORT_FILE
+ Export cost report to specified CSV file
+ --include-workflow-steps
+ Include individual workflow steps (optional)
+ --columns COLUMNS Cost report columns (optional), must be comma separated with no spaces between column names. Available columns are:
+ Project, ProjectUUID, Workflow,
+ WorkflowUUID, Step, StepUUID, Sample, SampleUUID, User, UserUUID, Submitted, Started, Runtime, Cost
+ --exclude EXCLUDE Exclude workflows containing this substring (may be a regular expression)
+ --html-report-file HTML_REPORT_FILE
+ Export HTML report to specified file
+ --version Print version and exit.
+ --cluster CLUSTER Cluster to query for prometheus stats
+ --prometheus-auth PROMETHEUS_AUTH
+ Authorization file with prometheus info
+
+h2(#Credentials). Credentials
+
+To access the Arvados host, the tool will read default credentials from @~/.config/arvados/settings.conf@ or use the standard @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables.
+
+The cluster report tool will also fetch metrics from Prometheus, if available. Prometheus connection information can be provided in an environment file passed with @--prometheus-auth@, or set as environment variables.
+
+
+PROMETHEUS_HOST=https://your.prometheus.server.example.com
+PROMETHEUS_USER=admin
+PROMETHEUS_PASSWORD=password
+
+
+@PROMETHEUS_USER@ and @PROMETHEUS_PASSWORD@ will be passed in an @Authorization@ header using HTTP Basic authentication.
+
+Alternately, instead of @PROMETHEUS_USER@ and @PROMETHEUS_PASSWORD@ you can provide @PROMETHEUS_APIKEY@. This will be passed in as a Bearer token (@Authorization: Bearer @).
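+
+For example, an environment file using an API key instead of a username and password might look like this (the key value is a placeholder):
+
+
+PROMETHEUS_HOST=https://your.prometheus.server.example.com
+PROMETHEUS_APIKEY=xxxxxxxxxxxxxxxxxxxx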
+
+h2(#example). Example usage
+
+
+~$ arv-cluster-activity \
+   --days 90 \
+ --include-workflow-steps \
+ --prometheus-auth prometheus.env \
+ --cost-report-file report.csv \
+ --html-report-file report.html
+INFO:root:Exporting workflow runs 0 - 5
+INFO:root:Getting workflow steps
+INFO:root:Got workflow steps 0 - 2
+INFO:root:Getting container hours time series
+INFO:root:Getting data usage time series
+
+
+
+!sample-cluster-activity-report.png!
diff --git a/doc/user/cwl/crunchstat-summary.html.textile.liquid b/doc/user/cwl/crunchstat-summary.html.textile.liquid
index a28acd56ec..b8d92ae473 100644
--- a/doc/user/cwl/crunchstat-summary.html.textile.liquid
+++ b/doc/user/cwl/crunchstat-summary.html.textile.liquid
@@ -13,7 +13,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
*Note:* Starting from Arvados 2.7.2, these reports are generated automatically by @arvados-cwl-runner@ and can be found as @usage_report.html@ in a container request's log collection.
-The @crunchstat-summary@ tool can be used to analyze workflow and container performance. It can be installed from packages (@apt install python3-crunchstat-summary@ or @yum install rh-python36-python-crunchstat-summary@), or in a Python virtualenv (@pip install crunchstat_summary@). @crunchstat-summary@ analyzes the crunchstat lines from the logs of a container or workflow and generates a report in text or html format.
+The @crunchstat-summary@ tool can be used to analyze workflow and container performance. It can be installed from packages (@apt install python3-crunchstat-summary@ or @dnf install python3-crunchstat-summary@), or in a Python virtualenv (@pip install crunchstat_summary@). @crunchstat-summary@ analyzes the crunchstat lines from the logs of a container or workflow and generates a report in text or html format.
h2(#syntax). Syntax
diff --git a/doc/user/cwl/cwl-extensions.html.textile.liquid b/doc/user/cwl/cwl-extensions.html.textile.liquid
index 3c8366721d..ecc1d94fc6 100644
--- a/doc/user/cwl/cwl-extensions.html.textile.liquid
+++ b/doc/user/cwl/cwl-extensions.html.textile.liquid
@@ -21,10 +21,31 @@ $namespaces:
For portability, most Arvados extensions should go into the @hints@ section of your CWL file. This makes it possible for your workflows to run other CWL runners that do not recognize Arvados hints. The difference between @hints@ and @requirements@ is that @hints@ are optional features that can be ignored by other runners and still produce the same output, whereas @requirements@ will fail the workflow if they cannot be fulfilled. For example, @arv:IntermediateOutput@ should go in @hints@ as it will have no effect on non-Arvados platforms, however if your workflow explicitly accesses the Arvados API and will fail without it, you should put @arv:APIRequirement@ in @requirements@.
+* "RunInSingleContainer":#RunInSingleContainer
+* "SeparateRunner":#SeparateRunner
+* "RuntimeConstraints":#RuntimeConstraints
+* "PartitionRequirement":#PartitionRequirement
+* "APIRequirement":#APIRequirement
+* "IntermediateOutput":#IntermediateOutput
+* "Secrets":#Secrets
+* "WorkflowRunnerResources":#WorkflowRunnerResources
+* "ClusterTarget":#ClusterTarget
+* "OutputStorageClass":#OutputStorageClass
+* "ProcessProperties":#ProcessProperties
+* "OutputCollectionProperties":#OutputCollectionProperties
+* "CUDARequirement":#CUDARequirement
+* "ROCmRequirement":#ROCmRequirement
+* "UsePreemptible":#UsePreemptible
+* "PreemptionBehavior":#PreemptionBehavior
+* "OutOfMemoryRetry":#OutOfMemoryRetry
+
{% codeblock as yaml %}
hints:
arv:RunInSingleContainer: {}
+ arv:SeparateRunner:
+ runnerProcessName: $(inputs.sample_id)
+
arv:RuntimeConstraints:
keep_cache: 123456
outputDirType: keep_output_dir
@@ -68,10 +89,21 @@ hints:
cudaComputeCapability: "9.0"
cudaDeviceCountMin: 1
cudaDeviceCountMax: 1
+ cudaVram: 8000
+
+ arv:ROCmRequirement:
+ rocmDriverVersion: "6.2"
+ rocmTarget: ["gfx1100", "gfx1103"]
+ rocmDeviceCountMin: 1
+ rocmDeviceCountMax: 1
+ rocmVram: 8000
arv:UsePreemptible:
usePreemptible: true
+ arv:PreemptionBehavior:
+ resubmitNonPreemptible: true
+
arv:OutOfMemoryRetry:
memoryRetryMultiplier: 2
memoryErrorRegex: "custom memory error"
@@ -81,7 +113,21 @@ h2(#RunInSingleContainer). arv:RunInSingleContainer
Apply this to a workflow step that runs a subworkflow. Indicates that all the steps of the subworkflow should run together in a single container and not be scheduled separately. If you have a sequence of short-running steps (less than 1-2 minutes each) this enables you to avoid scheduling and data transfer overhead by running all the steps together at once. To use this feature, @cwltool@ must be installed in the container image.
-h2. arv:RuntimeConstraints
+h2(#SeparateRunner). arv:SeparateRunner
+
+Apply this to a workflow step that runs a subworkflow. Indicates that Arvados should launch a new workflow runner to manage that specific subworkflow instance. If used on a scatter step, each scatter item is launched separately. Using this option has three benefits:
+
+* Better organization in the "Subprocesses" table of the main workflow, including the ability to provide a custom name for the step
+* When re-running a batch that has run before, an entire subworkflow may be reused as a unit, which is faster than determining reuse for each step.
+* Significantly faster submit rate compared to invoking @arvados-cwl-runner@ to launch individual workflow instances separately.
+
+The disadvantage of this option is that it launches an additional workflow runner, which consumes more compute resources than having all the steps managed by a single runner.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|runnerProcessName|optional string|Name to assign to the subworkflow process. May be an expression with an input context of the post-scatter workflow step invocation.|
+
+h2(#RuntimeConstraints). arv:RuntimeConstraints
Set Arvados-specific runtime hints.
@@ -94,7 +140,7 @@ table(table table-bordered table-condensed).
*keep_output_dir*: Use writable Keep mount. Files are streamed to Keep as they are written. Does not consume local scratch space, but does consume RAM for output buffers (up to 192 MiB per file simultaneously open for writing.) Best suited to processes which produce sequential output of large files (non-sequential writes may produced fragmented file manifests). Supports regular files and directories, does not support special files such as symlinks, hard links, named pipes, named sockets, or device nodes.|
-h2. arv:PartitionRequirement
+h2(#PartitionRequirement). arv:PartitionRequirement
Select preferred compute partitions on which to run jobs.
@@ -110,7 +156,7 @@ Indicates that process wants to access to the Arvados API. Will be granted netw
Use @arv:APIRequirement@ in @hints@ to enable general (non-Arvados-specific) network access for a tool.
-h2. arv:IntermediateOutput
+h2(#IntermediateOutput). arv:IntermediateOutput
Specify desired handling of intermediate output collections.
@@ -119,7 +165,7 @@ table(table table-bordered table-condensed).
|outputTTL|int|If the value is greater than zero, consider intermediate output collections to be temporary and should be automatically trashed. Temporary collections will be trashed @outputTTL@ seconds after creation. A value of zero means intermediate output should be retained indefinitely (this is the default behavior).
Note: arvados-cwl-runner currently does not take workflow dependencies into account when setting the TTL on an intermediate output collection. If the TTL is too short, it is possible for a collection to be trashed before downstream steps that consume it are started. The recommended minimum value for TTL is the expected duration of the entire workflow.|
-h2. cwltool:Secrets
+h2(#Secrets). cwltool:Secrets
Indicate that one or more input parameters are "secret". Must be applied at the top level Workflow. Secret parameters are not stored in keep, are hidden from logs and API responses, and are wiped from the database after the workflow completes.
@@ -129,7 +175,7 @@ table(table table-bordered table-condensed).
|_. Field |_. Type |_. Description |
|secrets|array|Input parameters which are considered "secret". Must be strings.|
-h2. arv:WorkflowRunnerResources
+h2(#WorkflowRunnerResources). arv:WorkflowRunnerResources
Specify resource requirements for the workflow runner process (arvados-cwl-runner) that manages a workflow run. Must be applied to the top level workflow. Will also be set implicitly when using @--submit-runner-ram@ on the command line along with @--create-workflow@ or @--update-workflow@. Use this to adjust the runner's allocation if the workflow runner is getting "out of memory" exceptions or being killed by the out-of-memory (OOM) killer.
@@ -139,7 +185,7 @@ table(table table-bordered table-condensed).
|coresMin|int|Number of cores to reserve to the arvados-cwl-runner process. Default 1 core.|
|keep_cache|int|Size of collection metadata cache for the workflow runner, in MiB. Default 256 MiB. Will be added on to the RAM request when determining node size to request.|
-h2(#clustertarget). arv:ClusterTarget
+h2(#ClusterTarget). arv:ClusterTarget
Specify which Arvados cluster should execute a container or subworkflow, and the parent project for the container request.
@@ -183,6 +229,19 @@ table(table table-bordered table-condensed).
|cudaComputeCapability|string|Required. The minimum CUDA hardware capability (in 'X.Y' format) required by the application's PTX or C++ GPU code (will be JIT compiled for the available hardware).|
|cudaDeviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|
|cudaDeviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional. If not specified, same as @cudaDeviceCountMin@.|
+|cudaVram|integer|Requested amount of VRAM per device, in mebibytes (2**20)|
+
+h2(#ROCmRequirement). cwltool:ROCmRequirement
+
+Request support for AMD ROCm GPU acceleration in the container. Assumes that the ROCm runtime (SDK) is installed in the container, and the host will inject the AMD devices (@/dev/kfd@ and @/dev/dri/renderD*@) into the container.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|rocmDriverVersion|string|Required. The ROCm SDK version corresponding to the minimum driver version supported by the container (generally, the SDK version 'X.Y' the application was compiled against).|
+|rocmTarget|array of string|Required. A list of one or more hardware targets (e.g. gfx1100) corresponding to the GPU architectures supported by the container. Use @rocminfo@ to determine what hardware targets you have. See also "Accelerator and GPU hardware specifications":https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html (use the column "LLVM target name") and "LLVM AMDGPU backend documentation":https://llvm.org/docs/AMDGPUUsage.html .|
+|rocmDeviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|
+|rocmDeviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional. If not specified, same as @rocmDeviceCountMin@.|
+|rocmVram|integer|Requested amount of VRAM per device, in mebibytes (2**20)|
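+
+For example, one quick way to list the hardware targets of the GPUs on a node, assuming the ROCm tools are installed (output will vary by GPU):
+
+
+$ rocminfo | grep gfx
+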
h2(#UsePreemptible). arv:UsePreemptible
@@ -192,6 +251,17 @@ table(table table-bordered table-condensed).
|_. Field |_. Type |_. Description |
|usePreemptible|boolean|Required, true to opt-in to using preemptible instances, false to opt-out.|
+h2(#PreemptionBehavior). arv:PreemptionBehavior
+
+This option determines the behavior when @arvados-cwl-runner@ detects that a workflow step was cancelled because the preemptible (spot market) instance it was running on was reclaimed by the cloud provider. If @resubmitNonPreemptible@ is true, then instead of the retry behavior described above in 'UsePreemptible', on the first such failure the workflow step will be re-submitted with preemption disabled, so it will be scheduled to run on non-preemptible (on-demand) instances.
+
+When preemptible instances are reclaimed, this is a signal that the cloud provider has restricted capacity for low-priority preemptible instances. As a result, the default behavior of rescheduling on another preemptible instance carries a higher risk of being preempted a second or third time, spending more time and money while making no progress. This option provides an alternate fallback behavior: a step is attempted on a preemptible instance the first time (saving money), but re-run as non-preemptible if the first attempt was preempted (ensuring continued progress).
+
+This behavior applies to each step individually. If a step is preempted and then successfully re-run as non-preemptible, it does not affect the behavior of the next step, which will first be launched as preemptible, and so forth.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|resubmitNonPreemptible|boolean|Required. If true, then when a workflow step is cancelled because the instance was preempted, re-submit the step with preemption disabled.|
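+
+For example, to opt in to this fallback behavior for an entire run from the command line (the workflow and parameter file names here are illustrative):
+
+
+arvados-cwl-runner --enable-preemptible --enable-resubmit-non-preemptible workflow.cwl params.yml
+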
h2(#OutOfMemoryRetry). arv:OutOfMemoryRetry
diff --git a/doc/user/cwl/cwl-run-options.html.textile.liquid b/doc/user/cwl/cwl-run-options.html.textile.liquid
index 27db90fbd3..9f8d927f37 100644
--- a/doc/user/cwl/cwl-run-options.html.textile.liquid
+++ b/doc/user/cwl/cwl-run-options.html.textile.liquid
@@ -64,11 +64,17 @@ table(table table-bordered table-condensed).
|==--priority== PRIORITY|Workflow priority (range 1..1000, higher has precedence over lower)|
|==--thread-count== THREAD_COUNT|Number of threads to use for job submit and output collection.|
|==--http-timeout== HTTP_TIMEOUT|API request timeout in seconds. Default is 300 seconds (5 minutes).|
-|==--defer-downloads==|When submitting a workflow, defer downloading HTTP URLs to workflow launch instead of downloading to Keep before submit.|
+|==--defer-downloads==|When submitting a workflow, defer downloading HTTP or S3 URLs to launch of the workflow runner container instead of downloading to Keep before submit.|
+|==--enable-aws-credential-capture==|When submitting a workflow that requires AWS credentials, capture them from the local environment for use by the workflow runner container.|
+|==--disable-aws-credential-capture==|Do not capture AWS credentials from the local environment; use credentials registered with Arvados instead.|
+|==--s3-public-bucket==|Downloads are from a public bucket, so no AWS credentials are required.|
+|==--use-credential== SELECTED_CREDENTIAL|Name or UUID of a credential registered with Arvados that will be used to fetch external resources.|
|==--varying-url-params== VARYING_URL_PARAMS|A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.|
|==--prefer-cached-downloads==|If a HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).|
|==--enable-preemptible==|Use preemptible instances. Control individual steps with arv:UsePreemptible hint.|
|==--disable-preemptible==|Don't use preemptible instances.|
+|==--enable-resubmit-non-preemptible==|If a workflow step fails due to the instance it is running on being preempted, re-submit the container with the @preemptible@ flag disabled. Control individual steps with arv:PreemptionBehavior hint.|
+|==--disable-resubmit-non-preemptible==|Don't resubmit when a preemptible instance is reclaimed.|
|==--copy-deps==| Copy dependencies into the destination project.|
|==--no-copy-deps==| Leave dependencies where they are.|
|==--skip-schemas==| Skip loading of schemas|
@@ -166,6 +172,6 @@ Within the workflow, you can control whether individual steps should be preempti
If a workflow requests preemptible instances with "arv:UsePreemptible":cwl-extensions.html#UsePreemptible , but you _do not_ want to use preemptible instances, you can override it for a specific run with the @arvados-cwl-runner --disable-preemptible@ option.
-h3(#gpu). Use CUDA GPU instances
+h3(#gpu). Use GPU instances
-See "cwltool:CUDARequirement":cwl-extensions.html#CUDARequirement .
+See "cwltool:CUDARequirement":cwl-extensions.html#CUDARequirement (for Nvidia) and "arv:ROCmRequirement":cwl-extensions.html#ROCmRequirement (for AMD).
diff --git a/doc/user/cwl/cwl-style.html.textile.liquid b/doc/user/cwl/cwl-style.html.textile.liquid
index 911c9ba5a5..880d6b404c 100644
--- a/doc/user/cwl/cwl-style.html.textile.liquid
+++ b/doc/user/cwl/cwl-style.html.textile.liquid
@@ -9,19 +9,40 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
+*Performance*
+# "Does your application support NVIDIA GPU acceleration?":#nvidiagpu
+# "Trying to reduce costs?":#preemptible
+# "You have a sequence of short-running steps":#RunInSingleContainer
+# "Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@":#avoidExcessRequirements
+# "Prefer text substitution to Javascript":#preferTextSubst
+# "Use @ExpressionTool@ to efficiently rearrange input files":#expressionTool
+# "Limit RAM requests to what you really need":#limitRAM
+# "Avoid scattering by step by step":#avoidScatterByStep
+
+*Portability*
+# "Always provide @DockerRequirement@":#DockerRequirement
+# "Build a reusable library of components":#reusecode
+# "Supply scripts as input parameters":#scriptsasinput
+# "Getting the temporary and output directories":#tempdirs
+# "Specifying @ResourceRequirement@":#ResourceRequirement
+
+*Data import*
+# "Importing data into Keep from HTTP":#httpimport
+# "Importing data into Keep from S3":#s3import
+
h2(#performance). Performance
To get the best performance from your workflows, be aware of the following Arvados features, behaviors, and best practices.
-h3. Does your application support NVIDIA GPU acceleration?
+h3(#nvidiagpu). Does your application support NVIDIA GPU acceleration?
Use "cwltool:CUDARequirement":cwl-extensions.html#CUDARequirement to request nodes with GPUs.
-h3. Trying to reduce costs?
+h3(#preemptible). Trying to reduce costs?
Try "using preemptible (spot) instances":cwl-run-options.html#preemptible .
-h3. You have a sequence of short-running steps
+h3(#RunInSingleContainer). You have a sequence of short-running steps
If you have a sequence of short-running steps (less than 1-2 minutes each), use the Arvados extension "arv:RunInSingleContainer":cwl-extensions.html#RunInSingleContainer to avoid scheduling and data transfer overhead by running all the steps together in the same container on the same node. To use this feature, @cwltool@ must be installed in the container image. Example:
@@ -50,15 +71,15 @@ steps:
run: subworkflow-with-short-steps.cwl
{% endcodeblock %}
-h3. Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@
+h3(#avoidExcessRequirements). Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@
Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@ unless you specifically need them. Don't include them "just in case" because they change the default behavior and may add extra overhead.
-h3. Prefer text substitution to Javascript
+h3(#preferTextSubst). Prefer text substitution to Javascript
When combining a parameter value with a string, such as adding a filename extension, write @$(inputs.file.basename).ext@ instead of @$(inputs.file.basename + 'ext')@. The first form is evaluated as a simple text substitution, the second form (using the @+@ operator) is evaluated as an arbitrary Javascript expression and requires that you declare @InlineJavascriptRequirement@.
-h3. Use @ExpressionTool@ to efficiently rearrange input files
+h3(#expressionTool). Use @ExpressionTool@ to efficiently rearrange input files
Use @ExpressionTool@ to efficiently rearrange input files between steps of a Workflow. For example, the following expression accepts a directory containing files paired by @_R1_@ and @_R2_@ and produces an array of Directories containing each pair.
@@ -94,11 +115,11 @@ expression: |
}
{% endcodeblock %}
-h3. Limit RAM requests to what you really need
+h3(#limitRAM). Limit RAM requests to what you really need
Available compute nodes types vary over time and across different cloud providers, so it is important to limit the RAM requirement to what the program actually needs. However, if you need to target a specific compute node type, see this discussion on "calculating RAM request and choosing instance type for containers.":{{site.baseurl}}/api/execution.html#RAM
-h3. Avoid scattering by step by step
+h3(#avoidScatterByStep). Avoid scattering step by step
Instead of a scatter step that feeds into another scatter step, prefer to scatter over a subworkflow.
@@ -166,15 +187,15 @@ h2. Portability
To write workflows that are easy to modify and portable across CWL runners (in the event you need to share your workflow with others), there are several best practices to follow:
-h3. Always provide @DockerRequirement@
+h3(#DockerRequirement). Always provide @DockerRequirement@
Workflows should always provide @DockerRequirement@ in the @hints@ or @requirements@ section.
-h3. Build a reusable library of components
+h3(#reusecode). Build a reusable library of components
Share tool wrappers and subworkflows between projects. Make use of and contribute to "community maintained workflows and tools":https://github.com/common-workflow-library and tool registries such as "Dockstore":http://dockstore.org .
-h3. Supply scripts as input parameters
+h3(#scriptsasinput). Supply scripts as input parameters
CommandLineTools wrapping custom scripts should represent the script as an input parameter with the script file as a default value. Use @secondaryFiles@ for scripts that consist of multiple files. For example:
@@ -204,13 +225,13 @@ outputs:
glob: "*.fastq"
{% endcodeblock %}
-h3. Getting the temporary and output directories
+h3(#tempdirs). Getting the temporary and output directories
You can get the designated temporary directory using @$(runtime.tmpdir)@ in your CWL file, or from the @$TMPDIR@ environment variable in your script.
Similarly, you can get the designated output directory using @$(runtime.outdir)@, or from the @HOME@ environment variable in your script.
-h3. Specifying @ResourceRequirement@
+h3(#ResourceRequirement). Specifying @ResourceRequirement@
Avoid specifying resources in the @requirements@ section of a @CommandLineTool@, put it in the @hints@ section instead. This enables you to override the tool resource hint with a workflow step level requirement:
@@ -235,7 +256,9 @@ steps:
tmpdirMin: 90000
{% endcodeblock %}
-h3. Importing data into Keep
+h2. Data import
+
+h3(#httpimport). Importing data into Keep from HTTP
You can use HTTP URLs as File input parameters and @arvados-cwl-runner@ will download them to Keep for you:
@@ -250,14 +273,16 @@ fastq2:
Files are downloaded and stored in Keep collections with HTTP header information stored in metadata. If a file was previously downloaded, @arvados-cwl-runner@ uses HTTP caching rules to decide if a file should be re-downloaded or not.
-The default behavior is to transfer the files on the client, prior to submitting the workflow run. This guarantees the data is available when the workflow is submitted. However, if data transfer is time consuming and you are submitting multiple workflow runs in a row, or the node submitting the workflow has limited bandwidth, you can use the @--defer-download@ option to have the data transfer performed by workflow runner process on a compute node, after the workflow is submitted.
+The default behavior is to transfer the files on the client, prior to submitting the workflow run. This guarantees the data is available when the workflow is submitted. However, you can use the @--defer-downloads@ option to have the data transfer performed by the workflow runner process on a compute node, after the workflow is submitted. There are a couple of reasons you may want to do this:
+# You are submitting from a workstation, but expect the file will be downloaded faster by the compute node
+# You are submitting multiple workflow runs in a row and want to parallelize downloads
@arvados-cwl-runner@ provides two additional options to control caching behavior.
* @--varying-url-params@ will ignore the listed URL query parameters from any HTTP URLs when checking if a URL has already been downloaded to Keep.
* @--prefer-cached-downloads@ will search Keep for the previously downloaded URL and use that if found, without checking the upstream resource. This means changes in the upstream resource won't be detected, but it also means the workflow will not fail if the upstream resource becomes inaccessible.
-One use of this is to import files from "AWS S3 signed URLs":https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html
+One use of this is to import files from "AWS S3 signed URLs":https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html (but note that you can also import from S3 natively, see below).
Here is an example usage. The use of @--varying-url-params=AWSAccessKeyId,Signature,Expires@ is especially relevant, this removes these parameters from the cached URL, which means that if a new signed URL for the same object is generated later, it can be found in the cache.
@@ -267,3 +292,34 @@ arvados-cwl-runner --defer-download \
--prefer-cached-downloads \
workflow.cwl params.yml
{% endcodeblock %}
+
+h3(#s3import). Importing data into Keep from S3
+
+You can use S3 URLs as File input parameters and @arvados-cwl-runner@ will download them to Keep for you:
+
+{% codeblock as yaml %}
+fastq1:
+ class: File
+ location: s3://examplebucket/genomes/sampleA_1.fastq
+fastq2:
+ class: File
+ location: s3://examplebucket/genomes/sampleA_2.fastq
+{% endcodeblock %}
+
+Files are downloaded and stored in Keep collections. If the bucket is versioned, it will make note of the object version and last modified time. If a file was previously downloaded, @arvados-cwl-runner@ will use the object version and/or last modified time to decide if a file should be re-downloaded or not. The @--prefer-cached-downloads@ option will search Keep for the previously downloaded URL and use that if found, without checking the upstream resource. This means changes in the upstream resource won't be detected, but it also means the workflow will not fail if the upstream resource becomes inaccessible.
+
+The default behavior is to transfer the files on the client, prior to submitting the workflow run. This guarantees the data is available when the workflow is submitted. However, you can use the @--defer-downloads@ option to have the data transfer performed by the workflow runner process on a compute node, after the workflow is submitted. There are several reasons you may want to do this:
+# You are submitting from a workstation, but expect the file will be downloaded faster by the compute node
+# You are submitting multiple workflow runs in a row and want to parallelize downloads
+# You don't have credentials to access the S3 bucket locally but do have read access to AWS credentials registered with Arvados
+
+When using the @--defer-downloads@ option, @arvados-cwl-runner@ uses the following process to choose which AWS credentials to use to access the S3 bucket:
+
+# Arvados will first check to see if you have access to a "credential":{{site.baseurl}}/api/methods/credentials.html with @credential_class: aws_access_key@ where the S3 bucket is present in @scopes@ (the bucket name must be formatted as @s3://bucketname@).
+# Otherwise, Arvados will look for a "credential":{{site.baseurl}}/api/methods/credentials.html with @credential_class: aws_access_key@ where @scopes@ is empty.
+
+In each case, if more than one "credential":{{site.baseurl}}/api/methods/credentials.html matches, it will throw an error, and the user must provide @--use-credential@ on the command line with the name or uuid of the credential to specify precisely which one to use.
+
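+For example, a hypothetical invocation that defers downloads and pins a specific registered credential by name (@my-aws-key@ is an illustrative credential name):
+
+
+arvados-cwl-runner --defer-downloads --use-credential my-aws-key workflow.cwl params.yml
+
+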
+If no AWS credentials are registered with Arvados, but you have AWS credentials available locally (for example, in @~/.aws/credentials@), you can use @--enable-aws-credential-capture@. This instructs @arvados-cwl-runner@ to capture the active AWS credentials from your environment and pass them to the workflow runner container as a secret file. In this case, these credentials are only stored in Arvados for the duration of the workflow run and are discarded when the workflow finishes. Arvados uses the @boto3@ library to access S3, which "has a list of locations where it will search for credentials.":https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
+
+If the source S3 bucket is a public bucket, you can download from it without AWS credentials by providing @--s3-public-bucket@ on the command line.
\ No newline at end of file
diff --git a/doc/user/cwl/federated-workflows.html.textile.liquid b/doc/user/cwl/federated-workflows.html.textile.liquid
index a93aac56b1..9116a0d46e 100644
--- a/doc/user/cwl/federated-workflows.html.textile.liquid
+++ b/doc/user/cwl/federated-workflows.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-To support running analysis on geographically dispersed data (avoiding expensive data transfers by sending the computation to the data), and "hybrid cloud" configurations where an on-premise cluster can expand its capabilities by delegating work to a cloud-hosted cluster, Arvados supports federated workflows. In a federated workflow, different steps of a workflow may execute on different clusters. Arvados manages data transfer and delegation of credentials, so that all that is required is adding "arv:ClusterTarget":cwl-extensions.html#clustertarget hints to your existing workflow.
+To support running analysis on geographically dispersed data (avoiding expensive data transfers by sending the computation to the data), and "hybrid cloud" configurations where an on-premise cluster can expand its capabilities by delegating work to a cloud-hosted cluster, Arvados supports federated workflows. In a federated workflow, different steps of a workflow may execute on different clusters. Arvados manages data transfer and delegation of credentials, so that all that is required is adding "arv:ClusterTarget":cwl-extensions.html#ClusterTarget hints to your existing workflow.
!(full-width)federated-workflow.svg!
diff --git a/doc/user/cwl/sample-cluster-activity-report.png b/doc/user/cwl/sample-cluster-activity-report.png
new file mode 100644
index 0000000000..ae1ea1fd18
Binary files /dev/null and b/doc/user/cwl/sample-cluster-activity-report.png differ
diff --git a/doc/user/debugging/container-shell-access.html.textile.liquid b/doc/user/debugging/container-shell-access.html.textile.liquid
index 9c24980049..f76db0593d 100644
--- a/doc/user/debugging/container-shell-access.html.textile.liquid
+++ b/doc/user/debugging/container-shell-access.html.textile.liquid
@@ -17,7 +17,7 @@ To use this feature, your Arvados installation must be configured to allow conta
{% include 'notebox_end' %}
-The @arvados-client@ program can be used to connect to a container in a running workflow. It can be installed from packages (@apt install arvados-client@ or @yum install arvados-client@). The @arvados-client shell@ command provides an ssh connection into a running container.
+The @arvados-client@ program can be used to connect to a container in a running workflow. It can be installed from packages (@apt install arvados-client@ or @dnf install arvados-client@). The @arvados-client shell@ command provides an ssh connection into a running container.
h2(#syntax). Syntax
diff --git a/doc/user/getting_started/check-environment.html.textile.liquid b/doc/user/getting_started/check-environment.html.textile.liquid
index 1097e4e9d8..3302a8f095 100644
--- a/doc/user/getting_started/check-environment.html.textile.liquid
+++ b/doc/user/getting_started/check-environment.html.textile.liquid
@@ -16,21 +16,19 @@ Check that you are able to access the Arvados API server using @arv user current
$ arv user current
{
- "href":"https://zzzzz.arvadosapi.com/arvados/v1/users/zzzzz-xioed-9z2p3pn12yqdaem",
"kind":"arvados#user",
"etag":"8u0xwb9f3otb2xx9hto4wyo03",
"uuid":"zzzzz-tpzed-92d3kxnimy3d4e8",
- "owner_uuid":"zzzzz-tpqed-23iddeohxta2r59",
+ "owner_uuid":"zzzzz-tpzed-000000000000000",
"created_at":"2013-12-02T17:05:47Z",
- "modified_by_client_uuid":"zzzzz-xxfg8-owxa2oa2s33jyej",
- "modified_by_user_uuid":"zzzzz-tpqed-23iddeohxta2r59",
+ "modified_by_user_uuid":"zzzzz-tpzed-23iddeohxta2r59",
"modified_at":"2013-12-02T17:07:08Z",
"updated_at":"2013-12-05T19:51:08Z",
"email":"you@example.com",
"full_name":"Example User",
"first_name":"Example",
"last_name":"User",
- "identity_url":"https://www.google.com/accounts/o8/id?id=AItOawnhlZr-pQ_Ic2f2W22XaO02oL3avJ322k1",
+ "identity_url":"",
"is_active": true,
"is_admin": false,
"prefs":{}
diff --git a/doc/user/getting_started/setup-cli.html.textile.liquid b/doc/user/getting_started/setup-cli.html.textile.liquid
index 18f675d04e..1a816fba82 100644
--- a/doc/user/getting_started/setup-cli.html.textile.liquid
+++ b/doc/user/getting_started/setup-cli.html.textile.liquid
@@ -25,21 +25,33 @@ h2. Option 2: Installing Arvados tools on your own system
This option gives you more flexibility in your work, but takes more time to set up.
-h3. Configure Arvados package repositories for your system
+h3. Install client tools on Red Hat, AlmaLinux, and Rocky Linux
-Doing this isn't strictly required for most tools, but will streamline the installation process. Follow the "Arvados package repository instructions":{{site.baseurl}}/install/packages.html.
+{% assign modules_to_enable = "python39:3.9" %}
+{% assign packages_to_install = "arvados-client python3-arvados-python-client python3-arvados-cwl-runner python3-arvados-fuse python3-crunchstat-summary" %}
+{% include 'setup_redhat_repo' %}
-h3. Install individual tool packages
+Proceed to build and install the Arvados CLI tools:
-Here are the client packages you can install on your system. You can skip any you don't want or need except for the Python SDK (most other tools require it).
+
+# dnf module enable ruby:3.1
+# dnf install ruby ruby-devel gcc-c++ make redhat-rpm-config glibc-devel glibc-headers curl-devel openssl-devel zlib-devel
+# gem install arvados-cli
+
+
-* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html: This provides an Arvados API client in Python, as well as low-level command line tools.
-* "Command-line SDK":{{site.baseurl}}/sdk/cli/install.html: This provides the high-level @arv@ command and user interface to the Arvados API.
-* "FUSE Driver":{{site.baseurl}}/sdk/fuse/install.html: This provides the @arv-mount@ command and FUSE driver that lets you access Keep using standard Linux filesystem tools.
-* "CWL Runner":{{site.baseurl}}/sdk/python/arvados-cwl-runner.html: This provides the @arvados-cwl-runner@ command to register and run workflows in Crunch.
-* "crunchstat-summary":{{site.baseurl}}/user/cwl/crunchstat-summary.html: This tool provides performance reports for Crunch containers.
-* "arvados-client":{{site.baseurl}}/user/debugging/container-shell-access.html: This tool provides subcommands for inspecting Crunch containers, both interactively while they're running and after they've finished.
+h3. Install client tools on Debian and Ubuntu
-h2. After Installation: Check your environment
+{% include 'setup_debian_repo' %}
-Once you are logged in or have command line tools installed, move on to "getting an API token":{{site.baseurl}}/user/reference/api-tokens.html and "checking your environment":{{site.baseurl}}/user/getting_started/check-environment.html.
+Proceed to build and install the Arvados CLI tools:
+
+
+# apt install ruby ruby-dev gcc g++ make libc-dev libcurl4-openssl-dev zlib1g-dev
+# gem install arvados-cli
+
+
+
+h3. Proceed to configuration
+
+Once you have the command line tools installed, proceed to "getting an API token":{{site.baseurl}}/user/reference/api-tokens.html.
diff --git a/doc/user/reference/api-tokens.html.textile.liquid b/doc/user/reference/api-tokens.html.textile.liquid
index 4c35530e60..f079940237 100644
--- a/doc/user/reference/api-tokens.html.textile.liquid
+++ b/doc/user/reference/api-tokens.html.textile.liquid
@@ -33,13 +33,18 @@ h2. settings.conf
Arvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment following the instructions above, you can use these commands to create an Arvados configuration file:
-$ echo "ARVADOS_API_HOST=$ARVADOS_API_HOST" > ~/.config/arvados/settings.conf
-$ echo "ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.config/arvados/settings.conf
-
+$ cat >~/.config/arvados/settings.conf <<EOF
+ARVADOS_API_HOST=$ARVADOS_API_HOST
+ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN
+EOF
+
-* The output-redirection operator @>@ in the first command will cause the target file @~/.config/arvados/settings.conf@ to be created anew, wiping out the content of any existing file at that path.
-* The @>>@ operator in the second command appends to the target file.
+{% include 'notebox_begin' %}
+This will overwrite the file @~/.config/arvados/settings.conf@.
+
+Arvados tools written in Python (most notably the @arv keep@ commands, @arv copy@, and @arv-mount@) search for configuration files following the XDG Base Directory Specification. This is uncommon, but if you have customized the @XDG_CONFIG_HOME@ environment variable, you may need to add @$HOME/.config@ to the @XDG_CONFIG_DIRS@ environment variable to have all the tools find the same configuration.
+{% include 'notebox_end' %}
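+
+For example, a sketch of extending the search path so the tools also check the default location (adjust to your own setup):
+
+
+$ export XDG_CONFIG_DIRS=$HOME/.config:${XDG_CONFIG_DIRS:-/etc/xdg}
+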
h2. .bashrc
diff --git a/doc/user/topics/arv-copy.html.textile.liquid b/doc/user/topics/arv-copy.html.textile.liquid
index a05620d62d..fce5bbd78b 100644
--- a/doc/user/topics/arv-copy.html.textile.liquid
+++ b/doc/user/topics/arv-copy.html.textile.liquid
@@ -21,36 +21,42 @@ For projects, @arv-copy@ will copy all the collections workflow definitions owne
For workflow definitions, @arv-copy@ will recursively go through the workflow and copy all associated dependencies (input collections and Docker images).
-For example, let's copy from the Arvados playground, also known as *pirca*, to *dstcl*. The names *pirca* and *dstcl* are interchangable with any cluster id. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *zzzzz*-4zz18-tci4vn4fa95w0zx, the cluster name is *zzzzz* .
-
-In order to communicate with both clusters, you must create custom configuration files for each cluster. The "Getting an API token":{{site.baseurl}}/user/reference/api-tokens.html page describes how to get a token and create a configuration file. However, instead of "settings.conf" in @~/.config/arvados@ you need two configuration files, one for each cluster, with filenames in the format of *ClusterID.conf*.
-
-In this example, navigate to the *Current token* page on each of *pirca* and *dstcl* to get the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@.
-
-The config file consists of two lines, one for ARVADOS_API_HOST and one for ARVADOS_API_TOKEN:
-
-
-ARVADOS_API_HOST=zzzzz.arvadosapi.com
-ARVADOS_API_TOKEN=v2/zzzzz-gj3su-xxxxxxxxxxxxxxx/123456789abcdefghijkl
-
-
-Copy your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ into the config files as shown below in the shell account from which you are executing the commands. In our example, you need two files, @~/.config/arvados/pirca.conf@ and @~/.config/arvados/dstcl.conf@.
-
-Now you're ready to copy between *pirca* and *dstcl*!
+For example, let's copy from the Arvados Playground, also known as *pirca*, to *dstcl*. The names *pirca* and *dstcl* are interchangeable with any cluster ID. You can find the cluster ID from the prefix of the UUID of the object you want to copy. For example, in @zzzzz-4zz18-tci4vn4fa95w0zx@, the cluster ID is *zzzzz*.
+
+In order to communicate with both clusters, you must create custom configuration files for each cluster. The "Getting an API token":{{site.baseurl}}/user/reference/api-tokens.html page describes how to get a token and create a configuration file. However, instead of creating the default @~/.config/arvados/settings.conf@ you need two configuration files, one for each cluster, with filenames in the format of @ClusterID.conf@. For this example, follow these steps:
+
+{% include 'notebox_begin' %}
+@arv-copy@ searches for configuration files following the XDG Base Directory Specification. This is uncommon, but if you have customized the @XDG_CONFIG_HOME@ environment variable, save both configuration files under @$XDG_CONFIG_HOME/arvados/@ instead of the default @~/.config/arvados/@ shown below.
+{% include 'notebox_end' %}
+
+# Open the "Arvados Playground Workbench":https://playground.arvados.org.
+# On the system where you'll run @arv-copy@, start a new file named @~/.config/arvados/pirca.conf@ in your editor.
+# In Workbench, open the user menu in the upper right, and select "Get API token."
+# In the Workbench "Get API Token" dialog, under the "API Host" header, copy the value to your clipboard using the button.
+# In your editor, write the text @ARVADOS_API_HOST=@, then paste the "API Host" value you copied in the previous step, and start a new line.
+# In the Workbench "Get API Token" dialog, under the "API Token" header, copy the value to your clipboard using the button.
+# In your editor, write the text @ARVADOS_API_TOKEN=@, then paste the "API Token" value you copied in the previous step, and start a new line.
+# Review your work. In your editor, @pirca.conf@ should look like this, with a different value for @ARVADOS_API_TOKEN@:
+ ARVADOS_API_HOST=pirca.arvadosapi.com
+ARVADOS_API_TOKEN=v2/pirca-gj3su-12345abcde67890/abcdefghijklmnopqrstuvwxyz1234567890
+
If it looks right, save and close the file.
+# Open Workbench for your destination cluster *dstcl*.
+# On the system where you'll run @arv-copy@, start a new file named @~/.config/arvados/dstcl.conf@ in your editor. Replace *@dstcl@* in the filename with the actual cluster ID of your destination cluster.
+# Repeat steps 3-8 to create a settings file with credentials for *dstcl*.
h3. How to copy a collection
-First, determine the uuid or portable data hash of the collection you want to copy from the source cluster. The uuid can be found in the collection display page in the collection summary area (top left box), or from the URL bar (the part after @collections/...@)
+First, determine the UUID or portable data hash of the collection you want to copy from the source cluster. The UUID can be copied with the "Copy UUID" toolbar button, found on the collection details panel, or from the URL bar (the part after @collections/...@).
-Now copy the collection from *pirca* to *dstcl*. We will use the uuid @jutro-4zz18-tv416l321i4r01e@ as an example. You can find this collection on playground.arvados.org.
+Now copy the collection from *pirca* to *dstcl*. We will use the UUID @pirca-4zz18-xa0i7qjide8td5d@ as an example. You can find this collection on playground.arvados.org. Because the UUID starts with @pirca@, @arv-copy@ will infer that the source cluster is @pirca@.
-~$ arv-copy --src pirca --dst dstcl jutro-4zz18-tv416l321i4r01e
-jutro-4zz18-tv416l321i4r01e: 6.1M / 6.1M 100.0%
+~$ arv-copy --dst dstcl pirca-4zz18-xa0i7qjide8td5d
+pirca-4zz18-xa0i7qjide8td5d: 6.1M / 6.1M 100.0%
arvados.arv-copy[1234] INFO: Success: created copy with uuid dstcl-4zz18-xxxxxxxxxxxxxxx
-You can also copy by content address:
+You can also copy by content address. In this case, the content address does not include a specific cluster ID, so you need to specify the source cluster with @--src@.
~$ arv-copy --src pirca --dst dstcl 2463fa9efeb75e099685528b3b9071e0+438
@@ -59,11 +65,11 @@ arvados.arv-copy[1234] INFO: Success: created copy with uuid dstcl-4zz18-xxxxxxx
-The output of arv-copy displays the uuid of the collection generated in the destination cluster. By default, the output is placed in your home project in the destination cluster. If you want to place your collection in an existing project, you can specify the project you want it to be in using the tag @--project-uuid@ followed by the project uuid.
+The output of arv-copy displays the UUID of the collection generated in the destination cluster. By default, the output is placed in your home project in the destination cluster. If you want to place your collection in an existing project, you can specify the project you want it to be in using the tag @--project-uuid@ followed by the project UUID.
-For example, this will copy the collection to project @dstcl-j7d0g-a894213ukjhal12@ in the destination cluster.
+For example, this will copy the collection to project @dstcl-j7d0g-a894213ukjhal12@ in the destination cluster. Since the project UUID starts with @dstcl@, @arv-copy@ will infer that the destination cluster is @dstcl@.
- ~$ arv-copy --src pirca --dst dstcl --project-uuid dstcl-j7d0g-a894213ukjhal12 jutro-4zz18-tv416l321i4r01e
+ ~$ arv-copy --project-uuid dstcl-j7d0g-a894213ukjhal12 pirca-4zz18-xa0i7qjide8td5d
@@ -73,9 +79,9 @@ h3. How to copy a workflow
Copying workflows requires @arvados-cwl-runner@ to be available in your @$PATH@.
-We will use the uuid @jutro-7fd4e-mkmmq53m1ze6apx@ as an example workflow.
+We will use the UUID @jutro-7fd4e-mkmmq53m1ze6apx@ as an example workflow.
-Arv-copy will infer the source cluster is @jutro@ from the object uuid, and destination cluster is @pirca@ from @--project-uuid@.
+Arv-copy will infer that the source cluster is @jutro@ from the object UUID, and the destination cluster is @pirca@ from @--project-uuid@.
~$ arv-copy --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx
@@ -87,15 +93,15 @@ jutro-4zz18-vvvqlops0a0kpdl: 94M / 94M 100.0%
-The name, description, and workflow definition from the original workflow will be used for the destination copy. In addition, any *collections* and *docker images* referenced in the source workflow definition will also be copied to the destination.
+The name, description, and workflow definition from the original workflow will be used for the destination copy. In addition, any *collections* and *Docker images* referenced in the source workflow definition will also be copied to the destination.
If you would like to copy the object without dependencies, you can use the @--no-recursive@ flag.
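For example, a hypothetical invocation (reusing the example workflow UUID above) that copies only the workflow record itself, without its collections or Docker images:

~$ arv-copy --no-recursive --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx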
h3. How to copy a project
-We will use the uuid @jutro-j7d0g-xj19djofle3aryq@ as an example project.
+We will use the UUID @jutro-j7d0g-xj19djofle3aryq@ as an example project.
-Arv-copy will infer the source cluster is @jutro@ from the source project uuid, and destination cluster is @pirca@ from @--project-uuid@.
+Arv-copy will infer that the source cluster is @jutro@ from the source project UUID, and the destination cluster is @pirca@ from @--project-uuid@.
~$ arv-copy --project-uuid pirca-j7d0g-lr8sq3tx3ovn68k jutro-j7d0g-xj19djofle3aryq
diff --git a/doc/user/tutorials/add-new-repository.html.textile.liquid b/doc/user/tutorials/add-new-repository.html.textile.liquid
deleted file mode 100644
index 6046e7d14b..0000000000
--- a/doc/user/tutorials/add-new-repository.html.textile.liquid
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: default
-navsection: userguide
-title: Adding a new Arvados git repository
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-Arvados supports managing git repositories. You can access these repositories using your Arvados credentials and share them with other Arvados users.
-
-{% include 'tutorial_expectations' %}
-
-h2. Setting up Git
-
-Before you start using Git and arvados repositories, you should do some basic configuration (you only need to do this the first time):
-
-
-~$ git config --global user.name "Your Name"
-~$ git config --global user.email $USER@example.com
-
-
-h2. Add "tutorial" repository
-
-On the Arvados Workbench, click on the dropdown menu icon (Account Management) in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*.
-
-In the *Repositories* page, you will see the + NEW REPOSITORY button.
-
-!{width: 100%;}{{ site.baseurl }}/images/repositories-panel.png!
-
-Click the + NEW REPOSITORY button to open the popup to add a new Arvados repository. You will see a text box where you can enter the name of the repository. Enter *tutorial* in this text box and click on *Create*.
-
-{% include 'notebox_begin' %}
-The name you enter here must begin with a letter and can only contain alphanumeric characters.
-{% include 'notebox_end' %}
-
-!{width: 100%;}{{ site.baseurl }}/images/add-new-repository.png!
-
-This will create a new repository with the name @$USER/tutorial@. It can be accessed using the URL https://git.{{ site.arvados_api_host }}/$USER/tutorial.git or git@git.{{ site.arvados_api_host }}:$USER/tutorial.git
-
-Back in the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the name column with these URLs.
-
-!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/added-new-repository.png!
-
-You are now ready to use this *tutorial* repository to run your crunch scripts.
diff --git a/doc/user/tutorials/git-arvados-guide.html.textile.liquid b/doc/user/tutorials/git-arvados-guide.html.textile.liquid
deleted file mode 100644
index a4ac2a5795..0000000000
--- a/doc/user/tutorials/git-arvados-guide.html.textile.liquid
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: default
-navsection: userguide
-title: Working with an Arvados git repository
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-This tutorial describes how to work with an Arvados-managed git repository. Working with an Arvados git repository is very similar to working with other public git repositories.
-
-{% include 'tutorial_expectations' %}
-
-{% include 'tutorial_git_repo_expectations' %}
-
-h2. Cloning a git repository
-
-Before you start using Git, you should do some basic configuration (you only need to do this the first time):
-
-
-~$ git config --global user.name "Your Name"
-~$ git config --global user.email $USER@example.com
-
-
-On the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the Account Management menu, and click on the menu item *Repositories*. In the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the *name* column. Next to *name* is the column *URL*. Copy the *URL* value associated with your repository. This should look like https://git.{{ site.arvados_api_host }}/$USER/tutorial.git. Alternatively, you can use git@git.{{ site.arvados_api_host }}:$USER/tutorial.git
-
-Next, on the Arvados virtual machine, clone your Git repository:
-
-
-~$ cd $HOME # (or wherever you want to install)
-~$ git clone https://git.{{ site.arvados_api_host }}/$USER/tutorial.git
-Cloning into 'tutorial'...
-
-
-This will create a Git repository in the directory called @tutorial@ in your home directory. Say yes when prompted to continue connecting.
-Ignore any warning that you are cloning an empty repository.
-
-*Note:* If you are prompted for username and password when you try to git clone using this command, you may first need to update your git configuration. Execute the following commands to update your git configuration.
-
-
-
-~$ git config 'credential.https://git.{{ site.arvados_api_host }}/.username' none
-~$ git config 'credential.https://git.{{ site.arvados_api_host }}/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'
-
-
-
-h2. Creating a git branch in an Arvados repository
-
-Create a git branch named *tutorial_branch* in the *tutorial* Arvados git repository.
-
-
-~$ cd tutorial
-~/tutorial$ git checkout -b tutorial_branch
-
-
-
-h2. Adding scripts to an Arvados repository
-
-A git repository is a good place to store the CWL workflows that you run on Arvados.
-
-First, create a simple CWL CommandLineTool:
-
-notextile. ~/tutorials$ nano hello.cwl
-
- {% code tutorial_hello_cwl as yaml %}
-
-Next, add the file to the git repository. This tells @git@ that the file should be included on the next commit.
-
-notextile. ~/tutorial$ git add hello.cwl
-
-Next, commit your changes. All staged changes are recorded into the local git repository:
-
-
-~/tutorial$ git commit -m "my first script"
-
-
-
-Finally, upload your changes to the remote repository:
-
-
-~/tutorial/crunch_scripts$ git push origin tutorial_branch
-
-
-
-The same steps can be used to add any of your custom bash, R, or python scripts to an Arvados repository.
diff --git a/doc/user/tutorials/tutorial-projects.html.textile.liquid b/doc/user/tutorials/tutorial-projects.html.textile.liquid
index b4dc9edea3..9e658bf2ca 100644
--- a/doc/user/tutorials/tutorial-projects.html.textile.liquid
+++ b/doc/user/tutorials/tutorial-projects.html.textile.liquid
@@ -39,3 +39,13 @@ The *General access* drop-down menu controls the default sharing setting, with t
* *Public*: This means the list of *People with access* will include _Anonymous users_, even if they are not users of the current cluster. You can further set their access level under *Authorization*.
* *All users*: This means sharing with other users who are logged in on the current cluster.
* *Shared*: When you choose to share with specific people or groups, *General access* will be set to *Shared*. From this state, you can further specify the default sharing settings for *Public* and *All users*.
+
+h2(#descriptions). Descriptions and metadata
+
+You can add descriptions to projects, collections, workflows, and workflow runs when you create them, or later using the "Edit" dialog. Descriptions are included when performing full-text search on records.
+
+Descriptions are formatted using HTML. Workbench provides a rich text editor for editing HTML descriptions.
+
+{% include 'html_tags' %}
+
+You can also add key:value metadata to projects, collections, and workflow runs when you create them, or later by using the "Edit" dialog. Properties can be queried in the advanced search. For more information, see "metadata properties":{{site.baseurl}}/api/properties.html .
\ No newline at end of file
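As a sketch of setting properties programmatically, this hypothetical example uses the Arvados Python SDK; the collection UUID and property keys are placeholders:

import arvados

# Connect using ARVADOS_API_HOST and ARVADOS_API_TOKEN from the environment.
api = arvados.api('v1')

# Placeholder UUID and example property keys -- replace with your own values.
api.collections().update(
    uuid='zzzzz-4zz18-0123456789abcde',
    body={'collection': {'properties': {'category': 'sequencing', 'sample_id': 'S42'}}},
).execute()

The same @properties@ field is available on projects and workflow runs through their corresponding API resources.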
diff --git a/docker/migrate-docker19/Dockerfile b/docker/migrate-docker19/Dockerfile
deleted file mode 100644
index 23bb63547b..0000000000
--- a/docker/migrate-docker19/Dockerfile
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-FROM debian:8
-
-ENV DEBIAN_FRONTEND noninteractive
-
-RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7 && \
- gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
- apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D || \
- apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-
-VOLUME /var/lib/docker
-
-RUN mkdir -p /etc/apt/sources.list.d && \
- echo deb http://apt.arvados.org/jessie jessie main > /etc/apt/sources.list.d/apt.arvados.org.list && \
- apt-get clean && \
- apt-get update && \
- apt-get install -yq --no-install-recommends -o Acquire::Retries=6 \
- git curl python-arvados-python-client apt-transport-https ca-certificates && \
- apt-get clean
-
-RUN echo deb https://apt.dockerproject.org/repo debian-jessie main > /etc/apt/sources.list.d/docker.list && \
- apt-get update && \
- apt-get install -yq --no-install-recommends -o Acquire::Retries=6 \
- docker-engine=1.9.1-0~jessie && \
- apt-get clean
-
-RUN mkdir /root/pkgs && \
- cd /root/pkgs && \
- curl -L -O https://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.13.1-0~debian-jessie_amd64.deb && \
- curl -L -O http://httpredir.debian.org/debian/pool/main/libt/libtool/libltdl7_2.4.2-1.11+b1_amd64.deb
-
-ADD migrate.sh dnd.sh /root/
diff --git a/docker/migrate-docker19/dnd.sh b/docker/migrate-docker19/dnd.sh
deleted file mode 100755
index 703a124280..0000000000
--- a/docker/migrate-docker19/dnd.sh
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-# Taken from https://github.com/jpetazzo/dind
-
-exec 2>&1
-
-# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
-dmsetup mknodes
-
-: ${LOG:=stdio}
-
-# First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
-[ -d $CGROUP ] || mkdir $CGROUP
-
-if mountpoint -q $CGROUP ; then
- true
-else
- mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP
-fi
-
-if ! mountpoint -q $CGROUP ; then
- echo "Could not find or mount cgroups. Tried /sys/fs/cgroup and /cgroup. Did you use --privileged?"
- exit 1
-fi
-
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
-then
- mount -t securityfs none /sys/kernel/security || {
- echo "Could not mount /sys/kernel/security."
- echo "AppArmor detection and --privileged mode might break."
- }
-fi
-
-# Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
-do
- [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
- mountpoint -q $CGROUP/$SUBSYS ||
- mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
-
- # The two following sections address a bug which manifests itself
- # by a cryptic "lxc-start: no ns_cgroup option specified" when
-# trying to start containers within a container.
- # The bug seems to appear when the cgroup hierarchies are not
- # mounted on the exact same directories in the host, and in the
- # container.
-
- # Named, control-less cgroups are mounted with "-o name=foo"
-# (and appear as such under /proc/<pid>/cgroup) but are usually
- # mounted on a directory named "foo" (without the "name=" prefix).
- # Systemd and OpenRC (and possibly others) both create such a
- # cgroup. To avoid the aforementioned bug, we symlink "foo" to
- # "name=foo". This shouldn't have any adverse effect.
- #echo $SUBSYS | grep -q ^name= && {
- # NAME=$(echo $SUBSYS | sed s/^name=//)
- # ln -s $SUBSYS $CGROUP/$NAME
- #}
-
- # Likewise, on at least one system, it has been reported that
- # systemd would mount the CPU and CPU accounting controllers
- # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
- # but on a directory called "cpu,cpuacct" (note the inversion
- # in the order of the groups). This tries to work around it.
- [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
-done
-
-# Note: as I write those lines, the LXC userland tools cannot setup
-# a "sub-container" properly if the "devices" cgroup is not in its
-# own hierarchy. Let's detect this and issue a warning.
-grep -q :devices: /proc/1/cgroup ||
- echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
-grep -qw devices /proc/1/cgroup ||
- echo "WARNING: it looks like the 'devices' cgroup is not mounted."
-
-# Now, close extraneous file descriptors.
-pushd /proc/self/fd >/dev/null
-for FD in *
-do
- case "$FD" in
- # Keep stdin/stdout/stderr
- [012])
- ;;
- # Nuke everything else
- *)
- eval exec "$FD>&-"
- ;;
- esac
-done
-popd >/dev/null
-
-
-# If a pidfile is still around (for example after a container restart),
-# delete it so that docker can start.
-rm -rf /var/run/docker.pid
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-
-exec docker daemon --storage-driver=$1 $DOCKER_DAEMON_ARGS
diff --git a/docker/migrate-docker19/migrate.sh b/docker/migrate-docker19/migrate.sh
deleted file mode 100755
index 76fe823394..0000000000
--- a/docker/migrate-docker19/migrate.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-# This script is called by arv-migrate-docker19 to perform the actual migration
-# of a single image. This works by running Docker-in-Docker (dnd.sh) to
-# download the image using Docker 1.9 and then upgrading to Docker 1.13 and
-# uploading the converted image.
-
-# When using bash in pid 1 and using "trap on EXIT"
-# it will sometimes go into a 100% CPU infinite loop.
-#
-# Using workaround from here:
-#
-# https://github.com/docker/docker/issues/4854
-if [ "$$" = 1 ]; then
- $0 "$@"
- exit $?
-fi
-
-# -x show script
-# -e exit on error
-# -o pipefail use exit code from 1st failure in pipeline, not last
-set -x -e -o pipefail
-
-image_tar_keepref=$1
-image_id=$2
-image_repo=$3
-image_tag=$4
-project_uuid=$5
-graph_driver=$6
-
-if [[ "$image_repo" = "" ]] ; then
- image_repo=none
- image_tag=latest
-fi
-
-# Print free space in /var/lib/docker
-function freespace() {
- df -B1 /var/lib/docker | tail -n1 | sed 's/ */ /g' | cut -d' ' -f4
-}
-
-# Run docker-in-docker script and then wait for it to come up
-function start_docker {
- /root/dnd.sh $graph_driver &
- for i in $(seq 1 10) ; do
- if docker version >/dev/null 2>/dev/null ; then
- return
- fi
- sleep 1
- done
- false
-}
-
-# Kill docker from pid then wait for it to be down
-function kill_docker {
- if test -f /var/run/docker.pid ; then
- kill $(cat /var/run/docker.pid)
- fi
- for i in $(seq 1 10) ; do
- if ! docker version >/dev/null 2>/dev/null ; then
- return
- fi
- sleep 1
- done
- false
-}
-
-# Ensure that we clean up docker graph and/or lingering cache files on exit
-function cleanup {
- kill_docker
- rm -rf /var/lib/docker/*
- rm -rf /root/.cache/arvados/docker/*
- echo "Available space after cleanup is $(freespace)"
-}
-
-trap cleanup EXIT
-
-start_docker
-
-echo "Initial available space is $(freespace)"
-
-arv-get $image_tar_keepref | docker load
-
-
-docker tag $image_id $image_repo:$image_tag
-
-docker images -a
-
-kill_docker
-
-echo "Available space after image load is $(freespace)"
-
-cd /root/pkgs
-dpkg -i libltdl7_2.4.2-1.11+b1_amd64.deb docker-engine_1.13.1-0~debian-jessie_amd64.deb
-
-echo "Available space after image upgrade is $(freespace)"
-
-start_docker
-
-docker images -a
-
-if [[ "$image_repo" = "none" ]] ; then
- image_repo=$(docker images -a --no-trunc | sed 's/ */ /g' | grep ^none | cut -d' ' -f3)
- image_tag=""
-fi
-
-UUID=$(arv-keepdocker --force-image-format --project-uuid=$project_uuid $image_repo $image_tag)
-
-echo "Available space after arv-keepdocker is $(freespace)"
-
-echo "Migrated uuid is $UUID"
diff --git a/go.mod b/go.mod
index aef54ac1c1..36c2fef358 100644
--- a/go.mod
+++ b/go.mod
@@ -1,119 +1,140 @@
module git.arvados.org/arvados.git
-go 1.17
+go 1.23.0
+
+toolchain go1.24.1
require (
+ dario.cat/mergo v1.0.0
github.com/AdRoll/goamz v0.0.0-20170825154802-2731d20f46f4
- github.com/Azure/azure-sdk-for-go v45.1.0+incompatible
- github.com/Azure/go-autorest/autorest v0.11.22
- github.com/Azure/go-autorest/autorest/azure/auth v0.5.9
+ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
+ github.com/Azure/go-autorest/autorest v0.11.29
+ github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/Azure/go-autorest/autorest/to v0.4.0
- github.com/arvados/cgofuse v1.2.0-arvados1
- github.com/aws/aws-sdk-go v1.44.174
- github.com/aws/aws-sdk-go-v2 v0.23.0
- github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092
- github.com/coreos/go-oidc/v3 v3.5.0
- github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
- github.com/creack/pty v1.1.18
- github.com/docker/docker v24.0.9+incompatible
- github.com/dustin/go-humanize v1.0.0
- github.com/fsnotify/fsnotify v1.4.9
+ github.com/arvados/cgofuse v1.2.0
+ github.com/aws/aws-sdk-go v1.44.256
+ github.com/aws/aws-sdk-go-v2 v1.27.0
+ github.com/aws/aws-sdk-go-v2/config v1.27.16
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.16
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3
+ github.com/aws/smithy-go v1.20.2
+ github.com/bmatcuk/doublestar/v4 v4.6.1
+ github.com/coreos/go-oidc/v3 v3.10.0
+ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
+ github.com/creack/pty v1.1.21
+ github.com/docker/docker v26.1.5+incompatible
+ github.com/dustin/go-humanize v1.0.1
+ github.com/fsnotify/fsnotify v1.7.0
github.com/ghodss/yaml v1.0.0
github.com/go-ldap/ldap v3.0.3+incompatible
github.com/gogo/protobuf v1.3.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/gorilla/mux v1.8.0
- github.com/hashicorp/go-retryablehttp v0.7.2
- github.com/hashicorp/golang-lru v0.5.1
- github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87
- github.com/imdario/mergo v0.3.12
+ github.com/gotd/contrib v0.20.0
+ github.com/hashicorp/go-retryablehttp v0.7.7
+ github.com/hashicorp/golang-lru v1.0.2
+ github.com/hashicorp/yamux v0.1.1
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff
- github.com/jmoiron/sqlx v1.2.0
- github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc
+ github.com/jmoiron/sqlx v1.4.0
+ github.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d
github.com/julienschmidt/httprouter v1.3.0
- github.com/lib/pq v1.10.2
- github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9
- github.com/prometheus/client_golang v1.14.0
- github.com/prometheus/client_model v0.3.0
- github.com/prometheus/common v0.39.0
- github.com/sirupsen/logrus v1.8.1
- golang.org/x/crypto v0.22.0
- golang.org/x/net v0.21.0
- golang.org/x/oauth2 v0.11.0
- golang.org/x/sys v0.19.0
- google.golang.org/api v0.126.0
+ github.com/lib/pq v1.10.9
+ github.com/msteinert/pam v1.2.0
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
+ github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus/client_model v0.6.1
+ github.com/prometheus/common v0.55.0
+ github.com/sirupsen/logrus v1.9.3
+ golang.org/x/crypto v0.39.0
+ golang.org/x/net v0.41.0
+ golang.org/x/oauth2 v0.21.0
+ golang.org/x/sys v0.33.0
+ google.golang.org/api v0.181.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
- gopkg.in/square/go-jose.v2 v2.5.1
- gopkg.in/src-d/go-billy.v4 v4.0.1
- gopkg.in/src-d/go-git.v4 v4.0.0
+ gopkg.in/go-jose/go-jose.v2 v2.6.3
rsc.io/getopt v0.0.0-20170811000552-20be20937449
)
require (
- cloud.google.com/go/compute v1.23.0 // indirect
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/auth v0.4.2 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
- github.com/Azure/go-autorest/autorest/adal v0.9.17 // indirect
- github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
+ github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
- github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
+ github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/Microsoft/go-winio v0.5.2 // indirect
- github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
- github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bgentry/speakeasy v0.1.0 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/containerd/log v0.1.0 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
- github.com/docker/distribution v2.8.2+incompatible // indirect
- github.com/docker/go-connections v0.3.0 // indirect
- github.com/docker/go-units v0.4.0 // indirect
- github.com/gliderlabs/ssh v0.2.2 // indirect
- github.com/go-asn1-ber/asn1-ber v1.4.1 // indirect
- github.com/go-jose/go-jose/v3 v3.0.3 // indirect
- github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
+ github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/gofrs/uuid v4.4.0+incompatible // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/s2a-go v0.1.4 // indirect
- github.com/google/uuid v1.3.1 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
- github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
- github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 // indirect
- github.com/kr/pretty v0.2.1 // indirect
- github.com/kr/text v0.1.0 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.0.2 // indirect
- github.com/pelletier/go-buffruneio v0.2.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
- github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76 // indirect
- github.com/sergi/go-diff v1.0.0 // indirect
- github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 // indirect
- github.com/src-d/gcfg v1.3.0 // indirect
- github.com/xanzy/ssh-agent v0.1.0 // indirect
+ github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect
go.opencensus.io v0.24.0 // indirect
- golang.org/x/text v0.14.0 // indirect
- golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
- golang.org/x/tools v0.6.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
- google.golang.org/grpc v1.59.0 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+ go.opentelemetry.io/otel v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
+ go.opentelemetry.io/otel/metric v1.27.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.27.0 // indirect
+ go.opentelemetry.io/otel/trace v1.27.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ golang.org/x/tools v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240528155852-a33235495d66 // indirect
+ google.golang.org/grpc v1.64.1 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
- gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 // indirect
- gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
)
@@ -121,3 +142,6 @@ require (
replace github.com/AdRoll/goamz => github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef
replace gopkg.in/yaml.v2 => github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b
+
+// Avoid v1.8.1, see https://dev.arvados.org/issues/21705#note-16
+replace github.com/gorilla/mux => github.com/gorilla/mux v1.8.0
diff --git a/go.sum b/go.sum
index c5f4d837d3..599ea5369e 100644
--- a/go.sum
+++ b/go.sum
@@ -1,296 +1,337 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
-cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A=
-github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=
+cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws=
-github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY=
-github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.9 h1:Y2CgdzitFDsdMwYMzf9LIZWrrTFysqbRc7b94XVVJ78=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 h1:iuooz5cZL6VRcO7DVSFYxRcouqn6bFVE/e77Wts50Zk=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.4/go.mod h1:yAQ2b6eP/CmLPnmLvxtT1ALIY3OR1oFcCqVBi8vHiTc=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
-github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
-github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/arvados/cgofuse v1.2.0-arvados1 h1:4Q4vRJ4hbTCcI4gGEaa6hqwj3rqlUuzeFQkfoEA2HqE=
-github.com/arvados/cgofuse v1.2.0-arvados1/go.mod h1:79WFV98hrkRHK9XPhh2IGGOwpFSjocsWubgxAs2KhRc=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/arvados/cgofuse v1.2.0 h1:sWgVxyvSFjH965Uc7ReScn/cBl9Jemc9SeUNlEmjRH4=
+github.com/arvados/cgofuse v1.2.0/go.mod h1:79WFV98hrkRHK9XPhh2IGGOwpFSjocsWubgxAs2KhRc=
github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef h1:cl7DIRbiAYNqaVxg3CZY8qfZoBOKrj06H/x9SPGaxas=
github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef/go.mod h1:rCtgyMmBGEbjTm37fCuBYbNL0IhztiALzo3OB9HyiOM=
github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b h1:hK0t0aJTTXI64lpXln2A1SripqOym+GVNTnwsLes39Y=
github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.44.174 h1:9lR4a6MKQW/t6YCG0ZKAt1GAkjdEPP8sWch/pfcuR0c=
-github.com/aws/aws-sdk-go v1.44.174/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/aws/aws-sdk-go-v2 v0.23.0 h1:+E1q1LLSfHSDn/DzOtdJOX+pLZE2HiNV2yO5AjZINwM=
-github.com/aws/aws-sdk-go-v2 v0.23.0/go.mod h1:2LhT7UgHOXK3UXONKI5OMgIyoQL6zTAw/jwIeX6yqzw=
+github.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4=
+github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo=
+github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
+github.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM=
+github.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21 h1:1v8Ii0MRVGYB/sdhkbxrtolCA7Tp+lGh+5OJTs5vmZ8=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21/go.mod h1:cxdd1rc8yxCjKz28hi30XN1jDXr2DxZvD44vLxTz/bg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 h1:/FUtT3xsoHO3cfh+I/kCbcMCN98QZRsiFet/V8QkWSs=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7/go.mod h1:MaCAgWpGooQoCWZnMur97rGn5dp350w2+CeiV5406wE=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4 h1:JBcPadBAnSwqUZQ1o2XOkTXy7GBcidpupkXZf02parw=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4/go.mod h1:iJ2sQeUTkjNp3nL7kE/Bav0xXYhtiRCRP5ZXk4jFhCQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 h1:UXqEWQI0n+q0QixzU0yUUQBZXRd5037qdInTIHFTl98=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9/go.mod h1:xP6Gq6fzGZT8w/ZN+XvGMZ2RU1LeEs7b2yUP5DN8NY4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 h1:uO5XR6QGBcmPyo2gxofYJLFkcVQ4izOoGDNenlZhTEk=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7/go.mod h1:feeeAYfAcwTReM6vbwjEyDmiGho+YgBhaFULuXDW8kc=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 h1:57NtjG+WLims0TxIQbjTqebZUKDM03DfM11ANAekW0s=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3/go.mod h1:739CllldowZiPPsDFcJHNF4FXrVxaSGVnZ9Ez9Iz9hc=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=
+github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
+github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092 h1:0Di2onNnlN5PAyWPbqlPyN45eOQ+QW/J9eqLynt4IV4=
-github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092/go.mod h1:8IzBjZCRSnsvM6MJMG8HNNtnzMl48H22rbJL2kRUJ0Y=
+github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
+github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
-github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
+github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
+github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
+github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
+github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
-github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
-github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g=
+github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw=
-github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
-github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
-github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=
github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
+github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
-github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
-github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
-github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
-github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/gotd/contrib v0.20.0 h1:1Wc4+HMQiIKYQuGHVwVksIx152HFTP6B5n88dDe0ZYw=
+github.com/gotd/contrib v0.20.0/go.mod h1:P6o8W4niqhDPHLA0U+SA/L7l3BQHYLULpeHfRSePn9o=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
-github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff h1:6NvhExg4omUC9NfA+l4Oq3ibNNeJUdiAF3iBVB0PlDk=
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff/go.mod h1:ddfPX8Z28YMjiqoaJhNBzWHapTHXejnB5cDCUWDwriw=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc h1:JJPhSHowepOF2+ElJVyb9jgt5ZyBkPMkPuhS0uODSFs=
-github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc/go.mod h1:fNiSoOiEI5KlkWXn26OwKnNe58ilTIkpBlgOrt7Olu8=
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
+github.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d h1:9dIJ/sx3yapvuq3kvTSVQ6UVS2HxfOB4MCwWiH8JcvQ=
+github.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 h1:xXn0nBttYwok7DhU4RxqaADEpQn7fEMt5kKc3yoj/n0=
-github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
-github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
+github.com/msteinert/pam v1.2.0 h1:mYfjlvN2KYs2Pb9G6nb/1f/nPfAttT/Jee5Sq9r3bGE=
+github.com/msteinert/pam v1.2.0/go.mod h1:d2n0DCUK8rGecChV3JzvmsDjOY4R7AYbsNxAT+ftQl0=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
-github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
-github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76 h1:ofyVTM1w4iyKwaQIlRR6Ip06mXXx5Cnz7a4mTGYq1hE=
-github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0=
-github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=
+github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
-github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/xanzy/ssh-agent v0.1.0 h1:lOhdXLxtmYjaHc76ZtNmJWPg948y/RnT+3N3cvKWFzY=
-github.com/xanzy/ssh-agent v0.1.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -299,129 +340,108 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
-golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
-golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
-google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4=
+google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
-google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528155852-a33235495d66 h1:Dr/7zyt2bNrIJig4n+eIWx98s8vpo/gjS0JwWrxWEok=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528155852-a33235495d66/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
-google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -431,28 +451,16 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/src-d/go-billy.v4 v4.0.1 h1:iMxwQPj2cuKRyaIZ985zxClkcdTtT5VpXYf4PTJc0Ek=
-gopkg.in/src-d/go-billy.v4 v4.0.1/go.mod h1:ZHSF0JP+7oD97194otDUCD7Ofbk63+xFcfWP5bT6h+Q=
-gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
-gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
-gopkg.in/src-d/go-git.v4 v4.0.0 h1:9ZRNKHuhaTaJRGcGaH6Qg7uUORO2X0MNB5WL/CDdqto=
-gopkg.in/src-d/go-git.v4 v4.0.0/go.mod h1:CzbUWqMn4pvmvndg3gnh5iZFmSsbhyhUWdI0IQ60AQo=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/lib/boot/nginx.go b/lib/boot/nginx.go
index 9f1091eac3..338a6b5bcc 100644
--- a/lib/boot/nginx.go
+++ b/lib/boot/nginx.go
@@ -74,7 +74,6 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
{"KEEPWEB", super.cluster.Services.WebDAV},
{"KEEPWEBDL", super.cluster.Services.WebDAVDownload},
{"KEEPPROXY", super.cluster.Services.Keepproxy},
- {"GIT", super.cluster.Services.GitHTTP},
{"HEALTH", super.cluster.Services.Health},
{"WORKBENCH1", super.cluster.Services.Workbench1},
{"WORKBENCH2", super.cluster.Services.Workbench2},
@@ -135,7 +134,7 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
}
}
- configs := "error_log stderr info; "
+ configs := "error_log stderr warn; "
configs += "pid " + filepath.Join(super.wwwtempdir, "nginx.pid") + "; "
configs += "user www-data; "
diff --git a/lib/boot/passenger.go b/lib/boot/passenger.go
index 5367337e81..bf2ca2a78b 100644
--- a/lib/boot/passenger.go
+++ b/lib/boot/passenger.go
@@ -84,14 +84,9 @@ func (runner installPassenger) Run(ctx context.Context, fail func(error), super
if err != nil {
return err
}
- for _, version := range []string{"2.2.19"} {
- if !strings.Contains(buf.String(), "("+version+")") {
- err = super.RunProgram(ctx, appdir, runOptions{}, "gem", "install", "--user", "--conservative", "--no-document", "bundler:2.2.19")
- if err != nil {
- return err
- }
- break
- }
+ err = super.RunProgram(ctx, appdir, runOptions{}, "gem", "install", "--user", "--conservative", "--no-document", "--version", "~> 2.4.0", "bundler")
+ if err != nil {
+ return err
}
err = super.RunProgram(ctx, appdir, runOptions{}, "bundle", "config", "--set", "local", "path", filepath.Join(os.Getenv("HOME"), ".gem"))
if err != nil {
diff --git a/lib/boot/postgresql.go b/lib/boot/postgresql.go
index d105b0b623..ba6594e9db 100644
--- a/lib/boot/postgresql.go
+++ b/lib/boot/postgresql.go
@@ -10,7 +10,6 @@ import (
"database/sql"
"fmt"
"os"
- "os/exec"
"os/user"
"path/filepath"
"strconv"
@@ -40,11 +39,16 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
return nil
}
- iamroot := false
- if u, err := user.Current(); err != nil {
+ postgresUser, err := user.Current()
+	if err != nil {
	return fmt.Errorf("user.Current(): %w", err)
-	} else if u.Uid == "0" {
-	iamroot = true
+	}
+	iamroot := postgresUser.Uid == "0"
+	if iamroot {
+ postgresUser, err = user.Lookup("postgres")
+ if err != nil {
+	return fmt.Errorf("user.Lookup(\"postgres\"): %w", err)
+ }
}
buf := bytes.NewBuffer(nil)
@@ -61,11 +64,14 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
}
prog, args := filepath.Join(bindir, "initdb"), []string{"-D", datadir, "-E", "utf8"}
opts := runOptions{}
+ opts.env = append(opts.env,
+ "PGHOST="+super.cluster.PostgreSQL.Connection["host"],
+ "PGPORT="+super.cluster.PostgreSQL.Connection["port"],
+ "PGUSER="+postgresUser.Username,
+ "PGDATABASE=",
+ "PGPASSFILE=",
+ )
if iamroot {
- postgresUser, err := user.Lookup("postgres")
- if err != nil {
- return fmt.Errorf("user.Lookup(\"postgres\"): %s", err)
- }
postgresUID, err := strconv.Atoi(postgresUser.Uid)
if err != nil {
return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric uid?: %q", postgresUser.Uid)
@@ -104,8 +110,6 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
}
}
- port := super.cluster.PostgreSQL.Connection["port"]
-
super.waitShutdown.Add(1)
go func() {
defer super.waitShutdown.Done()
@@ -116,10 +120,6 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
"-h", super.cluster.PostgreSQL.Connection["host"],
"-p", super.cluster.PostgreSQL.Connection["port"],
}
- opts := runOptions{}
- if iamroot {
- opts.user = "postgres"
- }
fail(super.RunProgram(ctx, super.tempdir, opts, prog, args...))
}()
@@ -127,19 +127,18 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
if ctx.Err() != nil {
return ctx.Err()
}
- if exec.CommandContext(ctx, "pg_isready", "--timeout=10", "--host="+super.cluster.PostgreSQL.Connection["host"], "--port="+port).Run() == nil {
+ err := super.RunProgram(ctx, super.tempdir, opts, "pg_isready", "--timeout=10")
+ if err == nil {
break
}
time.Sleep(time.Second / 2)
}
pgconn := arvados.PostgreSQLConnection{
"host": datadir,
- "port": port,
+ "port": super.cluster.PostgreSQL.Connection["port"],
+ "user": postgresUser.Username,
"dbname": "postgres",
}
- if iamroot {
- pgconn["user"] = "postgres"
- }
db, err := sql.Open("postgres", pgconn.String())
if err != nil {
return fmt.Errorf("db open failed: %s", err)
diff --git a/lib/boot/supervisor.go b/lib/boot/supervisor.go
index ac269b933a..f93f5dc592 100644
--- a/lib/boot/supervisor.go
+++ b/lib/boot/supervisor.go
@@ -317,6 +317,9 @@ func (super *Supervisor) runCluster() error {
super.prependEnv("PATH", super.tempdir+"/bin:")
}
super.setEnv("ARVADOS_SERVER_ADDRESS", super.ListenHost)
+ if super.ClusterType == "test" {
+ super.setEnv("ARVADOS_USE_KEEP_ACCESSIBLE_API", "true")
+ }
// Now that we have the config, replace the bootstrap logger
// with a new one according to the logging config.
@@ -366,7 +369,6 @@ func (super *Supervisor) runCluster() error {
runNginx{},
railsDatabase{},
runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{railsDatabase{}}},
- runServiceCommand{name: "git-httpd", svc: super.cluster.Services.GitHTTP},
runServiceCommand{name: "health", svc: super.cluster.Services.Health},
runServiceCommand{name: "keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
@@ -786,9 +788,9 @@ func (super *Supervisor) autofillConfig() error {
usedPort := map[string]bool{}
nextPort := func(host string) (string, error) {
for {
- port, err := availablePort(host)
+ port, err := AvailablePort(host)
if err != nil {
- port, err = availablePort(super.ListenHost)
+ port, err = AvailablePort(super.ListenHost)
}
if err != nil {
return "", err
@@ -821,7 +823,6 @@ func (super *Supervisor) autofillConfig() error {
for _, svc := range []*arvados.Service{
&super.cluster.Services.Controller,
&super.cluster.Services.DispatchCloud,
- &super.cluster.Services.GitHTTP,
&super.cluster.Services.Health,
&super.cluster.Services.Keepproxy,
&super.cluster.Services.Keepstore,
@@ -839,7 +840,6 @@ func (super *Supervisor) autofillConfig() error {
}
host := net.JoinHostPort(defaultExtHost, port)
if svc == &super.cluster.Services.Controller ||
- svc == &super.cluster.Services.GitHTTP ||
svc == &super.cluster.Services.Health ||
svc == &super.cluster.Services.Keepproxy ||
svc == &super.cluster.Services.WebDAV ||
@@ -1007,7 +1007,13 @@ func externalPort(svc arvados.Service) (string, error) {
}
}
-func availablePort(host string) (string, error) {
+// AvailablePort returns a TCP port that is not in use on the given
+// local interface address. The host argument may be an IP address, a
+// hostname, or empty.
+//
+// AvailablePort("") returns a TCP port that is not in use on any
+// local interface.
+func AvailablePort(host string) (string, error) {
ln, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
if err != nil {
return "", err
diff --git a/lib/boot/supervisor_test.go b/lib/boot/supervisor_test.go
index b80fe1ed4c..23eaa5c800 100644
--- a/lib/boot/supervisor_test.go
+++ b/lib/boot/supervisor_test.go
@@ -8,6 +8,7 @@ import (
"net"
"testing"
+ "golang.org/x/net/nettest"
check "gopkg.in/check.v1"
)
@@ -32,9 +33,11 @@ func (s *supervisorSuite) TestAddrIsLocal(c *check.C) {
c.Check(err, check.IsNil)
c.Check(is, check.Equals, true)
- is, err = addrIsLocal("[::1]:32767")
- c.Check(err, check.IsNil)
- c.Check(is, check.Equals, true)
+ if nettest.SupportsIPv6() {
+ is, err = addrIsLocal("[::1]:32767")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+ }
is, err = addrIsLocal("8.8.8.8:32767")
c.Check(err, check.IsNil)
diff --git a/lib/cli/get.go b/lib/cli/get.go
index 352e7b9af6..39be092f1a 100644
--- a/lib/cli/get.go
+++ b/lib/cli/get.go
@@ -43,7 +43,7 @@ func (getCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, st
id := flags.Args()[0]
client := arvados.NewClientFromEnv()
- path, err := client.PathForUUID("show", id)
+ path, err := client.PathForUUID("get", id)
if err != nil {
return 1
}
diff --git a/lib/cloud/azure/azure.go b/lib/cloud/azure/azure.go
index 71f2a23dc9..bc335382b4 100644
--- a/lib/cloud/azure/azure.go
+++ b/lib/cloud/azure/azure.go
@@ -708,6 +708,11 @@ func (az *azureInstanceSet) manageDisks() {
}
}
+func (az *azureInstanceSet) InstanceQuotaGroup(arvados.InstanceType) cloud.InstanceQuotaGroup {
+ // All instance types share one quota.
+ return ""
+}
+
func (az *azureInstanceSet) Stop() {
az.stopFunc()
az.stopWg.Wait()
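
The empty InstanceQuotaGroup above puts every Azure instance type in one shared quota bucket, while the EC2 driver below adds a configurable InstanceTypeQuotaGroups map. A sketch of how a dispatcher could use quota groups to pause only the affected bucket after a capacity error (hypothetical helper, not part of this patch; the quotaGrouper interface stands in for whatever driver interface carries the new method):

package dispatch

import (
	"time"

	"git.arvados.org/arvados.git/lib/cloud"
	"git.arvados.org/arvados.git/sdk/go/arvados"
)

// quotaGrouper is the subset of the driver API used here (the method
// this patch adds to the cloud drivers).
type quotaGrouper interface {
	InstanceQuotaGroup(arvados.InstanceType) cloud.InstanceQuotaGroup
}

// quotaTracker pauses instance creation per quota group, so hitting a
// quota for one group does not block unrelated instance types.
type quotaTracker struct {
	fullUntil map[cloud.InstanceQuotaGroup]time.Time
}

func (qt *quotaTracker) canCreate(is quotaGrouper, it arvados.InstanceType) bool {
	// The zero time means this group has never hit its quota.
	return time.Now().After(qt.fullUntil[is.InstanceQuotaGroup(it)])
}

func (qt *quotaTracker) hitQuota(is quotaGrouper, it arvados.InstanceType) {
	if qt.fullUntil == nil {
		qt.fullUntil = map[cloud.InstanceQuotaGroup]time.Time{}
	}
	qt.fullUntil[is.InstanceQuotaGroup(it)] = time.Now().Add(time.Minute)
}
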
diff --git a/lib/cloud/ec2/ec2.go b/lib/cloud/ec2/ec2.go
index 6251f18df0..1f0e1507e3 100644
--- a/lib/cloud/ec2/ec2.go
+++ b/lib/cloud/ec2/ec2.go
@@ -5,12 +5,15 @@
package ec2
import (
+ "context"
"crypto/md5"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"encoding/base64"
+ "encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"math/big"
"regexp"
@@ -19,17 +22,17 @@ import (
"sync"
"sync/atomic"
"time"
+ "unicode"
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ config "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/ec2"
+ "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+ "github.com/aws/smithy-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
@@ -50,10 +53,11 @@ type ec2InstanceSetConfig struct {
SecurityGroupIDs arvados.StringSet
SubnetID sliceOrSingleString
AdminUsername string
- EBSVolumeType string
+ EBSVolumeType types.VolumeType
EBSPrice float64
IAMInstanceProfile string
SpotPriceUpdateInterval arvados.Duration
+ InstanceTypeQuotaGroups map[string]string
}
type sliceOrSingleString []string
@@ -90,14 +94,14 @@ func (ss *sliceOrSingleString) UnmarshalJSON(data []byte) error {
}
type ec2Interface interface {
- DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
- ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
- RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error)
- DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
- DescribeInstanceStatusPages(input *ec2.DescribeInstanceStatusInput, fn func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
- DescribeSpotPriceHistoryPages(input *ec2.DescribeSpotPriceHistoryInput, fn func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
- CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
- TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
+ DescribeKeyPairs(context.Context, *ec2.DescribeKeyPairsInput, ...func(*ec2.Options)) (*ec2.DescribeKeyPairsOutput, error)
+ ImportKeyPair(context.Context, *ec2.ImportKeyPairInput, ...func(*ec2.Options)) (*ec2.ImportKeyPairOutput, error)
+ RunInstances(context.Context, *ec2.RunInstancesInput, ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error)
+ DescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)
+ DescribeInstanceStatus(context.Context, *ec2.DescribeInstanceStatusInput, ...func(*ec2.Options)) (*ec2.DescribeInstanceStatusOutput, error)
+ DescribeSpotPriceHistory(context.Context, *ec2.DescribeSpotPriceHistoryInput, ...func(*ec2.Options)) (*ec2.DescribeSpotPriceHistoryOutput, error)
+ CreateTags(context.Context, *ec2.CreateTagsInput, ...func(*ec2.Options)) (*ec2.CreateTagsOutput, error)
+ TerminateInstances(context.Context, *ec2.TerminateInstancesInput, ...func(*ec2.Options)) (*ec2.TerminateInstancesOutput, error)
}
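// Reviewer note (sketch, not part of this patch): aws-sdk-go-v2 drops
// the v1 "*Pages" callback helpers, which is why DescribeInstanceStatus
// and DescribeSpotPriceHistory appear above in single-page form; v2
// callers page explicitly, e.g. with a generated paginator:
//
//	p := ec2.NewDescribeInstanceStatusPaginator(client, &ec2.DescribeInstanceStatusInput{
//		IncludeAllInstances: aws.Bool(true),
//	})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil {
//			return err
//		}
//		// ...use page.InstanceStatuses...
//	}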
type ec2InstanceSet struct {
@@ -119,29 +123,40 @@ type ec2InstanceSet struct {
mInstanceStarts *prometheus.CounterVec
}
-func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {
+func newEC2InstanceSet(confRaw json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {
instanceSet := &ec2InstanceSet{
instanceSetID: instanceSetID,
logger: logger,
}
- err = json.Unmarshal(config, &instanceSet.ec2config)
+ err = json.Unmarshal(confRaw, &instanceSet.ec2config)
if err != nil {
return nil, err
}
-
- sess, err := session.NewSession()
+ awsConfig, err := config.LoadDefaultConfig(context.Background(),
+ config.WithRegion(instanceSet.ec2config.Region),
+ config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
+ o.ExpiryWindow = 5 * time.Minute
+ }),
+ func(o *config.LoadOptions) error {
+ if instanceSet.ec2config.AccessKeyID == "" && instanceSet.ec2config.SecretAccessKey == "" {
+ // Use default SDK behavior (IAM role
+ // via IMDSv2)
+ return nil
+ }
+ o.Credentials = credentials.StaticCredentialsProvider{
+ Value: aws.Credentials{
+ AccessKeyID: instanceSet.ec2config.AccessKeyID,
+ SecretAccessKey: instanceSet.ec2config.SecretAccessKey,
+ Source: "Arvados configuration",
+ },
+ }
+ return nil
+ })
if err != nil {
return nil, err
}
- // First try any static credentials, fall back to an IAM instance profile/role
- creds := credentials.NewChainCredentials(
- []credentials.Provider{
- &credentials.StaticProvider{Value: credentials.Value{AccessKeyID: instanceSet.ec2config.AccessKeyID, SecretAccessKey: instanceSet.ec2config.SecretAccessKey}},
- &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
- })
- awsConfig := aws.NewConfig().WithCredentials(creds).WithRegion(instanceSet.ec2config.Region)
- instanceSet.client = ec2.New(session.Must(session.NewSession(awsConfig)))
+ instanceSet.client = ec2.NewFromConfig(awsConfig)
instanceSet.keys = make(map[string]string)
if instanceSet.ec2config.EBSVolumeType == "" {
instanceSet.ec2config.EBSVolumeType = "gp2"
@@ -179,7 +194,30 @@ func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID
return instanceSet, nil
}
-func awsKeyFingerprint(pk ssh.PublicKey) (md5fp string, sha1fp string, err error) {
+// Calculate the public key fingerprints that AWS might use for a
+// given key. For an rsa key, return the AWS MD5 and SHA-1
+// fingerprints in that order, like
+// {"02:d8:ca:c4:67:58:7b:46:64:50:41:59:3d:90:33:40",
+// "da:39:a3:ee:5e:6b:4b:0d:32:55:bf:ef:95:60:18:90:af:d8:07:09"}.
+// For an ed25519 key, return the SHA-256 fingerprint with and without
+// padding, like
+// {"SHA256:jgxbPn8JspgUBbZo3nRPWJ5e2h4v6FbiwlTe49NsNKE=",
+// "SHA256:jgxbPn8JspgUBbZo3nRPWJ5e2h4v6FbiwlTe49NsNKE"}.
+//
+// "When Amazon EC2 calculates a fingerprint, Amazon EC2 might append
+// padding to the fingerprint with = characters."
+//
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-keys.html
+func awsKeyFingerprints(pk ssh.PublicKey) ([]string, error) {
+ if pk.Type() != "ssh-rsa" {
+ // sha256 is always 256 bits, so the padded base64
+ // encoding will always be the unpadded encoding (as
+ // returned by ssh.FingerprintSHA256) plus a final
+ // "=".
+ hash2 := ssh.FingerprintSHA256(pk)
+ hash1 := hash2 + "="
+ return []string{hash1, hash2}, nil
+ }
// AWS key fingerprints don't use the usual key fingerprint
// you get from ssh-keygen or ssh.FingerprintLegacyMD5()
// (you can get that from md5.Sum(pk.Marshal())
@@ -192,24 +230,28 @@ func awsKeyFingerprint(pk ssh.PublicKey) (md5fp string, sha1fp string, err error
N *big.Int
}
if err := ssh.Unmarshal(pk.Marshal(), &rsaPub); err != nil {
- return "", "", fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
+ return nil, fmt.Errorf("Unmarshal failed to parse public key: %w", err)
}
rsaPk := rsa.PublicKey{
E: int(rsaPub.E.Int64()),
N: rsaPub.N,
}
pkix, _ := x509.MarshalPKIXPublicKey(&rsaPk)
- md5pkix := md5.Sum([]byte(pkix))
- sha1pkix := sha1.Sum([]byte(pkix))
- md5fp = ""
- sha1fp = ""
- for i := 0; i < len(md5pkix); i++ {
- md5fp += fmt.Sprintf(":%02x", md5pkix[i])
- }
- for i := 0; i < len(sha1pkix); i++ {
- sha1fp += fmt.Sprintf(":%02x", sha1pkix[i])
+ sum1 := md5.Sum(pkix)
+ sum2 := sha1.Sum(pkix)
+ return []string{
+ hexFingerprint(sum1[:]),
+ hexFingerprint(sum2[:]),
+ }, nil
+}
+
+// Return hex-fingerprint representation of sum, like "12:34:56:...".
+func hexFingerprint(sum []byte) string {
+ hexarray := make([]string, len(sum))
+ for i, c := range sum {
+ hexarray[i] = hex.EncodeToString([]byte{c})
}
- return md5fp[1:], sha1fp[1:], nil
+ return strings.Join(hexarray, ":")
}
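// Reviewer sketch (not part of this patch): a quick unit check pinning
// down the helper's output format; assumes a "testing" import.
//
//	func TestHexFingerprint(t *testing.T) {
//		if got := hexFingerprint([]byte{0x02, 0xd8, 0xca}); got != "02:d8:ca" {
//			t.Errorf("hexFingerprint() = %q, want %q", got, "02:d8:ca")
//		}
//	}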
func (instanceSet *ec2InstanceSet) Create(
@@ -219,9 +261,9 @@ func (instanceSet *ec2InstanceSet) Create(
initCommand cloud.InitCommand,
publicKey ssh.PublicKey) (cloud.Instance, error) {
- ec2tags := []*ec2.Tag{}
+ ec2tags := []types.Tag{}
for k, v := range newTags {
- ec2tags = append(ec2tags, &ec2.Tag{
+ ec2tags = append(ec2tags, types.Tag{
Key: aws.String(k),
Value: aws.String(v),
})
@@ -234,29 +276,28 @@ func (instanceSet *ec2InstanceSet) Create(
rii := ec2.RunInstancesInput{
ImageId: aws.String(string(imageID)),
- InstanceType: &instanceType.ProviderType,
- MaxCount: aws.Int64(1),
- MinCount: aws.Int64(1),
-
- NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
- {
- AssociatePublicIpAddress: aws.Bool(false),
- DeleteOnTermination: aws.Bool(true),
- DeviceIndex: aws.Int64(0),
- Groups: aws.StringSlice(groups),
- }},
+ InstanceType: types.InstanceType(instanceType.ProviderType),
+ MaxCount: aws.Int32(1),
+ MinCount: aws.Int32(1),
+
+ NetworkInterfaces: []types.InstanceNetworkInterfaceSpecification{{
+ AssociatePublicIpAddress: aws.Bool(false),
+ DeleteOnTermination: aws.Bool(true),
+ DeviceIndex: aws.Int32(0),
+ Groups: groups,
+ }},
DisableApiTermination: aws.Bool(false),
- InstanceInitiatedShutdownBehavior: aws.String("terminate"),
- TagSpecifications: []*ec2.TagSpecification{
+ InstanceInitiatedShutdownBehavior: types.ShutdownBehaviorTerminate,
+ TagSpecifications: []types.TagSpecification{
{
- ResourceType: aws.String("instance"),
+ ResourceType: types.ResourceTypeInstance,
Tags: ec2tags,
}},
- MetadataOptions: &ec2.InstanceMetadataOptionsRequest{
+ MetadataOptions: &types.InstanceMetadataOptionsRequest{
// Require IMDSv2, as described at
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html
- HttpEndpoint: aws.String(ec2.InstanceMetadataEndpointStateEnabled),
- HttpTokens: aws.String(ec2.HttpTokensStateRequired),
+ HttpEndpoint: types.InstanceMetadataEndpointStateEnabled,
+ HttpTokens: types.HttpTokensStateRequired,
},
UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
}
@@ -270,32 +311,33 @@ func (instanceSet *ec2InstanceSet) Create(
}
if instanceType.AddedScratch > 0 {
- rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{{
+ rii.BlockDeviceMappings = []types.BlockDeviceMapping{{
DeviceName: aws.String("/dev/xvdt"),
- Ebs: &ec2.EbsBlockDevice{
+ Ebs: &types.EbsBlockDevice{
DeleteOnTermination: aws.Bool(true),
- VolumeSize: aws.Int64((int64(instanceType.AddedScratch) + (1<<30 - 1)) >> 30),
- VolumeType: &instanceSet.ec2config.EBSVolumeType,
+ VolumeSize: aws.Int32(int32((int64(instanceType.AddedScratch) + (1<<30 - 1)) >> 30)),
+ VolumeType: instanceSet.ec2config.EBSVolumeType,
}}}
}
if instanceType.Preemptible {
- rii.InstanceMarketOptions = &ec2.InstanceMarketOptionsRequest{
- MarketType: aws.String("spot"),
- SpotOptions: &ec2.SpotMarketOptions{
- InstanceInterruptionBehavior: aws.String("terminate"),
+ rii.InstanceMarketOptions = &types.InstanceMarketOptionsRequest{
+ MarketType: types.MarketTypeSpot,
+ SpotOptions: &types.SpotMarketOptions{
+ InstanceInterruptionBehavior: types.InstanceInterruptionBehaviorTerminate,
MaxPrice: aws.String(fmt.Sprintf("%v", instanceType.Price)),
}}
}
if instanceSet.ec2config.IAMInstanceProfile != "" {
- rii.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{
+ rii.IamInstanceProfile = &types.IamInstanceProfileSpecification{
Name: aws.String(instanceSet.ec2config.IAMInstanceProfile),
}
}
- var rsv *ec2.Reservation
+ var rsv *ec2.RunInstancesOutput
var errToReturn error
+ var returningCapacityError bool
subnets := instanceSet.ec2config.SubnetID
currentSubnetIDIndex := int(atomic.LoadInt32(&instanceSet.currentSubnetIDIndex))
for tryOffset := 0; ; tryOffset++ {
@@ -307,13 +349,14 @@ func (instanceSet *ec2InstanceSet) Create(
rii.NetworkInterfaces[0].SubnetId = aws.String(trySubnet)
}
var err error
- rsv, err = instanceSet.client.RunInstances(&rii)
+ rsv, err = instanceSet.client.RunInstances(context.Background(), &rii)
instanceSet.mInstanceStarts.WithLabelValues(trySubnet, boolLabelValue[err == nil]).Add(1)
- if !isErrorCapacity(errToReturn) || isErrorCapacity(err) {
+ if instcap, groupcap := isErrorCapacity(err); !returningCapacityError || instcap || groupcap {
// We want to return the last capacity error,
// if any; otherwise the last non-capacity
// error.
errToReturn = err
+ returningCapacityError = instcap || groupcap
}
if isErrorSubnetSpecific(err) &&
tryOffset < len(subnets)-1 {
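
Editor's note: two policies are interleaved in the retry loop above. Subnet-specific failures rotate to the next configured subnet, while the error eventually returned to the caller prefers the most recent capacity error over any other kind of failure. The preference rule, restated as a hypothetical standalone helper (a sketch, not code from this change):

// preferErr restates the errToReturn bookkeeping: keep the newest
// capacity error once one has been seen; otherwise keep the newest
// error of any kind.
func preferErr(prev error, prevIsCapacity bool, latest error) (error, bool) {
	instcap, groupcap := isErrorCapacity(latest)
	if !prevIsCapacity || instcap || groupcap {
		return latest, instcap || groupcap
	}
	return prev, prevIsCapacity
}
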
@@ -346,49 +389,49 @@ func (instanceSet *ec2InstanceSet) Create(
func (instanceSet *ec2InstanceSet) getKeyName(publicKey ssh.PublicKey) (string, error) {
instanceSet.keysMtx.Lock()
defer instanceSet.keysMtx.Unlock()
- md5keyFingerprint, sha1keyFingerprint, err := awsKeyFingerprint(publicKey)
+ fingerprints, err := awsKeyFingerprints(publicKey)
if err != nil {
- return "", fmt.Errorf("Could not make key fingerprint: %v", err)
+ return "", fmt.Errorf("Could not make key fingerprint: %w", err)
}
- if keyname, ok := instanceSet.keys[md5keyFingerprint]; ok {
+ if keyname, ok := instanceSet.keys[fingerprints[0]]; ok {
return keyname, nil
}
- keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
- Filters: []*ec2.Filter{{
+ keyout, err := instanceSet.client.DescribeKeyPairs(context.Background(), &ec2.DescribeKeyPairsInput{
+ Filters: []types.Filter{{
Name: aws.String("fingerprint"),
- Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
+ Values: fingerprints,
}},
})
if err != nil {
- return "", fmt.Errorf("Could not search for keypair: %v", err)
+ return "", fmt.Errorf("Could not search for keypair: %w", err)
}
if len(keyout.KeyPairs) > 0 {
return *(keyout.KeyPairs[0].KeyName), nil
}
- keyname := "arvados-dispatch-keypair-" + md5keyFingerprint
- _, err = instanceSet.client.ImportKeyPair(&ec2.ImportKeyPairInput{
+ keyname := "arvados-dispatch-keypair-" + fingerprints[0]
+ _, err = instanceSet.client.ImportKeyPair(context.Background(), &ec2.ImportKeyPairInput{
KeyName: &keyname,
PublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),
})
if err != nil {
- return "", fmt.Errorf("Could not import keypair: %v", err)
+ return "", fmt.Errorf("Could not import keypair: %w", err)
}
- instanceSet.keys[md5keyFingerprint] = keyname
+ instanceSet.keys[fingerprints[0]] = keyname
return keyname, nil
}
func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances []cloud.Instance, err error) {
- var filters []*ec2.Filter
+ var filters []types.Filter
for k, v := range tags {
- filters = append(filters, &ec2.Filter{
+ filters = append(filters, types.Filter{
Name: aws.String("tag:" + k),
- Values: []*string{aws.String(v)},
+ Values: []string{v},
})
}
needAZs := false
dii := &ec2.DescribeInstancesInput{Filters: filters}
for {
- dio, err := instanceSet.client.DescribeInstances(dii)
+ dio, err := instanceSet.client.DescribeInstances(context.Background(), dii)
err = wrapError(err, &instanceSet.throttleDelayInstances)
if err != nil {
return nil, err
@@ -396,34 +439,41 @@ func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances
for _, rsv := range dio.Reservations {
for _, inst := range rsv.Instances {
- if *inst.State.Name != "shutting-down" && *inst.State.Name != "terminated" {
+ switch inst.State.Name {
+ case types.InstanceStateNameShuttingDown:
+ case types.InstanceStateNameTerminated:
+ default:
instances = append(instances, &ec2Instance{
provider: instanceSet,
instance: inst,
})
- if aws.StringValue(inst.InstanceLifecycle) == "spot" {
+ if inst.InstanceLifecycle == types.InstanceLifecycleTypeSpot {
needAZs = true
}
}
}
}
- if dio.NextToken == nil {
+ if dio.NextToken == nil || *dio.NextToken == "" {
break
}
dii.NextToken = dio.NextToken
}
if needAZs && instanceSet.ec2config.SpotPriceUpdateInterval > 0 {
az := map[string]string{}
- err := instanceSet.client.DescribeInstanceStatusPages(&ec2.DescribeInstanceStatusInput{
- IncludeAllInstances: aws.Bool(true),
- }, func(page *ec2.DescribeInstanceStatusOutput, lastPage bool) bool {
+ disi := &ec2.DescribeInstanceStatusInput{IncludeAllInstances: aws.Bool(true)}
+ for {
+ page, err := instanceSet.client.DescribeInstanceStatus(context.Background(), disi)
+ if err != nil {
+ instanceSet.logger.WithError(err).Warn("error getting instance statuses")
+ break
+ }
for _, ent := range page.InstanceStatuses {
az[*ent.InstanceId] = *ent.AvailabilityZone
}
- return true
- })
- if err != nil {
- instanceSet.logger.Warnf("error getting instance statuses: %s", err)
+ if page.NextToken == nil || *page.NextToken == "" {
+ break
+ }
+ disi.NextToken = page.NextToken
}
for _, inst := range instances {
inst := inst.(*ec2Instance)
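
Editor's note: the hand-rolled NextToken loops in Instances() replace the v1 *Pages callback helpers. For comparison, aws-sdk-go-v2 also ships paginator types that hide the token bookkeeping; a hypothetical equivalent of the status query using one (the driver presumably avoids it because it talks to EC2 through its own narrow, stubbable interface, which the paginator constructor does not accept):

// Hypothetical sketch using the SDK v2 paginator helper instead of a
// manual NextToken loop; assumes a concrete *ec2.Client.
func availabilityZones(ctx context.Context, client *ec2.Client) (map[string]string, error) {
	az := map[string]string{}
	pager := ec2.NewDescribeInstanceStatusPaginator(client, &ec2.DescribeInstanceStatusInput{
		IncludeAllInstances: aws.Bool(true),
	})
	for pager.HasMorePages() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, ent := range page.InstanceStatuses {
			az[*ent.InstanceId] = *ent.AvailabilityZone
		}
	}
	return az, nil
}

Note that the manual loops also treat an empty-string NextToken as end-of-pages (see bug #22400 in the test stub below), a detail the paginator handles internally.
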
@@ -475,28 +525,28 @@ func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance)
updateTime := time.Now()
staleTime := updateTime.Add(-instanceSet.ec2config.SpotPriceUpdateInterval.Duration())
needUpdate := false
- allTypes := map[string]bool{}
+ allTypes := map[types.InstanceType]bool{}
for _, inst := range instances {
ec2inst := inst.(*ec2Instance).instance
- if aws.StringValue(ec2inst.InstanceLifecycle) == "spot" {
+ if ec2inst.InstanceLifecycle == types.InstanceLifecycleTypeSpot {
pk := priceKey{
- instanceType: *ec2inst.InstanceType,
+ instanceType: string(ec2inst.InstanceType),
spot: true,
availabilityZone: inst.(*ec2Instance).availabilityZone,
}
if instanceSet.pricesUpdated[pk].Before(staleTime) {
needUpdate = true
}
- allTypes[*ec2inst.InstanceType] = true
+ allTypes[ec2inst.InstanceType] = true
}
}
if !needUpdate {
return
}
- var typeFilterValues []*string
+ var typeFilterValues []string
for instanceType := range allTypes {
- typeFilterValues = append(typeFilterValues, aws.String(instanceType))
+ typeFilterValues = append(typeFilterValues, string(instanceType))
}
// Get 3x update interval worth of pricing data. (Ideally the
// AWS API would tell us "we have shown you all of the price
@@ -507,14 +557,19 @@ func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance)
// row.
dsphi := &ec2.DescribeSpotPriceHistoryInput{
StartTime: aws.Time(updateTime.Add(-3 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())),
- Filters: []*ec2.Filter{
- &ec2.Filter{Name: aws.String("instance-type"), Values: typeFilterValues},
- &ec2.Filter{Name: aws.String("product-description"), Values: []*string{aws.String("Linux/UNIX")}},
+ Filters: []types.Filter{
+ types.Filter{Name: aws.String("instance-type"), Values: typeFilterValues},
+ types.Filter{Name: aws.String("product-description"), Values: []string{"Linux/UNIX"}},
},
}
- err := instanceSet.client.DescribeSpotPriceHistoryPages(dsphi, func(page *ec2.DescribeSpotPriceHistoryOutput, lastPage bool) bool {
+ for {
+ page, err := instanceSet.client.DescribeSpotPriceHistory(context.Background(), dsphi)
+ if err != nil {
+ instanceSet.logger.WithError(err).Warn("error retrieving spot instance prices")
+ break
+ }
for _, ent := range page.SpotPriceHistory {
- if ent.InstanceType == nil || ent.SpotPrice == nil || ent.Timestamp == nil {
+ if ent.InstanceType == "" || ent.SpotPrice == nil || ent.Timestamp == nil {
// bogus record?
continue
}
@@ -524,7 +579,7 @@ func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance)
continue
}
pk := priceKey{
- instanceType: *ent.InstanceType,
+ instanceType: string(ent.InstanceType),
spot: true,
availabilityZone: *ent.AvailabilityZone,
}
@@ -534,10 +589,10 @@ func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance)
})
instanceSet.pricesUpdated[pk] = updateTime
}
- return true
- })
- if err != nil {
- instanceSet.logger.Warnf("error retrieving spot instance prices: %s", err)
+ if page.NextToken == nil || *page.NextToken == "" {
+ break
+ }
+ dsphi.NextToken = page.NextToken
}
expiredTime := updateTime.Add(-64 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())
@@ -555,9 +610,39 @@ func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance)
func (instanceSet *ec2InstanceSet) Stop() {
}
+func (instanceSet *ec2InstanceSet) InstanceQuotaGroup(it arvados.InstanceType) cloud.InstanceQuotaGroup {
+ // https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-instance-quotas.html
+ // 2024-09-09
+ var quotaGroup string
+ pt := strings.ToLower(it.ProviderType)
+ for i, c := range pt {
+ if !unicode.IsLower(c) && quotaGroup == "" {
+ // Fall back to the alphabetic prefix of
+ // ProviderType.
+ quotaGroup = pt[:i]
+ }
+ if conf := instanceSet.ec2config.InstanceTypeQuotaGroups[pt[:i]]; conf != "" && quotaGroup != "" {
+ // Prefer the longest prefix of ProviderType
+ // that is listed explicitly in config.
+ //
+ // (But don't look up a too-short prefix --
+ // for an instance type like "trn1.234", use
+ // the config for "trn" or "trn1" but not
+ // "t".)
+ quotaGroup = conf
+ }
+ }
+ if it.Preemptible {
+ // Spot instance quotas are separate from demand
+ // quotas.
+ quotaGroup += "-spot"
+ }
+ return cloud.InstanceQuotaGroup(quotaGroup)
+}
+
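
Editor's note: worked examples of the prefix resolution, assuming the default InstanceTypeQuotaGroups mapping added to config.default.yml later in this change (hypothetical calls, shown for illustration):

// With InstanceTypeQuotaGroups {a: standard, ..., m: standard, p5: p5}:
instanceSet.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "m6i.xlarge"})
// -> "standard": the configured prefix "m" wins.
instanceSet.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "p4d.24xlarge"})
// -> "p": no configured prefix applies, so the alphabetic prefix is used.
instanceSet.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "p5.48xlarge", Preemptible: true})
// -> "p5-spot": "p5" is configured, and spot capacity is quota'd separately.
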
type ec2Instance struct {
provider *ec2InstanceSet
- instance *ec2.Instance
+ instance types.Instance
availabilityZone string // sometimes available for spot instances
}
@@ -570,20 +655,20 @@ func (inst *ec2Instance) String() string {
}
func (inst *ec2Instance) ProviderType() string {
- return *inst.instance.InstanceType
+ return string(inst.instance.InstanceType)
}
func (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {
- var ec2tags []*ec2.Tag
+ var ec2tags []types.Tag
for k, v := range newTags {
- ec2tags = append(ec2tags, &ec2.Tag{
+ ec2tags = append(ec2tags, types.Tag{
Key: aws.String(k),
Value: aws.String(v),
})
}
- _, err := inst.provider.client.CreateTags(&ec2.CreateTagsInput{
- Resources: []*string{inst.instance.InstanceId},
+ _, err := inst.provider.client.CreateTags(context.Background(), &ec2.CreateTagsInput{
+ Resources: []string{*inst.instance.InstanceId},
Tags: ec2tags,
})
@@ -601,8 +686,8 @@ func (inst *ec2Instance) Tags() cloud.InstanceTags {
}
func (inst *ec2Instance) Destroy() error {
- _, err := inst.provider.client.TerminateInstances(&ec2.TerminateInstancesInput{
- InstanceIds: []*string{inst.instance.InstanceId},
+ _, err := inst.provider.client.TerminateInstances(context.Background(), &ec2.TerminateInstancesInput{
+ InstanceIds: []string{*inst.instance.InstanceId},
})
return err
}
@@ -653,8 +738,8 @@ func (inst *ec2Instance) PriceHistory(instType arvados.InstanceType) []cloud.Ins
// inst.provider.prices only for spot instances, so if
// spot==false here, we will return no data.
pk := priceKey{
- instanceType: *inst.instance.InstanceType,
- spot: aws.StringValue(inst.instance.InstanceLifecycle) == "spot",
+ instanceType: string(inst.instance.InstanceType),
+ spot: inst.instance.InstanceLifecycle == types.InstanceLifecycleTypeSpot,
availabilityZone: inst.availabilityZone,
}
var prices []cloud.InstancePrice
@@ -680,13 +765,18 @@ func (err rateLimitError) EarliestRetry() time.Time {
type capacityError struct {
error
- isInstanceTypeSpecific bool
+ isInstanceQuotaGroupSpecific bool
+ isInstanceTypeSpecific bool
}
func (er *capacityError) IsCapacityError() bool {
return true
}
+func (er *capacityError) IsInstanceQuotaGroupSpecific() bool {
+ return er.isInstanceQuotaGroupSpecific
+}
+
func (er *capacityError) IsInstanceTypeSpecific() bool {
return er.isInstanceTypeSpecific
}
@@ -697,7 +787,6 @@ var isCodeQuota = map[string]bool{
"InsufficientFreeAddressesInSubnet": true,
"InsufficientVolumeCapacity": true,
"MaxSpotInstanceCountExceeded": true,
- "VcpuLimitExceeded": true,
}
// isErrorQuota returns whether the error indicates we have reached
@@ -706,8 +795,9 @@ var isCodeQuota = map[string]bool{
//
// Returns false if error is nil.
func isErrorQuota(err error) bool {
- if aerr, ok := err.(awserr.Error); ok && aerr != nil {
- if _, ok := isCodeQuota[aerr.Code()]; ok {
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ if _, ok := isCodeQuota[aerr.ErrorCode()]; ok {
return true
}
}
@@ -719,11 +809,11 @@ var reSubnetSpecificInvalidParameterMessage = regexp.MustCompile(`(?ms).*( subne
// isErrorSubnetSpecific returns true if the problem encountered by
// RunInstances might be avoided by trying a different subnet.
func isErrorSubnetSpecific(err error) bool {
- aerr, ok := err.(awserr.Error)
- if !ok {
+ var aerr smithy.APIError
+ if !errors.As(err, &aerr) {
return false
}
- code := aerr.Code()
+ code := aerr.ErrorCode()
return strings.Contains(code, "Subnet") ||
code == "InsufficientInstanceCapacity" ||
code == "InsufficientVolumeCapacity" ||
@@ -732,21 +822,28 @@ func isErrorSubnetSpecific(err error) bool {
// we look for substrings in code/message instead of
// only using specific codes here.
(strings.Contains(code, "InvalidParameter") &&
- reSubnetSpecificInvalidParameterMessage.MatchString(aerr.Message()))
+ reSubnetSpecificInvalidParameterMessage.MatchString(aerr.ErrorMessage()))
}
-// isErrorCapacity returns true if the error indicates lack of
-// capacity (either temporary or permanent) to run a specific instance
-// type -- i.e., retrying with a different instance type might
-// succeed.
-func isErrorCapacity(err error) bool {
- aerr, ok := err.(awserr.Error)
- if !ok {
- return false
+// isErrorCapacity determines whether the given error indicates lack
+// of capacity -- either temporary or permanent -- to run a specific
+// instance type (i.e., retrying with any other instance type might
+// succeed) or an instance quota group (i.e., retrying with an
+// instance type in a different instance quota group might succeed).
+func isErrorCapacity(err error) (instcap bool, groupcap bool) {
+ var aerr smithy.APIError
+ if !errors.As(err, &aerr) {
+ return false, false
+ }
+ code := aerr.ErrorCode()
+ if code == "VcpuLimitExceeded" {
+ return false, true
+ }
+ if code == "InsufficientInstanceCapacity" ||
+ (code == "Unsupported" && strings.Contains(aerr.ErrorMessage(), "requested instance type")) {
+ return true, false
}
- code := aerr.Code()
- return code == "InsufficientInstanceCapacity" ||
- (code == "Unsupported" && strings.Contains(aerr.Message(), "requested instance type"))
+ return false, false
}
type ec2QuotaError struct {
@@ -757,8 +854,17 @@ func (er *ec2QuotaError) IsQuotaError() bool {
return true
}
+func isThrottleError(err error) bool {
+ var aerr smithy.APIError
+ if !errors.As(err, &aerr) {
+ return false
+ }
+ _, is := retry.DefaultThrottleErrorCodes[aerr.ErrorCode()]
+ return is
+}
+
func wrapError(err error, throttleValue *atomic.Value) error {
- if request.IsErrorThrottle(err) {
+ if isThrottleError(err) {
// Back off exponentially until an upstream call
// either succeeds or returns a non-throttle error.
d, _ := throttleValue.Load().(time.Duration)
@@ -771,9 +877,13 @@ func wrapError(err error, throttleValue *atomic.Value) error {
throttleValue.Store(d)
return rateLimitError{error: err, earliestRetry: time.Now().Add(d)}
} else if isErrorQuota(err) {
- return &ec2QuotaError{err}
- } else if isErrorCapacity(err) {
- return &capacityError{err, true}
+ return &ec2QuotaError{error: err}
+ } else if instcap, groupcap := isErrorCapacity(err); instcap || groupcap {
+ return &capacityError{
+ error: err,
+ isInstanceTypeSpecific: !groupcap,
+ isInstanceQuotaGroupSpecific: groupcap,
+ }
} else if err != nil {
throttleValue.Store(time.Duration(0))
return err
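
Editor's note: taken together, a capacity-flavored API error now round-trips through wrapError into a cloud.CapacityError whose two specificity flags tell the scheduler how widely to back off. A minimal sketch, mirroring the updated TestWrapError cases below:

// Hypothetical illustration of the new classification.
err := wrapError(&smithy.GenericAPIError{Code: "VcpuLimitExceeded"}, nil)
if ce, ok := err.(cloud.CapacityError); ok {
	fmt.Println(ce.IsCapacityError())              // true
	fmt.Println(ce.IsInstanceTypeSpecific())       // false
	fmt.Println(ce.IsInstanceQuotaGroupSpecific()) // true: the whole vCPU bucket is exhausted
}
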
diff --git a/lib/cloud/ec2/ec2_test.go b/lib/cloud/ec2/ec2_test.go
index 5e6cf2c82b..e758c322c4 100644
--- a/lib/cloud/ec2/ec2_test.go
+++ b/lib/cloud/ec2/ec2_test.go
@@ -23,23 +23,27 @@
package ec2
import (
+ "context"
"encoding/json"
"errors"
"flag"
- "fmt"
+ "os/exec"
+ "regexp"
"sync/atomic"
"testing"
"time"
"git.arvados.org/arvados.git/lib/cloud"
+ libconfig "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/config"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/ec2"
+ "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+ "github.com/aws/smithy-go"
"github.com/ghodss/yaml"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
@@ -101,17 +105,17 @@ type ec2stub struct {
subnetErrorOnRunInstances map[string]error
}
-func (e *ec2stub) ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) {
+func (e *ec2stub) ImportKeyPair(ctx context.Context, input *ec2.ImportKeyPairInput, _ ...func(*ec2.Options)) (*ec2.ImportKeyPairOutput, error) {
e.importKeyPairCalls = append(e.importKeyPairCalls, input)
return nil, nil
}
-func (e *ec2stub) DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) {
+func (e *ec2stub) DescribeKeyPairs(ctx context.Context, input *ec2.DescribeKeyPairsInput, _ ...func(*ec2.Options)) (*ec2.DescribeKeyPairsOutput, error) {
e.describeKeyPairsCalls = append(e.describeKeyPairsCalls, input)
return &ec2.DescribeKeyPairsOutput{}, nil
}
-func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+func (e *ec2stub) RunInstances(ctx context.Context, input *ec2.RunInstancesInput, _ ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error) {
e.runInstancesCalls = append(e.runInstancesCalls, input)
if len(input.NetworkInterfaces) > 0 && input.NetworkInterfaces[0].SubnetId != nil {
err := e.subnetErrorOnRunInstances[*input.NetworkInterfaces[0].SubnetId]
@@ -119,98 +123,91 @@ func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation,
return nil, err
}
}
- return &ec2.Reservation{Instances: []*ec2.Instance{{
+ return &ec2.RunInstancesOutput{Instances: []types.Instance{{
InstanceId: aws.String("i-123"),
- InstanceType: aws.String("t2.micro"),
+ InstanceType: types.InstanceTypeT2Micro,
Tags: input.TagSpecifications[0].Tags,
}}}, nil
}
-func (e *ec2stub) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
+func (e *ec2stub) DescribeInstances(ctx context.Context, input *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {
return &ec2.DescribeInstancesOutput{
- Reservations: []*ec2.Reservation{{
- Instances: []*ec2.Instance{{
+ Reservations: []types.Reservation{{
+ Instances: []types.Instance{{
InstanceId: aws.String("i-123"),
- InstanceLifecycle: aws.String("spot"),
- InstanceType: aws.String("t2.micro"),
+ InstanceLifecycle: types.InstanceLifecycleTypeSpot,
+ InstanceType: types.InstanceTypeT2Micro,
PrivateIpAddress: aws.String("10.1.2.3"),
- State: &ec2.InstanceState{Name: aws.String("running"), Code: aws.Int64(16)},
+ State: &types.InstanceState{Name: types.InstanceStateNameRunning, Code: aws.Int32(16)},
}, {
InstanceId: aws.String("i-124"),
- InstanceLifecycle: aws.String("spot"),
- InstanceType: aws.String("t2.micro"),
+ InstanceLifecycle: types.InstanceLifecycleTypeSpot,
+ InstanceType: types.InstanceTypeT2Micro,
PrivateIpAddress: aws.String("10.1.2.4"),
- State: &ec2.InstanceState{Name: aws.String("running"), Code: aws.Int64(16)},
+ State: &types.InstanceState{Name: types.InstanceStateNameRunning, Code: aws.Int32(16)},
}},
}},
}, nil
}
-func (e *ec2stub) DescribeInstanceStatusPages(input *ec2.DescribeInstanceStatusInput, fn func(*ec2.DescribeInstanceStatusOutput, bool) bool) error {
- fn(&ec2.DescribeInstanceStatusOutput{
- InstanceStatuses: []*ec2.InstanceStatus{{
+func (e *ec2stub) DescribeInstanceStatus(ctx context.Context, input *ec2.DescribeInstanceStatusInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstanceStatusOutput, error) {
+ return &ec2.DescribeInstanceStatusOutput{
+ InstanceStatuses: []types.InstanceStatus{{
InstanceId: aws.String("i-123"),
AvailabilityZone: aws.String("aa-east-1a"),
}, {
InstanceId: aws.String("i-124"),
AvailabilityZone: aws.String("aa-east-1a"),
}},
- }, true)
- return nil
+ }, nil
}
-func (e *ec2stub) DescribeSpotPriceHistoryPages(input *ec2.DescribeSpotPriceHistoryInput, fn func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error {
- if !fn(&ec2.DescribeSpotPriceHistoryOutput{
- SpotPriceHistory: []*ec2.SpotPrice{
- &ec2.SpotPrice{
- InstanceType: aws.String("t2.micro"),
- AvailabilityZone: aws.String("aa-east-1a"),
- SpotPrice: aws.String("0.005"),
- Timestamp: aws.Time(e.reftime.Add(-9 * time.Minute)),
+func (e *ec2stub) DescribeSpotPriceHistory(ctx context.Context, input *ec2.DescribeSpotPriceHistoryInput, _ ...func(*ec2.Options)) (*ec2.DescribeSpotPriceHistoryOutput, error) {
+ if input.NextToken == nil || *input.NextToken == "" {
+ return &ec2.DescribeSpotPriceHistoryOutput{
+ SpotPriceHistory: []types.SpotPrice{
+ types.SpotPrice{
+ InstanceType: types.InstanceTypeT2Micro,
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.005"),
+ Timestamp: aws.Time(e.reftime.Add(-9 * time.Minute)),
+ },
+ types.SpotPrice{
+ InstanceType: types.InstanceTypeT2Micro,
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.015"),
+ Timestamp: aws.Time(e.reftime.Add(-5 * time.Minute)),
+ },
},
- &ec2.SpotPrice{
- InstanceType: aws.String("t2.micro"),
- AvailabilityZone: aws.String("aa-east-1a"),
- SpotPrice: aws.String("0.015"),
- Timestamp: aws.Time(e.reftime.Add(-5 * time.Minute)),
+ NextToken: aws.String("stubnexttoken"),
+ }, nil
+ } else {
+ return &ec2.DescribeSpotPriceHistoryOutput{
+ SpotPriceHistory: []types.SpotPrice{
+ types.SpotPrice{
+ InstanceType: types.InstanceTypeT2Micro,
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.01"),
+ Timestamp: aws.Time(e.reftime.Add(-2 * time.Minute)),
+ },
},
- },
- }, false) {
- return nil
+ NextToken: aws.String(""), // see bug #22400
+ }, nil
}
- fn(&ec2.DescribeSpotPriceHistoryOutput{
- SpotPriceHistory: []*ec2.SpotPrice{
- &ec2.SpotPrice{
- InstanceType: aws.String("t2.micro"),
- AvailabilityZone: aws.String("aa-east-1a"),
- SpotPrice: aws.String("0.01"),
- Timestamp: aws.Time(e.reftime.Add(-2 * time.Minute)),
- },
- },
- }, true)
- return nil
}
-func (e *ec2stub) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
+func (e *ec2stub) CreateTags(ctx context.Context, input *ec2.CreateTagsInput, _ ...func(*ec2.Options)) (*ec2.CreateTagsOutput, error) {
return nil, nil
}
-func (e *ec2stub) TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) {
+func (e *ec2stub) TerminateInstances(ctx context.Context, input *ec2.TerminateInstancesInput, _ ...func(*ec2.Options)) (*ec2.TerminateInstancesOutput, error) {
return nil, nil
}
-type ec2stubError struct {
- code string
- message string
-}
-
-func (err *ec2stubError) Code() string { return err.code }
-func (err *ec2stubError) Message() string { return err.message }
-func (err *ec2stubError) Error() string { return fmt.Sprintf("%s: %s", err.code, err.message) }
-func (err *ec2stubError) OrigErr() error { return errors.New("stub OrigErr") }
+type ec2stubError = smithy.GenericAPIError
-// Ensure ec2stubError satisfies the aws.Error interface
-var _ = awserr.Error(&ec2stubError{})
+// Ensure ec2stubError satisfies the smithy.APIError interface
+var _ = smithy.APIError(&ec2stubError{})
func GetInstanceSet(c *check.C, conf string) (*ec2InstanceSet, cloud.ImageID, arvados.Cluster, *prometheus.Registry) {
reg := prometheus.NewRegistry()
@@ -280,8 +277,8 @@ func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
runcalls := ap.client.(*ec2stub).runInstancesCalls
if c.Check(runcalls, check.HasLen, 1) {
- c.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, aws.String("enabled"))
- c.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, aws.String("required"))
+ c.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, types.InstanceMetadataEndpointStateEnabled)
+ c.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, types.HttpTokensStateRequired)
}
}
}
@@ -333,8 +330,8 @@ func (*EC2InstanceSetSuite) TestCreateFailoverSecondSubnet(c *check.C) {
ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-good"]}`)
ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
"subnet-full": &ec2stubError{
- code: "InsufficientFreeAddressesInSubnet",
- message: "subnet is full",
+ Code: "InsufficientFreeAddressesInSubnet",
+ Message: "subnet is full",
},
}
inst, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
@@ -368,49 +365,49 @@ func (*EC2InstanceSetSuite) TestIsErrorSubnetSpecific(c *check.C) {
c.Check(isErrorSubnetSpecific(errors.New("misc error")), check.Equals, false)
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InsufficientInstanceCapacity",
+ Code: "InsufficientInstanceCapacity",
}), check.Equals, true)
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InsufficientVolumeCapacity",
+ Code: "InsufficientVolumeCapacity",
}), check.Equals, true)
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InsufficientFreeAddressesInSubnet",
- message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
+ Code: "InsufficientFreeAddressesInSubnet",
+ Message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
}), check.Equals, true)
// #21603: (Sometimes?) EC2 returns code InvalidParameterValue
// even though the code "InsufficientFreeAddressesInSubnet"
// seems like it must be meant for exactly this error.
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InvalidParameterValue",
- message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
+ Code: "InvalidParameterValue",
+ Message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
}), check.Equals, true)
// Similarly, AWS docs
// (https://repost.aws/knowledge-center/vpc-insufficient-ip-errors)
// suggest the following code/message combinations also exist.
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "Client.InvalidParameterValue",
- message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ Code: "Client.InvalidParameterValue",
+ Message: "There aren't sufficient free Ipv4 addresses or prefixes",
}), check.Equals, true)
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InvalidParameterValue",
- message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ Code: "InvalidParameterValue",
+ Message: "There aren't sufficient free Ipv4 addresses or prefixes",
}), check.Equals, true)
// Meanwhile, other AWS docs
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html)
// suggest Client.InvalidParameterValue is not a real code but
// ClientInvalidParameterValue is.
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "ClientInvalidParameterValue",
- message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ Code: "ClientInvalidParameterValue",
+ Message: "There aren't sufficient free Ipv4 addresses or prefixes",
}), check.Equals, true)
c.Check(isErrorSubnetSpecific(&ec2stubError{
- code: "InvalidParameterValue",
- message: "Some other invalid parameter error",
+ Code: "InvalidParameterValue",
+ Message: "Some other invalid parameter error",
}), check.Equals, false)
}
@@ -423,12 +420,12 @@ func (*EC2InstanceSetSuite) TestCreateAllSubnetsFailing(c *check.C) {
ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-broken"]}`)
ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
"subnet-full": &ec2stubError{
- code: "InsufficientFreeAddressesInSubnet",
- message: "subnet is full",
+ Code: "InsufficientFreeAddressesInSubnet",
+ Message: "subnet is full",
},
"subnet-broken": &ec2stubError{
- code: "InvalidSubnetId.NotFound",
- message: "bogus subnet id",
+ Code: "InvalidSubnetId.NotFound",
+ Message: "bogus subnet id",
},
}
_, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
@@ -464,12 +461,12 @@ func (*EC2InstanceSetSuite) TestCreateOneSubnetFailingCapacity(c *check.C) {
ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-broken"]}`)
ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
"subnet-full": &ec2stubError{
- code: "InsufficientFreeAddressesInSubnet",
- message: "subnet is full",
+ Code: "InsufficientFreeAddressesInSubnet",
+ Message: "subnet is full",
},
"subnet-broken": &ec2stubError{
- code: "InsufficientInstanceCapacity",
- message: "insufficient capacity",
+ Code: "InsufficientInstanceCapacity",
+ Message: "insufficient capacity",
},
}
for i := 0; i < 3; i++ {
@@ -560,7 +557,7 @@ func (*EC2InstanceSetSuite) TestInstancePriceHistory(c *check.C) {
running := 0
for _, inst := range instances {
ec2i := inst.(*ec2Instance).instance
- if *ec2i.InstanceLifecycle == "spot" && *ec2i.State.Code&16 != 0 {
+ if ec2i.InstanceLifecycle == types.InstanceLifecycleTypeSpot && *ec2i.State.Code&16 != 0 {
running++
}
}
@@ -591,28 +588,115 @@ func (*EC2InstanceSetSuite) TestInstancePriceHistory(c *check.C) {
}
func (*EC2InstanceSetSuite) TestWrapError(c *check.C) {
- retryError := awserr.New("Throttling", "", nil)
+ retryError := &ec2stubError{Code: "Throttling"}
wrapped := wrapError(retryError, &atomic.Value{})
_, ok := wrapped.(cloud.RateLimitError)
c.Check(ok, check.Equals, true)
- quotaError := awserr.New("InstanceLimitExceeded", "", nil)
+ quotaError := &ec2stubError{Code: "InstanceLimitExceeded"}
wrapped = wrapError(quotaError, nil)
_, ok = wrapped.(cloud.QuotaError)
c.Check(ok, check.Equals, true)
for _, trial := range []struct {
- code string
- msg string
+ code string
+ msg string
+ typeSpecific bool
+ quotaGroupSpecific bool
}{
- {"InsufficientInstanceCapacity", ""},
- {"Unsupported", "Your requested instance type (t3.micro) is not supported in your requested Availability Zone (us-east-1e). Please retry your request by not specifying an Availability Zone or choosing us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1f."},
+ {
+ code: "InsufficientInstanceCapacity",
+ msg: "",
+ typeSpecific: true,
+ quotaGroupSpecific: false,
+ },
+ {
+ code: "Unsupported",
+ msg: "Your requested instance type (t3.micro) is not supported in your requested Availability Zone (us-east-1e). Please retry your request by not specifying an Availability Zone or choosing us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1f.",
+ typeSpecific: true,
+ quotaGroupSpecific: false,
+ },
+ {
+ code: "VcpuLimitExceeded",
+ msg: "You have requested more vCPU capacity than your current vCPU limit of 64 allows for the instance bucket that the specified instance type belongs to. Please visit http://aws.amazon.com/contact-us/ec2-request to request an adjustment to this limit.",
+ typeSpecific: false,
+ quotaGroupSpecific: true,
+ },
} {
- capacityError := awserr.New(trial.code, trial.msg, nil)
+ capacityError := &ec2stubError{Code: trial.code, Message: trial.msg}
wrapped = wrapError(capacityError, nil)
caperr, ok := wrapped.(cloud.CapacityError)
c.Check(ok, check.Equals, true)
c.Check(caperr.IsCapacityError(), check.Equals, true)
- c.Check(caperr.IsInstanceTypeSpecific(), check.Equals, true)
+ c.Check(caperr.IsInstanceTypeSpecific(), check.Equals, trial.typeSpecific)
+ c.Check(caperr.IsInstanceQuotaGroupSpecific(), check.Equals, trial.quotaGroupSpecific)
+ }
+}
+
+func (*EC2InstanceSetSuite) TestInstanceQuotaGroup(c *check.C) {
+ ap, _, _, _ := GetInstanceSet(c, `{
+ "InstanceTypeQuotaGroups": {
+ "a": "standard",
+ "m": "standard",
+ "t": "standard",
+ "p5": "p5"
+ }
+}`)
+
+ for _, trial := range []struct {
+ ptype string
+ spot bool
+ quotaGroup cloud.InstanceQuotaGroup
+ }{
+ {ptype: "g1.large", quotaGroup: "g"},
+ {ptype: "x1.large", quotaGroup: "x"},
+ {ptype: "inf1.2xlarge", quotaGroup: "inf"},
+ {ptype: "a1.small", quotaGroup: "standard"},
+ {ptype: "m1.xlarge", quotaGroup: "standard"},
+ {ptype: "m1.xlarge", spot: true, quotaGroup: "standard-spot"},
+ {ptype: "p4.xlarge", spot: true, quotaGroup: "p-spot"},
+ {ptype: "p5.xlarge", spot: true, quotaGroup: "p5-spot"},
+ {ptype: "t3.2xlarge", quotaGroup: "standard"},
+ {ptype: "trn1.2xlarge", quotaGroup: "trn"},
+ {ptype: "trn1.2xlarge", spot: true, quotaGroup: "trn-spot"},
+ {ptype: "imaginary9.5xlarge", quotaGroup: "imaginary"},
+ {ptype: "", quotaGroup: ""},
+ } {
+ c.Check(ap.InstanceQuotaGroup(arvados.InstanceType{
+ ProviderType: trial.ptype,
+ Preemptible: trial.spot,
+ }), check.Equals, trial.quotaGroup)
+ }
+}
+
+func (*EC2InstanceSetSuite) TestAWSKeyFingerprints(c *check.C) {
+ for _, keytype := range []string{"rsa", "ed25519"} {
+ tmpdir := c.MkDir()
+ buf, err := exec.Command("ssh-keygen", "-f", tmpdir+"/key", "-N", "", "-t", keytype).CombinedOutput()
+ c.Assert(err, check.IsNil, check.Commentf("ssh-keygen: %s", buf))
+ var expectfps []string
+ switch keytype {
+ case "rsa":
+ for _, hash := range []string{"md5", "sha1"} {
+ cmd := exec.Command("bash", "-c", "set -e -o pipefail; ssh-keygen -ef key -m PEM | openssl rsa -RSAPublicKey_in -outform DER | openssl "+hash+" -c")
+ cmd.Dir = tmpdir
+ buf, err := cmd.CombinedOutput()
+ c.Assert(err, check.IsNil, check.Commentf("bash: %s", buf))
+ expectfps = append(expectfps, string(regexp.MustCompile(`[0-9a-f:]{20,}`).Find(buf)))
+ }
+ case "ed25519":
+ buf, err := exec.Command("ssh-keygen", "-l", "-f", tmpdir+"/key").CombinedOutput()
+ c.Assert(err, check.IsNil, check.Commentf("ssh-keygen: %s", buf))
+ sum := string(regexp.MustCompile(`SHA256:\S+`).Find(buf))
+ expectfps = []string{sum + "=", sum}
+ default:
+ c.Error("don't know how to test fingerprint for key type " + keytype)
+ continue
+ }
+ pk, err := libconfig.LoadSSHKey("file://" + tmpdir + "/key")
+ c.Assert(err, check.IsNil)
+ fingerprints, err := awsKeyFingerprints(pk.PublicKey())
+ c.Assert(err, check.IsNil)
+ c.Check(fingerprints, check.DeepEquals, expectfps)
}
}
diff --git a/lib/cloud/interfaces.go b/lib/cloud/interfaces.go
index a2aa9e1432..980cb592e0 100644
--- a/lib/cloud/interfaces.go
+++ b/lib/cloud/interfaces.go
@@ -45,9 +45,13 @@ type CapacityError interface {
// If true, wait before trying to create more instances.
IsCapacityError() bool
// If true, the condition is specific to the requested
- // instance types. Wait before trying to create more
- // instances of that same type.
+ // instance type. Wait before trying to create more instances
+ // of that same type.
IsInstanceTypeSpecific() bool
+ // If true, the condition affects all instance types in the
+	// same instance quota group. This implies
+ // IsInstanceTypeSpecific() returns false.
+ IsInstanceQuotaGroupSpecific() bool
error
}
@@ -55,6 +59,7 @@ type SharedResourceTags map[string]string
type InstanceSetID string
type InstanceTags map[string]string
type InstanceID string
+type InstanceQuotaGroup string
type ImageID string
// An Executor executes commands on an ExecutorTarget.
@@ -158,6 +163,10 @@ type InstanceSet interface {
// InstanceIDs returned by the instances' ID() methods.
Instances(InstanceTags) ([]Instance, error)
+ // Return the instance quota group of the given instance type.
+ // See (CapacityError)IsInstanceQuotaGroupSpecific().
+ InstanceQuotaGroup(arvados.InstanceType) InstanceQuotaGroup
+
// Stop any background tasks and release other resources.
Stop()
}
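
Editor's note: a caller might combine the new interface pieces like this; a hypothetical consumer-side sketch (the dispatcher's real scheduling logic lives elsewhere and is more involved):

// backoffScope picks how widely to suspend instance creation after a
// failed Create(), using the new CapacityError flags and the
// InstanceSet's quota-group mapping. Hypothetical; for illustration.
func backoffScope(is cloud.InstanceSet, it arvados.InstanceType, err error) string {
	ce, ok := err.(cloud.CapacityError)
	if !ok || !ce.IsCapacityError() {
		return "no capacity backoff"
	}
	switch {
	case ce.IsInstanceTypeSpecific():
		return "back off instance type " + it.ProviderType
	case ce.IsInstanceQuotaGroupSpecific():
		return "back off quota group " + string(is.InstanceQuotaGroup(it))
	default:
		return "back off all instance types"
	}
}
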
diff --git a/lib/cloud/loopback/loopback.go b/lib/cloud/loopback/loopback.go
index 41878acd22..e6fc6debd3 100644
--- a/lib/cloud/loopback/loopback.go
+++ b/lib/cloud/loopback/loopback.go
@@ -114,6 +114,10 @@ func (is *instanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {
return ret, nil
}
+func (is *instanceSet) InstanceQuotaGroup(arvados.InstanceType) cloud.InstanceQuotaGroup {
+ return ""
+}
+
func (is *instanceSet) Stop() {
is.mtx.Lock()
defer is.mtx.Unlock()
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 14e839a6cd..05c344ad25 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -2,13 +2,19 @@
#
# SPDX-License-Identifier: AGPL-3.0
-# Do not use this file for site configuration. Create
-# /etc/arvados/config.yml instead.
+# This file provides documentation and default values for all Arvados
+# configuration entries.
#
-# The order of precedence (highest to lowest):
-# 1. Legacy component-specific config files (deprecated)
-# 2. /etc/arvados/config.yml
-# 3. config.default.yml
+# It is NOT intended to be copied and used as a starting point for a
+# site configuration file. If you do that, the "SAMPLE" entries will
+# be misinterpreted as real configuration entries, and future upgrades
+# will leave your configuration file with stale defaults and
+# documentation. Instead, you should create /etc/arvados/config.yml
+# with only the entries you want to override, and refer to the latest
+# version of this file for documentation.
+#
+# To show the entire configuration, computed from the current defaults
+# and your local site configuration, run `arvados-server config-dump`.
Clusters:
xxxxx:
@@ -74,12 +80,6 @@ Clusters:
Keepbalance:
InternalURLs: {SAMPLE: {ListenURL: ""}}
ExternalURL: ""
- GitHTTP:
- InternalURLs: {SAMPLE: {ListenURL: ""}}
- ExternalURL: ""
- GitSSH:
- InternalURLs: {SAMPLE: {ListenURL: ""}}
- ExternalURL: ""
DispatchCloud:
InternalURLs: {SAMPLE: {ListenURL: ""}}
ExternalURL: ""
@@ -178,6 +178,28 @@ Clusters:
Health:
InternalURLs: {SAMPLE: {ListenURL: ""}}
ExternalURL: ""
+ ContainerWebServices:
+ InternalURLs: {SAMPLE: {ListenURL: ""}}
+ # URL used to make HTTP requests that are proxied to
+ # containers (which may host web apps or APIs). Requires
+ # wildcard DNS and TLS certificate.
+ #
+ # From a subdomain:
+ # https://*.containers.uuid_prefix.arvadosapi.com
+ #
+ # From the main domain:
+ # https://*--containers.uuid_prefix.arvadosapi.com
+ ExternalURL: ""
+ # If ExternalPortMin and ExternalPortMax are non-zero, and
+ # ExternalURL is not a wildcard, container services listed in
+ # published_ports can be reached via dynamically assigned
+ # ports in the range [ExternalPortMin, ExternalPortMax]. For
+ # example, if ExternalURL is https://example.com/ and
+ # ExternalPortMin is 8000, Arvados will dynamically assign
+ # https://example.com:8000/, https://example.com:8001/, etc.,
+ # as proxy addresses for services in running containers.
+ ExternalPortMin: 0
+ ExternalPortMax: 0
PostgreSQL:
# max concurrent connections per arvados server daemon
@@ -211,10 +233,8 @@ Clusters:
# normally be returned in a single response).
# Note 1: This setting never reduces the number of returned rows to
# zero, no matter how big the first data row is.
- # Note 2: Currently, this is only checked against a specific set of
- # columns that tend to get large (collections.manifest_text,
- # containers.mounts, workflows.definition). Other fields (e.g.,
- # "properties" hashes) are not counted against this limit.
+ # Note 2: Only columns that *can* grow large count against this limit.
+ # Small fixed-width columns like UUIDs and datetimes never do.
MaxIndexDatabaseRead: 134217728
    # Maximum number of items to return when responding to APIs that
@@ -239,7 +259,7 @@ Clusters:
    # Maximum number of requests to process concurrently in a single
    # RailsAPI service process, or 0 for no limit.
- MaxConcurrentRailsRequests: 8
+ MaxConcurrentRailsRequests: 16
# Maximum number of incoming requests to hold in a priority
# queue waiting for one of the MaxConcurrentRequests slots to be
@@ -261,11 +281,6 @@ Clusters:
# MaxConcurrentRequests.
MaxGatewayTunnels: 1000
- # Fraction of MaxConcurrentRequests that can be "log create"
- # messages at any given time. This is to prevent logging
- # updates from crowding out more important requests.
- LogCreateRequestFraction: 0.50
-
# Maximum number of 64MiB memory buffers per Keepstore server process, or
# 0 for no limit. When this limit is reached, up to
# (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
@@ -340,7 +355,6 @@ Clusters:
# AutoSetupUsernameBlacklist is a list of usernames to be blacklisted for auto setup.
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
- AutoSetupNewUsersWithRepository: false
AutoSetupUsernameBlacklist:
arvados: {}
git: {}
@@ -373,6 +387,17 @@ Clusters:
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false
+ # Support email address to display in Workbench.
+ SupportEmailAddress: "arvados@example.com"
+
+ # Outgoing email configuration:
+ #
+ # In order to send mail, Arvados expects a default SMTP server
+ # on localhost:25. It cannot require authentication on
+ # connections from localhost. That server should be configured
+ # to relay mail to a "real" SMTP server that is able to send
+ # email on behalf of your domain.
+
# Recipient for notification email sent out when a user sets a
# profile on their account.
UserProfileNotificationAddress: ""
@@ -416,6 +441,10 @@ Clusters:
# Currently implemented for OpenID Connect only.
PreferDomainForUsername: ""
+ # Send an email to each user when their account has been set up
+ # (meaning they are able to log in).
+ SendUserSetupNotificationEmail: false
+
# Ruby ERB template used for the email sent out to users when
# they have been set up.
UserSetupMailText: |
@@ -758,8 +787,8 @@ Clusters:
TTL: 300s
# Maximum amount of data cached in /var/cache/arvados/keep.
- # Can be given as a percentage ("10%") or a number of bytes
- # ("10 GiB")
+ # Can be given as a percentage of filesystem size ("10%") or a
+ # number of bytes ("10 GiB")
DiskCacheSize: 10%
# Approximate memory limit (in bytes) for session cache.
@@ -801,6 +830,15 @@ Clusters:
# load on the API server and you don't need it.
WebDAVLogEvents: true
+ # If a client requests partial content past the start of a file,
+ # and a request from the same client for the same file was logged
+ # within the past WebDAVLogDownloadInterval, do not write a new log.
+ # This throttling applies to both printed and API server logs.
+ # This reduces log output when clients like `aws s3 cp` download
+ # one file in small chunks in parallel.
+ # Set this to 0 to disable throttling and log all requests.
+ WebDAVLogDownloadInterval: 30s
+
# Per-connection output buffer for WebDAV downloads. May improve
# throughput for large files, particularly when storage volumes
# have high latency.
@@ -855,8 +893,8 @@ Clusters:
      # itself in its config response ("issuer" key). If the
# configured value is "https://example" and the provider
# returns "https://example:443" or "https://example/" then
- # login will fail, even though those URLs are equivalent
- # (RFC3986).
+ # login will fail, even though those URLs are equivalent (RFC
+ # 3986).
Issuer: ""
# Your client ID and client secret (supplied by the provider).
@@ -983,7 +1021,7 @@ Clusters:
# entries that have the SearchAttribute present.
#
# Special characters in assertion values must be escaped (see
- # RFC4515).
+ # RFC 4515).
#
# Example: "(objectClass=person)"
SearchFilters: ""
@@ -1025,11 +1063,11 @@ Clusters:
# Default value zero means tokens don't have expiration.
TokenLifetime: 0s
- # If true (default) tokens issued through login are allowed to create
- # new tokens.
- # If false, tokens issued through login are not allowed to
- # viewing/creating other tokens. New tokens can only be created
- # by going through login again.
+ # If true (default), tokens are allowed to create new tokens and
+ # view existing tokens belonging to the same user.
+ # If false, tokens are not allowed to view or create other
+ # tokens. New tokens can only be created by going through login
+ # again.
IssueTrustedTokens: true
# Origins (scheme://host[:port]) of clients trusted to receive
@@ -1058,24 +1096,6 @@ Clusters:
# production use.
TrustPrivateNetworks: false
- Git:
- # Path to git or gitolite-shell executable. Each authenticated
- # request will execute this program with the single argument "http-backend"
- GitCommand: /usr/bin/git
-
- # Path to Gitolite's home directory. If a non-empty path is given,
- # the CGI environment will be set up to support the use of
- # gitolite-shell as a GitCommand: for example, if GitoliteHome is
- # "/gh", then the CGI environment will have GITOLITE_HTTP_HOME=/gh,
- # PATH=$PATH:/gh/bin, and GL_BYPASS_ACCESS_CHECKS=1.
- GitoliteHome: ""
-
- # Git repositories must be readable by api server, or you won't be
- # able to submit crunch jobs. To pass the test suites, put a clone
- # of the arvados tree in {git_repositories_dir}/arvados.git or
- # {git_repositories_dir}/arvados/.git
- Repositories: /var/lib/arvados/git/repositories
-
TLS:
# Use "file:///var/lib/acme/live/example.com/cert" and
# ".../privkey" to load externally managed certificates.
@@ -1178,7 +1198,7 @@ Clusters:
# or an idle instance of type B is already running.
MaximumPriceFactor: 1.5
- # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
+ # PEM encoded SSH key (RSA, DSA, ECDSA, or ED25519) used by the
# cloud dispatcher for executing containers on worker VMs.
# Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
# and ends with "\n-----END RSA PRIVATE KEY-----\n".
@@ -1186,6 +1206,8 @@ Clusters:
# Use "file:///absolute/path/to/key" to load the key from a
# separate file instead of embedding it in the configuration
# file.
+ #
+ # Amazon EC2 only supports RSA and ED25519 keys.
DispatchPrivateKey: ""
# Maximum time to wait for workers to come up before abandoning
@@ -1260,45 +1282,6 @@ Clusters:
LocalKeepLogsToContainerLog: none
Logging:
- # Periodically (see SweepInterval) Arvados will check for
- # containers that have been finished for at least this long,
- # and delete their stdout, stderr, arv-mount, crunch-run, and
- # crunchstat logs from the logs table.
- MaxAge: 720h
-
- # How often to delete cached log entries for finished
- # containers (see MaxAge).
- SweepInterval: 12h
-
- # These two settings control how frequently log events are flushed to the
- # database. Log lines are buffered until either crunch_log_bytes_per_event
- # has been reached or crunch_log_seconds_between_events has elapsed since
- # the last flush.
- LogBytesPerEvent: 4096
- LogSecondsBetweenEvents: 5s
-
- # The sample period for throttling logs.
- LogThrottlePeriod: 60s
-
- # Maximum number of bytes that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- LogThrottleBytes: 65536
-
- # Maximum number of lines that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- LogThrottleLines: 1024
-
- # Maximum bytes that may be logged as legacy log events
- # (records posted to the "logs" table). Starting with Arvados
- # 2.7, container live logging has migrated to a new system
- # (polling the container request live log endpoint) and this
- # value should be 0. As of this writing, the container will
- # still create a single log on the API server, noting for that
- # log events are throttled.
- LimitLogBytesPerJob: 0
-
- LogPartialLineThrottlePeriod: 5s
-
# Container logs are written to Keep and saved in a
# collection, which is updated periodically while the
# container runs. This value sets the interval between
@@ -1314,7 +1297,7 @@ Clusters:
# An admin user can use "arvados-client shell" to start an
# interactive shell (with any user ID) in any running
# container.
- Admin: false
+ Admin: true
# Any user can use "arvados-client shell" to start an
# interactive shell (with any user ID) in any running
@@ -1335,47 +1318,6 @@ Clusters:
SbatchArgumentsList: []
SbatchEnvironmentVariables:
SAMPLE: ""
- Managed:
- # Path to dns server configuration directory
- # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
- # files or touch restart.txt (see below).
- DNSServerConfDir: ""
-
- # Template file for the dns server host snippets. See
- # unbound.template in this directory for an example. If false, do
- # not write any config files.
- DNSServerConfTemplate: ""
-
- # String to write to {dns_server_conf_dir}/restart.txt (with a
- # trailing newline) after updating local data. If false, do not
- # open or write the restart.txt file.
- DNSServerReloadCommand: ""
-
- # Command to run after each DNS update. Template variables will be
- # substituted; see the "unbound" example below. If false, do not run
- # a command.
- DNSServerUpdateCommand: ""
-
- ComputeNodeDomain: ""
- ComputeNodeNameservers:
- "192.168.1.1": {}
- SAMPLE: {}
-
- # Hostname to assign to a compute node when it sends a "ping" and the
- # hostname in its Node record is nil.
- # During bootstrapping, the "ping" script is expected to notice the
- # hostname given in the ping response, and update its unix hostname
- # accordingly.
- # If false, leave the hostname alone (this is appropriate if your compute
- # nodes' hostnames are already assigned by some other mechanism).
- #
- # One way or another, the hostnames of your node records should agree
- # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
- #
- # Example for compute0000, compute0001, ....:
- # assign_node_hostname: compute%04d
- # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
- AssignNodeHostname: "compute%d"
LSF:
# Arguments to bsub when submitting Arvados containers as LSF jobs.
@@ -1386,7 +1328,7 @@ Clusters:
# %C number of VCPUs
# %M memory in MB
# %T tmp in MB
- # %G number of GPU devices (runtime_constraints.cuda.device_count)
+ # %G number of GPU devices (runtime_constraints.gpu.device_count)
# %W maximum run time in minutes (see MaxRunTimeOverhead and
# MaxRunTimeDefault below)
#
@@ -1407,8 +1349,8 @@ Clusters:
# Arguments that will be appended to the bsub command line
# when submitting Arvados containers as LSF jobs with
- # runtime_constraints.cuda.device_count > 0
- BsubCUDAArguments: ["-gpu", "num=%G"]
+ # runtime_constraints.gpu.device_count > 0
+ BsubGPUArguments: ["-gpu", "num=%G"]
# Use sudo to switch to this user account when submitting LSF
# jobs.
@@ -1431,24 +1373,6 @@ Clusters:
# MaxRunTimeDefault: 2h
MaxRunTimeDefault: 0
- JobsAPI:
- # Enable the legacy 'jobs' API (crunch v1). This value must be a string.
- #
- # Note: this only enables read-only access, creating new
- # legacy jobs and pipelines is not supported.
- #
- # 'auto' -- (default) enable the Jobs API only if it has been used before
- # (i.e., there are job records in the database)
- # 'true' -- enable the Jobs API despite lack of existing records.
- # 'false' -- disable the Jobs API despite presence of existing records.
- Enable: 'auto'
-
- # Git repositories must be readable by api server, or you won't be
- # able to submit crunch jobs. To pass the test suites, put a clone
- # of the arvados tree in {git_repositories_dir}/arvados.git or
- # {git_repositories_dir}/arvados/.git
- GitInternalDir: /var/lib/arvados/internal.git
-
CloudVMs:
# Enable the cloud scheduler.
Enable: false
@@ -1587,14 +1511,19 @@ Clusters:
# An executable file (located on the dispatcher host) to be
# copied to cloud instances at runtime and used as the
- # container runner/supervisor. The default value is the
- # dispatcher program itself.
+ # "crunch-run" container runner/supervisor. The default value
+ # is the dispatcher program itself.
#
# Use an empty string to disable this step: nothing will be
# copied, and cloud instances are assumed to have a suitable
# version of crunch-run installed; see CrunchRunCommand above.
DeployRunnerBinary: "/proc/self/exe"
+ # Directory to store the crunch-run binary on cloud instances
+ # (see DeployRunnerBinary above). The "mkdir -p" command will
+ # be used to create the directory and its parents if needed.
+ DeployRunnerDirectory: /tmp/arvados-crunch-run
+
# Install the Dispatcher's SSH public key (derived from
# DispatchPrivateKey) when creating new cloud
# instances. Change this to false if you are using a different
@@ -1650,7 +1579,7 @@ Clusters:
SubnetID: ""
EBSVolumeType: gp2
- AdminUsername: debian
+
# (ec2) name of the IAMInstanceProfile for instances started by
# the cloud dispatcher. Leave blank when not needed.
IAMInstanceProfile: ""
@@ -1669,6 +1598,31 @@ Clusters:
# price of $0.10/GiB and can be entered here as 0.10.
EBSPrice: 0.10
+ # (ec2) Mapping of alphabetic instance type prefix to
+ # instance quota group. Any prefix not listed here will be
+ # treated as a distinct instance quota group. For example,
+ # "trn1.2xlarge" will implicitly belong to instance quota
+ # group "trn".
+ #
+ # Knowing that multiple instance types belong to the same
+ # quota group enables the dispatcher to minimize futile
+ # attempts to create new instances when a quota has been
+ # reached.
+ #
+ # All keys must be lowercase.
+ InstanceTypeQuotaGroups:
+ a: standard
+ c: standard
+ d: standard
+ h: standard
+ i: standard
+ m: standard
+ r: standard
+ t: standard
+ z: standard
+ vt: g
+ p5: p5
+
# (azure) Credentials.
SubscriptionID: ""
ClientID: ""
@@ -1709,7 +1663,7 @@ Clusters:
# Account (that already exists in the VM image) that will be
# set up with an ssh authorized key to allow the compute
# dispatcher to connect.
- AdminUsername: arvados
+ AdminUsername: crunch
InstanceTypes:
@@ -1722,6 +1676,7 @@ Clusters:
RAM: 128MiB
IncludedScratch: 16GB
AddedScratch: 0
+
# Hourly price ($), used to select node types for containers,
# and to calculate estimated container costs. For spot
# instances on EC2, this is also used as the maximum price
@@ -1731,12 +1686,31 @@ Clusters:
# given here is used to compute container cost estimates.
Price: 0.1
Preemptible: false
- # Include this section if the node type includes GPU (CUDA) support
- CUDA:
+
+ # Include this section if the instance type includes GPU support
+ GPU:
+ # The software stack, currently "cuda" or "rocm"
+ Stack: "cuda"
+
+ # The version of the driver installed on this instance, in
+ # X.Y format
DriverVersion: "11.0"
- HardwareCapability: "9.0"
+
+ # The architecture or capabilities of the GPU hardware
+ #
+ # For 'cuda', this is the Compute Capability in X.Y
+ # format.
+ #
+ # For 'rocm', this is the LLVM target (e.g. gfx1100) for
+ # the GPU hardware.
+ HardwareTarget: "9.0"
+
+ # The number of GPUs on this instance
DeviceCount: 1
+ # The amount of VRAM per GPU, in bytes
+ VRAM: 8000000000
+
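For reference, the GPU section above maps onto a Go struct whose field set matches the arvados.GPUFeatures values assigned by the migration code later in this diff; the comments and the VRAM type are assumptions, not copied from the SDK:

package arvados

// GPUFeatures mirrors the GPU section of an InstanceTypes entry. The
// field names match the migration code in lib/config/deprecated.go
// below; the per-field types shown are assumed.
type GPUFeatures struct {
	Stack          string // "cuda" or "rocm"
	DriverVersion  string // minimum driver version, "X.Y"
	HardwareTarget string // CUDA compute capability or ROCm LLVM target
	DeviceCount    int    // number of GPUs on this instance type
	VRAM           int64  // per-GPU VRAM, in bytes
}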
StorageClasses:
# If you use multiple storage classes, specify them here, using
@@ -1796,7 +1770,6 @@ Clusters:
DriverParameters:
# for s3 driver -- see
# https://doc.arvados.org/install/configure-s3-object-storage.html
- IAMRole: aaaaa
AccessKeyID: aaaaa
SecretAccessKey: aaaaa
Endpoint: ""
@@ -1804,6 +1777,7 @@ Clusters:
Bucket: aaaaa
LocationConstraint: false
V2Signature: false
+ UsePathStyle: false
IndexPageSize: 1000
ConnectTimeout: 1m
ReadTimeout: 10m
@@ -1853,32 +1827,6 @@ Clusters:
# should leave this alone.
Serialize: false
- Mail:
- # In order to send mail, Arvados expects a default SMTP server
- # on localhost:25. It cannot require authentication on
- # connections from localhost. That server should be configured
- # to relay mail to a "real" SMTP server that is able to send
- # email on behalf of your domain.
-
- # See also the "Users" configuration section for additional
- # email-related options.
-
- # When a user has been set up (meaning they are able to log in)
- # they will receive an email using the template specified
- # earlier in Users.UserSetupMailText
- SendUserSetupNotificationEmail: true
-
- # Bug/issue report notification to and from addresses
- IssueReporterEmailFrom: "arvados@example.com"
- IssueReporterEmailTo: "arvados@example.com"
- SupportEmailAddress: "arvados@example.com"
-
- # Generic issue email from
- EmailFrom: "arvados@example.com"
-
- # No longer supported, to be removed.
- MailchimpAPIKey: ""
- MailchimpListID: ""
RemoteClusters:
"*":
Host: ""
diff --git a/lib/config/deprecated.go b/lib/config/deprecated.go
index d518b3414a..445161d7ab 100644
--- a/lib/config/deprecated.go
+++ b/lib/config/deprecated.go
@@ -21,6 +21,18 @@ type deprRequestLimits struct {
MultiClusterRequestConcurrency *int
}
+type deprCUDAFeatures struct {
+ DriverVersion string
+ HardwareCapability string
+ DeviceCount int
+}
+
+type deprInstanceType struct {
+ CUDA *deprCUDAFeatures
+}
+
+type deprInstanceTypeMap map[string]deprInstanceType
+
type deprCluster struct {
RequestLimits deprRequestLimits
NodeProfiles map[string]nodeProfile
@@ -31,6 +43,16 @@ type deprCluster struct {
ProviderAppID *string
ProviderAppSecret *string
}
+ Mail struct {
+ SendUserSetupNotificationEmail *bool
+ SupportEmailAddress *string
+ }
+ Containers struct {
+ LSF struct {
+ BsubCUDAArguments *[]string
+ }
+ }
+ InstanceTypes deprInstanceTypeMap
}
type deprecatedConfig struct {
@@ -87,6 +109,18 @@ func (ldr *Loader) applyDeprecatedConfig(cfg *arvados.Config) error {
if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
*dst = *n
}
+ if dst, addr := &cluster.Users.SupportEmailAddress, dcluster.Mail.SupportEmailAddress; addr != nil {
+ *dst = *addr
+ ldr.Logger.Warnf("using your old config key Mail.SupportEmailAddress -- but you should rename it to Users.SupportEmailAddress")
+ }
+ if dst, b := &cluster.Users.SendUserSetupNotificationEmail, dcluster.Mail.SendUserSetupNotificationEmail; b != nil {
+ *dst = *b
+ ldr.Logger.Warnf("using your old config key Mail.SendUserSetupNotificationEmail -- but you should rename it to Users.SendUserSetupNotificationEmail")
+ }
+ if dst, n := &cluster.Containers.LSF.BsubGPUArguments, dcluster.Containers.LSF.BsubCUDAArguments; n != nil {
+ *dst = *n
+ ldr.Logger.Warnf("using your old config key Containers.LSF.BsubCUDAArguments -- but you should rename it to Containers.LSF.BsubGPUArguments")
+ }
// Google* moved to Google.*
if dst, n := &cluster.Login.Google.ClientID, dcluster.Login.GoogleClientID; n != nil && *n != *dst {
@@ -103,6 +137,21 @@ func (ldr *Loader) applyDeprecatedConfig(cfg *arvados.Config) error {
*dst = *n
}
+ for name, instanceType := range dcluster.InstanceTypes {
+ if instanceType.CUDA != nil {
+ updInstanceType := cluster.InstanceTypes[name]
+ updInstanceType.GPU = arvados.GPUFeatures{
+ Stack: "cuda",
+ DriverVersion: instanceType.CUDA.DriverVersion,
+ HardwareTarget: instanceType.CUDA.HardwareCapability,
+ DeviceCount: instanceType.CUDA.DeviceCount,
+ VRAM: 0,
+ }
+ cluster.InstanceTypes[name] = updInstanceType
+ ldr.Logger.Warnf("InstanceType %q has deprecated CUDA section, should be migrated to GPU section", name)
+ }
+ }
+
cfg.Clusters[id] = cluster
}
return nil
@@ -510,56 +559,6 @@ func (ldr *Loader) loadOldKeepWebConfig(cfg *arvados.Config) error {
return nil
}
-const defaultGitHttpdConfigPath = "/etc/arvados/git-httpd/git-httpd.yml"
-
-type oldGitHttpdConfig struct {
- Client *arvados.Client
- Listen *string
- GitCommand *string
- GitoliteHome *string
- RepoRoot *string
- ManagementToken *string
-}
-
-func (ldr *Loader) loadOldGitHttpdConfig(cfg *arvados.Config) error {
- if ldr.GitHttpdPath == "" {
- return nil
- }
- var oc oldGitHttpdConfig
- err := ldr.loadOldConfigHelper("arvados-git-httpd", ldr.GitHttpdPath, &oc)
- if os.IsNotExist(err) && ldr.GitHttpdPath == defaultGitHttpdConfigPath {
- return nil
- } else if err != nil {
- return err
- }
-
- cluster, err := cfg.GetCluster("")
- if err != nil {
- return err
- }
-
- loadOldClientConfig(cluster, oc.Client)
-
- if oc.Listen != nil {
- cluster.Services.GitHTTP.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
- }
- if oc.ManagementToken != nil {
- cluster.ManagementToken = *oc.ManagementToken
- }
- if oc.GitCommand != nil {
- cluster.Git.GitCommand = *oc.GitCommand
- }
- if oc.GitoliteHome != nil {
- cluster.Git.GitoliteHome = *oc.GitoliteHome
- }
- if oc.RepoRoot != nil {
- cluster.Git.Repositories = *oc.RepoRoot
- }
-
- cfg.Clusters[cluster.ClusterID] = *cluster
- return nil
-}
-
const defaultKeepBalanceConfigPath = "/etc/arvados/keep-balance/keep-balance.yml"
type oldKeepBalanceConfig struct {
diff --git a/lib/config/deprecated_test.go b/lib/config/deprecated_test.go
index e06a1f231d..fd54a7817e 100644
--- a/lib/config/deprecated_test.go
+++ b/lib/config/deprecated_test.go
@@ -49,6 +49,26 @@ func testLoadLegacyConfig(content []byte, mungeFlag string, c *check.C) (*arvado
return cluster, nil
}
+func (s *LoadSuite) TestOldEmailConfiguration(c *check.C) {
+ logs := checkEquivalent(c, `
+Clusters:
+ z1111:
+ Mail:
+ SendUserSetupNotificationEmail: false
+ SupportEmailAddress: "support@example.invalid"
+`, `
+Clusters:
+ z1111:
+ Users:
+ SendUserSetupNotificationEmail: false
+ SupportEmailAddress: "support@example.invalid"
+`)
+ c.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*Mail\.SendUserSetupNotificationEmail.*`)
+ c.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*Mail\.SupportEmailAddress.*`)
+ c.Check(logs, check.Matches, `(?ms).*using your old config key Mail\.SendUserSetupNotificationEmail -- but you should rename it to Users\.SendUserSetupNotificationEmail.*`)
+ c.Check(logs, check.Matches, `(?ms).*using your old config key Mail\.SupportEmailAddress -- but you should rename it to Users\.SupportEmailAddress.*`)
+}
+
func (s *LoadSuite) TestLegacyVolumeDriverParameters(c *check.C) {
logs := checkEquivalent(c, `
Clusters:
@@ -283,52 +303,6 @@ func fmtKeepproxyConfig(param string, debugLog bool) string {
`, debugLog, param)
}
-func (s *LoadSuite) TestLegacyArvGitHttpdConfig(c *check.C) {
- content := []byte(`
-{
- "Client": {
- "Scheme": "",
- "APIHost": "example.com",
- "AuthToken": "abcdefg",
- },
- "Listen": ":9000",
- "GitCommand": "/test/git",
- "GitoliteHome": "/test/gitolite",
- "RepoRoot": "/test/reporoot",
- "ManagementToken": "xyzzy"
-}
-`)
- f := "-legacy-git-httpd-config"
- cluster, err := testLoadLegacyConfig(content, f, c)
-
- c.Assert(err, check.IsNil)
- c.Assert(cluster, check.NotNil)
- c.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: "https", Host: "example.com", Path: "/"})
- c.Check(cluster.SystemRootToken, check.Equals, "abcdefg")
- c.Check(cluster.ManagementToken, check.Equals, "xyzzy")
- c.Check(cluster.Git.GitCommand, check.Equals, "/test/git")
- c.Check(cluster.Git.GitoliteHome, check.Equals, "/test/gitolite")
- c.Check(cluster.Git.Repositories, check.Equals, "/test/reporoot")
- c.Check(cluster.Services.Keepproxy.InternalURLs[arvados.URL{Host: ":9000"}], check.Equals, arvados.ServiceInstance{})
-}
-
-// Tests fix for https://dev.arvados.org/issues/15642
-func (s *LoadSuite) TestLegacyArvGitHttpdConfigDoesntDisableMissingItems(c *check.C) {
- content := []byte(`
-{
- "Client": {
- "Scheme": "",
- "APIHost": "example.com",
- "AuthToken": "abcdefg",
- }
-}
-`)
- cluster, err := testLoadLegacyConfig(content, "-legacy-git-httpd-config", c)
- c.Assert(err, check.IsNil)
- // The resulting ManagementToken should be the one set up on the test server.
- c.Check(cluster.ManagementToken, check.Equals, TestServerManagementToken)
-}
-
func (s *LoadSuite) TestLegacyKeepBalanceConfig(c *check.C) {
f := "-legacy-keepbalance-config"
content := []byte(fmtKeepBalanceConfig(""))
@@ -385,3 +359,33 @@ func fmtKeepBalanceConfig(param string) string {
}
`, param)
}
+
+func (s *LoadSuite) TestDeprecatedCUDA(c *check.C) {
+ checkEquivalent(c, `
+Clusters:
+ z1111:
+ InstanceTypes:
+ gpuInstance:
+ CUDA:
+ DriverVersion: "11.0"
+ HardwareCapability: "9.0"
+ DeviceCount: 1
+ Containers:
+ LSF:
+ BsubCUDAArguments: ["-gpu"]
+`, `
+Clusters:
+ z1111:
+ InstanceTypes:
+ gpuInstance:
+ GPU:
+ DriverVersion: "11.0"
+ HardwareTarget: "9.0"
+ DeviceCount: 1
+ Stack: "cuda"
+ VRAM: 0
+ Containers:
+ LSF:
+ BsubGPUArguments: ["-gpu"]
+`)
+}
diff --git a/lib/config/export.go b/lib/config/export.go
index f511ebbcb1..5c12d3edaa 100644
--- a/lib/config/export.go
+++ b/lib/config/export.go
@@ -59,108 +59,104 @@ func ExportJSON(w io.Writer, cluster *arvados.Cluster) error {
// exists.
var whitelist = map[string]bool{
// | sort -t'"' -k2,2
- "API": true,
- "API.AsyncPermissionsUpdateInterval": false,
- "API.DisabledAPIs": false,
- "API.FreezeProjectRequiresDescription": true,
- "API.FreezeProjectRequiresProperties": true,
- "API.FreezeProjectRequiresProperties.*": true,
- "API.KeepServiceRequestTimeout": false,
- "API.LockBeforeUpdate": false,
- "API.LogCreateRequestFraction": false,
- "API.MaxConcurrentRailsRequests": false,
- "API.MaxConcurrentRequests": false,
- "API.MaxGatewayTunnels": false,
- "API.MaxIndexDatabaseRead": false,
- "API.MaxItemsPerResponse": true,
- "API.MaxKeepBlobBuffers": false,
- "API.MaxQueuedRequests": false,
- "API.MaxQueueTimeForLockRequests": false,
- "API.MaxRequestAmplification": false,
- "API.MaxRequestSize": true,
- "API.MaxTokenLifetime": false,
- "API.RequestTimeout": true,
- "API.SendTimeout": true,
- "API.UnfreezeProjectRequiresAdmin": true,
- "API.VocabularyPath": false,
- "API.WebsocketClientEventQueue": false,
- "API.WebsocketServerEventQueue": false,
- "AuditLogs": false,
- "AuditLogs.MaxAge": false,
- "AuditLogs.MaxDeleteBatch": false,
- "AuditLogs.UnloggedAttributes": false,
- "ClusterID": true,
- "Collections": true,
- "Collections.BalanceCollectionBatch": false,
- "Collections.BalanceCollectionBuffers": false,
- "Collections.BalancePeriod": false,
- "Collections.BalancePullLimit": false,
- "Collections.BalanceTimeout": false,
- "Collections.BalanceTrashLimit": false,
- "Collections.BalanceUpdateLimit": false,
- "Collections.BlobDeleteConcurrency": false,
- "Collections.BlobMissingReport": false,
- "Collections.BlobReplicateConcurrency": false,
- "Collections.BlobSigning": true,
- "Collections.BlobSigningKey": false,
- "Collections.BlobSigningTTL": true,
- "Collections.BlobTrash": false,
- "Collections.BlobTrashCheckInterval": false,
- "Collections.BlobTrashConcurrency": false,
- "Collections.BlobTrashLifetime": false,
- "Collections.CollectionVersioning": true,
- "Collections.DefaultReplication": true,
- "Collections.DefaultTrashLifetime": true,
- "Collections.ForwardSlashNameSubstitution": true,
- "Collections.KeepproxyPermission": false,
- "Collections.ManagedProperties": true,
- "Collections.ManagedProperties.*": true,
- "Collections.ManagedProperties.*.*": true,
- "Collections.PreserveVersionIfIdle": true,
- "Collections.S3FolderObjects": true,
- "Collections.TrashSweepInterval": false,
- "Collections.TrustAllContent": true,
- "Collections.WebDAVCache": false,
- "Collections.WebDAVLogEvents": false,
- "Collections.WebDAVOutputBuffer": false,
- "Collections.WebDAVPermission": false,
- "Containers": true,
- "Containers.AlwaysUsePreemptibleInstances": true,
- "Containers.CloudVMs": false,
- "Containers.CrunchRunArgumentsList": false,
- "Containers.CrunchRunCommand": false,
- "Containers.DefaultKeepCacheRAM": true,
- "Containers.DispatchPrivateKey": false,
- "Containers.JobsAPI": true,
- "Containers.JobsAPI.Enable": true,
- "Containers.JobsAPI.GitInternalDir": false,
- "Containers.LocalKeepBlobBuffersPerVCPU": false,
- "Containers.LocalKeepLogsToContainerLog": false,
- "Containers.Logging": false,
- "Containers.LogReuseDecisions": false,
- "Containers.LSF": false,
- "Containers.MaxDispatchAttempts": false,
- "Containers.MaximumPriceFactor": true,
- "Containers.MaxRetryAttempts": true,
- "Containers.MinRetryPeriod": true,
- "Containers.PreemptiblePriceFactor": false,
- "Containers.ReserveExtraRAM": true,
- "Containers.RuntimeEngine": true,
- "Containers.ShellAccess": true,
- "Containers.ShellAccess.Admin": true,
- "Containers.ShellAccess.User": true,
- "Containers.SLURM": false,
- "Containers.StaleLockTimeout": false,
- "Containers.SupportedDockerImageFormats": true,
- "Containers.SupportedDockerImageFormats.*": true,
- "Git": false,
- "InstanceTypes": true,
- "InstanceTypes.*": true,
- "InstanceTypes.*.*": true,
- "InstanceTypes.*.*.*": true,
- "Login": true,
- "Login.Google": true,
- "Login.Google.AlternateEmailAddresses": false,
+ "API": true,
+ "API.AsyncPermissionsUpdateInterval": false,
+ "API.DisabledAPIs": false,
+ "API.FreezeProjectRequiresDescription": true,
+ "API.FreezeProjectRequiresProperties": true,
+ "API.FreezeProjectRequiresProperties.*": true,
+ "API.KeepServiceRequestTimeout": false,
+ "API.LockBeforeUpdate": false,
+ "API.MaxConcurrentRailsRequests": false,
+ "API.MaxConcurrentRequests": false,
+ "API.MaxGatewayTunnels": false,
+ "API.MaxIndexDatabaseRead": false,
+ "API.MaxItemsPerResponse": true,
+ "API.MaxKeepBlobBuffers": false,
+ "API.MaxQueuedRequests": false,
+ "API.MaxQueueTimeForLockRequests": false,
+ "API.MaxRequestAmplification": false,
+ "API.MaxRequestSize": true,
+ "API.MaxTokenLifetime": false,
+ "API.RequestTimeout": true,
+ "API.SendTimeout": true,
+ "API.UnfreezeProjectRequiresAdmin": true,
+ "API.VocabularyPath": false,
+ "API.WebsocketClientEventQueue": false,
+ "API.WebsocketServerEventQueue": false,
+ "AuditLogs": false,
+ "AuditLogs.MaxAge": false,
+ "AuditLogs.MaxDeleteBatch": false,
+ "AuditLogs.UnloggedAttributes": false,
+ "ClusterID": true,
+ "Collections": true,
+ "Collections.BalanceCollectionBatch": false,
+ "Collections.BalanceCollectionBuffers": false,
+ "Collections.BalancePeriod": false,
+ "Collections.BalancePullLimit": false,
+ "Collections.BalanceTimeout": false,
+ "Collections.BalanceTrashLimit": false,
+ "Collections.BalanceUpdateLimit": false,
+ "Collections.BlobDeleteConcurrency": false,
+ "Collections.BlobMissingReport": false,
+ "Collections.BlobReplicateConcurrency": false,
+ "Collections.BlobSigning": true,
+ "Collections.BlobSigningKey": false,
+ "Collections.BlobSigningTTL": true,
+ "Collections.BlobTrash": false,
+ "Collections.BlobTrashCheckInterval": false,
+ "Collections.BlobTrashConcurrency": false,
+ "Collections.BlobTrashLifetime": false,
+ "Collections.CollectionVersioning": true,
+ "Collections.DefaultReplication": true,
+ "Collections.DefaultTrashLifetime": true,
+ "Collections.ForwardSlashNameSubstitution": true,
+ "Collections.KeepproxyPermission": false,
+ "Collections.ManagedProperties": true,
+ "Collections.ManagedProperties.*": true,
+ "Collections.ManagedProperties.*.*": true,
+ "Collections.PreserveVersionIfIdle": true,
+ "Collections.S3FolderObjects": true,
+ "Collections.TrashSweepInterval": false,
+ "Collections.TrustAllContent": true,
+ "Collections.WebDAVCache": false,
+ "Collections.WebDAVLogEvents": false,
+ "Collections.WebDAVLogDownloadInterval": false,
+ "Collections.WebDAVOutputBuffer": false,
+ "Collections.WebDAVPermission": false,
+ "Containers": true,
+ "Containers.AlwaysUsePreemptibleInstances": true,
+ "Containers.CloudVMs": false,
+ "Containers.CrunchRunArgumentsList": false,
+ "Containers.CrunchRunCommand": false,
+ "Containers.DefaultKeepCacheRAM": true,
+ "Containers.DispatchPrivateKey": false,
+ "Containers.LocalKeepBlobBuffersPerVCPU": false,
+ "Containers.LocalKeepLogsToContainerLog": false,
+ "Containers.Logging": false,
+ "Containers.LogReuseDecisions": false,
+ "Containers.LSF": false,
+ "Containers.MaxDispatchAttempts": false,
+ "Containers.MaximumPriceFactor": true,
+ "Containers.MaxRetryAttempts": true,
+ "Containers.MinRetryPeriod": true,
+ "Containers.PreemptiblePriceFactor": false,
+ "Containers.ReserveExtraRAM": true,
+ "Containers.RuntimeEngine": true,
+ "Containers.ShellAccess": true,
+ "Containers.ShellAccess.Admin": true,
+ "Containers.ShellAccess.User": true,
+ "Containers.SLURM": false,
+ "Containers.StaleLockTimeout": false,
+ "Containers.SupportedDockerImageFormats": true,
+ "Containers.SupportedDockerImageFormats.*": true,
+ "InstanceTypes": true,
+ "InstanceTypes.*": true,
+ "InstanceTypes.*.*": true,
+ "InstanceTypes.*.*.*": true,
+ "Login": true,
+ "Login.Google": true,
+ "Login.Google.AlternateEmailAddresses": false,
"Login.Google.AuthenticationRequestParameters": false,
"Login.Google.ClientID": false,
"Login.Google.ClientSecret": false,
@@ -204,14 +200,6 @@ var whitelist = map[string]bool{
"Login.TokenLifetime": false,
"Login.TrustedClients": false,
"Login.TrustPrivateNetworks": false,
- "Mail": true,
- "Mail.EmailFrom": false,
- "Mail.IssueReporterEmailFrom": false,
- "Mail.IssueReporterEmailTo": false,
- "Mail.MailchimpAPIKey": false,
- "Mail.MailchimpListID": false,
- "Mail.SendUserSetupNotificationEmail": false,
- "Mail.SupportEmailAddress": true,
"ManagementToken": false,
"PostgreSQL": false,
"RemoteClusters": true,
@@ -223,6 +211,8 @@ var whitelist = map[string]bool{
"RemoteClusters.*.Scheme": true,
"Services": true,
"Services.*": true,
+ "Services.*.ExternalPortMax": false,
+ "Services.*.ExternalPortMin": false,
"Services.*.ExternalURL": true,
"Services.*.InternalURLs": false,
"StorageClasses": true,
@@ -243,7 +233,6 @@ var whitelist = map[string]bool{
"Users.AutoAdminFirstUser": false,
"Users.AutoAdminUserWithEmail": false,
"Users.AutoSetupNewUsers": false,
- "Users.AutoSetupNewUsersWithRepository": false,
"Users.AutoSetupNewUsersWithVmUUID": false,
"Users.AutoSetupUsernameBlacklist": false,
"Users.CanCreateRoleGroups": true,
@@ -253,6 +242,8 @@ var whitelist = map[string]bool{
"Users.NewUsersAreActive": false,
"Users.PreferDomainForUsername": false,
"Users.RoleGroupsVisibleToAll": false,
+ "Users.SendUserSetupNotificationEmail": false,
+ "Users.SupportEmailAddress": true,
"Users.SyncIgnoredGroups": true,
"Users.SyncRequiredGroups": true,
"Users.SyncUserAccounts": true,
diff --git a/lib/config/load.go b/lib/config/load.go
index d504f7796c..ba1bf27cb3 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -21,9 +21,9 @@ import (
"strings"
"time"
+ "dario.cat/mergo"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/ghodss/yaml"
- "github.com/imdario/mergo"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
@@ -48,7 +48,6 @@ type Loader struct {
CrunchDispatchSlurmPath string
WebsocketPath string
KeepproxyPath string
- GitHttpdPath string
KeepBalancePath string
configdata []byte
@@ -88,7 +87,6 @@ func (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {
flagset.StringVar(&ldr.CrunchDispatchSlurmPath, "legacy-crunch-dispatch-slurm-config", defaultCrunchDispatchSlurmConfigPath, "Legacy crunch-dispatch-slurm configuration `file`")
flagset.StringVar(&ldr.WebsocketPath, "legacy-ws-config", defaultWebsocketConfigPath, "Legacy arvados-ws configuration `file`")
flagset.StringVar(&ldr.KeepproxyPath, "legacy-keepproxy-config", defaultKeepproxyConfigPath, "Legacy keepproxy configuration `file`")
- flagset.StringVar(&ldr.GitHttpdPath, "legacy-git-httpd-config", defaultGitHttpdConfigPath, "Legacy arvados-git-httpd configuration `file`")
flagset.StringVar(&ldr.KeepBalancePath, "legacy-keepbalance-config", defaultKeepBalanceConfigPath, "Legacy keep-balance configuration `file`")
flagset.BoolVar(&ldr.SkipLegacy, "skip-legacy", false, "Don't load legacy config files")
}
@@ -168,9 +166,6 @@ func (ldr *Loader) MungeLegacyConfigArgs(lgr logrus.FieldLogger, args []string,
if legacyConfigArg != "-legacy-keepproxy-config" {
ldr.KeepproxyPath = ""
}
- if legacyConfigArg != "-legacy-git-httpd-config" {
- ldr.GitHttpdPath = ""
- }
if legacyConfigArg != "-legacy-keepbalance-config" {
ldr.KeepBalancePath = ""
}
@@ -296,7 +291,6 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
ldr.loadOldCrunchDispatchSlurmConfig,
ldr.loadOldWebsocketConfig,
ldr.loadOldKeepproxyConfig,
- ldr.loadOldGitHttpdConfig,
ldr.loadOldKeepBalanceConfig,
)
}
@@ -348,7 +342,7 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
ldr.checkUnlistedKeepstores(cc),
ldr.checkLocalKeepBlobBuffers(cc),
ldr.checkStorageClasses(cc),
- ldr.checkCUDAVersions(cc),
+ ldr.checkGPUVersions(cc),
// TODO: check non-empty Rendezvous on
// services other than Keepstore
} {
@@ -552,19 +546,15 @@ func (ldr *Loader) checkStorageClasses(cc arvados.Cluster) error {
return nil
}
-func (ldr *Loader) checkCUDAVersions(cc arvados.Cluster) error {
+func (ldr *Loader) checkGPUVersions(cc arvados.Cluster) error {
for _, it := range cc.InstanceTypes {
- if it.CUDA.DeviceCount == 0 {
+ if it.GPU.DeviceCount == 0 {
continue
}
- _, err := strconv.ParseFloat(it.CUDA.DriverVersion, 64)
- if err != nil {
- return fmt.Errorf("InstanceType %q has invalid CUDA.DriverVersion %q, expected format X.Y (%v)", it.Name, it.CUDA.DriverVersion, err)
- }
- _, err = strconv.ParseFloat(it.CUDA.HardwareCapability, 64)
+ _, err := strconv.ParseFloat(it.GPU.DriverVersion, 64)
if err != nil {
- return fmt.Errorf("InstanceType %q has invalid CUDA.HardwareCapability %q, expected format X.Y (%v)", it.Name, it.CUDA.HardwareCapability, err)
+ return fmt.Errorf("InstanceType %q has invalid GPU.DriverVersion %q, expected format X.Y (%v)", it.Name, it.GPU.DriverVersion, err)
}
}
return nil
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
index 75efc6a35a..28afb3dbdd 100644
--- a/lib/config/load_test.go
+++ b/lib/config/load_test.go
@@ -914,3 +914,22 @@ func (s *LoadSuite) TestLoadSSHKey(c *check.C) {
_, err = LoadSSHKey("file://" + cwd + "/../dispatchcloud/test/sshkey_dispatch")
c.Check(err, check.IsNil)
}
+
+func (s *LoadSuite) TestLoadSSHKeyTypes(c *check.C) {
+ for _, format := range []string{"PEM", "RFC4716", "PKCS8"} {
+ for _, keytype := range []string{"dsa", "ecdsa", "ed25519", "rsa"} {
+ c.Logf("=== keytype %s", keytype)
+ if keytype == "dsa" && format != "PEM" {
+ c.Logf("... skipping due to lack of support in stdlib")
+ continue
+ }
+ tmpdir := c.MkDir()
+ buf, err := exec.Command("ssh-keygen", "-N", "", "-t", keytype, "-m", format, "-f", tmpdir+"/key").CombinedOutput()
+ if !c.Check(err, check.IsNil, check.Commentf("(keytype %s, format %s) %s", keytype, format, buf)) {
+ continue
+ }
+ _, err = LoadSSHKey("file://" + tmpdir + "/key")
+ c.Check(err, check.IsNil, check.Commentf("LoadSSHKey failed on keytype %s in format %s", keytype, format))
+ }
+ }
+}
diff --git a/lib/controller/federation.go b/lib/controller/federation.go
index 93b8315a63..1254f3c7ee 100644
--- a/lib/controller/federation.go
+++ b/lib/controller/federation.go
@@ -156,7 +156,14 @@ func (h *Handler) validateAPItoken(req *http.Request, token string) (*CurrentUse
}
user.Authorization.APIToken = token
var scopes string
- err = db.QueryRowContext(req.Context(), `SELECT api_client_authorizations.uuid, api_client_authorizations.scopes, users.uuid FROM api_client_authorizations JOIN users on api_client_authorizations.user_id=users.id WHERE api_token=$1 AND (expires_at IS NULL OR expires_at > current_timestamp AT TIME ZONE 'UTC') LIMIT 1`, token).Scan(&user.Authorization.UUID, &scopes, &user.UUID)
+ err = db.QueryRowContext(req.Context(), `
+ SELECT api_client_authorizations.uuid, api_client_authorizations.scopes, users.uuid
+ FROM api_client_authorizations
+ JOIN users on api_client_authorizations.user_id=users.id
+ WHERE api_token=$1
+ AND (expires_at IS NULL OR expires_at > current_timestamp AT TIME ZONE 'UTC')
+ AND (refreshes_at IS NULL OR refreshes_at > current_timestamp AT TIME ZONE 'UTC')
+ LIMIT 1`, token).Scan(&user.Authorization.UUID, &scopes, &user.UUID)
if err == sql.ErrNoRows {
ctxlog.FromContext(req.Context()).Debugf("validateAPItoken(%s): not found in database", token)
return nil, false, nil
@@ -203,10 +210,10 @@ func (h *Handler) createAPItoken(req *http.Request, userUUID string, scopes []st
`INSERT INTO api_client_authorizations
(uuid, api_token, expires_at, scopes,
user_id,
-api_client_id, created_at, updated_at)
+created_at, updated_at)
VALUES ($1, $2, CURRENT_TIMESTAMP AT TIME ZONE 'UTC' + INTERVAL '2 weeks', $3,
(SELECT id FROM users WHERE users.uuid=$4 LIMIT 1),
-0, CURRENT_TIMESTAMP AT TIME ZONE 'UTC', CURRENT_TIMESTAMP AT TIME ZONE 'UTC')`,
+CURRENT_TIMESTAMP AT TIME ZONE 'UTC', CURRENT_TIMESTAMP AT TIME ZONE 'UTC')`,
uuid, token, string(scopesjson), userUUID)
if err != nil {
diff --git a/lib/controller/federation/conn.go b/lib/controller/federation/conn.go
index 949cc56dd2..d5a3429b8b 100644
--- a/lib/controller/federation/conn.go
+++ b/lib/controller/federation/conn.go
@@ -21,6 +21,7 @@ import (
"git.arvados.org/arvados.git/lib/controller/localdb"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
@@ -408,6 +409,10 @@ func (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.Untrash
return conn.chooseBackend(options.UUID).CollectionUntrash(ctx, options)
}
+func (conn *Conn) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {
+ return conn.local.ComputedPermissionList(ctx, options)
+}
+
func (conn *Conn) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
return conn.generated_ContainerList(ctx, options)
}
@@ -440,6 +445,14 @@ func (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOption
return conn.chooseBackend(options.UUID).ContainerUnlock(ctx, options)
}
+func (conn *Conn) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {
+ if len(options.Target) >= 29 && options.Target[27] == '-' && arvadosclient.UUIDMatch(options.Target[:27]) {
+ return conn.chooseBackend(options.Target[:27]).ContainerHTTPProxy(ctx, options)
+ } else {
+ return conn.local.ContainerHTTPProxy(ctx, options)
+ }
+}
+
func (conn *Conn) ContainerSSH(ctx context.Context, options arvados.ContainerSSHOptions) (arvados.ConnectionResponse, error) {
return conn.chooseBackend(options.UUID).ContainerSSH(ctx, options)
}
@@ -605,26 +618,6 @@ func (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions)
return conn.chooseBackend(options.UUID).LogDelete(ctx, options)
}
-func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
- return conn.generated_SpecimenList(ctx, options)
-}
-
-func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
- return conn.chooseBackend(options.ClusterID).SpecimenCreate(ctx, options)
-}
-
-func (conn *Conn) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
- return conn.chooseBackend(options.UUID).SpecimenUpdate(ctx, options)
-}
-
-func (conn *Conn) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
- return conn.chooseBackend(options.UUID).SpecimenGet(ctx, options)
-}
-
-func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
- return conn.chooseBackend(options.UUID).SpecimenDelete(ctx, options)
-}
-
func (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
return conn.local.SysTrashSweep(ctx, options)
}
diff --git a/lib/controller/federation/generate.go b/lib/controller/federation/generate.go
index 2dc2918f79..079d908f0d 100644
--- a/lib/controller/federation/generate.go
+++ b/lib/controller/federation/generate.go
@@ -53,7 +53,7 @@ func main() {
defer out.Close()
out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
- for _, t := range []string{"AuthorizedKey", "Container", "ContainerRequest", "Group", "Specimen", "User", "Link", "Log", "APIClientAuthorization"} {
+ for _, t := range []string{"AuthorizedKey", "Container", "ContainerRequest", "Group", "User", "Link", "Log", "APIClientAuthorization"} {
_, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
if err != nil {
panic(err)
diff --git a/lib/controller/federation/generated.go b/lib/controller/federation/generated.go
index 8c8666fea1..95f2f650fc 100755
--- a/lib/controller/federation/generated.go
+++ b/lib/controller/federation/generated.go
@@ -181,47 +181,6 @@ func (conn *Conn) generated_GroupList(ctx context.Context, options arvados.ListO
return merged, err
}
-func (conn *Conn) generated_SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
- var mtx sync.Mutex
- var merged arvados.SpecimenList
- var needSort atomic.Value
- needSort.Store(false)
- err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
- options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
- cl, err := backend.SpecimenList(ctx, options)
- if err != nil {
- return nil, err
- }
- mtx.Lock()
- defer mtx.Unlock()
- if len(merged.Items) == 0 {
- merged = cl
- } else if len(cl.Items) > 0 {
- merged.Items = append(merged.Items, cl.Items...)
- needSort.Store(true)
- }
- uuids := make([]string, 0, len(cl.Items))
- for _, item := range cl.Items {
- uuids = append(uuids, item.UUID)
- }
- return uuids, nil
- })
- if needSort.Load().(bool) {
- // Apply the default/implied order, "modified_at desc"
- sort.Slice(merged.Items, func(i, j int) bool {
- mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
- return mj.Before(mi)
- })
- }
- if merged.Items == nil {
- // Return empty results as [], not null
- // (https://github.com/golang/go/issues/27589 might be
- // a better solution in the future)
- merged.Items = []arvados.Specimen{}
- }
- return merged, err
-}
-
func (conn *Conn) generated_UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
var mtx sync.Mutex
var merged arvados.UserList
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
index 7c4bb0912f..6ccd2ac15a 100644
--- a/lib/controller/handler.go
+++ b/lib/controller/handler.go
@@ -25,6 +25,7 @@ import (
"git.arvados.org/arvados.git/lib/controller/router"
"git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
"git.arvados.org/arvados.git/sdk/go/httpserver"
@@ -40,11 +41,11 @@ type Handler struct {
setupOnce sync.Once
federation *federation.Conn
handlerStack http.Handler
+ router http.Handler
proxy *proxy
secureClient *http.Client
insecureClient *http.Client
dbConnector ctrlctx.DBConnector
- limitLogCreate chan struct{}
cache map[string]*cacheEnt
}
@@ -56,15 +57,24 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// the incoming request has a double slash. Some
// clients (including the Go standard library) change
// the request method to GET when following a 301
- // redirect if the original method was not HEAD
- // (RFC7231 6.4.2 specifically allows this in the case
- // of POST). Thus "POST //foo" gets misdirected to
- // "GET /foo". To avoid this, eliminate double slashes
+ // redirect if the original method was not HEAD (RFC
+ // 7231 6.4.2 specifically allows this in the case of
+ // POST). Thus "POST //foo" gets misdirected to "GET
+ // /foo". To avoid this, eliminate double slashes
// before passing the request to ServeMux.
for strings.Contains(req.URL.Path, "//") {
req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
}
}
+ if len(req.Host) > 28 && arvadosclient.UUIDMatch(req.Host[:27]) && req.Host[27] == '-' {
+ // Requests to a vhost like
+ // "{ctr-uuid}-{port}.example.com" go straight to
+ // controller-specific routing, bypassing
+ // handlerStack's logic about proxying
+ // non-controller-specific paths through to RailsAPI.
+ h.router.ServeHTTP(w, req)
+ return
+ }
h.handlerStack.ServeHTTP(w, req)
}
@@ -109,8 +119,9 @@ func (h *Handler) setup() {
}()
oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.dbConnector.GetDB)
h.federation = federation.New(h.BackgroundContext, h.Cluster, &healthFuncs, h.dbConnector.GetDB)
- rtr := router.New(h.federation, router.Config{
- MaxRequestSize: h.Cluster.API.MaxRequestSize,
+ h.router = router.New(h.federation, router.Config{
+ ContainerWebServices: &h.Cluster.Services.ContainerWebServices,
+ MaxRequestSize: h.Cluster.API.MaxRequestSize,
WrapCalls: api.ComposeWrappers(
ctrlctx.WrapCallsInTransactions(h.dbConnector.GetDB),
oidcAuthorizer.WrapCalls,
@@ -126,31 +137,30 @@ func (h *Handler) setup() {
Prefix: "/_health/",
Routes: healthRoutes,
})
- mux.Handle("/arvados/v1/config", rtr)
- mux.Handle("/arvados/v1/vocabulary", rtr)
- mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr) // must come before .../users/
- mux.Handle("/arvados/v1/collections", rtr)
- mux.Handle("/arvados/v1/collections/", rtr)
- mux.Handle("/arvados/v1/users", rtr)
- mux.Handle("/arvados/v1/users/", rtr)
- mux.Handle("/arvados/v1/connect/", rtr)
- mux.Handle("/arvados/v1/container_requests", rtr)
- mux.Handle("/arvados/v1/container_requests/", rtr)
- mux.Handle("/arvados/v1/groups", rtr)
- mux.Handle("/arvados/v1/groups/", rtr)
- mux.Handle("/arvados/v1/links", rtr)
- mux.Handle("/arvados/v1/links/", rtr)
- mux.Handle("/arvados/v1/authorized_keys", rtr)
- mux.Handle("/arvados/v1/authorized_keys/", rtr)
- mux.Handle("/login", rtr)
- mux.Handle("/logout", rtr)
- mux.Handle("/arvados/v1/api_client_authorizations", rtr)
- mux.Handle("/arvados/v1/api_client_authorizations/", rtr)
+ mux.Handle("/arvados/v1/config", h.router)
+ mux.Handle("/arvados/v1/vocabulary", h.router)
+ mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, h.router) // must come before .../users/
+ mux.Handle("/arvados/v1/collections", h.router)
+ mux.Handle("/arvados/v1/collections/", h.router)
+ mux.Handle("/arvados/v1/users", h.router)
+ mux.Handle("/arvados/v1/users/", h.router)
+ mux.Handle("/arvados/v1/connect/", h.router)
+ mux.Handle("/arvados/v1/container_requests", h.router)
+ mux.Handle("/arvados/v1/container_requests/", h.router)
+ mux.Handle("/arvados/v1/groups", h.router)
+ mux.Handle("/arvados/v1/groups/", h.router)
+ mux.Handle("/arvados/v1/links", h.router)
+ mux.Handle("/arvados/v1/links/", h.router)
+ mux.Handle("/arvados/v1/authorized_keys", h.router)
+ mux.Handle("/arvados/v1/authorized_keys/", h.router)
+ mux.Handle("/login", h.router)
+ mux.Handle("/logout", h.router)
+ mux.Handle("/arvados/v1/api_client_authorizations", h.router)
+ mux.Handle("/arvados/v1/api_client_authorizations/", h.router)
hs := http.NotFoundHandler()
hs = prepend(hs, h.proxyRailsAPI)
- hs = prepend(hs, h.routeContainerEndpoints(rtr))
- hs = prepend(hs, h.limitLogCreateRequests)
+ hs = prepend(hs, h.routeContainerEndpoints(h.router))
hs = h.setupProxyRemoteCluster(hs)
hs = prepend(hs, oidcAuthorizer.Middleware)
mux.Handle("/", hs)
@@ -164,12 +174,6 @@ func (h *Handler) setup() {
ic.CheckRedirect = neverRedirect
h.insecureClient = &ic
- logCreateLimit := int(float64(h.Cluster.API.MaxConcurrentRequests) * h.Cluster.API.LogCreateRequestFraction)
- if logCreateLimit == 0 && h.Cluster.API.LogCreateRequestFraction > 0 {
- logCreateLimit = 1
- }
- h.limitLogCreate = make(chan struct{}, logCreateLimit)
-
h.proxy = &proxy{
Name: "arvados-controller",
}
@@ -231,20 +235,6 @@ func (h *Handler) routeContainerEndpoints(rtr http.Handler) middlewareFunc {
}
}
-func (h *Handler) limitLogCreateRequests(w http.ResponseWriter, req *http.Request, next http.Handler) {
- if cap(h.limitLogCreate) > 0 && req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, "/arvados/v1/logs") {
- select {
- case h.limitLogCreate <- struct{}{}:
- defer func() { <-h.limitLogCreate }()
- next.ServeHTTP(w, req)
- default:
- http.Error(w, "Excess log messages", http.StatusServiceUnavailable)
- }
- return
- }
- next.ServeHTTP(w, req)
-}
-
// cacheEnt implements a basic stale-while-revalidate cache, suitable
// for the Arvados discovery document.
type cacheEnt struct {
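The comment describes a stale-while-revalidate cache; a minimal generic sketch of that pattern follows (not the actual cacheEnt implementation, whose fields fall outside this hunk):

package swr

import (
	"sync"
	"time"
)

// cache is a minimal stale-while-revalidate sketch: Get returns the
// cached value immediately once one exists, and kicks off at most one
// background refresh when the entry is older than maxStale.
type cache struct {
	mtx        sync.Mutex
	value      interface{}
	fetchedAt  time.Time
	refreshing bool
}

func (c *cache) Get(maxStale time.Duration, fetch func() interface{}) interface{} {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if c.value == nil {
		// First call: fetch synchronously.
		c.value, c.fetchedAt = fetch(), time.Now()
	} else if time.Since(c.fetchedAt) > maxStale && !c.refreshing {
		// Stale: serve the old value, refresh in the background.
		c.refreshing = true
		go func() {
			v := fetch()
			c.mtx.Lock()
			c.value, c.fetchedAt, c.refreshing = v, time.Now(), false
			c.mtx.Unlock()
		}()
	}
	return c.value
}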
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
index eef0443b9a..189505def6 100644
--- a/lib/controller/handler_test.go
+++ b/lib/controller/handler_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"time"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
@@ -65,6 +66,25 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
func (s *HandlerSuite) TearDownTest(c *check.C) {
s.cancel()
+
+ // Wait for dblocks to be released. Otherwise, a subsequent
+ // test might time out waiting to acquire them.
+ timeout := time.After(10 * time.Second)
+ for _, locker := range []*dblock.DBLocker{dblock.TrashSweep, dblock.ContainerLogSweep} {
+ ok := make(chan struct{})
+ go func() {
+ if locker.Lock(context.Background(), s.handler.dbConnector.GetDB) {
+ locker.Unlock()
+ }
+ close(ok)
+ }()
+ select {
+ case <-timeout:
+ c.Log("timed out waiting for dblocks")
+ c.Fail()
+ case <-ok:
+ }
+ }
}
func (s *HandlerSuite) TestConfigExport(c *check.C) {
@@ -204,8 +224,12 @@ func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
// depending on flags.
var wantError, wantBadContent bool
s.railsSpy.Director = func(req *http.Request) {
+ <-holdReqs
if wantError {
- req.Method = "MAKE-COFFEE"
+ // The Passenger server hosting RailsAPI will drop HTTP requests with
+ // unrecognized method names. Make a request with a real method that
+ // RailsAPI doesn't implement.
+ req.Method = "TRACE"
} else if wantBadContent {
req.URL.Path = "/_health/ping"
req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
@@ -244,6 +268,22 @@ func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
getDDConcurrently(5, http.StatusOK, check.Commentf("error with warm cache")).Wait()
c.Check(countRailsReqs(), check.Equals, reqsBefore)
+ checkBackgroundRefresh := func(reqsExpected int) {
+ // There is no guarantee that a background refresh has
+ // progressed far enough that we can detect it
+ // directly (the first line of refresh() might not
+ // have run). So, to avoid false positives, we just
+ // need to poll until it happens.
+ for deadline := time.Now().Add(time.Second); countRailsReqs() == reqsBefore && time.Now().Before(deadline); {
+ c.Logf("countRailsReqs = %d", countRailsReqs())
+ time.Sleep(time.Second / 100)
+ }
+ // Similarly, to ensure there are no additional
+ // refreshes, we just need to wait.
+ time.Sleep(time.Second / 2)
+ c.Check(countRailsReqs(), check.Equals, reqsExpected)
+ }
+
// Error with stale cache => caller gets OK with stale data
// while the re-fetch is attempted in the background
refreshNow()
@@ -252,10 +292,10 @@ func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
holdReqs = make(chan struct{})
getDDConcurrently(5, http.StatusOK, check.Commentf("error with stale cache")).Wait()
close(holdReqs)
- // Only one attempt to re-fetch (holdReqs ensured the first
- // update took long enough for the last incoming request to
- // arrive)
- c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+ // After piling up 5 requests (holdReqs having ensured the
+ // first update took long enough for the last incoming request
+ // to arrive) there should be only one attempt to re-fetch.
+ checkBackgroundRefresh(reqsBefore + 1)
refreshNow()
wantError, wantBadContent = false, false
@@ -263,8 +303,7 @@ func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
holdReqs = make(chan struct{})
getDDConcurrently(5, http.StatusOK, check.Commentf("refresh cache after error condition clears")).Wait()
close(holdReqs)
- waitPendingUpdates()
- c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+ checkBackgroundRefresh(reqsBefore + 1)
// Make sure expireAfter is getting set
waitPendingUpdates()
@@ -568,8 +607,10 @@ func (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, ski
req.Header.Set("Authorization", "Bearer "+token)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
- c.Assert(resp.Code, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
+ if !c.Check(resp.Code, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String())) {
+ return
+ }
err = json.Unmarshal(resp.Body.Bytes(), &proxied)
c.Check(err, check.Equals, nil)
@@ -581,9 +622,11 @@ func (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, ski
}
resp2, err := client.Get(s.cluster.Services.RailsAPI.ExternalURL.String() + url + "/?api_token=" + token)
c.Check(err, check.Equals, nil)
- c.Assert(resp2.StatusCode, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the RailsAPI at %q", url))
defer resp2.Body.Close()
+ if !c.Check(resp2.StatusCode, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the RailsAPI at %q", url)) {
+ return
+ }
db, err := ioutil.ReadAll(resp2.Body)
c.Check(err, check.Equals, nil)
err = json.Unmarshal(db, &direct)
@@ -603,6 +646,12 @@ func (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, ski
check.Commentf("RailsAPI %s key %q's value %q differs from controller's %q.", direct["kind"], k, direct[k], val))
}
}
+
+ // The "href" field has been removed. We don't particularly
+ // care whether Rails returns it, as long as controller
+ // doesn't.
+ _, hasHref := proxied["href"]
+ c.Check(hasHref, check.Equals, false)
}
func (s *HandlerSuite) TestGetObjects(c *check.C) {
@@ -623,8 +672,7 @@ func (s *HandlerSuite) TestGetObjects(c *check.C) {
"api_client_authorization": {
"owner_uuid": "`+arvadostest.AdminUserUUID+`",
"created_by_ip_address": "::1",
- "last_used_by_ip_address": "::1",
- "default_owner_uuid": "`+arvadostest.AdminUserUUID+`"
+ "last_used_by_ip_address": "::1"
}
}`))
req.Header.Set("Authorization", "Bearer "+arvadostest.SystemRootToken)
@@ -638,19 +686,16 @@ func (s *HandlerSuite) TestGetObjects(c *check.C) {
c.Assert(auth.UUID, check.Not(check.Equals), "")
testCases := map[string]map[string]bool{
- "api_clients/" + arvadostest.TrustedWorkbenchAPIClientUUID: nil,
- "api_client_authorizations/" + auth.UUID: {"href": true, "modified_by_client_uuid": true, "modified_by_user_uuid": true},
- "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: {"href": true},
- "collections/" + arvadostest.CollectionWithUniqueWordsUUID: {"href": true},
+ "api_client_authorizations/" + auth.UUID: {"modified_by_client_uuid": true, "modified_by_user_uuid": true},
+ "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: nil,
+ "collections/" + arvadostest.CollectionWithUniqueWordsUUID: nil,
"containers/" + arvadostest.RunningContainerUUID: nil,
"container_requests/" + arvadostest.QueuedContainerRequestUUID: nil,
"groups/" + arvadostest.AProjectUUID: nil,
"keep_services/" + ksUUID: nil,
"links/" + arvadostest.ActiveUserCanReadAllUsersLinkUUID: nil,
- "logs/" + arvadostest.CrunchstatForRunningJobLogUUID: nil,
- "nodes/" + arvadostest.IdleNodeUUID: nil,
- "repositories/" + arvadostest.ArvadosRepoUUID: nil,
- "users/" + arvadostest.ActiveUserUUID: {"href": true},
+ "logs/" + arvadostest.CrunchstatForRunningContainerLogUUID: nil,
+ "users/" + arvadostest.ActiveUserUUID: nil,
"virtual_machines/" + arvadostest.TestVMUUID: nil,
"workflows/" + arvadostest.WorkflowWithDefinitionYAMLUUID: nil,
}
@@ -705,14 +750,14 @@ func (s *HandlerSuite) TestTrashSweep(c *check.C) {
func (s *HandlerSuite) TestContainerLogSweep(c *check.C) {
s.cluster.SystemRootToken = arvadostest.SystemRootToken
- s.cluster.Containers.Logging.SweepInterval = arvados.Duration(time.Second / 10)
+ s.cluster.Collections.TrashSweepInterval = arvados.Duration(2 * time.Second)
s.handler.CheckHealth()
ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
logentry, err := s.handler.federation.LogCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
"object_uuid": arvadostest.CompletedContainerUUID,
"event_type": "stderr",
"properties": map[string]interface{}{
- "text": "test trash sweep\n",
+ "text": "test container log sweep\n",
},
}})
c.Assert(err, check.IsNil)
@@ -766,59 +811,3 @@ func (s *HandlerSuite) TestLogActivity(c *check.C) {
c.Check(rows, check.Equals, 1, check.Commentf("expect 1 row for user uuid %s", userUUID))
}
}
-
-func (s *HandlerSuite) TestLogLimiting(c *check.C) {
- s.handler.Cluster.API.MaxConcurrentRequests = 2
- s.handler.Cluster.API.LogCreateRequestFraction = 0.5
-
- logreq := httptest.NewRequest("POST", "/arvados/v1/logs", strings.NewReader(`{
- "log": {
- "event_type": "test"
- }
- }`))
- logreq.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
-
- // Log create succeeds
- for i := 0; i < 2; i++ {
- resp := httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- var lg arvados.Log
- err := json.Unmarshal(resp.Body.Bytes(), &lg)
- c.Check(err, check.IsNil)
- c.Check(lg.UUID, check.Matches, "zzzzz-57u5n-.*")
- }
-
- // Pretend there's a log create in flight
- s.handler.limitLogCreate <- struct{}{}
-
- // Log create should be rejected now
- resp := httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-
- // Other requests still succeed
- req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
- req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- var u arvados.User
- err := json.Unmarshal(resp.Body.Bytes(), &u)
- c.Check(err, check.IsNil)
- c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
-
- // log create still fails
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-
- // Pretend in-flight log is done
- <-s.handler.limitLogCreate
-
- // log create succeeds again
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusOK)
-
-}
diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
index 45f35a6d2e..28c3e409f7 100644
--- a/lib/controller/integration_test.go
+++ b/lib/controller/integration_test.go
@@ -299,6 +299,38 @@ func (s *IntegrationSuite) TestRemoteUserAndTokenCacheRace(c *check.C) {
wg2.Wait()
}
+// After using a token issued by z1111 to call the Logout endpoint on
+// z2222, the token should be expired and rejected by both z1111 and
+// z2222.
+func (s *IntegrationSuite) TestLogoutUsingLoginCluster(c *check.C) {
+ conn1 := s.super.Conn("z1111")
+ conn2 := s.super.Conn("z2222")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user1@example.com", true)
+ userctx2, ac2, _ := s.super.ClientsWithToken("z2222", ac1.AuthToken)
+ c.Assert(ac2.AuthToken, check.Matches, `^v2/z1111-.*`)
+ _, err := conn1.CollectionCreate(userctx2, arvados.CreateOptions{})
+ c.Assert(err, check.IsNil)
+ _, err = conn2.CollectionCreate(userctx2, arvados.CreateOptions{})
+ c.Assert(err, check.IsNil)
+
+ _, err = conn2.Logout(userctx2, arvados.LogoutOptions{})
+ c.Assert(err, check.IsNil)
+
+ _, err = conn1.CollectionCreate(userctx2, arvados.CreateOptions{})
+ se, ok := err.(httpserver.HTTPStatusError)
+ if c.Check(ok, check.Equals, true, check.Commentf("after logging out, token should have been rejected by login cluster")) {
+ c.Check(se.HTTPStatus(), check.Equals, 401)
+ }
+
+ _, err = conn2.CollectionCreate(userctx2, arvados.CreateOptions{})
+ se, ok = err.(httpserver.HTTPStatusError)
+ if c.Check(ok, check.Equals, true, check.Commentf("after logging out, token should have been rejected by remote cluster")) {
+ c.Check(se.HTTPStatus(), check.Equals, 401)
+}
+}
+
func (s *IntegrationSuite) TestS3WithFederatedToken(c *check.C) {
if _, err := exec.LookPath("s3cmd"); err != nil {
c.Skip("s3cmd not in PATH")
@@ -544,12 +576,12 @@ func (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {
c.Check(err, check.IsNil)
c.Check(cr.UUID, check.Matches, "z2222-.*")
- c.Log("...post with good cached token ('OAuth2 ...')")
+ c.Log("...post with good cached token ('Bearer ...')")
cr = arvados.ContainerRequest{}
req, err = http.NewRequest("POST", "https://"+ac2.APIHost+"/arvados/v1/container_requests", bytes.NewReader(body.Bytes()))
c.Assert(err, check.IsNil)
req.Header.Set("Content-Type", "application/json")
- req.Header.Set("Authorization", "OAuth2 "+ac2.AuthToken)
+ req.Header.Set("Authorization", "Bearer "+ac2.AuthToken)
resp, err = arvados.InsecureHTTPClient.Do(req)
c.Assert(err, check.IsNil)
defer resp.Body.Close()
@@ -604,8 +636,6 @@ func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
coll, err := conn1.CollectionCreate(userctx1, arvados.CreateOptions{})
c.Check(err, check.IsNil)
- specimen, err := conn1.SpecimenCreate(userctx1, arvados.CreateOptions{})
- c.Check(err, check.IsNil)
tests := []struct {
path string
@@ -618,8 +648,6 @@ func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
{"/arvados/v1/nonexistant", true, true},
{"/arvados/v1/collections/" + coll.UUID, false, false},
{"/arvados/v1/collections/" + coll.UUID, true, false},
- {"/arvados/v1/specimens/" + specimen.UUID, false, false},
- {"/arvados/v1/specimens/" + specimen.UUID, true, false},
// new code path (lib/controller/router etc) - single-cluster request
{"/arvados/v1/collections/z1111-4zz18-0123456789abcde", false, true},
{"/arvados/v1/collections/z1111-4zz18-0123456789abcde", true, true},
@@ -627,8 +655,8 @@ func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
{"/arvados/v1/collections/z2222-4zz18-0123456789abcde", false, true},
{"/arvados/v1/collections/z2222-4zz18-0123456789abcde", true, true},
// old code path (proxyRailsAPI) - single-cluster request
- {"/arvados/v1/specimens/z1111-j58dm-0123456789abcde", false, true},
- {"/arvados/v1/specimens/z1111-j58dm-0123456789abcde", true, true},
+ {"/arvados/v1/containers/z1111-dz642-0123456789abcde", false, true},
+ {"/arvados/v1/containers/z1111-dz642-0123456789abcde", true, true},
// old code path (setupProxyRemoteCluster) - federated request
{"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde", false, true},
{"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde", true, true},
@@ -802,7 +830,6 @@ func (s *IntegrationSuite) TestFederatedApiClientAuthHandling(c *check.C) {
},
)
c.Assert(err, check.IsNil)
- c.Assert(resp.APIClientID, check.Not(check.Equals), 0)
newTok := resp.TokenV2()
c.Assert(newTok, check.Not(check.Equals), "")
@@ -830,7 +857,6 @@ func (s *IntegrationSuite) TestFederatedApiClientAuthHandling(c *check.C) {
// Test for bug #18076
func (s *IntegrationSuite) TestStaleCachedUserRecord(c *check.C) {
rootctx1, _, _ := s.super.RootClients("z1111")
- _, rootclnt3, _ := s.super.RootClients("z3333")
conn1 := s.super.Conn("z1111")
conn3 := s.super.Conn("z3333")
@@ -842,92 +868,69 @@ func (s *IntegrationSuite) TestStaleCachedUserRecord(c *check.C) {
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
- for testCaseNr, testCase := range []struct {
- name string
- withRepository bool
- }{
- {"User without local repository", false},
- {"User with local repository", true},
- } {
- c.Log(c.TestName() + " " + testCase.name)
- // Create some users, request them on the federated cluster so they're cached.
- var users []arvados.User
- for userNr := 0; userNr < 2; userNr++ {
- _, _, _, user := s.super.UserClients("z1111",
- rootctx1,
- c,
- conn1,
- fmt.Sprintf("user%d%d@example.com", testCaseNr, userNr),
- true)
- c.Assert(user.Username, check.Not(check.Equals), "")
- users = append(users, user)
-
- lst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
- c.Assert(err, check.Equals, nil)
- userFound := false
- for _, fedUser := range lst.Items {
- if fedUser.UUID == user.UUID {
- c.Assert(fedUser.Username, check.Equals, user.Username)
- userFound = true
- break
- }
- }
- c.Assert(userFound, check.Equals, true)
-
- if testCase.withRepository {
- var repo interface{}
- err = rootclnt3.RequestAndDecode(
- &repo, "POST", "arvados/v1/repositories", nil,
- map[string]interface{}{
- "repository": map[string]string{
- "name": fmt.Sprintf("%s/test", user.Username),
- "owner_uuid": user.UUID,
- },
- },
- )
- c.Assert(err, check.IsNil)
- }
- }
-
- // Swap the usernames
- _, err := conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
- UUID: users[0].UUID,
- Attrs: map[string]interface{}{
- "username": "",
- },
- })
- c.Assert(err, check.Equals, nil)
- _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
- UUID: users[1].UUID,
- Attrs: map[string]interface{}{
- "username": users[0].Username,
- },
- })
- c.Assert(err, check.Equals, nil)
- _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
- UUID: users[0].UUID,
- Attrs: map[string]interface{}{
- "username": users[1].Username,
- },
- })
- c.Assert(err, check.Equals, nil)
+ // Create some users, request them on the federated cluster so they're cached.
+ var users []arvados.User
+ for userNr := 0; userNr < 2; userNr++ {
+ _, _, _, user := s.super.UserClients("z1111",
+ rootctx1,
+ c,
+ conn1,
+ fmt.Sprintf("user0%d@example.com", userNr),
+ true)
+ c.Assert(user.Username, check.Not(check.Equals), "")
+ users = append(users, user)
- // Re-request the list on the federated cluster & check for updates
lst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
c.Assert(err, check.Equals, nil)
- var user0Found, user1Found bool
- for _, user := range lst.Items {
- if user.UUID == users[0].UUID {
- user0Found = true
- c.Assert(user.Username, check.Equals, users[1].Username)
- } else if user.UUID == users[1].UUID {
- user1Found = true
- c.Assert(user.Username, check.Equals, users[0].Username)
+ userFound := false
+ for _, fedUser := range lst.Items {
+ if fedUser.UUID == user.UUID {
+ c.Assert(fedUser.Username, check.Equals, user.Username)
+ userFound = true
+ break
}
}
- c.Assert(user0Found, check.Equals, true)
- c.Assert(user1Found, check.Equals, true)
+ c.Assert(userFound, check.Equals, true)
}
+
+ // Swap the usernames
+ _, err := conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[0].UUID,
+ Attrs: map[string]interface{}{
+ "username": "",
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+ _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[1].UUID,
+ Attrs: map[string]interface{}{
+ "username": users[0].Username,
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+ _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[0].UUID,
+ Attrs: map[string]interface{}{
+ "username": users[1].Username,
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+
+ // Re-request the list on the federated cluster & check for updates
+ lst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+ c.Assert(err, check.Equals, nil)
+ var user0Found, user1Found bool
+ for _, user := range lst.Items {
+ if user.UUID == users[0].UUID {
+ user0Found = true
+ c.Assert(user.Username, check.Equals, users[1].Username)
+ } else if user.UUID == users[1].UUID {
+ user1Found = true
+ c.Assert(user.Username, check.Equals, users[0].Username)
+ }
+ }
+ c.Assert(user0Found, check.Equals, true)
+ c.Assert(user1Found, check.Equals, true)
}
// Test for bug #16263
@@ -1338,11 +1341,13 @@ func (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token stri
c.Check(ctr.ExitCode, check.Equals, expectExitCode)
err = ac.RequestAndDecode(&outcoll, "GET", "/arvados/v1/collections/"+cr.OutputUUID, nil, nil)
c.Assert(err, check.IsNil)
- c.Check(allStatus, check.Matches, `Queued, waiting for dispatch\n`+
- `(Queued, waiting.*\n)*`+
- `(Locked, waiting for dispatch\n)?`+
- `(Locked, waiting for new instance to be ready\n)?`+
- `(Locked, preparing runtime environment\n)?`+
+ c.Check(allStatus, check.Matches, `(Queued, Waiting in queue\.\n)?`+
+ // Occasionally the dispatcher will
+ // unlock/retry, and we get state/status from
+ // database/dispatcher via separate API calls,
+ // so we can also see "Queued, preparing
+ // runtime environment".
+ `((Queued|Locked), (Waiting .*|Container is allocated to an instance and preparing to run\.)\n)*`+
`(Running, \n)?`+
`Complete, \n`)
}
@@ -1350,3 +1355,96 @@ func (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token stri
checkwebdavlogs(cr)
return outcoll, logcfs
}
+
+func (s *IntegrationSuite) TestCUDAContainerReuse(c *check.C) {
+ // Check that the legacy "CUDA" API still works.
+
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+
+ crInput := map[string]interface{}{
+ "command": []string{"echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"},
+ "cwd": "test",
+ "environment": map[string]interface{}{},
+ "output_path": "test",
+ "output_glob": []string{},
+ "container_image": "fa3c1a9cb6783f85f2ecda037e07b8c3+167",
+ "mounts": map[string]interface{}{},
+ "runtime_constraints": map[string]interface{}{
+ "cuda": map[string]interface{}{
+ "device_count": 1,
+ "driver_version": "11.0",
+ "hardware_capability": "9.0",
+ },
+ "ram": 12000000000,
+ "vcpus": 4,
+ },
+ "state": "Committed",
+ }
+
+ var outCR arvados.ContainerRequest
+ err := ac1.RequestAndDecode(&outCR, "POST", "/arvados/v1/container_requests", nil,
+ map[string]interface{}{"container_request": crInput})
+ c.Check(err, check.IsNil)
+
+ c.Check(outCR.RuntimeConstraints.GPU.Stack, check.Equals, "cuda")
+ c.Check(outCR.RuntimeConstraints.GPU.DriverVersion, check.Equals, "11.0")
+ c.Check(outCR.RuntimeConstraints.GPU.HardwareTarget, check.DeepEquals, []string{"9.0"})
+ c.Check(outCR.RuntimeConstraints.GPU.DeviceCount, check.Equals, 1)
+ c.Check(outCR.RuntimeConstraints.GPU.VRAM, check.Equals, int64(0))
+
+ var outCR2 arvados.ContainerRequest
+ err = ac1.RequestAndDecode(&outCR2, "POST", "/arvados/v1/container_requests", nil,
+ map[string]interface{}{"container_request": crInput})
+ c.Check(err, check.IsNil)
+
+ c.Check(outCR.ContainerUUID, check.Equals, outCR2.ContainerUUID)
+}
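
The test above exercises the server-side translation of the deprecated "cuda" runtime constraint into the new "gpu" field. A minimal sketch of that mapping, inferred from the request body and the GPU fields checked above; the type and function names here are illustrative, not the actual controller code:

// gpuFromLegacyCUDA maps the deprecated cuda constraint fields onto
// the new gpu constraint, as suggested by TestCUDAContainerReuse.
// (Hypothetical sketch; field names match the checks above.)
type gpuConstraints struct {
	Stack          string
	DeviceCount    int
	DriverVersion  string
	HardwareTarget []string
	VRAM           int64
}

func gpuFromLegacyCUDA(deviceCount int, driverVersion, hardwareCapability string) gpuConstraints {
	return gpuConstraints{
		Stack:          "cuda",
		DeviceCount:    deviceCount,
		DriverVersion:  driverVersion,
		HardwareTarget: []string{hardwareCapability},
		VRAM:           0, // the legacy API has no VRAM field, hence the int64(0) check above
	}
}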
+
+func (s *IntegrationSuite) TestGPUContainerReuse(c *check.C) {
+ // Test container reuse using the "GPU" API
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+
+ crInput := map[string]interface{}{
+ "command": []string{"echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"},
+ "cwd": "test",
+ "environment": map[string]interface{}{},
+ "output_path": "test",
+ "output_glob": []string{},
+ "container_image": "fa3c1a9cb6783f85f2ecda037e07b8c3+167",
+ "mounts": map[string]interface{}{},
+ "runtime_constraints": map[string]interface{}{
+ "gpu": map[string]interface{}{
+ "stack": "cuda",
+ "device_count": 1,
+ "driver_version": "11.0",
+ "hardware_target": []string{"9.0"},
+ "vram": 8000000000,
+ },
+ "ram": 12000000000,
+ "vcpus": 4,
+ },
+ "state": "Committed",
+ }
+
+ var outCR arvados.ContainerRequest
+ err := ac1.RequestAndDecode(&outCR, "POST", "/arvados/v1/container_requests", nil,
+ map[string]interface{}{"container_request": crInput})
+ c.Check(err, check.IsNil)
+
+ c.Check(outCR.RuntimeConstraints.GPU.Stack, check.Equals, "cuda")
+ c.Check(outCR.RuntimeConstraints.GPU.DriverVersion, check.Equals, "11.0")
+ c.Check(outCR.RuntimeConstraints.GPU.HardwareTarget, check.DeepEquals, []string{"9.0"})
+ c.Check(outCR.RuntimeConstraints.GPU.DeviceCount, check.Equals, 1)
+ c.Check(outCR.RuntimeConstraints.GPU.VRAM, check.Equals, int64(8000000000))
+
+ var outCR2 arvados.ContainerRequest
+ err = ac1.RequestAndDecode(&outCR2, "POST", "/arvados/v1/container_requests", nil,
+ map[string]interface{}{"container_request": crInput})
+ c.Check(err, check.IsNil)
+
+ c.Check(outCR.ContainerUUID, check.Equals, outCR2.ContainerUUID)
+}
diff --git a/lib/controller/localdb/collection.go b/lib/controller/localdb/collection.go
index 581595e5e3..a24f561372 100644
--- a/lib/controller/localdb/collection.go
+++ b/lib/controller/localdb/collection.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
@@ -74,6 +75,9 @@ func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptio
if opts.Attrs, err = conn.applyReplaceFilesOption(ctx, "", opts.Attrs, opts.ReplaceFiles); err != nil {
return arvados.Collection{}, err
}
+ if opts.Attrs, err = conn.applyReplaceSegmentsOption(ctx, "", opts.Attrs, opts.ReplaceSegments); err != nil {
+ return arvados.Collection{}, err
+ }
resp, err := conn.railsProxy.CollectionCreate(ctx, opts)
if err != nil {
return resp, err
@@ -96,9 +100,16 @@ func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptio
// them.
opts.Select = append([]string{"is_trashed", "trash_at"}, opts.Select...)
}
+ err = conn.lockUUID(ctx, opts.UUID)
+ if err != nil {
+ return arvados.Collection{}, err
+ }
if opts.Attrs, err = conn.applyReplaceFilesOption(ctx, opts.UUID, opts.Attrs, opts.ReplaceFiles); err != nil {
return arvados.Collection{}, err
}
+ if opts.Attrs, err = conn.applyReplaceSegmentsOption(ctx, opts.UUID, opts.Attrs, opts.ReplaceSegments); err != nil {
+ return arvados.Collection{}, err
+ }
resp, err := conn.railsProxy.CollectionUpdate(ctx, opts)
if err != nil {
return resp, err
@@ -126,6 +137,18 @@ func (conn *Conn) signCollection(ctx context.Context, coll *arvados.Collection)
coll.ManifestText = arvados.SignManifest(coll.ManifestText, token, exp, ttl, []byte(conn.cluster.Collections.BlobSigningKey))
}
+func (conn *Conn) lockUUID(ctx context.Context, uuid string) error {
+ tx, err := ctrlctx.CurrentTx(ctx)
+ if err != nil {
+ return err
+ }
+ _, err = tx.ExecContext(ctx, `insert into uuid_locks (uuid) values ($1) on conflict (uuid) do update set n=uuid_locks.n+1`, uuid)
+ if err != nil {
+ return err
+ }
+ return nil
+}
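
The upsert in lockUUID doubles as a per-collection mutex: the first transaction to touch a given uuid_locks row holds a row-level lock until it commits, so concurrent replace_files/replace_segments updates to the same collection are serialized. A minimal sketch of the pattern, assuming a plain database/sql transaction ("context" and "database/sql" imports, and the update callback, are illustrative):

// Sketch of the row-lock pattern used by lockUUID above; a second
// transaction running the same upsert for the same uuid blocks at
// ExecContext until this transaction commits or rolls back.
func withUUIDLock(ctx context.Context, db *sql.DB, uuid string, update func() error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if _, err := tx.ExecContext(ctx,
		`insert into uuid_locks (uuid) values ($1)
		 on conflict (uuid) do update set n=uuid_locks.n+1`, uuid); err != nil {
		return err
	}
	if err := update(); err != nil { // read-modify-write happens here
		return err
	}
	return tx.Commit()
}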
+
// If replaceFiles is non-empty, populate attrs["manifest_text"] by
// starting with the content of fromUUID (or an empty collection if
// fromUUID is empty) and applying the specified file/directory
@@ -135,8 +158,20 @@ func (conn *Conn) signCollection(ctx context.Context, coll *arvados.Collection)
func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string, attrs map[string]interface{}, replaceFiles map[string]string) (map[string]interface{}, error) {
if len(replaceFiles) == 0 {
return attrs, nil
- } else if mtxt, ok := attrs["manifest_text"].(string); ok && len(mtxt) > 0 {
- return nil, httpserver.Errorf(http.StatusBadRequest, "ambiguous request: both 'replace_files' and attrs['manifest_text'] values provided")
+ }
+
+ providedManifestText, _ := attrs["manifest_text"].(string)
+ if providedManifestText != "" {
+ used := false
+ for _, src := range replaceFiles {
+ if strings.HasPrefix(src, "manifest_text/") {
+ used = true
+ break
+ }
+ }
+ if !used {
+ return nil, httpserver.Errorf(http.StatusBadRequest, "invalid request: attrs['manifest_text'] was provided, but would not be used because it is not referenced by any 'replace_files' entry")
+ }
}
// Load the current collection (if any) and set up an
@@ -199,6 +234,23 @@ func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string,
}
}
+ current := make(map[string]*arvados.Subtree)
+ // Check whether any sources are "current/...", and if so,
+ // populate current with the relevant snapshot. Doing this
+ // ahead of time, before making any modifications to dstfs
+ // below, ensures that even instructions like {/a: current/b,
+	// /b: current/a} will be handled correctly.
+ for _, src := range replaceFiles {
+ if strings.HasPrefix(src, "current/") && current[src] == nil {
+ current[src], err = arvados.Snapshot(dstfs, src[8:])
+ if os.IsNotExist(err) {
+ return nil, httpserver.Errorf(http.StatusBadRequest, "replace_files: nonexistent source %q", src)
+ } else if err != nil {
+ return nil, fmt.Errorf("%s: %w", src, err)
+ }
+ }
+ }
+
var srcidloaded string
var srcfs arvados.FileSystem
// Apply the requested replacements.
@@ -217,15 +269,33 @@ func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string,
}
continue
}
+ var snap *arvados.Subtree
srcspec := strings.SplitN(src, "/", 2)
srcid, srcpath := srcspec[0], "/"
- if !arvadosclient.PDHMatch(srcid) {
- return nil, httpserver.Errorf(http.StatusBadRequest, "invalid source %q for replace_files[%q]: must be \"\" or \"PDH\" or \"PDH/path\"", src, dst)
- }
if len(srcspec) == 2 && srcspec[1] != "" {
srcpath = srcspec[1]
}
- if srcidloaded != srcid {
+ switch {
+ case srcid == "current":
+ snap = current[src]
+ if snap == nil {
+ return nil, fmt.Errorf("internal error: current[%s] == nil", src)
+ }
+ case srcid == "manifest_text":
+ if srcidloaded == srcid {
+ break
+ }
+ srcfs = nil
+ srccoll := &arvados.Collection{ManifestText: providedManifestText}
+ srcfs, err = srccoll.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})
+ if err != nil {
+ return nil, err
+ }
+ srcidloaded = srcid
+ case arvadosclient.PDHMatch(srcid):
+ if srcidloaded == srcid {
+ break
+ }
srcfs = nil
srccoll, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: srcid})
if err != nil {
@@ -239,10 +309,14 @@ func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string,
return nil, err
}
srcidloaded = srcid
+ default:
+ return nil, httpserver.Errorf(http.StatusBadRequest, "invalid source %q for replace_files[%q]: must be \"\" or \"SRC\" or \"SRC/path\" where SRC is \"current\", \"manifest_text\", or a portable data hash", src, dst)
}
- snap, err := arvados.Snapshot(srcfs, srcpath)
- if err != nil {
- return nil, httpserver.Errorf(http.StatusBadRequest, "error getting snapshot of %q from %q: %w", srcpath, srcid, err)
+ if snap == nil {
+ snap, err = arvados.Snapshot(srcfs, srcpath)
+ if err != nil {
+ return nil, httpserver.Errorf(http.StatusBadRequest, "error getting snapshot of %q from %q: %w", srcpath, srcid, err)
+ }
}
// Create intermediate dirs, in case dst is
// "newdir1/newdir2/dst".
@@ -269,3 +343,39 @@ func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string,
attrs["manifest_text"] = mtxt
return attrs, nil
}
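
With the changes above, replace_files accepts three source forms in addition to "" (delete): a portable data hash, "current/..." (content already in the collection being updated), and "manifest_text/..." (content from the manifest_text attribute supplied in the same request, which is now accepted only when referenced this way). An illustrative update combining all of them, in the style of the tests in collection_test.go (collUUID, pdh, locator, conn, and userctx are placeholders):

	upd, err := conn.CollectionUpdate(userctx, arvados.UpdateOptions{
		UUID: collUUID,
		Attrs: map[string]interface{}{
			// accepted only because a replace_files entry references it
			"manifest_text": ". " + locator + " 0:2:upload.txt\n",
		},
		ReplaceFiles: map[string]string{
			"/upload.txt": "manifest_text/upload.txt", // from the provided manifest
			"/copy.txt":   pdh + "/foo.txt",           // from another collection, by PDH
			"/renamed":    "current/original",         // from this collection's current content
			"/stale.txt":  "",                         // delete
		},
	})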
+
+func (conn *Conn) applyReplaceSegmentsOption(ctx context.Context, fromUUID string, attrs map[string]interface{}, replaceSegments map[arvados.BlockSegment]arvados.BlockSegment) (map[string]interface{}, error) {
+ if len(replaceSegments) == 0 {
+ return attrs, nil
+ }
+
+ // Load the current collection content (unless it's being
+ // replaced by the provided manifest_text).
+ var dst arvados.Collection
+ if txt, ok := attrs["manifest_text"].(string); ok {
+ dst.ManifestText = txt
+ } else if fromUUID != "" {
+ src, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: fromUUID})
+ if err != nil {
+ return nil, err
+ }
+ dst = src
+ }
+ dstfs, err := dst.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})
+ if err != nil {
+ return nil, err
+ }
+ if changed, err := dstfs.ReplaceSegments(replaceSegments); err != nil {
+ return nil, httpserver.Errorf(http.StatusBadRequest, "replace_segments: %s", err)
+ } else if changed {
+ txt, err := dstfs.MarshalManifest(".")
+ if err != nil {
+ return nil, err
+ }
+ if attrs == nil {
+ attrs = make(map[string]interface{})
+ }
+ attrs["manifest_text"] = txt
+ }
+ return attrs, nil
+}
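
An illustrative replace_segments request in the style of Test2to1_Simple in collection_test.go: two segments previously stored in separate blocks are declared to now live at different offsets of one combined block, and the manifest is rewritten accordingly (locA, locB, and locAB are placeholder signed locators; the BlockSegment literals are positional, matching the tests):

	_, err := conn.CollectionUpdate(userctx, arvados.UpdateOptions{
		UUID: collUUID,
		ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
			arvados.BlockSegment{locA, 0, 1}: arvados.BlockSegment{locAB, 0, 1},
			arvados.BlockSegment{locB, 0, 2}: arvados.BlockSegment{locAB, 1, 2},
		},
	})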
diff --git a/lib/controller/localdb/collection_test.go b/lib/controller/localdb/collection_test.go
index 7d1a909a6f..fd04019279 100644
--- a/lib/controller/localdb/collection_test.go
+++ b/lib/controller/localdb/collection_test.go
@@ -5,18 +5,24 @@
package localdb
import (
+ "context"
+ "errors"
+ "fmt"
"io/fs"
+ "net/http"
"path/filepath"
"regexp"
- "sort"
"strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
check "gopkg.in/check.v1"
)
@@ -74,34 +80,162 @@ func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C
}
}
-func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
- adminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
- foo, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
+func (s *CollectionSuite) TestSignatures(c *check.C) {
+ resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
+ c.Check(err, check.IsNil)
+ c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
+ s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
+
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{"manifest_text"}})
+ c.Check(err, check.IsNil)
+ c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
+
+ lresp, err := s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}})
+ c.Check(err, check.IsNil)
+ if c.Check(lresp.Items, check.HasLen, 1) {
+ c.Check(lresp.Items[0].UUID, check.Equals, arvadostest.FooCollection)
+ c.Check(lresp.Items[0].ManifestText, check.Equals, "")
+ c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
+ }
+
+ lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"manifest_text"}})
+ c.Check(err, check.IsNil)
+ if c.Check(lresp.Items, check.HasLen, 1) {
+ c.Check(lresp.Items[0].ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
+ c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
+ }
+
+ lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"unsigned_manifest_text"}})
+ c.Check(err, check.IsNil)
+ if c.Check(lresp.Items, check.HasLen, 1) {
+ c.Check(lresp.Items[0].ManifestText, check.Equals, "")
+ c.Check(lresp.Items[0].UnsignedManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3 0:.*`)
+ }
+
+ // early trash date causes lower signature TTL (even if
+ // trash_at and is_trashed fields are unselected)
+ trashed, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
+ Select: []string{"uuid", "manifest_text"},
+ Attrs: map[string]interface{}{
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "trash_at": time.Now().UTC().Add(time.Hour),
+ }})
+ c.Assert(err, check.IsNil)
+ s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour)
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
+ c.Assert(err, check.IsNil)
+ s.checkSignatureExpiry(c, resp.ManifestText, time.Hour)
+
+ // distant future trash date does not cause higher signature TTL
+ trashed, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: trashed.UUID,
+ Attrs: map[string]interface{}{
+ "trash_at": time.Now().UTC().Add(time.Hour * 24 * 365),
+ }})
+ c.Assert(err, check.IsNil)
+ s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour*24*7*2)
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
+ c.Assert(err, check.IsNil)
+ s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
+
+ // Make sure groups/contents doesn't return manifest_text with
+ // collections (if it did, we'd need to sign it).
+ gresp, err := s.localdb.GroupContents(s.userctx, arvados.GroupContentsOptions{
+ Limit: -1,
+ Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}},
+ Select: []string{"uuid", "manifest_text"},
+ })
+ if err != nil {
+ c.Check(err, check.ErrorMatches, `.*Invalid attribute.*manifest_text.*`)
+ } else if c.Check(gresp.Items, check.HasLen, 1) {
+ c.Check(gresp.Items[0].(map[string]interface{})["uuid"], check.Equals, arvadostest.FooCollection)
+ c.Check(gresp.Items[0].(map[string]interface{})["manifest_text"], check.Equals, nil)
+ }
+}
+
+func (s *CollectionSuite) checkSignatureExpiry(c *check.C, manifestText string, expectedTTL time.Duration) {
+ m := regexp.MustCompile(`@([[:xdigit:]]+)`).FindStringSubmatch(manifestText)
+ c.Assert(m, check.HasLen, 2)
+ sigexp, err := strconv.ParseInt(m[1], 16, 64)
+ c.Assert(err, check.IsNil)
+ expectedExp := time.Now().Add(expectedTTL).Unix()
+ c.Check(sigexp > expectedExp-60, check.Equals, true)
+ c.Check(sigexp <= expectedExp, check.Equals, true)
+}
+
+func (s *CollectionSuite) TestSignaturesDisabled(c *check.C) {
+ s.localdb.cluster.Collections.BlobSigning = false
+ resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
+ c.Check(err, check.IsNil)
+ c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ +]*\+3 0:.*`)
+}
+
+var _ = check.Suite(&replaceFilesSuite{})
+
+type replaceFilesSuite struct {
+ localdbSuite
+ client *arvados.Client
+ ac *arvadosclient.ArvadosClient
+ kc *keepclient.KeepClient
+ foo arvados.Collection // contains /foo.txt
+ tmp arvados.Collection // working collection, initially contains /foo.txt
+}
+
+func (s *replaceFilesSuite) SetUpSuite(c *check.C) {
+ s.localdbSuite.SetUpSuite(c)
+ var err error
+ s.client = arvados.NewClientFromEnv()
+ s.ac, err = arvadosclient.New(s.client)
+ c.Assert(err, check.IsNil)
+ s.kc, err = keepclient.MakeKeepClient(s.ac)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *replaceFilesSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
+ // Unlike most test suites, we need to COMMIT our setup --
+ // otherwise, when our tests start additional
+ // transactions/connections, they won't see our setup.
+ ctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)
+ defer txFinish(new(error))
+ adminctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.AdminToken)
+ var err error
+ s.foo, err = s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(adminctx, &foo)
+ s.tmp, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
+ ReplaceFiles: map[string]string{
+ "/foo.txt": s.foo.PortableDataHash + "/foo.txt",
+ },
+ Attrs: map[string]interface{}{
+ "owner_uuid": arvadostest.ActiveUserUUID,
+ }})
+ c.Assert(err, check.IsNil)
+ s.expectFiles(c, s.tmp, "foo.txt")
+}
+
+func (s *replaceFilesSuite) TestCollectionReplaceFiles(c *check.C) {
+ adminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
foobarbaz, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": "./foo/bar 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(adminctx, &foobarbaz)
wazqux, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": "./waz d85b1213473c2fd7c2045020a6b9c62b+3 0:3:qux.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(adminctx, &wazqux)
// Create using content from existing collections
dst, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
ReplaceFiles: map[string]string{
- "/f": foo.PortableDataHash + "/foo.txt",
+ "/f": s.foo.PortableDataHash + "/foo.txt",
"/b": foobarbaz.PortableDataHash + "/foo/bar",
"/q": wazqux.PortableDataHash + "/",
"/w": wazqux.PortableDataHash + "/waz",
@@ -191,30 +325,203 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
c.Logf("badrepl %#v\n... got err: %s", badrepl, err)
c.Check(err, check.NotNil)
}
+}
+
+func (s *replaceFilesSuite) TestMultipleRename(c *check.C) {
+ adminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
+ tmp, err := s.localdb.CollectionUpdate(adminctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ Attrs: map[string]interface{}{
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1 0:2:file2 0:3:file3\n"}})
+ c.Assert(err, check.IsNil)
+ tmp, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: tmp.UUID,
+ ReplaceFiles: map[string]string{
+ "/file1": "current/file2",
+ "/file2": "current/file3",
+ "/file3": "current/file1",
+ "/dir/file1": "current/file1",
+ }})
+ c.Check(err, check.IsNil)
+ s.expectFileSizes(c, tmp, map[string]int64{
+ "file1": 2,
+ "file2": 3,
+ "file3": 1,
+ "dir/file1": 1,
+ })
+}
- // Check conflicting replace_files and manifest_text
+func (s *replaceFilesSuite) TestNonexistentCurrentFile(c *check.C) {
+ adminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
+ tmp, err := s.localdb.CollectionUpdate(adminctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ Attrs: map[string]interface{}{
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1 0:2:file2 0:3:file3\n"}})
+ c.Assert(err, check.IsNil)
_, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
- UUID: dst.UUID,
- ReplaceFiles: map[string]string{"/": ""},
+ UUID: tmp.UUID,
+ ReplaceFiles: map[string]string{
+ "/dst": "current/file404",
+ }})
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)
+}
+
+func (s *replaceFilesSuite) TestConcurrentCopyFromPDH(c *check.C) {
+ var wg sync.WaitGroup
+ var expectFiles []string
+ for i := 0; i < 10; i++ {
+ fnm := fmt.Sprintf("copy%d.txt", i)
+ expectFiles = append(expectFiles, fnm)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)
+ defer txFinish(new(error))
+ userctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)
+ _, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceFiles: map[string]string{
+ "/" + fnm: s.foo.PortableDataHash + "/foo.txt",
+ "/foo.txt": "",
+ }})
+ c.Check(err, check.IsNil)
+ }()
+ }
+ wg.Wait()
+ // After N concurrent/overlapping requests to add different
+ // files by copying from another collection, we should see all
+ // N files.
+ final, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})
+ c.Assert(err, check.IsNil)
+ s.expectFiles(c, final, expectFiles...)
+}
+
+func (s *replaceFilesSuite) TestConcurrentCopyFromProvidedManifestText(c *check.C) {
+ blockLocator := strings.Split(s.tmp.ManifestText, " ")[1]
+ var wg sync.WaitGroup
+ expectFileSizes := make(map[string]int64)
+ for i := 0; i < 10; i++ {
+ fnm := fmt.Sprintf("upload%d.txt", i)
+ expectFileSizes[fnm] = 2
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)
+ defer txFinish(new(error))
+ userctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)
+ _, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ Attrs: map[string]interface{}{
+ "manifest_text": ". " + blockLocator + " 0:2:" + fnm + "\n",
+ },
+ ReplaceFiles: map[string]string{
+ "/" + fnm: "manifest_text/" + fnm,
+ "/foo.txt": "",
+ }})
+ c.Check(err, check.IsNil)
+ }()
+ }
+ wg.Wait()
+ // After N concurrent/overlapping requests to add different
+ // files, we should see all N files.
+ final, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})
+ c.Assert(err, check.IsNil)
+ s.expectFileSizes(c, final, expectFileSizes)
+}
+
+func (s *replaceFilesSuite) TestUnusedManifestText_Create(c *check.C) {
+ blockLocator := strings.Split(s.tmp.ManifestText, " ")[1]
+ _, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
- "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:z\n",
+ "manifest_text": ". " + blockLocator + " 0:3:foo\n",
+ },
+ ReplaceFiles: map[string]string{
+ "/foo.txt": "",
}})
- c.Logf("replace_files+manifest_text\n... got err: %s", err)
- c.Check(err, check.ErrorMatches, "ambiguous request: both.*replace_files.*manifest_text.*")
+ c.Check(err, check.ErrorMatches, `.*manifest_text.*would not be used.*`)
+}
+
+func (s *replaceFilesSuite) TestUnusedManifestText_Update(c *check.C) {
+ blockLocator := strings.Split(s.tmp.ManifestText, " ")[1]
+ _, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ Attrs: map[string]interface{}{
+ "manifest_text": ". " + blockLocator + " 0:3:foo\n",
+ },
+ ReplaceFiles: map[string]string{
+ "/foo.txt": "",
+ }})
+ c.Check(err, check.ErrorMatches, `.*manifest_text.*would not be used.*`)
+}
+
+func (s *replaceFilesSuite) TestConcurrentRename(c *check.C) {
+ var wg sync.WaitGroup
+ var renamed atomic.Int32
+ n := 10
+	errs := make(chan error, n)
+ var newnameOK string
+ for i := 0; i < n; i++ {
+ newname := fmt.Sprintf("newname%d.txt", i)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)
+ defer txFinish(new(error))
+ userctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)
+ upd, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceFiles: map[string]string{
+ "/" + newname: "current/foo.txt",
+ "/foo.txt": "",
+ }})
+ if err != nil {
+				errs <- err
+ } else {
+ renamed.Add(1)
+ s.expectFiles(c, upd, newname)
+ newnameOK = newname
+ }
+ }()
+ }
+ wg.Wait()
+	// N concurrent/overlapping attempts to rename foo.txt should
+	// have succeeded exactly once, and the final collection
+	// content should correspond to the operation that returned
+	// success.
+ if !c.Check(int(renamed.Load()), check.Equals, 1) {
+		close(errs)
+		for err := range errs {
+ c.Logf("err: %s", err)
+ }
+ return
+ }
+ c.Assert(newnameOK, check.Not(check.Equals), "")
+ final, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})
+ c.Assert(err, check.IsNil)
+ s.expectFiles(c, final, newnameOK)
}
// expectFiles checks coll's directory structure against the given
// list of expected files and empty directories. An expected path with
// a trailing slash indicates an empty directory.
-func (s *CollectionSuite) expectFiles(c *check.C, coll arvados.Collection, expected ...string) {
- client := arvados.NewClientFromEnv()
- ac, err := arvadosclient.New(client)
- c.Assert(err, check.IsNil)
- kc, err := keepclient.MakeKeepClient(ac)
- c.Assert(err, check.IsNil)
- cfs, err := coll.FileSystem(client, kc)
+func (s *replaceFilesSuite) expectFiles(c *check.C, coll arvados.Collection, expected ...string) {
+ expectSizes := make(map[string]int64)
+ for _, path := range expected {
+ expectSizes[path] = -1
+ }
+ s.expectFileSizes(c, coll, expectSizes)
+}
+
+// expectFileSizes checks coll's directory structure against the given
+// map of path->size. An expected path with a trailing slash
+// indicates an empty directory. An expected size of -1 indicates the
+// file size does not need to be checked.
+func (s *replaceFilesSuite) expectFileSizes(c *check.C, coll arvados.Collection, expected map[string]int64) {
+ cfs, err := coll.FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
- var found []string
+ found := make(map[string]int64)
nonemptydirs := map[string]bool{}
fs.WalkDir(arvados.FS(cfs), "/", func(path string, d fs.DirEntry, err error) error {
dir, _ := filepath.Split(path)
@@ -227,111 +534,236 @@ func (s *CollectionSuite) expectFiles(c *check.C, coll arvados.Collection, expec
nonemptydirs[path] = false
}
} else {
- found = append(found, path)
+ fi, err := d.Info()
+ c.Assert(err, check.IsNil)
+ found[path] = fi.Size()
}
return nil
})
for d, nonempty := range nonemptydirs {
if !nonempty {
- found = append(found, d)
+ found[d] = 0
}
}
- for i, path := range found {
- if path != "/" {
- found[i] = strings.TrimPrefix(path, "/")
+ for path, size := range found {
+ if trimmed := strings.TrimPrefix(path, "/"); trimmed != path && trimmed != "" {
+ found[trimmed] = size
+ delete(found, path)
+ path = trimmed
+ }
+ if expected[path] == -1 {
+ // Path is expected to exist, and -1 means we
+ // aren't supposed to check the size. Change
+ // "found size" to -1 as well, so this entry
+ // will pass the DeepEquals check below.
+ found[path] = -1
}
}
- sort.Strings(found)
- sort.Strings(expected)
c.Check(found, check.DeepEquals, expected)
}
-func (s *CollectionSuite) TestSignatures(c *check.C) {
- resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
- c.Check(err, check.IsNil)
- c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
- s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
+var _ = check.Suite(&replaceSegmentsSuite{})
- resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{"manifest_text"}})
- c.Check(err, check.IsNil)
- c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
+type replaceSegmentsSuite struct {
+ localdbSuite
+ client *arvados.Client
+ ac *arvadosclient.ArvadosClient
+ kc *keepclient.KeepClient
+ locator []string // locator[i] is a locator of a block consisting of i null bytes.
+ tmp arvados.Collection // each test case starts off with file1 and file2
+}
- lresp, err := s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}})
- c.Check(err, check.IsNil)
- if c.Check(lresp.Items, check.HasLen, 1) {
- c.Check(lresp.Items[0].UUID, check.Equals, arvadostest.FooCollection)
- c.Check(lresp.Items[0].ManifestText, check.Equals, "")
- c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
- }
+func (s *replaceSegmentsSuite) SetUpSuite(c *check.C) {
+ s.localdbSuite.SetUpSuite(c)
+ var err error
+ s.client = arvados.NewClientFromEnv()
+ s.client.AuthToken = arvadostest.ActiveTokenV2
+ s.ac, err = arvadosclient.New(s.client)
+ c.Assert(err, check.IsNil)
+ s.kc, err = keepclient.MakeKeepClient(s.ac)
+ c.Assert(err, check.IsNil)
+}
- lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"manifest_text"}})
- c.Check(err, check.IsNil)
- if c.Check(lresp.Items, check.HasLen, 1) {
- c.Check(lresp.Items[0].ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
- c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
+func (s *replaceSegmentsSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
+ if s.locator == nil {
+ s.locator = make([]string, 10)
+ for i := range s.locator {
+ resp, err := s.kc.BlockWrite(s.userctx, arvados.BlockWriteOptions{Data: make([]byte, i)})
+ c.Assert(err, check.IsNil)
+ s.locator[i] = resp.Locator
+ c.Logf("locator %d %s", i, s.locator[i])
+ }
}
+ var err error
+ s.tmp, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "manifest_text": ". " + s.locator[1] + " " + s.locator[2] + " 0:1:file1 1:2:file2\n",
+ }})
+ c.Assert(err, check.IsNil)
+}
- lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"unsigned_manifest_text"}})
- c.Check(err, check.IsNil)
- if c.Check(lresp.Items, check.HasLen, 1) {
- c.Check(lresp.Items[0].ManifestText, check.Equals, "")
- c.Check(lresp.Items[0].UnsignedManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3 0:.*`)
- }
+func (s *replaceSegmentsSuite) checkCollectionNotModified(c *check.C) {
+ // Confirm the collection was not modified.
+ coll, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})
+ c.Assert(err, check.IsNil)
+ c.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(s.tmp.ManifestText))
+ c.Check(coll.ModifiedAt, check.Equals, s.tmp.ModifiedAt)
+}
- // early trash date causes lower signature TTL (even if
- // trash_at and is_trashed fields are unselected)
- trashed, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
- Select: []string{"uuid", "manifest_text"},
+func (s *replaceSegmentsSuite) Test2to1_Simple(c *check.C) {
+ coll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 1, 2},
+ }})
+ c.Assert(err, check.IsNil)
+ c.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(". "+s.locator[3]+" 0:1:file1 1:2:file2\n"))
+}
+
+// Apply replacements to provided manifest_text when creating a new
+// collection.
+func (s *replaceSegmentsSuite) TestCreate(c *check.C) {
+ coll, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
- "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
- "trash_at": time.Now().UTC().Add(time.Hour),
+ "manifest_text": ". " + s.locator[2] + " " + s.locator[3] + " 0:5:file5\n",
+ },
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[5], 0, 2},
+ arvados.BlockSegment{s.locator[3], 0, 3}: arvados.BlockSegment{s.locator[5], 2, 3},
}})
c.Assert(err, check.IsNil)
- s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour)
- resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
+ c.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(". "+s.locator[5]+" 0:5:file5\n"))
+}
+
+func (s *replaceSegmentsSuite) TestSignatureCheck(c *check.C) {
+ var badlocator string
+ {
+ adminclient := arvados.NewClientFromEnv()
+ ac, err := arvadosclient.New(adminclient)
+ c.Assert(err, check.IsNil)
+ kc, err := keepclient.MakeKeepClient(ac)
+ c.Assert(err, check.IsNil)
+ resp, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{Data: make([]byte, 3)})
+ c.Assert(err, check.IsNil)
+ badlocator = resp.Locator
+ }
+
+ // Replacement locator has an invalid signature (signed with a
+ // different token) so this update should fail.
+ _, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{badlocator, 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{badlocator, 1, 2},
+ }})
+ c.Assert(err, check.ErrorMatches, `.*PermissionDenied.*`)
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, http.StatusForbidden)
+
+ s.checkCollectionNotModified(c)
+}
+
+func (s *replaceSegmentsSuite) Test2to1_Reordered(c *check.C) {
+ coll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 2, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 0, 2},
+ }})
c.Assert(err, check.IsNil)
- s.checkSignatureExpiry(c, resp.ManifestText, time.Hour)
+ c.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(". "+s.locator[3]+" 2:1:file1 0:2:file2\n"))
+}
- // distant future trash date does not cause higher signature TTL
- trashed, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
- UUID: trashed.UUID,
+func (s *replaceSegmentsSuite) Test2to1_MultipleReferences(c *check.C) {
+ coll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
Attrs: map[string]interface{}{
- "trash_at": time.Now().UTC().Add(time.Hour * 24 * 365),
+ "manifest_text": ". " + s.locator[1] + " " + s.locator[2] + " 0:1:file1 1:2:file2\n" +
+ "./dir " + s.locator[1] + " 0:1:file3\n",
+		}})
+	c.Assert(err, check.IsNil)
+	coll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 1, 2},
}})
c.Assert(err, check.IsNil)
- s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour*24*7*2)
- resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
+ c.Check(stripSignatures(coll.ManifestText), check.Equals,
+ stripSignatures(". "+s.locator[3]+" 0:1:file1 1:2:file2\n"+
+ "./dir "+s.locator[3]+" 0:1:file3\n"))
+}
+
+// Caller is asking to repack 1,2,4->7 and 5->8, but a different
+// caller has already repacked 1,2,3->6, so we skip 1,2,4->7 but apply
+// 5->8.
+func (s *replaceSegmentsSuite) TestSkipUnreferenced(c *check.C) {
+ orig := ". " + s.locator[6] + " " + s.locator[4] + " 0:1:file1 1:2:file2 3:3:file3 6:4:file4\n" +
+ "./dir " + s.locator[5] + " 0:5:file5\n"
+ coll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ Attrs: map[string]interface{}{
+ "manifest_text": orig,
+ }})
c.Assert(err, check.IsNil)
- s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
+ coll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: coll.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[7], 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[7], 1, 2},
+ arvados.BlockSegment{s.locator[4], 0, 4}: arvados.BlockSegment{s.locator[7], 3, 4},
+ arvados.BlockSegment{s.locator[5], 0, 5}: arvados.BlockSegment{s.locator[8], 0, 5},
+ }})
+ c.Assert(err, check.IsNil)
+ c.Check(stripSignatures(coll.ManifestText), check.Equals,
+ stripSignatures(". "+s.locator[6]+" "+s.locator[4]+" 0:1:file1 1:2:file2 3:3:file3 6:4:file4\n"+
+ "./dir "+s.locator[8]+" 0:5:file5\n"))
+}
- // Make sure groups/contents doesn't return manifest_text with
- // collections (if it did, we'd need to sign it).
- gresp, err := s.localdb.GroupContents(s.userctx, arvados.GroupContentsOptions{
- Limit: -1,
- Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}},
- Select: []string{"uuid", "manifest_text"},
- })
- if err != nil {
- c.Check(err, check.ErrorMatches, `.*Invalid attribute.*manifest_text.*`)
- } else if c.Check(gresp.Items, check.HasLen, 1) {
- c.Check(gresp.Items[0].(map[string]interface{})["uuid"], check.Equals, arvadostest.FooCollection)
- c.Check(gresp.Items[0].(map[string]interface{})["manifest_text"], check.Equals, nil)
- }
+func (s *replaceSegmentsSuite) TestLengthMismatch(c *check.C) {
+ _, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 2},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 0, 2},
+ }})
+ c.Check(err, check.ErrorMatches, `replace_segments: mismatched length: replacing segment length 1 with segment length 2`)
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)
+ s.checkCollectionNotModified(c)
}
-func (s *CollectionSuite) checkSignatureExpiry(c *check.C, manifestText string, expectedTTL time.Duration) {
- m := regexp.MustCompile(`@([[:xdigit:]]+)`).FindStringSubmatch(manifestText)
- c.Assert(m, check.HasLen, 2)
- sigexp, err := strconv.ParseInt(m[1], 16, 64)
- c.Assert(err, check.IsNil)
- expectedExp := time.Now().Add(expectedTTL).Unix()
- c.Check(sigexp > expectedExp-60, check.Equals, true)
- c.Check(sigexp <= expectedExp, check.Equals, true)
+func (s *replaceSegmentsSuite) TestInvalidReplacementOffset(c *check.C) {
+ _, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 3, 2},
+ }})
+ c.Check(err, check.ErrorMatches, `replace_segments: invalid replacement: offset 3 \+ length 2 > block size 3`)
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)
+ s.checkCollectionNotModified(c)
}
-func (s *CollectionSuite) TestSignaturesDisabled(c *check.C) {
- s.localdb.cluster.Collections.BlobSigning = false
- resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
- c.Check(err, check.IsNil)
- c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ +]*\+3 0:.*`)
+func (s *replaceSegmentsSuite) TestInvalidReplacementLength(c *check.C) {
+ _, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.tmp.UUID,
+ ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
+ arvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},
+ arvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 4, 2},
+ }})
+ c.Check(err, check.ErrorMatches, `replace_segments: invalid replacement: offset 4 \+ length 2 > block size 3`)
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)
+ s.checkCollectionNotModified(c)
+}
+
+func stripSignatures(manifest string) string {
+ return regexp.MustCompile(`\+A[^ ]+`).ReplaceAllString(manifest, "")
}
diff --git a/lib/controller/localdb/container_gateway.go b/lib/controller/localdb/container_gateway.go
index 0b6a630fae..676a884063 100644
--- a/lib/controller/localdb/container_gateway.go
+++ b/lib/controller/localdb/container_gateway.go
@@ -13,6 +13,7 @@ import (
"crypto/subtle"
"crypto/tls"
"crypto/x509"
+ "database/sql"
"errors"
"fmt"
"io"
@@ -22,12 +23,15 @@ import (
"net/http/httputil"
"net/url"
"os"
+ "strconv"
"strings"
+ "time"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/lib/webdavfs"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
@@ -37,7 +41,19 @@ import (
)
var (
- forceProxyForTest = false
+ // forceProxyForTest enables test cases to exercise the "proxy
+ // to a different controller instance" code path without
+ // running a second controller instance. If this is set, an
+ // incoming request with NoForward==false is always proxied to
+ // the configured controller instance that matches the
+ // container gateway's tunnel endpoint, without checking
+ // whether the tunnel is actually connected to the current
+ // process.
+ forceProxyForTest = false
+
+ // forceInternalURLForTest is sent to the crunch-run gateway
+ // when setting up a tunnel in a test suite where
+ // service.URLFromContext() does not return anything.
forceInternalURLForTest *arvados.URL
)
@@ -135,63 +151,12 @@ func (conn *Conn) ContainerRequestLog(ctx context.Context, opts arvados.Containe
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
r = r.WithContext(ctx)
- var proxyReq *http.Request
var proxyErr error
- var expectRespondAuth string
- proxy := &httputil.ReverseProxy{
- // Our custom Transport:
- //
- // - Uses a custom dialer to connect to the
- // gateway (either directly or through a
- // tunnel set up though ContainerTunnel)
- //
- // - Verifies the gateway's TLS certificate
- // using X-Arvados-Authorization headers.
- //
- // This involves modifying the outgoing
- // request header in DialTLSContext.
- // (ReverseProxy certainly doesn't expect us
- // to do this, but it works.)
- Transport: &http.Transport{
- DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
- tlsconn, requestAuth, respondAuth, err := dial()
- if err != nil {
- return nil, err
- }
- proxyReq.Header.Set("X-Arvados-Authorization", requestAuth)
- expectRespondAuth = respondAuth
- return tlsconn, nil
- },
- },
- Director: func(r *http.Request) {
- // Scheme/host of incoming r.URL are
- // irrelevant now, and may even be
- // missing. Host is ignored by our
- // DialTLSContext, but we need a
- // generic syntactically correct URL
- // for net/http to work with.
- r.URL.Scheme = "https"
- r.URL.Host = "0.0.0.0:0"
- r.Header.Set("X-Arvados-Container-Gateway-Uuid", ctr.UUID)
- r.Header.Set("X-Webdav-Prefix", "/arvados/v1/container_requests/"+cr.UUID+"/log/"+ctr.UUID)
- r.Header.Set("X-Webdav-Source", "/log")
- proxyReq = r
- },
- ModifyResponse: func(resp *http.Response) error {
- if resp.Header.Get("X-Arvados-Authorization-Response") != expectRespondAuth {
- // Note this is how we detect
- // an attacker-in-the-middle.
- return httpserver.ErrorWithStatus(errors.New("bad X-Arvados-Authorization-Response header"), http.StatusBadGateway)
- }
- resp.Header.Del("X-Arvados-Authorization-Response")
- preemptivelyDeduplicateHeaders(w.Header(), resp.Header)
- return nil
- },
- ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
- proxyErr = err
- },
- }
- proxy.ServeHTTP(w, r)
+ gatewayProxy(dial, w, http.Header{
+ "X-Arvados-Container-Gateway-Uuid": {ctr.UUID},
+ "X-Webdav-Prefix": {"/arvados/v1/container_requests/" + cr.UUID + "/log/" + ctr.UUID},
+ "X-Webdav-Source": {"/log"},
+ }, &proxyErr).ServeHTTP(w, r)
if proxyErr == nil {
// proxy succeeded
return
@@ -290,6 +255,74 @@ func (conn *Conn) serveContainerRequestLogViaKeepWeb(opts arvados.ContainerLogOp
proxy.ServeHTTP(w, r)
}
+func gatewayProxy(dial gatewayDialer, responseWriter http.ResponseWriter, setRequestHeader http.Header, proxyErr *error) *httputil.ReverseProxy {
+ var proxyReq *http.Request
+ var expectRespondAuth string
+ return &httputil.ReverseProxy{
+ // Our custom Transport:
+ //
+ // - Uses a custom dialer to connect to the gateway
+ // (either directly or through a tunnel set up though
+	// (either directly or via a tunnel set up through
+ //
+ // - Verifies the gateway's TLS certificate using
+ // X-Arvados-Authorization headers.
+ //
+ // This involves modifying the outgoing request header
+ // in DialTLSContext. (ReverseProxy certainly doesn't
+ // expect us to do this, but it works.)
+ Transport: &http.Transport{
+ DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ tlsconn, requestAuth, respondAuth, err := dial()
+ if err != nil {
+ return nil, err
+ }
+ proxyReq.Header.Set("X-Arvados-Authorization", requestAuth)
+ expectRespondAuth = respondAuth
+ return tlsconn, nil
+ },
+ // This transport is only used for a single
+ // request, so http keep-alive would
+ // accumulate open sockets without providing
+ // any benefit. So, disable keep-alive.
+ DisableKeepAlives: true,
+ // Use stdlib defaults.
+ ForceAttemptHTTP2: http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,
+ TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout,
+ ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,
+ },
+ Director: func(r *http.Request) {
+ // Scheme/host of incoming r.URL are
+ // irrelevant now, and may even be
+ // missing. Host is ignored by our
+ // DialTLSContext, but we need a generic
+ // syntactically correct URL for net/http to
+ // work with.
+ r.URL.Scheme = "https"
+ r.URL.Host = "0.0.0.0:0"
+ for k, v := range setRequestHeader {
+ r.Header[k] = v
+ }
+ proxyReq = r
+ },
+ ModifyResponse: func(resp *http.Response) error {
+ if resp.Header.Get("X-Arvados-Authorization-Response") != expectRespondAuth {
+ // Note this is how we detect
+ // an attacker-in-the-middle.
+ return httpserver.ErrorWithStatus(errors.New("bad X-Arvados-Authorization-Response header"), http.StatusBadGateway)
+ }
+ resp.Header.Del("X-Arvados-Authorization-Response")
+ preemptivelyDeduplicateHeaders(responseWriter.Header(), resp.Header)
+ return nil
+ },
+ ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
+ if proxyErr != nil {
+ *proxyErr = err
+ }
+ },
+ }
+}
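
gatewayProxy is the proxy logic previously inlined in ContainerRequestLog, factored out so other gateway endpoints can reuse it. For reference, the call pattern, repeating the ContainerRequestLog usage shown above (dial, ctr, cr, w, and r come from the surrounding handler):

	var proxyErr error
	gatewayProxy(dial, w, http.Header{
		"X-Arvados-Container-Gateway-Uuid": {ctr.UUID},
		"X-Webdav-Prefix":                  {"/arvados/v1/container_requests/" + cr.UUID + "/log/" + ctr.UUID},
		"X-Webdav-Source":                  {"/log"},
	}, &proxyErr).ServeHTTP(w, r)
	if proxyErr != nil {
		// fall back, e.g. serve logs from Keep via keep-web
	}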
+
// httputil.ReverseProxy uses (http.Header)Add() to copy headers from
// the upstream Response to the downstream ResponseWriter. If headers
// have already been set on the downstream ResponseWriter, Add() will
@@ -348,23 +381,14 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
if err != nil {
return sshconn, err
}
- ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
if !user.IsAdmin || !conn.cluster.Containers.ShellAccess.Admin {
if !conn.cluster.Containers.ShellAccess.User {
return sshconn, httpserver.ErrorWithStatus(errors.New("shell access is disabled in config"), http.StatusServiceUnavailable)
}
- crs, err := conn.railsProxy.ContainerRequestList(ctxRoot, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"container_uuid", "=", opts.UUID}}})
+ err = conn.checkContainerLoginPermission(ctx, user.UUID, opts.UUID)
if err != nil {
return sshconn, err
}
- for _, cr := range crs.Items {
- if cr.ModifiedByUserUUID != user.UUID {
- return sshconn, httpserver.ErrorWithStatus(errors.New("permission denied: container is associated with requests submitted by other users"), http.StatusForbidden)
- }
- }
- if crs.ItemsAvailable != len(crs.Items) {
- return sshconn, httpserver.ErrorWithStatus(errors.New("incomplete response while checking permission"), http.StatusInternalServerError)
- }
}
if ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {
@@ -435,6 +459,7 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
}
if !ctr.InteractiveSessionStarted {
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
_, err = conn.railsProxy.ContainerUpdate(ctxRoot, arvados.UpdateOptions{
UUID: opts.UUID,
Attrs: map[string]interface{}{
@@ -454,6 +479,272 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
return sshconn, nil
}
+// Check that userUUID is permitted to start an interactive login
+// session in ctrUUID. Any returned error has an HTTPStatus().
+func (conn *Conn) checkContainerLoginPermission(ctx context.Context, userUUID, ctrUUID string) error {
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
+ crs, err := conn.railsProxy.ContainerRequestList(ctxRoot, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"container_uuid", "=", ctrUUID}}})
+ if err != nil {
+ return err
+ }
+ for _, cr := range crs.Items {
+ if cr.ModifiedByUserUUID != userUUID {
+ return httpserver.ErrorWithStatus(errors.New("permission denied: container is associated with requests submitted by other users"), http.StatusForbidden)
+ }
+ }
+ if crs.ItemsAvailable != len(crs.Items) {
+ return httpserver.ErrorWithStatus(errors.New("incomplete response while checking permission"), http.StatusInternalServerError)
+ }
+ return nil
+}
+
+var errUnassignedPort = httpserver.ErrorWithStatus(errors.New("unassigned port"), http.StatusGone)
+
+// ContainerHTTPProxy proxies an incoming request through to the
+// specified port on a running container, via crunch-run's container
+// gateway.
+func (conn *Conn) ContainerHTTPProxy(ctx context.Context, opts arvados.ContainerHTTPProxyOptions) (http.Handler, error) {
+ // We'll use ctxRoot to do requests below that don't depend on
+ // the supplied token.
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
+
+ var targetUUID string
+ var targetPort int
+ if strings.HasPrefix(opts.Target, ":") {
+ // Target ":1234" means "the entry in the
+ // container_ports table with external_port=1234".
+ extport, err := strconv.Atoi(opts.Target[1:])
+ if err != nil {
+ return nil, httpserver.ErrorWithStatus(fmt.Errorf("invalid port in target: %s", opts.Target), http.StatusBadRequest)
+ }
+ db, err := conn.getdb(ctx)
+ if err != nil {
+ return nil, httpserver.ErrorWithStatus(fmt.Errorf("getdb: %w", err), http.StatusBadGateway)
+ }
+ err = db.QueryRowContext(ctx, `select container_uuid, container_port
+ from container_ports
+ where external_port = $1`, extport).Scan(&targetUUID, &targetPort)
+ if err == sql.ErrNoRows {
+ return nil, errUnassignedPort
+ } else if err != nil {
+ return nil, httpserver.ErrorWithStatus(err, http.StatusBadGateway)
+ }
+ } else if len(opts.Target) > 28 && arvadosclient.UUIDMatch(opts.Target[:27]) && opts.Target[27] == '-' {
+ targetUUID = opts.Target[:27]
+ fmt.Sscanf(opts.Target[28:], "%d", &targetPort)
+ if targetPort < 1 {
+ return nil, httpserver.ErrorWithStatus(fmt.Errorf("cannot parse port number from vhost prefix %q", opts.Target), http.StatusBadRequest)
+ }
+ } else {
+ links, err := conn.railsProxy.LinkList(ctxRoot, arvados.ListOptions{
+ Limit: 1,
+ Filters: []arvados.Filter{
+ {"link_class", "=", "published_port"},
+ {"name", "=", opts.Target}}})
+ if err != nil {
+ return nil, fmt.Errorf("lookup failed: %w", err)
+ }
+ if len(links.Items) == 0 {
+ return nil, httpserver.ErrorWithStatus(fmt.Errorf("container web service not found: %q", opts.Target), http.StatusNotFound)
+ }
+ targetUUID = links.Items[0].HeadUUID
+ port, ok := links.Items[0].Properties["port"].(float64)
+ targetPort = int(port)
+ if !ok || targetPort < 1 || targetPort > 65535 {
+ return nil, httpserver.ErrorWithStatus(fmt.Errorf("invalid port in published_port link: %v", links.Items[0].Properties["port"]), http.StatusInternalServerError)
+ }
+ }
+
+ needClearSiteData := false
+	// A redirect might be needed for one or two reasons: (1) to
+	// avoid exposing the token to the container web service via
+	// document.location, or showing it in the browser's location
+	// bar (even when returning an error), and/or (2) to
+ // clear client-side state left over from a different
+ // container that was previously available on the same
+ // dynamically assigned port.
+ //
+ // maybeRedirect() returns (nil, nil) if the given err is nil
+ // and there is no need to redirect. Otherwise, it returns
+ // suitable values for the main function to return: either
+ // (nil, err), or (h, nil) where h implements a redirect.
+ maybeRedirect := func(err error) (http.Handler, error) {
+ if opts.Request.URL.Query().Get("arvados_api_token") == "" && !needClearSiteData {
+ // Redirect not needed
+ return nil, err
+ }
+ return containerHTTPProxyRedirect(needClearSiteData), nil
+ }
+
+ // First we need to fetch the container request (or container)
+ // record as root, so we can check whether the requested port
+ // is marked public in published_ports. This needs to work
+ // even if the request did not provide a token at all.
+ var ctr arvados.Container
+ var isPublic bool
+ if len(targetUUID) == 27 && targetUUID[6:11] == "xvhdp" {
+ // Look up specified container request
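+		// ("xvhdp" is the UUID type infix for container
+		// request records.)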
+ ctrreq, err := conn.railsProxy.ContainerRequestGet(ctxRoot, arvados.GetOptions{
+ UUID: targetUUID,
+ Select: []string{"uuid", "state", "published_ports", "container_uuid"},
+ })
+ if err == nil && ctrreq.PublishedPorts[strconv.Itoa(targetPort)].Access == arvados.PublishedPortAccessPublic {
+ isPublic = true
+ targetUUID = ctrreq.ContainerUUID
+ }
+ } else {
+ // Look up specified container
+ var err error
+ ctr, err = conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{
+ UUID: targetUUID,
+ Select: []string{"uuid", "state", "gateway_address", "published_ports"},
+ })
+ if err == nil && ctr.PublishedPorts[strconv.Itoa(targetPort)].Access == arvados.PublishedPortAccessPublic {
+ isPublic = true
+ }
+ }
+
+ if !isPublic {
+ // Re-fetch the container request record, this time as
+ // the authenticated user instead of root. This lets
+ // us return 404 if the container is not readable by
+ // this user, for example.
+ if len(targetUUID) == 27 && targetUUID[6:11] == "xvhdp" {
+		ctrreq, err := conn.railsProxy.ContainerRequestGet(ctx, arvados.GetOptions{
+ UUID: targetUUID,
+ Select: []string{"uuid", "state", "published_ports", "container_uuid"},
+ })
+ if err != nil {
+ return maybeRedirect(fmt.Errorf("container request lookup error: %w", err))
+ }
+ if ctrreq.ContainerUUID == "" {
+ return maybeRedirect(httpserver.ErrorWithStatus(errors.New("container request does not have an assigned container"), http.StatusBadRequest))
+ }
+ targetUUID = ctrreq.ContainerUUID
+ }
+ var err error
+ ctr, err = conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: targetUUID, Select: []string{"uuid", "state", "gateway_address"}})
+ if err != nil {
+ return maybeRedirect(fmt.Errorf("container lookup failed: %w", err))
+ }
+ user, err := conn.railsProxy.UserGetCurrent(ctx, arvados.GetOptions{})
+ if err != nil {
+ return maybeRedirect(err)
+ }
+ if !user.IsAdmin {
+ // For non-public ports, access is only granted to
+ // admins and the user who submitted all of the
+ // container requests that reference this container.
+ err = conn.checkContainerLoginPermission(ctx, user.UUID, ctr.UUID)
+ if err != nil {
+ return maybeRedirect(err)
+ }
+ }
+ } else if ctr.UUID == "" {
+ // isPublic, but we don't have the container record
+ // yet because the request specified a container
+ // request UUID.
+ var err error
+ ctr, err = conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{UUID: targetUUID, Select: []string{"uuid", "state", "gateway_address"}})
+ if err != nil {
+ return maybeRedirect(fmt.Errorf("container lookup failed: %w", err))
+ }
+ }
+ dial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)
+ if err != nil {
+ return maybeRedirect(fmt.Errorf("cannot find gateway: %w", err))
+ }
+ if arpc != nil {
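+		// The gateway is connected to a different controller
+		// process. Handle any needed redirect here, then
+		// forward the request there, setting NoForward to
+		// prevent forwarding loops.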
+ if h, err := maybeRedirect(nil); h != nil || err != nil {
+ return h, err
+ }
+ opts.NoForward = true
+ return arpc.ContainerHTTPProxy(ctx, opts)
+ }
+
+ // Check for an "arvados_container_uuid" cookie indicating
+ // that the user agent might have client-side state left over
+ // from a different container that was previously available on
+ // this port.
+ for _, cookie := range opts.Request.CookiesNamed("arvados_container_uuid") {
+ if cookie.Value != ctr.UUID {
+ needClearSiteData = true
+ }
+ }
+ // Redirect if needed to clear site data and/or move the token
+ // from the query to a cookie.
+ if h, err := maybeRedirect(nil); h != nil || err != nil {
+ return h, err
+ }
+
+ // Remove arvados_api_token cookie to ensure the http service
+ // in the container does not see it.
+ cookies := opts.Request.Cookies()
+ opts.Request.Header.Del("Cookie")
+ for _, cookie := range cookies {
+ if cookie.Name != "arvados_api_token" {
+ opts.Request.AddCookie(cookie)
+ }
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
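+		// Record which container is being served on this
+		// origin, so a future request can detect stale
+		// client-side state if the port is later reassigned
+		// to a different container.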
+ http.SetCookie(w, &http.Cookie{Name: "arvados_container_uuid", Value: ctr.UUID})
+ gatewayProxy(dial, w, http.Header{
+ "X-Arvados-Container-Gateway-Uuid": {targetUUID},
+ "X-Arvados-Container-Target-Port": {strconv.Itoa(targetPort)},
+ }, nil).ServeHTTP(w, opts.Request)
+ }), nil
+}
+
+// containerHTTPProxyRedirect returns a redirect handler that (1) if
+// there is a token in the query, moves it to a cookie, and (2) if
+// needClearSiteData is true, clears all other client-side state.
+func containerHTTPProxyRedirect(needClearSiteData bool) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ redir := *r.URL
+ query := redir.Query()
+ needTokenCookie := query.Get("arvados_api_token")
+ if needTokenCookie != "" {
+ delete(query, "arvados_api_token")
+ redir.RawQuery = query.Encode()
+ http.SetCookie(w, &http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(needTokenCookie)),
+ Path: "/",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ })
+ }
+ if needClearSiteData {
+ if r.Method != http.MethodHead && r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusGone)
+ return
+ }
+ // We cannot use `Clear-Site-Data: "cookies"`
+ // to clear cookies, because that applies to
+ // all origins in the entire registered
+ // domain. We only want to clear cookies for
+ // this dynamically assigned origin.
+ for _, cookie := range r.Cookies() {
+ if cookie.Name != "arvados_api_token" {
+ cookie.MaxAge = -1
+ cookie.Expires = time.Time{}
+ cookie.Value = ""
+ http.SetCookie(w, cookie)
+ }
+ }
+ // Unlike the "cookies" directive, "cache" and
+ // "storage" clear data for the current origin
+ // only.
+ w.Header().Set("Clear-Site-Data", `"cache", "storage"`)
+ }
+ w.Header().Set("Location", redir.String())
+ w.WriteHeader(http.StatusSeeOther)
+ })
+}
+
// ContainerGatewayTunnel sets up a tunnel enabling us (controller) to
// connect to the caller's (crunch-run's) gateway server.
func (conn *Conn) ContainerGatewayTunnel(ctx context.Context, opts arvados.ContainerGatewayTunnelOptions) (resp arvados.ConnectionResponse, err error) {
diff --git a/lib/controller/localdb/container_gateway_test.go b/lib/controller/localdb/container_gateway_test.go
index 0c58a9192c..7d02190b7d 100644
--- a/lib/controller/localdb/container_gateway_test.go
+++ b/lib/controller/localdb/container_gateway_test.go
@@ -9,6 +9,7 @@ import (
"context"
"crypto/hmac"
"crypto/sha256"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -19,7 +20,10 @@ import (
"os"
"os/exec"
"path/filepath"
+ "strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/lib/controller/router"
@@ -41,16 +45,63 @@ var _ = check.Suite(&ContainerGatewaySuite{})
type ContainerGatewaySuite struct {
localdbSuite
- reqUUID string
- ctrUUID string
- srv *httptest.Server
- gw *crunchrun.Gateway
+ containerServices []*httpserver.Server
+ reqCreateOptions arvados.CreateOptions
+ reqUUID string
+ ctrUUID string
+ srv *httptest.Server
+ gw *crunchrun.Gateway
+ assignedExtPort atomic.Int32
}
-func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
- s.localdbSuite.SetUpTest(c)
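+// testDynamicPortMin/Max bound the external ports assigned by
+// vhostAndTargetForDynamicPort; TearDownTest deletes any
+// container_ports rows in this range.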
+const (
+ testDynamicPortMin = 10000
+ testDynamicPortMax = 20000
+)
+
+func (s *ContainerGatewaySuite) SetUpSuite(c *check.C) {
+ s.localdbSuite.SetUpSuite(c)
+
+ // Set up 10 http servers to play the role of services running
+ // inside a container. (crunchrun.GatewayTargetStub will allow
+ // our crunchrun.Gateway to connect to them directly on
+ // localhost, rather than actually running them inside a
+ // container.)
+ for i := 0; i < 10; i++ {
+ srv := &httpserver.Server{
+ Addr: ":0",
+ Server: http.Server{
+ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ body := fmt.Sprintf("handled %s %s with Host %s", r.Method, r.URL.String(), r.Host)
+ c.Logf("%s", body)
+ w.Write([]byte(body))
+ }),
+ },
+ }
+ srv.Start()
+ s.containerServices = append(s.containerServices, srv)
+ }
+
+ // s.containerServices[0] will be unlisted
+ // s.containerServices[1] will be listed with access=public
+ // s.containerServices[2,...] will be listed with access=private
+ publishedPorts := make(map[string]arvados.RequestPublishedPort)
+ for i, srv := range s.containerServices {
+ access := arvados.PublishedPortAccessPrivate
+ _, port, _ := net.SplitHostPort(srv.Addr)
+ if i == 1 {
+ access = arvados.PublishedPortAccessPublic
+ }
+ if i > 0 {
+ publishedPorts[port] = arvados.RequestPublishedPort{
+ Access: access,
+ Label: "port " + port,
+ }
+ }
+ }
- cr, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{
+ s.reqCreateOptions = arvados.CreateOptions{
Attrs: map[string]interface{}{
"command": []string{"echo", time.Now().Format(time.RFC3339Nano)},
"container_count_max": 1,
@@ -69,7 +120,26 @@ func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
"runtime_constraints": map[string]interface{}{
"vcpus": 1,
"ram": 2,
- }}})
+ },
+ "published_ports": publishedPorts}}
+}
+
+func (s *ContainerGatewaySuite) TearDownSuite(c *check.C) {
+ for _, srv := range s.containerServices {
+ go srv.Close()
+ }
+ s.containerServices = nil
+ s.localdbSuite.TearDownSuite(c)
+}
+
+func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
+
+ s.localdb.cluster.Services.ContainerWebServices.ExternalURL.Host = "*.containers.example.com"
+ s.localdb.cluster.Services.ContainerWebServices.ExternalPortMin = 0
+ s.localdb.cluster.Services.ContainerWebServices.ExternalPortMax = 0
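+	// (Tests that exercise dynamically assigned ports override
+	// these defaults via vhostAndTargetForDynamicPort.)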
+
+ cr, err := s.localdb.ContainerRequestCreate(s.userctx, s.reqCreateOptions)
c.Assert(err, check.IsNil)
s.reqUUID = cr.UUID
s.ctrUUID = cr.ContainerUUID
@@ -78,7 +148,9 @@ func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
fmt.Fprint(h, s.ctrUUID)
authKey := fmt.Sprintf("%x", h.Sum(nil))
- rtr := router.New(s.localdb, router.Config{})
+ rtr := router.New(s.localdb, router.Config{
+ ContainerWebServices: &s.localdb.cluster.Services.ContainerWebServices,
+ })
s.srv = httptest.NewUnstartedServer(httpserver.AddRequestIDs(httpserver.LogRequests(rtr)))
s.srv.StartTLS()
// the test setup doesn't use lib/service so
@@ -86,9 +158,10 @@ func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
// is how we advertise our internal URL and enable
// proxy-to-other-controller mode,
forceInternalURLForTest = &arvados.URL{Scheme: "https", Host: s.srv.Listener.Addr().String()}
+ s.cluster.Services.Controller.InternalURLs[*forceInternalURLForTest] = arvados.ServiceInstance{}
ac := &arvados.Client{
APIHost: s.srv.Listener.Addr().String(),
- AuthToken: arvadostest.Dispatch1Token,
+ AuthToken: arvadostest.SystemRootToken,
Insecure: true,
}
s.gw = &crunchrun.Gateway{
@@ -118,10 +191,22 @@ func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
s.cluster.Containers.ShellAccess.User = true
_, err = s.db.Exec(`update containers set interactive_session_started=$1 where uuid=$2`, false, s.ctrUUID)
c.Check(err, check.IsNil)
+
+ s.assignedExtPort.Store(testDynamicPortMin)
}
func (s *ContainerGatewaySuite) TearDownTest(c *check.C) {
- s.srv.Close()
+ forceProxyForTest = false
+ if s.reqUUID != "" {
+ _, err := s.localdb.ContainerRequestDelete(s.userctx, arvados.DeleteOptions{UUID: s.reqUUID})
+ c.Check(err, check.IsNil)
+ }
+ if s.srv != nil {
+ s.srv.Close()
+ s.srv = nil
+ }
+ _, err := s.db.Exec(`delete from container_ports where external_port >= $1 and external_port <= $2`, testDynamicPortMin, testDynamicPortMax)
+ c.Check(err, check.IsNil)
s.localdbSuite.TearDownTest(c)
}
@@ -226,6 +311,484 @@ func (s *ContainerGatewaySuite) TestDirectTCP(c *check.C) {
}
}
+// Connect to crunch-run container gateway directly, using container
+// UUID.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Direct(c *check.C) {
+ s.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)
+}
+
+// Connect to crunch-run container gateway directly, using container
+// request UUID.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Direct_ContainerRequestUUID(c *check.C) {
+ s.testContainerHTTPProxy(c, s.reqUUID, s.vhostAndTargetForWildcard)
+}
+
+// Connect through a tunnel terminated at this controller process.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Tunnel(c *check.C) {
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)
+}
+
+// Connect through a tunnel terminated at a different controller
+// process.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ProxyTunnel(c *check.C) {
+ forceProxyForTest = true
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_DynamicPort(c *check.C) {
+ s.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForDynamicPort)
+}
+
+func (s *ContainerGatewaySuite) testContainerHTTPProxy(c *check.C, targetUUID string, vhostAndTargetFunc func(*check.C, string, string) (string, string)) {
+ testMethods := []string{"GET", "POST", "PATCH", "OPTIONS", "DELETE"}
+
+ var wg sync.WaitGroup
+ for idx, srv := range s.containerServices {
+ idx, srv := idx, srv
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ method := testMethods[idx%len(testMethods)]
+ _, port, err := net.SplitHostPort(srv.Addr)
+ c.Assert(err, check.IsNil, check.Commentf("%s", srv.Addr))
+ vhost, target := vhostAndTargetFunc(c, targetUUID, port)
+ comment := check.Commentf("srv.Addr %s => proxy vhost %s, target %s", srv.Addr, vhost, target)
+ c.Logf("%s", comment.CheckCommentString())
+ req, err := http.NewRequest(method, "https://"+vhost+"/via-"+s.gw.Address, nil)
+ c.Assert(err, check.IsNil)
+ // Token is already passed to
+ // ContainerHTTPProxy() call in s.userctx, but
+ // we also need to add an auth cookie to the
+ // http request: if the request gets passed
+ // through http (see forceProxyForTest), the
+ // target router will start with a fresh
+ // context and load tokens from the request.
+ req.AddCookie(&http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(arvadostest.ActiveTokenV2)),
+ })
+ handler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{
+ Target: target,
+ Request: req,
+ })
+ c.Assert(err, check.IsNil, comment)
+ rw := httptest.NewRecorder()
+ handler.ServeHTTP(rw, req)
+ resp := rw.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ if cookie := getCookie(resp, "arvados_container_uuid"); c.Check(cookie, check.NotNil) {
+ c.Check(cookie.Value, check.Equals, s.ctrUUID)
+ }
+ body, err := io.ReadAll(resp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(string(body), check.Matches, `handled `+method+` /via-.* with Host \Q`+vhost+`\E`)
+ }()
+ }
+ wg.Wait()
+}
+
+// Return the virtualhost (in the http request) and opts.Target that
+// lib/controller/router.Router will pass to ContainerHTTPProxy() when
+// Services.ContainerWebServices.ExternalURL is a wildcard like
+// "*.containers.example.com".
+func (s *ContainerGatewaySuite) vhostAndTargetForWildcard(c *check.C, targetUUID, targetPort string) (string, string) {
+ return targetUUID + "-" + targetPort + ".containers.example.com", fmt.Sprintf("%s-%s", targetUUID, targetPort)
+}
+
+// Return the virtualhost (in the http request) and opts.Target that
+// lib/controller/router.Router will pass to ContainerHTTPProxy() when
+// Services.ContainerWebServices.ExternalPortMin and
+// Services.ContainerWebServices.ExternalPortMax are positive, and
+// Services.ContainerWebServices.ExternalURL is not a wildcard.
+func (s *ContainerGatewaySuite) vhostAndTargetForDynamicPort(c *check.C, targetUUID, targetPort string) (string, string) {
+ exthost := "containers.example.com"
+ s.localdb.cluster.Services.ContainerWebServices.ExternalURL.Host = exthost
+ s.localdb.cluster.Services.ContainerWebServices.ExternalPortMin = testDynamicPortMin
+ s.localdb.cluster.Services.ContainerWebServices.ExternalPortMax = testDynamicPortMax
+ assignedPort := s.assignedExtPort.Add(1)
+ _, err := s.db.Exec(`insert into container_ports (external_port, container_uuid, container_port) values ($1, $2, $3)`,
+ assignedPort, targetUUID, targetPort)
+ c.Assert(err, check.IsNil)
+ return fmt.Sprintf("%s:%d", exthost, assignedPort), fmt.Sprintf(":%d", assignedPort)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_NoToken_Unlisted(c *check.C) {
+ s.testContainerHTTPProxyError(c, 0, "", s.vhostAndTargetForWildcard, http.StatusUnauthorized)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_NoToken_Private(c *check.C) {
+ s.testContainerHTTPProxyError(c, 2, "", s.vhostAndTargetForWildcard, http.StatusUnauthorized)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_InvalidToken(c *check.C) {
+ s.testContainerHTTPProxyError(c, 0, arvadostest.ActiveTokenV2+"bogus", s.vhostAndTargetForWildcard, http.StatusUnauthorized)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_AnonymousToken_Unlisted(c *check.C) {
+ s.testContainerHTTPProxyError(c, 0, arvadostest.AnonymousToken, s.vhostAndTargetForWildcard, http.StatusNotFound)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_AnonymousToken_Private(c *check.C) {
+ s.testContainerHTTPProxyError(c, 2, arvadostest.AnonymousToken, s.vhostAndTargetForWildcard, http.StatusNotFound)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_CRsDifferentUsers(c *check.C) {
+ rootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)
+ cr, err := s.localdb.ContainerRequestCreate(rootctx, s.reqCreateOptions)
+ defer s.localdb.ContainerRequestDelete(rootctx, arvados.DeleteOptions{UUID: cr.UUID})
+ c.Assert(err, check.IsNil)
+ c.Assert(cr.ContainerUUID, check.Equals, s.ctrUUID)
+ s.testContainerHTTPProxyError(c, 0, arvadostest.ActiveTokenV2, s.vhostAndTargetForWildcard, http.StatusForbidden)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_ContainerNotReadable(c *check.C) {
+ s.testContainerHTTPProxyError(c, 0, arvadostest.SpectatorToken, s.vhostAndTargetForWildcard, http.StatusNotFound)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxyError_DynamicPort(c *check.C) {
+ s.testContainerHTTPProxyError(c, 0, arvadostest.SpectatorToken, s.vhostAndTargetForDynamicPort, http.StatusNotFound)
+}
+
+func (s *ContainerGatewaySuite) testContainerHTTPProxyError(c *check.C, svcIdx int, token string, vhostAndTargetFunc func(*check.C, string, string) (string, string), expectCode int) {
+ _, svcPort, err := net.SplitHostPort(s.containerServices[svcIdx].Addr)
+ c.Assert(err, check.IsNil)
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, token)
+ vhost, target := vhostAndTargetFunc(c, s.ctrUUID, svcPort)
+ req, err := http.NewRequest("GET", "https://"+vhost+"/via-"+s.gw.Address, nil)
+ c.Assert(err, check.IsNil)
+ _, err = s.localdb.ContainerHTTPProxy(ctx, arvados.ContainerHTTPProxyOptions{
+ Target: target,
+ Request: req,
+ })
+ c.Check(err, check.NotNil)
+ var se httpserver.HTTPStatusError
+ c.Assert(errors.As(err, &se), check.Equals, true)
+ c.Check(se.HTTPStatus(), check.Equals, expectCode)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_CookieAuth(c *check.C) {
+ s.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, "GET", "/foobar")
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_CookieAuth_POST(c *check.C) {
+ s.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, "POST", "/foobar")
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth(c *check.C) {
+ s.testContainerHTTPProxyUsingCurl(c, 0, "", "GET", "/foobar?arvados_api_token="+arvadostest.ActiveTokenV2)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_Tunnel(c *check.C) {
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.testContainerHTTPProxyUsingCurl(c, 0, "", "GET", "/foobar?arvados_api_token="+arvadostest.ActiveTokenV2)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_ProxyTunnel(c *check.C) {
+ forceProxyForTest = true
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.testContainerHTTPProxyUsingCurl(c, 0, "", "GET", "/foobar?arvados_api_token="+arvadostest.ActiveTokenV2)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Anonymous(c *check.C) {
+ s.testContainerHTTPProxyUsingCurl(c, 1, "", "GET", "/foobar")
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Anonymous_OPTIONS(c *check.C) {
+ s.testContainerHTTPProxyUsingCurl(c, 1, "", "OPTIONS", "/foobar")
+}
+
+// Check other query parameters are preserved in the
+// redirect-with-cookie.
+//
+// Note the original request has "?baz&baz&..." and this changes to
+// "?baz=&baz=&..." in the redirect location. We trust the target
+// service won't be sensitive to this difference.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_PreserveQuery(c *check.C) {
+ body := s.testContainerHTTPProxyUsingCurl(c, 0, "", "GET", "/foobar?baz&baz&arvados_api_token="+arvadostest.ActiveTokenV2+"&waz=quux")
+ c.Check(body, check.Matches, `handled GET /foobar\?baz=&baz=&waz=quux with Host `+s.ctrUUID+`.*`)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Patch(c *check.C) {
+ body := s.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, "PATCH", "/foobar")
+ c.Check(body, check.Matches, `handled PATCH /foobar with Host `+s.ctrUUID+`.*`)
+}
+
+func (s *ContainerGatewaySuite) testContainerHTTPProxyUsingCurl(c *check.C, svcIdx int, cookietoken, method, path string) string {
+ _, svcPort, err := net.SplitHostPort(s.containerServices[svcIdx].Addr)
+ c.Assert(err, check.IsNil)
+
+ vhost, err := url.Parse(s.srv.URL)
+ c.Assert(err, check.IsNil)
+ controllerHost := vhost.Host
+ vhost.Host = s.ctrUUID + "-" + svcPort + ".containers.example.com"
+ target, err := vhost.Parse(path)
+ c.Assert(err, check.IsNil)
+
+ tempdir := c.MkDir()
+ cmd := exec.Command("curl")
+ if cookietoken != "" {
+ cmd.Args = append(cmd.Args, "--cookie", "arvados_api_token="+string(auth.EncodeTokenCookie([]byte(cookietoken))))
+ } else {
+ cmd.Args = append(cmd.Args, "--cookie-jar", filepath.Join(tempdir, "cookies.txt"))
+ }
+ if method != "GET" {
+ cmd.Args = append(cmd.Args, "--request", method)
+ }
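+	// "--connect-to" makes curl dial the test controller while
+	// addressing the request to the container web service's
+	// virtual host.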
+ cmd.Args = append(cmd.Args, "--silent", "--insecure", "--location", "--connect-to", vhost.Hostname()+":443:"+controllerHost, target.String())
+ cmd.Dir = tempdir
+ stdout, err := cmd.StdoutPipe()
+ c.Assert(err, check.Equals, nil)
+ cmd.Stderr = cmd.Stdout
+ c.Logf("cmd: %v", cmd.Args)
+	c.Assert(cmd.Start(), check.IsNil)
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, stdout)
+ c.Check(err, check.Equals, nil)
+ err = cmd.Wait()
+ c.Check(err, check.Equals, nil)
+ c.Check(buf.String(), check.Matches, `handled `+method+` /.*`)
+ return buf.String()
+}
+
+// See testContainerHTTPProxy_ReusedPort(). These integration tests
+// use curl to check the redirect-with-cookie behavior when a request
+// arrives on a dynamically-assigned port and it has cookies
+// indicating that the client has previously accessed a different
+// container's web services on this same port, i.e., it is susceptible
+// to leaking cache/cookie/localstorage data from the previous
+// container's service to the current container's service.
+type testReusedPortCurl struct {
+ svcIdx int
+ method string
+ querytoken string
+ cookietoken string
+}
+
+// Reject non-GET requests. In principle we could 303 them, but in
+// the most obvious case (an AJAX request initiated by the previous
+// container's web application), delivering the request to the new
+// container would surely not be the intended behavior.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_Curl_RejectPOST(c *check.C) {
+ log := s.testContainerHTTPProxy_ReusedPort_Curl(c, testReusedPortCurl{
+ method: "POST",
+ cookietoken: arvadostest.ActiveTokenV2,
+ })
+ c.Check(log, check.Matches, `(?ms).*410 Gone.*`)
+ c.Check(log, check.Not(check.Matches), `(?ms).*handled POST.*`)
+ c.Check(log, check.Not(check.Matches), `(?ms).*Set-Cookie: .*`)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_Curl_WithoutToken_ClearApplicationCookie(c *check.C) {
+ log := s.testContainerHTTPProxy_ReusedPort_Curl(c, testReusedPortCurl{
+ svcIdx: 1,
+ method: "GET",
+ cookietoken: arvadostest.ActiveTokenV2,
+ })
+ c.Check(log, check.Matches, `(?ms).*HTTP/1.1 303 See Other.*`)
+ c.Check(log, check.Matches, `(?ms).*Set-Cookie: stale_cookie=.*`)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_Curl_WithToken_ClearApplicationCookie(c *check.C) {
+ log := s.testContainerHTTPProxy_ReusedPort_Curl(c, testReusedPortCurl{
+ method: "GET",
+ querytoken: arvadostest.ActiveTokenV2,
+ })
+ c.Check(log, check.Matches, `(?ms).*HTTP/1.1 303 See Other.*`)
+ c.Check(log, check.Matches, `(?ms).*Location: /foobar\r\n.*`)
+ c.Check(log, check.Matches, `(?ms).*Set-Cookie: stale_cookie=.*`)
+ c.Check(log, check.Matches, `(?ms).*handled GET.*`)
+ c.Check(log, check.Not(check.Matches), `(?ms).*handled GET.*handled GET.*`)
+}
+
+func (s *ContainerGatewaySuite) testContainerHTTPProxy_ReusedPort_Curl(c *check.C, t testReusedPortCurl) string {
+ _, svcPort, err := net.SplitHostPort(s.containerServices[t.svcIdx].Addr)
+ c.Assert(err, check.IsNil)
+
+ srvurl, err := url.Parse(s.srv.URL)
+ c.Assert(err, check.IsNil)
+ controllerHost := srvurl.Host
+
+ vhost, _ := s.vhostAndTargetForDynamicPort(c, s.ctrUUID, svcPort)
+ requrl := url.URL{
+ Scheme: "https",
+ Host: vhost,
+ Path: "/foobar",
+ }
+ if t.querytoken != "" {
+ requrl.RawQuery = "arvados_api_token=" + t.querytoken
+ }
+
+ // Initialize cookie jar.
+ //
+ // We can't use "--cookie" arguments to set individual cookies
+ // here, because curl doesn't clear/replace those when
+ // instructed by Set-Cookie headers in redirect responses.
+ tempdir := c.MkDir()
+ cookiejar := filepath.Join(tempdir, "cookies.txt")
+ c.Assert(ioutil.WriteFile(cookiejar, []byte(`
+containers.example.com FALSE / FALSE 0 arvados_container_uuid zzzzz-dz642-compltcontainer
+containers.example.com FALSE / FALSE 0 stale_cookie abcdefghij
+`), 0666), check.IsNil)
+
+ cmd := exec.Command("curl", "--cookie-jar", cookiejar, "--cookie", cookiejar)
+ if t.cookietoken != "" {
+ cmd.Args = append(cmd.Args, "--cookie", "arvados_api_token="+string(auth.EncodeTokenCookie([]byte(t.cookietoken))))
+ }
+ if t.method != "GET" {
+ cmd.Args = append(cmd.Args, "--request", t.method)
+ }
+ cmd.Args = append(cmd.Args, "--verbose", "--no-progress-meter", "--insecure", "--location", "--connect-to", requrl.Host+":"+controllerHost, requrl.String())
+ cmd.Dir = tempdir
+ c.Logf("cmd: %v", cmd.Args)
+ buf, _ := cmd.CombinedOutput()
+
+ return string(buf)
+}
+
+// Unit tests for clear-cookies-and-redirect behavior when the client
+// still has active cookies (and possibly client-side cache) from a
+// different container that used to be served on the same
+// dynamically-assigned port.
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_QueryToken(c *check.C) {
+ s.testContainerHTTPProxy_ReusedPort(c, arvadostest.ActiveTokenV2, "")
+}
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_CookieToken(c *check.C) {
+ s.testContainerHTTPProxy_ReusedPort(c, "", arvadostest.ActiveTokenV2)
+}
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_NoToken(c *check.C) {
+ s.testContainerHTTPProxy_ReusedPort(c, "", "")
+}
+func (s *ContainerGatewaySuite) testContainerHTTPProxy_ReusedPort(c *check.C, querytoken, cookietoken string) {
+ srv := s.containerServices[0]
+ method := "GET"
+ _, port, err := net.SplitHostPort(srv.Addr)
+ c.Assert(err, check.IsNil, check.Commentf("%s", srv.Addr))
+ vhost, target := s.vhostAndTargetForDynamicPort(c, s.ctrUUID, port)
+
+ var tokenCookie *http.Cookie
+ if cookietoken != "" {
+ tokenCookie = &http.Cookie{
+ Name: "arvados_api_token",
+ Value: string(auth.EncodeTokenCookie([]byte(cookietoken))),
+ }
+ }
+
+ initialURL := "https://" + vhost + "/via-" + s.gw.Address + "/preserve-path?preserve-param=preserve-value"
+ if querytoken != "" {
+ initialURL += "&arvados_api_token=" + querytoken
+ }
+ req, err := http.NewRequest(method, initialURL, nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Add("Cookie", "arvados_container_uuid=zzzzz-dz642-compltcontainer")
+ req.Header.Add("Cookie", "stale_cookie=abcdefghij")
+ if tokenCookie != nil {
+ req.Header.Add("Cookie", tokenCookie.String())
+ }
+ handler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{
+ Target: target,
+ Request: req,
+ })
+ c.Assert(err, check.IsNil)
+ rw := httptest.NewRecorder()
+ handler.ServeHTTP(rw, req)
+ resp := rw.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusSeeOther)
+ c.Logf("Received Location: %s", resp.Header.Get("Location"))
+ c.Logf("Received cookies: %v", resp.Cookies())
+ newTokenCookie := getCookie(resp, "arvados_api_token")
+ if querytoken != "" {
+ if c.Check(newTokenCookie, check.NotNil) {
+ c.Check(newTokenCookie.Expires.IsZero(), check.Equals, true)
+ }
+ }
+ if newTokenCookie != nil {
+ tokenCookie = newTokenCookie
+ }
+ if staleCookie := getCookie(resp, "stale_cookie"); c.Check(staleCookie, check.NotNil) {
+ c.Check(staleCookie.Expires.Before(time.Now()), check.Equals, true)
+ c.Check(staleCookie.Value, check.Equals, "")
+ }
+ if ctrCookie := getCookie(resp, "arvados_container_uuid"); c.Check(ctrCookie, check.NotNil) {
+ c.Check(ctrCookie.Expires.Before(time.Now()), check.Equals, true)
+ c.Check(ctrCookie.Value, check.Equals, "")
+ }
+ c.Check(resp.Header.Get("Clear-Site-Data"), check.Equals, `"cache", "storage"`)
+
+ req, err = http.NewRequest(method, resp.Header.Get("Location"), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Add("Cookie", tokenCookie.String())
+ handler, err = s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{
+ Target: target,
+ Request: req,
+ })
+ c.Assert(err, check.IsNil)
+ rw = httptest.NewRecorder()
+ handler.ServeHTTP(rw, req)
+ resp = rw.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ if ctrCookie := getCookie(resp, "arvados_container_uuid"); c.Check(ctrCookie, check.NotNil) {
+ c.Check(ctrCookie.Expires.IsZero(), check.Equals, true)
+ c.Check(ctrCookie.Value, check.Equals, s.ctrUUID)
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ c.Check(string(body), check.Matches, `handled GET /via-localhost:\d+/preserve-path\?preserve-param=preserve-value with Host containers.example.com:\d+`)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_PublishedPortByName_ProxyTunnel(c *check.C) {
+ forceProxyForTest = true
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.testContainerHTTPProxy_PublishedPortByName(c)
+}
+
+func (s *ContainerGatewaySuite) TestContainerHTTPProxy_PublishedPortByName(c *check.C) {
+ s.testContainerHTTPProxy_PublishedPortByName(c)
+}
+
+func (s *ContainerGatewaySuite) testContainerHTTPProxy_PublishedPortByName(c *check.C) {
+ srv := s.containerServices[1]
+ _, port, _ := net.SplitHostPort(srv.Addr)
+ portnum, err := strconv.Atoi(port)
+ c.Assert(err, check.IsNil)
+ namelink, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "link_class": "published_port",
+ "name": "warthogfacedbuffoon",
+ "head_uuid": s.reqUUID,
+ "properties": map[string]interface{}{
+ "port": portnum}}})
+ c.Assert(err, check.IsNil)
+ defer s.localdb.LinkDelete(s.userctx, arvados.DeleteOptions{UUID: namelink.UUID})
+
+ vhost := namelink.Name + ".containers.example.com"
+ req, err := http.NewRequest("METHOD", "https://"+vhost+"/path", nil)
+ c.Assert(err, check.IsNil)
+ // Token is already passed to ContainerHTTPProxy() call in
+ // s.userctx, but we also need to add an auth cookie to the
+ // http request: if the request gets passed through http (see
+ // forceProxyForTest), the target router will start with a
+ // fresh context and load tokens from the request.
+ req.AddCookie(&http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(arvadostest.ActiveTokenV2)),
+ })
+ handler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{
+ Target: namelink.Name,
+ Request: req,
+ })
+ c.Assert(err, check.IsNil)
+ rw := httptest.NewRecorder()
+ handler.ServeHTTP(rw, req)
+ resp := rw.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ body, err := io.ReadAll(resp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(string(body), check.Matches, `handled METHOD /path with Host \Q`+vhost+`\E`)
+}
+
func (s *ContainerGatewaySuite) setupLogCollection(c *check.C) {
files := map[string]string{
"stderr.txt": "hello world\n",
@@ -285,8 +848,6 @@ func (s *ContainerGatewaySuite) saveLogAndCloseGateway(c *check.C) {
func (s *ContainerGatewaySuite) TestContainerRequestLogViaTunnel(c *check.C) {
forceProxyForTest = true
- defer func() { forceProxyForTest = false }()
-
s.gw = s.setupGatewayWithTunnel(c)
s.setupLogCollection(c)
@@ -295,9 +856,6 @@ func (s *ContainerGatewaySuite) TestContainerRequestLogViaTunnel(c *check.C) {
if broken {
delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
- } else {
- s.cluster.Services.Controller.InternalURLs[*forceInternalURLForTest] = arvados.ServiceInstance{}
- defer delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
}
r, err := http.NewRequestWithContext(s.userctx, "GET", "https://controller.example/arvados/v1/container_requests/"+s.reqUUID+"/log/"+s.ctrUUID+"/stderr.txt", nil)
@@ -557,15 +1115,15 @@ func (s *ContainerGatewaySuite) TestConnect(c *check.C) {
c.Check(ctr.InteractiveSessionStarted, check.Equals, true)
}
-func (s *ContainerGatewaySuite) TestConnectFail(c *check.C) {
- c.Log("trying with no token")
+func (s *ContainerGatewaySuite) TestConnectFail_NoToken(c *check.C) {
ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, "")
_, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Check(err, check.ErrorMatches, `.* 401 .*`)
+}
- c.Log("trying with anonymous token")
- ctx = ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AnonymousToken)
- _, err = s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
+func (s *ContainerGatewaySuite) TestConnectFail_AnonymousToken(c *check.C) {
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AnonymousToken)
+ _, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Check(err, check.ErrorMatches, `.* 404 .*`)
}
@@ -596,17 +1154,12 @@ func (s *ContainerGatewaySuite) TestCreateTunnel(c *check.C) {
func (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyOK(c *check.C) {
forceProxyForTest = true
- defer func() { forceProxyForTest = false }()
- s.cluster.Services.Controller.InternalURLs[*forceInternalURLForTest] = arvados.ServiceInstance{}
- defer delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
s.testConnectThroughTunnel(c, "")
}
func (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyError(c *check.C) {
forceProxyForTest = true
- defer func() { forceProxyForTest = false }()
- // forceInternalURLForTest will not be usable because it isn't
- // listed in s.cluster.Services.Controller.InternalURLs
+ delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
s.testConnectThroughTunnel(c, `.*tunnel endpoint is invalid.*`)
}
@@ -704,3 +1257,12 @@ func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectError
c.Check(err, check.IsNil)
c.Check(ctr.InteractiveSessionStarted, check.Equals, true)
}
+
+func getCookie(resp *http.Response, name string) *http.Cookie {
+ for _, cookie := range resp.Cookies() {
+ if cookie.Name == name {
+ return cookie
+ }
+ }
+ return nil
+}
diff --git a/lib/controller/localdb/container_request.go b/lib/controller/localdb/container_request.go
index 0234ee8fa6..fd244d6fb9 100644
--- a/lib/controller/localdb/container_request.go
+++ b/lib/controller/localdb/container_request.go
@@ -70,7 +70,7 @@ func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, opts arva
return ret, err
}
if cr.ContainerUUID == "" {
- ret.SchedulingStatus = "no container assigned"
+ ret.SchedulingStatus = "No container is assigned."
return ret, nil
}
// We use admin credentials to get the container record so we
@@ -142,6 +142,6 @@ func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, opts arva
// Scheduling status does not apply.
return ret, nil
}
- ret.SchedulingStatus = "waiting for dispatch"
+ ret.SchedulingStatus = "Waiting in queue."
return ret, nil
}
diff --git a/lib/controller/localdb/docker_test.go b/lib/controller/localdb/docker_test.go
index 90c98b7d57..66bcad00f2 100644
--- a/lib/controller/localdb/docker_test.go
+++ b/lib/controller/localdb/docker_test.go
@@ -13,29 +13,21 @@ import (
check "gopkg.in/check.v1"
)
-type pgproxy struct {
+type tcpProxy struct {
net.Listener
}
-// newPgProxy sets up a TCP proxy, listening on all interfaces, that
-// forwards all connections to the cluster's PostgreSQL server. This
-// allows the caller to run a docker container that can connect to a
-// postgresql instance that listens on the test host's loopback
-// interface.
+// newTCPProxy sets up a TCP proxy that forwards all connections to the
+// given host and port. This allows the caller to run a docker container that
+// can connect to a cluster service on the test host's loopback interface.
//
-// Caller is responsible for calling Close() on the returned pgproxy.
-func newPgProxy(c *check.C, cluster *arvados.Cluster) *pgproxy {
- host := cluster.PostgreSQL.Connection["host"]
- if host == "" {
- host = "localhost"
- }
- port := cluster.PostgreSQL.Connection["port"]
- if port == "" {
- port = "5432"
- }
+// listenAddr is the IP address of the interface to listen on. Pass an empty
+// string to listen on all interfaces.
+//
+// Caller is responsible for calling Close() on the returned tcpProxy.
+func newTCPProxy(c *check.C, listenAddr, host, port string) *tcpProxy {
target := net.JoinHostPort(host, port)
-
- ln, err := net.Listen("tcp", ":")
+ ln, err := net.Listen("tcp", net.JoinHostPort(listenAddr, ""))
c.Assert(err, check.IsNil)
go func() {
for {
@@ -45,7 +37,7 @@ func newPgProxy(c *check.C, cluster *arvados.Cluster) *pgproxy {
}
c.Assert(err, check.IsNil)
go func() {
- c.Logf("pgproxy accepted connection from %s", downstream.RemoteAddr().String())
+ c.Logf("tcpProxy accepted connection from %s", downstream.RemoteAddr().String())
defer downstream.Close()
upstream, err := net.Dial("tcp", target)
if err != nil {
@@ -58,11 +50,36 @@ func newPgProxy(c *check.C, cluster *arvados.Cluster) *pgproxy {
}()
}
}()
- c.Logf("pgproxy listening at %s", ln.Addr().String())
- return &pgproxy{Listener: ln}
+ c.Logf("tcpProxy listening at %s", ln.Addr().String())
+ return &tcpProxy{Listener: ln}
}
-func (proxy *pgproxy) Port() string {
+func (proxy *tcpProxy) Port() string {
_, port, _ := net.SplitHostPort(proxy.Addr().String())
return port
}
+
+// newPgProxy sets up a tcpProxy for the cluster's PostgreSQL database.
+func newPgProxy(c *check.C, cluster *arvados.Cluster, listenAddr string) *tcpProxy {
+ host := cluster.PostgreSQL.Connection["host"]
+ if host == "" {
+ host = "localhost"
+ }
+ port := cluster.PostgreSQL.Connection["port"]
+ if port == "" {
+ port = "5432"
+ }
+ return newTCPProxy(c, listenAddr, host, port)
+}
+
+// newInternalProxy sets up a tcpProxy for an InternalURL of the given service.
+func newInternalProxy(c *check.C, service arvados.Service, listenAddr string) *tcpProxy {
+	for intURL := range service.InternalURLs {
+ host, port, err := net.SplitHostPort(intURL.Host)
+ if err == nil && port != "" {
+ return newTCPProxy(c, listenAddr, host, port)
+ }
+ }
+ c.Fatal("no valid InternalURLs found for service")
+ return nil
+}
diff --git a/lib/controller/localdb/log_activity.go b/lib/controller/localdb/log_activity.go
index 9c9660aec4..9f9674e08b 100644
--- a/lib/controller/localdb/log_activity.go
+++ b/lib/controller/localdb/log_activity.go
@@ -13,10 +13,15 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
)
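+// loggedLogActivityDisabled remembers that the "disabled by config"
+// message has already been logged once, so it is not repeated for
+// every request.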
+var loggedLogActivityDisabled = false
+
func (conn *Conn) logActivity(ctx context.Context) {
p := conn.cluster.Users.ActivityLoggingPeriod.Duration()
if p < 1 {
- ctxlog.FromContext(ctx).Debug("logActivity disabled by config")
+ if !loggedLogActivityDisabled {
+ ctxlog.FromContext(ctx).Debug("logActivity disabled by config")
+ loggedLogActivityDisabled = true
+ }
return
}
user, _, err := ctrlctx.CurrentAuth(ctx)
diff --git a/lib/controller/localdb/login_docker_test.go b/lib/controller/localdb/login_docker_test.go
new file mode 100644
index 0000000000..3baa86b603
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test.go
@@ -0,0 +1,298 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LoginDockerSuite{})
+
+// LoginDockerSuite is an integration test of the controller's different
+// Login methods. Each test creates a different Login configuration and runs
+// controller in a Docker container with it. It runs other Docker containers
+// for supporting services.
+type LoginDockerSuite struct {
+ localdbSuite
+ tmpdir string
+ netName string
+ netAddr string
+ pgProxy *tcpProxy
+ railsProxy *tcpProxy
+}
+
+func (s *LoginDockerSuite) setUpDockerNetwork() (string, error) {
+ netName := "arvados-net-" + path.Base(path.Dir(s.tmpdir))
+ cmd := exec.Command("docker", "network", "create", netName)
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+ return netName, nil
+}
+
+// Run cmd and read stdout looking for an IP address on a line by itself.
+// Return the last one found.
+func (s *LoginDockerSuite) ipFromCmd(cmd *exec.Cmd) (string, error) {
+ cmd.Stderr = os.Stderr
+ out, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+ lines := bytes.Split(out, []byte{'\n'})
+ slices.Reverse(lines)
+ for _, line := range lines {
+ if ip := net.ParseIP(string(line)); ip != nil {
+ return ip.String(), nil
+ }
+ }
+ return "", fmt.Errorf("no IP address found in the output of %v", cmd)
+}
+
+// SetUpSuite creates a Docker network, starts an openldap server in it, and
+// creates user account fixtures in LDAP.
+// The LDAP server used to be shared by multiple tests. It isn't currently,
+// but there are pros and cons to starting it here vs. in each individual
+// test, so it stays here for now.
+func (s *LoginDockerSuite) SetUpSuite(c *check.C) {
+ s.localdbSuite.SetUpSuite(c)
+ s.tmpdir = c.MkDir()
+ var err error
+ s.netName, err = s.setUpDockerNetwork()
+ c.Assert(err, check.IsNil)
+ s.netAddr, err = s.ipFromCmd(exec.Command("docker", "network", "inspect",
+ "--format", "{{(index .IPAM.Config 0).Gateway}}", s.netName))
+ c.Assert(err, check.IsNil)
+ setup := exec.Command("login_docker_test/setup_suite.sh", s.netName, s.tmpdir)
+ setup.Stderr = os.Stderr
+ err = setup.Run()
+ c.Assert(err, check.IsNil)
+}
+
+// TearDownSuite stops all containers running on the Docker network we set up,
+// then deletes the network itself.
+func (s *LoginDockerSuite) TearDownSuite(c *check.C) {
+ if s.netName != "" {
+ cmd := exec.Command("login_docker_test/teardown_suite.sh", s.netName)
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ c.Check(err, check.IsNil)
+ }
+ s.localdbSuite.TearDownSuite(c)
+}
+
+// Create a test cluster configuration in the test temporary directory.
+// Update it to use the current PostgreSQL and RailsAPI proxies.
+func (s *LoginDockerSuite) setUpConfig(c *check.C) {
+ src, err := os.Open(os.Getenv("ARVADOS_CONFIG"))
+ c.Assert(err, check.IsNil)
+ defer src.Close()
+ dst, err := os.Create(path.Join(s.tmpdir, "arvados.yml"))
+ c.Assert(err, check.IsNil)
+ _, err = io.Copy(dst, src)
+ closeErr := dst.Close()
+ c.Assert(err, check.IsNil)
+ c.Assert(closeErr, check.IsNil)
+
+ pgconn := map[string]interface{}{
+ "host": s.netAddr,
+ "port": s.pgProxy.Port(),
+ }
+ err = s.updateConfig(".Clusters.zzzzz.PostgreSQL.Connection |= (. * $arg)", pgconn)
+ c.Assert(err, check.IsNil)
+ intVal := make(map[string]string)
+ intURLs := make(map[string]interface{})
+ railsURL := "https://" + net.JoinHostPort(s.netAddr, s.railsProxy.Port())
+ intURLs[railsURL] = intVal
+ err = s.updateConfig(".Clusters.zzzzz.Services.RailsAPI.InternalURLs = $arg", intURLs)
+ c.Assert(err, check.IsNil)
+ intURLs = make(map[string]interface{})
+ intURLs["http://0.0.0.0:80"] = intVal
+ err = s.updateConfig(".Clusters.zzzzz.Services.Controller.InternalURLs = $arg", intURLs)
+ c.Assert(err, check.IsNil)
+}
+
+// Update the test cluster configuration with the given yq expression.
+// The expression can use `$arg` to refer to the object passed in as `arg`.
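+// For example, enableLogin merges a new Login section into the config
+// with:
+//
+//	s.updateConfig(".Clusters.zzzzz.Login |= (. * $arg)", login)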
+func (s *LoginDockerSuite) updateConfig(expr string, arg map[string]interface{}) error {
+ jsonArg, err := json.Marshal(arg)
+ if err != nil {
+ return err
+ }
+ cmd := exec.Command("yq", "-yi",
+ "--argjson", "arg", string(jsonArg),
+ expr, path.Join(s.tmpdir, "arvados.yml"))
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
+
+// Update the test cluster configuration to use the named login method.
+func (s *LoginDockerSuite) enableLogin(key string) error {
+ login := make(map[string]interface{})
+ login["Test"] = map[string]bool{"Enable": false}
+ login[key] = map[string]bool{"Enable": true}
+ return s.updateConfig(".Clusters.zzzzz.Login |= (. * $arg)", login)
+}
+
+// SetUpTest does all the common preparation for a controller test container:
+// it creates TCP proxies for PostgreSQL and RailsAPI on the test host, then
+// writes a new Arvados cluster configuration pointed at those for servers to
+// use.
+func (s *LoginDockerSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
+ s.pgProxy = newPgProxy(c, s.cluster, s.netAddr)
+ s.railsProxy = newInternalProxy(c, s.cluster.Services.RailsAPI, s.netAddr)
+ s.setUpConfig(c)
+}
+
+// TearDownTest looks for the `controller.cid` file created when we start the
+// test container. If found, it stops that container and deletes the file.
+// Then it closes the TCP proxies created by SetUpTest.
+func (s *LoginDockerSuite) TearDownTest(c *check.C) {
+ cidPath := path.Join(s.tmpdir, "controller.cid")
+ if cid, err := os.ReadFile(cidPath); err == nil {
+ cmd := exec.Command("docker", "stop", strings.TrimSpace(string(cid)))
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ c.Check(err, check.IsNil)
+ }
+ if err := os.Remove(cidPath); err != nil {
+ c.Check(os.IsNotExist(err), check.Equals, true)
+ }
+ s.railsProxy.Close()
+ s.pgProxy.Close()
+ s.localdbSuite.TearDownTest(c)
+}
+
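+// startController launches a controller container on the test Docker
+// network and returns its base URL. Additional arguments are passed
+// through to `docker run`, e.g., to mount a /setup.sh script.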
+func (s *LoginDockerSuite) startController(args ...string) (*url.URL, error) {
+ args = append([]string{s.netName, s.tmpdir}, args...)
+ cmd := exec.Command("login_docker_test/start_controller_container.sh", args...)
+ ip, err := s.ipFromCmd(cmd)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: ip,
+ }, nil
+}
+
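+// parseResponse decodes a successful JSON response into body, or
+// assembles an error from the response's "errors" field when the
+// status code indicates failure.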
+func (s *LoginDockerSuite) parseResponse(resp *http.Response, body any) error {
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode < 400 {
+ return json.Unmarshal(respBody, body)
+ }
+ var errResp struct {
+ Errors []string
+ }
+ err = json.Unmarshal(respBody, &errResp)
+ if err != nil {
+ return fmt.Errorf("%s with malformed JSON response: %w", resp.Status, err)
+ } else if len(errResp.Errors) == 0 {
+ return fmt.Errorf("%s with no Errors in response", resp.Status)
+ } else {
+ return fmt.Errorf("%s: %s", resp.Status, strings.Join(errResp.Errors, ":"))
+ }
+}
+
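+// authenticate requests a new Arvados API token from the test
+// controller's /arvados/v1/users/authenticate endpoint using the
+// given username and password.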
+func (s *LoginDockerSuite) authenticate(server *url.URL, username, password string) (*arvados.APIClientAuthorization, error) {
+ reqURL := server.JoinPath("/arvados/v1/users/authenticate").String()
+ reqValues := url.Values{
+ "username": {username},
+ "password": {password},
+ }
+ resp, err := http.PostForm(reqURL, reqValues)
+ if err != nil {
+ return nil, err
+ }
+ token := &arvados.APIClientAuthorization{}
+ err = s.parseResponse(resp, token)
+ return token, err
+}
+
+func (s *LoginDockerSuite) getCurrentUser(server *url.URL, token string) (*arvados.User, error) {
+ reqURL := server.JoinPath("/arvados/v1/users/current").String()
+ req, err := http.NewRequest("GET", reqURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("Authorization", "Bearer "+token)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ user := &arvados.User{}
+ err = s.parseResponse(resp, user)
+ return user, err
+}
+
+func (s *LoginDockerSuite) TestLoginPAM(c *check.C) {
+ err := s.enableLogin("PAM")
+ c.Assert(err, check.IsNil)
+ setupPath, err := filepath.Abs("login_docker_test/setup_pam_test.sh")
+ c.Assert(err, check.IsNil)
+ arvURL, err := s.startController("-v", setupPath+":/setup.sh:ro")
+ c.Assert(err, check.IsNil)
+
+ _, err = s.authenticate(arvURL, "foo-bar", "nosecret")
+ c.Check(err, check.ErrorMatches,
+ `401 Unauthorized: PAM: Authentication failure \(with username "foo-bar" and password\)`)
+
+ _, err = s.authenticate(arvURL, "expired", "secret")
+ c.Check(err, check.ErrorMatches,
+ `401 Unauthorized: PAM: Authentication failure; "Your account has expired; please contact your system administrator\."`)
+
+ aca, err := s.authenticate(arvURL, "foo-bar", "secret")
+ if c.Check(err, check.IsNil) {
+ user, err := s.getCurrentUser(arvURL, aca.TokenV2())
+ if c.Check(err, check.IsNil) {
+ // Check PAMDefaultEmailDomain was propagated as expected
+ c.Check(user.Email, check.Equals, "foo-bar@example.com")
+ }
+ }
+}
+
+func (s *LoginDockerSuite) TestLoginLDAPBuiltin(c *check.C) {
+ err := s.enableLogin("LDAP")
+ c.Assert(err, check.IsNil)
+ arvURL, err := s.startController()
+ c.Assert(err, check.IsNil)
+
+ _, err = s.authenticate(arvURL, "foo-bar", "nosecret")
+ c.Check(err, check.ErrorMatches,
+ `401 Unauthorized: LDAP: Authentication failure \(with username "foo-bar" and password\)`)
+
+ aca, err := s.authenticate(arvURL, "foo-bar", "secret")
+ if c.Check(err, check.IsNil) {
+ user, err := s.getCurrentUser(arvURL, aca.TokenV2())
+ if c.Check(err, check.IsNil) {
+ // User fields come from LDAP attributes
+ c.Check(user.FirstName, check.Equals, "Foo")
+ c.Check(user.LastName, check.Equals, "Bar")
+ // "-" character removed by RailsAPI
+ c.Check(user.Username, check.Equals, "foobar")
+ c.Check(user.Email, check.Equals, "foo-bar-baz@example.com")
+ }
+ }
+}
diff --git a/lib/controller/localdb/login_docker_test/add_example_user.ldif b/lib/controller/localdb/login_docker_test/add_example_user.ldif
new file mode 100644
index 0000000000..6e94e87083
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/add_example_user.ldif
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+dn: cn=bar,dc=example,dc=org
+objectClass: posixGroup
+objectClass: top
+cn: bar
+gidNumber: 11111
+description: "Example group 'bar'"
+
+dn: uid=foo-bar,dc=example,dc=org
+uid: foo-bar
+cn: "Foo Bar"
+givenName: Foo
+sn: Bar
+mail: foo-bar-baz@example.com
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: -1
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11111
+gidNumber: 11111
+homeDirectory: /home/foo-bar
+userPassword: ${passwordhash}
+
+dn: uid=expired,dc=example,dc=org
+uid: expired
+cn: "Exp Ired"
+givenName: Exp
+sn: Ired
+mail: expired@example.com
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: 180
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11112
+gidNumber: 11111
+homeDirectory: /home/expired
+userPassword: ${passwordhash}
diff --git a/lib/controller/localdb/login_docker_test/run_controller.sh b/lib/controller/localdb/login_docker_test/run_controller.sh
new file mode 100755
index 0000000000..64964ef321
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/run_controller.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script is the entrypoint for test containers. If the test mounts a
+# /setup.sh script in the container, it runs that first, then starts the
+# controller.
+
+set -e
+set -u
+set -o pipefail
+
+if [[ -e /setup.sh ]]; then
+ . /setup.sh
+fi
+exec arvados-server controller
diff --git a/lib/controller/localdb/login_docker_test/setup_pam_test.sh b/lib/controller/localdb/login_docker_test/setup_pam_test.sh
new file mode 100755
index 0000000000..8a76455a18
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/setup_pam_test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script should be mounted in the PAM test controller at /setup.sh.
+# It creates the user account fixtures the test needs in the local passwd
+# database.
+
+set -e
+set -u
+set -o pipefail
+
+useradd --no-create-home foo-bar
+useradd --no-create-home expired
+chpasswd <<EOF
+foo-bar:secret
+expired:secret
+EOF
+chage -E 1970-01-02 expired
diff --git a/lib/controller/localdb/login_docker_test/setup_suite.sh b/lib/controller/localdb/login_docker_test/setup_suite.sh
new file mode 100755
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/setup_suite.sh
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script does one-time setup for LoginDockerSuite: it picks a
+# controller Docker image matching the host OS, builds arvados-server,
+# and starts an openldap container with LDAP user account fixtures.
+
+set -e
+set -u
+set -o pipefail
+
+net_name="$1"; shift
+tmpdir="$1"; shift
+
+# Choose a controller image matching the host distribution, so the
+# arvados-server binary built on the host will run inside the container.
+. /etc/os-release
+case "$NAME $VERSION" in
+ "Debian GNU/Linux"*)
+ controller_image="debian:${VERSION_CODENAME}-slim"
+ ;;
+ "Ubuntu"*)
+ controller_image="ubuntu:${VERSION_CODENAME}"
+ ;;
+ *)
+ echo >&2 "don't know what Docker image corresponds to $NAME $VERSION"
+ exit 3 # EXIT_NOTIMPLEMENTED
+ ;;
+esac
+# Pull the image if we don't have it already
+docker run --rm "$controller_image" true
+echo "$controller_image" >"$tmpdir/controller_image"
+
+go build -o "${tmpdir}" ../../../../cmd/arvados-server
+
+docker run --rm --detach \
+ --name=arvados-test-openldap \
+ --network="$net_name" \
+ bitnami/openldap:2.6
+
+awk -v passhash="$(docker exec -i arvados-test-openldap slappasswd -s "secret")" -- '
+($1 == "userPassword:") { $2 = passhash; }
+{ print; }
+' add_example_user.ldif >"$tmpdir/add_example_user.ldif"
+
+docker run --rm \
+ --entrypoint=/setup_suite_users.sh \
+ --network="$net_name" \
+ -v "$PWD/setup_suite_users.sh":/setup_suite_users.sh:ro \
+ -v "${tmpdir}/add_example_user.ldif":/add_example_user.ldif:ro \
+ bitnami/openldap:2.6
diff --git a/lib/controller/localdb/login_docker_test/setup_suite_users.sh b/lib/controller/localdb/login_docker_test/setup_suite_users.sh
new file mode 100755
index 0000000000..ed1f238cca
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/setup_suite_users.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script is the entrypoint for a container run by setup_suite.sh to create
+# user account fixtures in LDAP.
+
+set -e
+set -u
+set -o pipefail
+
+result=0
+for tries in $(seq 9 -1 0); do
+ ldapadd \
+ -H ldap://arvados-test-openldap:1389/ \
+ -D cn=admin,dc=example,dc=org \
+ -w adminpassword \
+ -f /add_example_user.ldif ||
+ result=$?
+ # ldapadd uses exit code 68 to mean "user already exists."
+ if [[ "$result" = 0 ]] || [[ "$result" = 68 ]]; then
+ exit 0
+ elif [[ "$tries" != 0 ]]; then
+ sleep 1
+ fi
+done
+
+echo 'error: failed to add user entry' >&2
+exit "$result"
diff --git a/lib/controller/localdb/login_docker_test/start_controller_container.sh b/lib/controller/localdb/login_docker_test/start_controller_container.sh
new file mode 100755
index 0000000000..9fef435d1e
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/start_controller_container.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script starts a test controller container, waits for it to respond, then
+# writes its IP address on stdout. It requires the Docker network name and test
+# temporary directory path as arguments. Additional arguments are passed through
+# to `docker run` so you can, e.g., mount additional files like `/setup.sh`.
+
+set -e
+set -u
+set -o pipefail
+
+net_name="$1"; shift
+tmpdir="$1"; shift
+selfdir="$(readlink -e "$(dirname "$0")")"
+
+docker run --detach --rm \
+ --cidfile="$tmpdir/controller.cid" \
+ --entrypoint=/run.sh \
+ --network="$net_name" \
+ -v "${tmpdir}/arvados.yml":/etc/arvados/config.yml:ro \
+ -v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
+ -v "$(readlink -e ../../..)":/arvados:ro \
+ -v "${selfdir}/run_controller.sh":/run.sh:ro \
+ "$@" "$(cat "$tmpdir/controller_image")"
+
+cont_addr="$(xargs -a "$tmpdir/controller.cid" docker inspect --format "{{(index .NetworkSettings.Networks \"${net_name}\").IPAddress}}")"
+cont_url="http://${cont_addr}/arvados/v1/config"
+for tries in $(seq 19 -1 0); do
+ if curl -fsL "$cont_url" >/dev/null; then
+ # Write the container address for the Go test code to record.
+ # We had to get it here anyway so we might as well pass it up.
+ echo "$cont_addr"
+ exit
+ elif [[ "$tries" != 0 ]]; then
+ sleep 1
+ fi
+done
+
+echo "error: controller did not come up" >&2
+exit 7 # EXIT_NOTRUNNING
diff --git a/lib/controller/localdb/login_docker_test/teardown_suite.sh b/lib/controller/localdb/login_docker_test/teardown_suite.sh
new file mode 100755
index 0000000000..e3016f9dc6
--- /dev/null
+++ b/lib/controller/localdb/login_docker_test/teardown_suite.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+# This script stops all Docker containers running on the named network, then
+# removes the network itself.
+
+set -e
+set -u
+set -o pipefail
+
+net_name="$1"; shift
+
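+# "docker network inspect" reports attached containers as a map keyed
+# by container ID; extract the IDs with jq and stop each container.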
+docker network inspect "$net_name" |
+ jq -r 'map(.Containers | keys) | flatten | join("\n")' |
+ xargs -r -d\\n docker stop
+docker network rm "$net_name"
diff --git a/lib/controller/localdb/login_ldap_docker_test.go b/lib/controller/localdb/login_ldap_docker_test.go
deleted file mode 100644
index 3cbf14fe0b..0000000000
--- a/lib/controller/localdb/login_ldap_docker_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package localdb
-
-import (
- "os"
- "os/exec"
- "testing"
-
- check "gopkg.in/check.v1"
-)
-
-func haveDocker() bool {
- _, err := exec.Command("docker", "info").CombinedOutput()
- return err == nil
-}
-
-func (s *LDAPSuite) TestLoginLDAPViaPAM(c *check.C) {
- if testing.Short() {
- c.Skip("skipping docker test in short mode")
- }
- if !haveDocker() {
- c.Skip("skipping docker test because docker is not available")
- }
- pgproxy := newPgProxy(c, s.cluster)
- defer pgproxy.Close()
-
- cmd := exec.Command("bash", "login_ldap_docker_test.sh")
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
- cmd.Env = append(os.Environ(), "config_method=pam", "pgport="+pgproxy.Port())
- err := cmd.Run()
- c.Check(err, check.IsNil)
-}
-
-func (s *LDAPSuite) TestLoginLDAPBuiltin(c *check.C) {
- if testing.Short() {
- c.Skip("skipping docker test in short mode")
- }
- if !haveDocker() {
- c.Skip("skipping docker test because docker is not available")
- }
- pgproxy := newPgProxy(c, s.cluster)
- defer pgproxy.Close()
-
- cmd := exec.Command("bash", "login_ldap_docker_test.sh")
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
- cmd.Env = append(os.Environ(), "config_method=ldap", "pgport="+pgproxy.Port())
- err := cmd.Run()
- c.Check(err, check.IsNil)
-}
diff --git a/lib/controller/localdb/login_ldap_docker_test.sh b/lib/controller/localdb/login_ldap_docker_test.sh
deleted file mode 100755
index c539e0e60b..0000000000
--- a/lib/controller/localdb/login_ldap_docker_test.sh
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/bin/bash
-
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# This script demonstrates using LDAP for Arvados user authentication.
-#
-# It configures arvados controller in a docker container, optionally
-# with pam_ldap(5) configured to authenticate against an OpenLDAP
-# server in a second docker container.
-#
-# After adding a "foo" user entry, it uses curl to check that the
-# Arvados controller's login endpoint accepts the "foo" account
-# username/password and rejects invalid credentials.
-#
-# It is intended to be run inside .../build/run-tests.sh (in
-# interactive mode: "test lib/controller/localdb -tags=docker
-# -check.f=LDAP -check.vv"). It assumes ARVADOS_TEST_API_HOST points
-# to a RailsAPI server and the desired version of arvados-server is
-# installed in $GOPATH/bin.
-
-set -e -o pipefail
-
-debug=/dev/null
-if [[ -n ${ARVADOS_DEBUG} ]]; then
- debug=/dev/stderr
- set -x
-fi
-
-case "${config_method}" in
- pam | ldap)
- ;;
- *)
- echo >&2 "\$config_method env var must be 'pam' or 'ldap'"
- exit 1
- ;;
-esac
-
-hostname="$(hostname)"
-tmpdir="$(mktemp -d)"
-cleanup() {
- trap - ERR
- rm -r ${tmpdir}
- for h in ${ldapctr} ${ctrlctr}; do
- if [[ -n ${h} ]]; then
- docker kill ${h}
- fi
- done
-}
-trap cleanup ERR
-
-if [[ -z "$(docker image ls -q osixia/openldap:1.3.0)" ]]; then
- echo >&2 "Pulling docker image for ldap server"
- docker pull osixia/openldap:1.3.0
-fi
-
-ldapctr=ldap-${RANDOM}
-echo >&2 "Starting ldap server in docker container ${ldapctr}"
-docker run --rm --detach \
- -p 389 -p 636 \
- --name=${ldapctr} \
- osixia/openldap:1.3.0
-docker logs --follow ${ldapctr} 2>$debug >$debug &
-ldaphostports=$(docker port ${ldapctr} 389/tcp)
-ldapport=${ldaphostports##*:}
-ldapurl="ldap://${hostname}:${ldapport}"
-passwordhash="$(docker exec -i ${ldapctr} slappasswd -s "secret")"
-
-# These are the default admin credentials for osixia/openldap:1.3.0
-adminuser=admin
-adminpassword=admin
-
-cat >"${tmpdir}/zzzzz.yml" <>"${tmpdir}/zzzzz.yml" <>"${tmpdir}/zzzzz.yml" <&2 "${tmpdir}/zzzzz.yml"
-
-cat >"${tmpdir}/pam_ldap.conf" <"${tmpdir}/add_example_user.ldif" <&2 "Adding example user entry user=foo-bar pass=secret (retrying until server comes up)"
-docker run --rm --entrypoint= \
- -v "${tmpdir}/add_example_user.ldif":/add_example_user.ldif:ro \
- osixia/openldap:1.3.0 \
- bash -c "for f in \$(seq 1 5); do if ldapadd -H '${ldapurl}' -D 'cn=${adminuser},dc=example,dc=org' -w '${adminpassword}' -f /add_example_user.ldif; then exit 0; else sleep 2; fi; done; echo 'failed to add user entry'; exit 1"
-
-echo >&2 "Building arvados controller binary to run in container"
-go build -o "${tmpdir}" ../../../cmd/arvados-server
-
-ctrlctr=ctrl-${RANDOM}
-echo >&2 "Starting arvados controller in docker container ${ctrlctr}"
-docker run --detach --rm --name=${ctrlctr} \
- -p 9999 \
- -v "${tmpdir}/pam_ldap.conf":/etc/pam_ldap.conf:ro \
- -v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
- -v "${tmpdir}/zzzzz.yml":/etc/arvados/config.yml:ro \
- -v $(realpath "${PWD}/../../.."):/arvados:ro \
- debian:11 \
- bash -c "${setup_pam_ldap:-true} && arvados-server controller"
-docker logs --follow ${ctrlctr} 2>$debug >$debug &
-ctrlhostports=$(docker port ${ctrlctr} 9999/tcp)
-ctrlport=${ctrlhostports##*:}
-
-echo >&2 "Waiting for arvados controller to come up..."
-for f in $(seq 1 20); do
- if curl -s "http://0.0.0.0:${ctrlport}/arvados/v1/config" >/dev/null; then
- break
- else
- sleep 1
- fi
- echo -n >&2 .
-done
-echo >&2
-echo >&2 "Arvados controller is up at http://0.0.0.0:${ctrlport}"
-
-check_contains() {
- resp="${1}"
- str="${2}"
- if ! echo "${resp}" | fgrep -q "${str}"; then
- echo >&2 "${resp}"
- echo >&2 "FAIL: expected in response, but not found: ${str@Q}"
- return 1
- fi
-}
-
-set +x
-
-echo >&2 "Testing authentication failure"
-resp="$(set -x; curl -s --include -d username=foo-bar -d password=nosecret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
-check_contains "${resp}" "HTTP/1.1 401"
-if [[ "${config_method}" = ldap ]]; then
- check_contains "${resp}" '{"errors":["LDAP: Authentication failure (with username \"foo-bar\" and password)"]}'
-else
- check_contains "${resp}" '{"errors":["PAM: Authentication failure (with username \"foo-bar\" and password)"]}'
-fi
-
-if [[ "${config_method}" = pam ]]; then
- echo >&2 "Testing expired credentials"
- resp="$(set -x; curl -s --include -d username=expired -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
- check_contains "${resp}" "HTTP/1.1 401"
- check_contains "${resp}" '{"errors":["PAM: Authentication failure; \"You are required to change your LDAP password immediately.\""]}'
-fi
-
-echo >&2 "Testing authentication success"
-resp="$(set -x; curl -s --include -d username=foo-bar -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
-check_contains "${resp}" "HTTP/1.1 200"
-check_contains "${resp}" '"api_token":"'
-check_contains "${resp}" '"scopes":["all"]'
-check_contains "${resp}" '"uuid":"zzzzz-gj3su-'
-
-secret="${resp##*api_token\":\"}"
-secret="${secret%%\"*}"
-uuid="${resp##*uuid\":\"}"
-uuid="${uuid%%\"*}"
-token="v2/$uuid/$secret"
-echo >&2 "New token is ${token}"
-
-resp="$(set -x; curl -s --include -H "Authorization: Bearer ${token}" "http://0.0.0.0:${ctrlport}/arvados/v1/users/current" | tee $debug)"
-check_contains "${resp}" "HTTP/1.1 200"
-if [[ "${config_method}" = ldap ]]; then
- # user fields come from LDAP attributes
- check_contains "${resp}" '"first_name":"Foo"'
- check_contains "${resp}" '"last_name":"Bar"'
- check_contains "${resp}" '"username":"foobar"' # "-" removed by rails api
- check_contains "${resp}" '"email":"foo-bar-baz@example.com"'
-else
- # PAMDefaultEmailDomain
- check_contains "${resp}" '"email":"foo-bar@example.com"'
-fi
-
-cleanup
diff --git a/lib/controller/localdb/login_ldap_test.go b/lib/controller/localdb/login_ldap_test.go
deleted file mode 100644
index c7d8390225..0000000000
--- a/lib/controller/localdb/login_ldap_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package localdb
-
-import (
- "encoding/json"
- "net"
- "net/http"
-
- "git.arvados.org/arvados.git/lib/controller/railsproxy"
- "git.arvados.org/arvados.git/lib/ctrlctx"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/bradleypeabody/godap"
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&LDAPSuite{})
-
-type LDAPSuite struct {
- localdbSuite
- ldap *godap.LDAPServer // fake ldap server that accepts auth goodusername/goodpassword
-}
-
-func (s *LDAPSuite) SetUpTest(c *check.C) {
- s.localdbSuite.SetUpTest(c)
-
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- c.Assert(err, check.IsNil)
- s.ldap = &godap.LDAPServer{
- Listener: ln,
- Handlers: []godap.LDAPRequestHandler{
- &godap.LDAPBindFuncHandler{
- LDAPBindFunc: func(binddn string, bindpw []byte) bool {
- return binddn == "cn=goodusername,dc=example,dc=com" && string(bindpw) == "goodpassword"
- },
- },
- &godap.LDAPSimpleSearchFuncHandler{
- LDAPSimpleSearchFunc: func(req *godap.LDAPSimpleSearchRequest) []*godap.LDAPSimpleSearchResultEntry {
- if req.FilterAttr != "uid" || req.BaseDN != "dc=example,dc=com" {
- return []*godap.LDAPSimpleSearchResultEntry{}
- }
- return []*godap.LDAPSimpleSearchResultEntry{
- {
- DN: "cn=" + req.FilterValue + "," + req.BaseDN,
- Attrs: map[string]interface{}{
- "SN": req.FilterValue,
- "CN": req.FilterValue,
- "uid": req.FilterValue,
- "mail": req.FilterValue + "@example.com",
- },
- },
- }
- },
- },
- },
- }
- go func() {
- ctxlog.TestLogger(c).Print(s.ldap.Serve())
- }()
-
- s.cluster.Login.LDAP.Enable = true
- err = json.Unmarshal([]byte(`"ldap://`+ln.Addr().String()+`"`), &s.cluster.Login.LDAP.URL)
- c.Assert(err, check.IsNil)
- s.cluster.Login.LDAP.StartTLS = false
- s.cluster.Login.LDAP.SearchBindUser = "cn=goodusername,dc=example,dc=com"
- s.cluster.Login.LDAP.SearchBindPassword = "goodpassword"
- s.cluster.Login.LDAP.SearchBase = "dc=example,dc=com"
- s.localdb.loginController = &ldapLoginController{
- Cluster: s.cluster,
- Parent: s.localdb,
- }
-}
-
-func (s *LDAPSuite) TestLoginSuccess(c *check.C) {
- resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
- Username: "goodusername",
- Password: "goodpassword",
- })
- c.Check(err, check.IsNil)
- c.Check(resp.APIToken, check.Not(check.Equals), "")
- c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
- c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
-
- ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, "v2/"+resp.UUID+"/"+resp.APIToken)
- user, err := railsproxy.NewConn(s.cluster).UserGetCurrent(ctx, arvados.GetOptions{})
- c.Check(err, check.IsNil)
- c.Check(user.Email, check.Equals, "goodusername@example.com")
- c.Check(user.Username, check.Equals, "goodusername")
-}
-
-func (s *LDAPSuite) TestLoginFailure(c *check.C) {
- // search returns no results
- s.cluster.Login.LDAP.SearchBase = "dc=example,dc=invalid"
- resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
- Username: "goodusername",
- Password: "goodpassword",
- })
- c.Check(err, check.ErrorMatches, `LDAP: Authentication failure \(with username "goodusername" and password\)`)
- hs, ok := err.(interface{ HTTPStatus() int })
- if c.Check(ok, check.Equals, true) {
- c.Check(hs.HTTPStatus(), check.Equals, http.StatusUnauthorized)
- }
- c.Check(resp.APIToken, check.Equals, "")
-
- // search returns result, but auth fails
- s.cluster.Login.LDAP.SearchBase = "dc=example,dc=com"
- resp, err = s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
- Username: "badusername",
- Password: "badpassword",
- })
- c.Check(err, check.ErrorMatches, `LDAP: Authentication failure \(with username "badusername" and password\)`)
- hs, ok = err.(interface{ HTTPStatus() int })
- if c.Check(ok, check.Equals, true) {
- c.Check(hs.HTTPStatus(), check.Equals, http.StatusUnauthorized)
- }
- c.Check(resp.APIToken, check.Equals, "")
-}
diff --git a/lib/controller/localdb/login_oidc.go b/lib/controller/localdb/login_oidc.go
index d91cdddc01..3aeabb00ed 100644
--- a/lib/controller/localdb/login_oidc.go
+++ b/lib/controller/localdb/login_oidc.go
@@ -38,7 +38,7 @@ import (
"golang.org/x/oauth2"
"google.golang.org/api/option"
"google.golang.org/api/people/v1"
- "gopkg.in/square/go-jose.v2/jwt"
+ "gopkg.in/go-jose/go-jose.v2/jwt"
)
var (
@@ -49,6 +49,11 @@ var (
pqCodeUniqueViolation = pq.ErrorCode("23505")
)
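+// tokenCacheEnt is a cache entry recording whether a token checked
+// out as valid, and when the cached result should be refreshed by
+// re-checking the database and/or OIDC provider.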
+type tokenCacheEnt struct {
+ valid bool
+ refresh time.Time
+}
+
type oidcLoginController struct {
Cluster *arvados.Cluster
Parent *Conn
@@ -470,26 +475,19 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
if tok == ta.ctrl.Cluster.SystemRootToken || strings.HasPrefix(tok, "v2/") {
return nil
}
- if cached, hit := ta.cache.Get(tok); !hit {
+ if ent, hit := ta.cache.Get(tok); !hit {
// Fall through to database and OIDC provider checks
// below
- } else if exp, ok := cached.(time.Time); ok {
- // cached negative result (value is expiry time)
- if time.Now().Before(exp) {
+ } else if ent := ent.(tokenCacheEnt); !ent.valid {
+ // cached negative result
+ if time.Now().Before(ent.refresh) {
return nil
}
ta.cache.Remove(tok)
- } else {
- // cached positive result
- aca := cached.(arvados.APIClientAuthorization)
- var expiring bool
- if !aca.ExpiresAt.IsZero() {
- t := aca.ExpiresAt
- expiring = t.Before(time.Now().Add(time.Minute))
- }
- if !expiring {
- return nil
- }
+ } else if ent.refresh.IsZero() || ent.refresh.After(time.Now().Add(time.Minute)) {
+ // cached positive result, and we're not at/near
+ // refresh time
+ return nil
}
db, err := ta.getdb(ctx)
@@ -510,17 +508,23 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
io.WriteString(mac, tok)
hmac := fmt.Sprintf("%x", mac.Sum(nil))
- var expiring bool
- err = tx.QueryRowContext(ctx, `select (expires_at is not null and expires_at - interval '1 minute' <= current_timestamp at time zone 'UTC') from api_client_authorizations where api_token=$1`, hmac).Scan(&expiring)
+ var needRefresh bool
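+	// Note Postgres least() ignores NULL arguments: if only one of
+	// expires_at/refreshes_at is set, that one is the deadline, and
+	// the result is NULL only when both are NULL.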
+ err = tx.QueryRowContext(ctx, `
+ select (least(expires_at, refreshes_at) is not null
+ and least(expires_at, refreshes_at) - interval '1 minute' <= current_timestamp at time zone 'UTC')
+ from api_client_authorizations
+ where api_token=$1`, hmac).Scan(&needRefresh)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("database error while checking token: %w", err)
- } else if err == nil && !expiring {
+ } else if err == nil && !needRefresh {
// Token is already in the database as an Arvados
// token, and isn't about to expire, so we can pass it
// through to RailsAPI etc. regardless of whether it's
// an OIDC access token.
return nil
}
+ // err is either nil (meaning we need to update an existing
+ // row) or sql.ErrNoRows (meaning we need to insert a new row)
updating := err == nil
// Check whether the token is a valid OIDC access token. If
@@ -533,7 +537,10 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
}
if ok, err := ta.checkAccessTokenScope(ctx, tok); err != nil || !ok {
// Note checkAccessTokenScope logs any interesting errors
- ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
+ ta.cache.Add(tok, tokenCacheEnt{
+ valid: false,
+ refresh: time.Now().Add(tokenCacheNegativeTTL),
+ })
return err
}
oauth2Token := &oauth2.Token{
@@ -556,7 +563,10 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
return err
}
ctxlog.FromContext(ctx).WithError(err).WithField("HMAC", hmac).Debug("UserInfo failed (not an OIDC token?), caching negative result")
- ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
+ ta.cache.Add(tok, tokenCacheEnt{
+ valid: false,
+ refresh: time.Now().Add(tokenCacheNegativeTTL),
+ })
return nil
}
ctxlog.FromContext(ctx).WithField("userinfo", userinfo).Debug("(*oidcTokenAuthorizer)registerToken: got userinfo")
@@ -565,15 +575,15 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
return err
}
- // Expiry time for our token is one minute longer than our
+ // Refresh time for our token is one minute longer than our
// cache TTL, so we don't pass it through to RailsAPI just as
- // it's expiring.
- exp := time.Now().UTC().Add(tokenCacheTTL + tokenCacheRaceWindow)
+ // the refresh time is arriving.
+ refresh := time.Now().UTC().Add(tokenCacheTTL + tokenCacheRaceWindow)
if updating {
- _, err = tx.ExecContext(ctx, `update api_client_authorizations set expires_at=$1 where api_token=$2`, exp, hmac)
+ _, err = tx.ExecContext(ctx, `update api_client_authorizations set expires_at=null, refreshes_at=$1 where api_token=$2`, refresh, hmac)
if err != nil {
- return fmt.Errorf("error updating token expiry time: %w", err)
+ return fmt.Errorf("error updating token refresh time: %w", err)
}
ctxlog.FromContext(ctx).WithField("HMAC", hmac).Debug("(*oidcTokenAuthorizer)registerToken: updated api_client_authorizations row")
} else {
@@ -585,7 +595,7 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
if err != nil {
return err
}
- _, err = tx.ExecContext(ctx, `update api_client_authorizations set api_token=$1, expires_at=$2 where uuid=$3`, hmac, exp, aca.UUID)
+ _, err = tx.ExecContext(ctx, `update api_client_authorizations set api_token=$1, expires_at=null, refreshes_at=$2 where uuid=$3`, hmac, refresh, aca.UUID)
if e, ok := err.(*pq.Error); ok && e.Code == pqCodeUniqueViolation {
// unique_violation, given that the above
// query did not find a row with matching
@@ -614,7 +624,10 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
if err != nil {
return err
}
- ta.cache.Add(tok, arvados.APIClientAuthorization{ExpiresAt: exp})
+ ta.cache.Add(tok, tokenCacheEnt{
+ valid: true,
+ refresh: refresh,
+ })
return nil
}
diff --git a/lib/controller/localdb/login_oidc_test.go b/lib/controller/localdb/login_oidc_test.go
index f505f5bc49..b8e8d8219c 100644
--- a/lib/controller/localdb/login_oidc_test.go
+++ b/lib/controller/localdb/login_oidc_test.go
@@ -311,7 +311,7 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
checkTokenInDB := func() time.Time {
var exp time.Time
- err := db.QueryRow(`select expires_at at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
+ err := db.QueryRow(`select greatest(expires_at, refreshes_at) at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
c.Check(err, check.IsNil)
c.Check(exp.Sub(time.Now()) > -time.Second, check.Equals, true)
c.Check(exp.Sub(time.Now()) < time.Second, check.Equals, true)
@@ -359,8 +359,9 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
_, err = call(ctx, nil)
c.Check(err, check.IsNil)
ent, ok := oidcAuthorizer.cache.Get(accessToken)
- c.Check(ok, check.Equals, true)
- c.Check(ent, check.FitsTypeOf, time.Time{})
+ if c.Check(ok, check.Equals, true) {
+ c.Check(ent.(tokenCacheEnt).valid, check.Equals, false)
+ }
// UserInfo succeeds now, but we still have a cached
// negative result.
@@ -368,8 +369,9 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
_, err = call(ctx, nil)
c.Check(err, check.IsNil)
ent, ok = oidcAuthorizer.cache.Get(accessToken)
- c.Check(ok, check.Equals, true)
- c.Check(ent, check.FitsTypeOf, time.Time{})
+ if c.Check(ok, check.Equals, true) {
+ c.Check(ent.(tokenCacheEnt).valid, check.Equals, false)
+ }
tokenCacheNegativeTTL = time.Millisecond
cleanup()
@@ -416,7 +418,7 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
// If the token is used again after the in-memory cache
// expires, oidcAuthorizer must re-check the token and update
- // the expires_at value in the database.
+ // the refreshes_at value in the database.
time.Sleep(3 * time.Millisecond)
oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
exp := checkTokenInDB()
diff --git a/lib/controller/localdb/login_pam_test.go b/lib/controller/localdb/login_pam_test.go
deleted file mode 100644
index 2c3fa4d0f7..0000000000
--- a/lib/controller/localdb/login_pam_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package localdb
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "strings"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&PamSuite{})
-
-type PamSuite struct {
- localdbSuite
-}
-
-func (s *PamSuite) SetUpTest(c *check.C) {
- s.localdbSuite.SetUpTest(c)
- s.cluster.Login.PAM.Enable = true
- s.cluster.Login.PAM.DefaultEmailDomain = "example.com"
- s.localdb.loginController = &pamLoginController{
- Cluster: s.cluster,
- Parent: s.localdb,
- }
-}
-
-func (s *PamSuite) TestLoginFailure(c *check.C) {
- resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
- Username: "bogususername",
- Password: "boguspassword",
- })
- c.Check(err, check.ErrorMatches, `PAM: Authentication failure \(with username "bogususername" and password\)`)
- hs, ok := err.(interface{ HTTPStatus() int })
- if c.Check(ok, check.Equals, true) {
- c.Check(hs.HTTPStatus(), check.Equals, http.StatusUnauthorized)
- }
- c.Check(resp.APIToken, check.Equals, "")
-}
-
-// This test only runs if the ARVADOS_TEST_PAM_CREDENTIALS_FILE env
-// var is set. The credentials file should contain a valid username
-// and password, separated by \n.
-//
-// Depending on the host config, this test succeeds only if the test
-// credentials are for the same account being used to run tests.
-func (s *PamSuite) TestLoginSuccess(c *check.C) {
- testCredsFile := os.Getenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE")
- if testCredsFile == "" {
- c.Skip("no test credentials file given in ARVADOS_TEST_PAM_CREDENTIALS_FILE")
- return
- }
- buf, err := ioutil.ReadFile(testCredsFile)
- c.Assert(err, check.IsNil)
- lines := strings.Split(string(buf), "\n")
- c.Assert(len(lines), check.Equals, 2, check.Commentf("credentials file %s should contain \"username\\npassword\"", testCredsFile))
- u, p := lines[0], lines[1]
-
- resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
- Username: u,
- Password: p,
- })
- c.Check(err, check.IsNil)
- c.Check(resp.APIToken, check.Not(check.Equals), "")
- c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
- c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
-
- authinfo := getCallbackAuthInfo(c, s.railsSpy)
- c.Check(authinfo.Email, check.Equals, u+"@"+s.cluster.Login.PAM.DefaultEmailDomain)
- c.Check(authinfo.AlternateEmails, check.DeepEquals, []string(nil))
-}
diff --git a/lib/controller/localdb/login_testuser_test.go b/lib/controller/localdb/login_testuser_test.go
index 51dcaab9db..5c0ca937ab 100644
--- a/lib/controller/localdb/login_testuser_test.go
+++ b/lib/controller/localdb/login_testuser_test.go
@@ -95,7 +95,10 @@ func (s *TestUserSuite) TestExpireTokenOnLogout(c *check.C) {
var tokenUUID string
var err error
- qry := `SELECT uuid FROM api_client_authorizations WHERE uuid=$1 AND (expires_at IS NULL OR expires_at > current_timestamp AT TIME ZONE 'UTC') LIMIT 1`
+ qry := `SELECT uuid FROM api_client_authorizations
+ WHERE uuid=$1
+ AND (least(expires_at, refreshes_at) IS NULL OR least(expires_at, refreshes_at) > current_timestamp AT TIME ZONE 'UTC')
+ LIMIT 1`
if trial.shouldExpireToken {
err = s.tx.QueryRowContext(ctx, qry, trial.expiringTokenUUID).Scan(&tokenUUID)
diff --git a/lib/controller/localdb/logout.go b/lib/controller/localdb/logout.go
index 04e7681ad7..530e9ec52e 100644
--- a/lib/controller/localdb/logout.go
+++ b/lib/controller/localdb/logout.go
@@ -69,7 +69,12 @@ func expireAPIClientAuthorization(ctx context.Context) error {
}
var retrievedUuid string
- err = tx.QueryRowContext(ctx, `SELECT uuid FROM api_client_authorizations WHERE api_token=$1 AND (expires_at IS NULL OR expires_at > current_timestamp AT TIME ZONE 'UTC') LIMIT 1`, tokenSecret).Scan(&retrievedUuid)
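+	// LEAST() ignores NULLs, so the token is considered alive if
+	// whichever of expires_at/refreshes_at is set has not passed
+	// (or both are NULL, meaning the token never expires).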
+ err = tx.QueryRowContext(ctx, `
+ SELECT uuid
+ FROM api_client_authorizations
+ WHERE api_token=$1
+ AND (LEAST(expires_at, refreshes_at) IS NULL OR LEAST(expires_at, refreshes_at) > current_timestamp AT TIME ZONE 'UTC')
+ LIMIT 1`, tokenSecret).Scan(&retrievedUuid)
if err == sql.ErrNoRows {
ctxlog.FromContext(ctx).Debugf("expireAPIClientAuthorization(%s): not found in database", token)
return nil
diff --git a/lib/controller/router/request.go b/lib/controller/router/request.go
index 68fffa0681..254a8b7fab 100644
--- a/lib/controller/router/request.go
+++ b/lib/controller/router/request.go
@@ -141,15 +141,17 @@ func (rtr *router) loadRequestParams(req *http.Request, attrsKey string, opts in
delete(params, attrsKey)
}
- if order, ok := params["order"].(string); ok {
+ for _, paramname := range []string{"include", "order"} {
// We must accept strings ("foo, bar desc") and arrays
// (["foo", "bar desc"]) because RailsAPI does.
// Convert to an array here before trying to unmarshal
// into options structs.
- if order == "" {
- delete(params, "order")
- } else {
- params["order"] = strings.Split(order, ",")
+ if val, ok := params[paramname].(string); ok {
+ if val == "" {
+ delete(params, paramname)
+ } else {
+ params[paramname] = strings.Split(val, ",")
+ }
}
}
diff --git a/lib/controller/router/request_test.go b/lib/controller/router/request_test.go
index b689eb681f..0e19c51682 100644
--- a/lib/controller/router/request_test.go
+++ b/lib/controller/router/request_test.go
@@ -35,7 +35,8 @@ type testReq struct {
tokenInQuery bool
noContentType bool
- body *bytes.Buffer
+ body *bytes.Buffer // provided by caller
+ bodyContent []byte // set by (*testReq)Request() if body not provided by caller
}
const noToken = "(no token)"
@@ -46,8 +47,10 @@ func (tr *testReq) Request() *http.Request {
param[k] = v
}
+ var body *bytes.Buffer
if tr.body != nil {
// caller provided a buffer
+ body = tr.body
} else if tr.json {
if tr.jsonAttrsTop {
for k, v := range tr.attrs {
@@ -72,11 +75,12 @@ func (tr *testReq) Request() *http.Request {
param[tr.attrsKey] = tr.attrs
}
}
- tr.body = bytes.NewBuffer(nil)
- err := json.NewEncoder(tr.body).Encode(param)
+ body = bytes.NewBuffer(nil)
+ err := json.NewEncoder(body).Encode(param)
if err != nil {
panic(err)
}
+ tr.bodyContent = body.Bytes()
} else {
values := make(url.Values)
for k, v := range param {
@@ -97,8 +101,9 @@ func (tr *testReq) Request() *http.Request {
}
values.Set(tr.attrsKey, string(jattrs))
}
- tr.body = bytes.NewBuffer(nil)
- io.WriteString(tr.body, values.Encode())
+ body = bytes.NewBuffer(nil)
+ io.WriteString(body, values.Encode())
+ tr.bodyContent = body.Bytes()
}
method := tr.method
if method == "" {
@@ -108,7 +113,7 @@ func (tr *testReq) Request() *http.Request {
if path == "" {
path = "example/test/path"
}
- req := httptest.NewRequest(method, "https://an.example/"+path, tr.body)
+ req := httptest.NewRequest(method, "https://an.example/"+path, body)
token := tr.token
if token == "" {
token = arvadostest.ActiveTokenV2
@@ -127,10 +132,6 @@ func (tr *testReq) Request() *http.Request {
return req
}
-func (tr *testReq) bodyContent() string {
- return string(tr.body.Bytes())
-}
-
func (s *RouterSuite) TestAttrsInBody(c *check.C) {
attrs := map[string]interface{}{"foo": "bar"}
@@ -172,7 +173,7 @@ func (s *RouterSuite) TestBoolParam(c *check.C) {
} {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
- c.Logf("tr.body: %s", tr.bodyContent())
+ c.Logf("tr.body: %s", tr.bodyContent)
var opts struct{ EnsureUniqueName bool }
params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Logf("params: %#v", params)
@@ -191,7 +192,7 @@ func (s *RouterSuite) TestBoolParam(c *check.C) {
} {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
- c.Logf("tr.body: %s", tr.bodyContent())
+ c.Logf("tr.body: %s", tr.bodyContent)
var opts struct {
EnsureUniqueName bool `json:"ensure_unique_name"`
}
@@ -205,22 +206,25 @@ func (s *RouterSuite) TestBoolParam(c *check.C) {
}
}
-func (s *RouterSuite) TestOrderParam(c *check.C) {
- for i, tr := range []testReq{
- {method: "POST", param: map[string]interface{}{"order": ""}, json: true},
- {method: "POST", param: map[string]interface{}{"order": ""}, json: false},
- {method: "POST", param: map[string]interface{}{"order": []string{}}, json: true},
- {method: "POST", param: map[string]interface{}{"order": []string{}}, json: false},
- {method: "POST", param: map[string]interface{}{}, json: true},
- {method: "POST", param: map[string]interface{}{}, json: false},
- } {
- c.Logf("#%d, tr: %#v", i, tr)
- req := tr.Request()
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey, nil)
- c.Assert(err, check.IsNil)
- c.Assert(params, check.NotNil)
- if order, ok := params["order"]; ok && order != nil {
- c.Check(order, check.DeepEquals, []interface{}{})
+func (s *RouterSuite) TestStringOrArrayParam(c *check.C) {
+ for _, paramname := range []string{"order", "include"} {
+ for i, tr := range []testReq{
+ {method: "POST", param: map[string]interface{}{paramname: ""}, json: true},
+ {method: "POST", param: map[string]interface{}{paramname: ""}, json: false},
+ {method: "POST", param: map[string]interface{}{paramname: []string{}}, json: true},
+ {method: "POST", param: map[string]interface{}{paramname: []string{}}, json: false},
+ {method: "POST", param: map[string]interface{}{}, json: true},
+ {method: "POST", param: map[string]interface{}{}, json: false},
+ } {
+ c.Logf("%s #%d, tr: %#v", paramname, i, tr)
+ req := tr.Request()
+ c.Logf("tr.body: %s", tr.bodyContent)
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(params, check.NotNil)
+ if order, ok := params[paramname]; ok && order != nil {
+ c.Check(order, check.DeepEquals, []interface{}{})
+ }
}
}
@@ -233,6 +237,7 @@ func (s *RouterSuite) TestOrderParam(c *check.C) {
} {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
+ c.Logf("tr.body: %s", tr.bodyContent)
var opts arvados.ListOptions
params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Assert(err, check.IsNil)
@@ -243,4 +248,40 @@ func (s *RouterSuite) TestOrderParam(c *check.C) {
c.Check(params["order"], check.DeepEquals, []interface{}{"foo", "bar desc"})
}
}
+
+ for i, tr := range []testReq{
+ {method: "POST", param: map[string]interface{}{"include": "container_uuid,owner_uuid"}, json: true},
+ {method: "POST", param: map[string]interface{}{"include": "container_uuid,owner_uuid"}, json: false},
+ {method: "POST", param: map[string]interface{}{"include": "[\"container_uuid\", \"owner_uuid\"]"}, json: false},
+ {method: "POST", param: map[string]interface{}{"include": []string{"container_uuid", "owner_uuid"}}, json: true},
+ {method: "POST", param: map[string]interface{}{"include": []string{"container_uuid", "owner_uuid"}}, json: false},
+ } {
+ c.Logf("#%d, tr: %#v", i, tr)
+ {
+ req := tr.Request()
+ c.Logf("tr.body: %s", tr.bodyContent)
+ var opts arvados.ListOptions
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
+ c.Assert(err, check.IsNil)
+ c.Check(opts.Include, check.DeepEquals, []string{"container_uuid", "owner_uuid"})
+ if _, ok := params["include"].([]string); ok {
+ c.Check(params["include"], check.DeepEquals, []string{"container_uuid", "owner_uuid"})
+ } else {
+ c.Check(params["include"], check.DeepEquals, []interface{}{"container_uuid", "owner_uuid"})
+ }
+ }
+ {
+ req := tr.Request()
+ c.Logf("tr.body: %s", tr.bodyContent)
+ var opts arvados.GroupContentsOptions
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
+ c.Assert(err, check.IsNil)
+ c.Check(opts.Include, check.DeepEquals, []string{"container_uuid", "owner_uuid"})
+ if _, ok := params["include"].([]string); ok {
+ c.Check(params["include"], check.DeepEquals, []string{"container_uuid", "owner_uuid"})
+ } else {
+ c.Check(params["include"], check.DeepEquals, []interface{}{"container_uuid", "owner_uuid"})
+ }
+ }
+ }
}
diff --git a/lib/controller/router/response.go b/lib/controller/router/response.go
index 42b3435593..9c96b3d4a3 100644
--- a/lib/controller/router/response.go
+++ b/lib/controller/router/response.go
@@ -115,7 +115,7 @@ func (rtr *router) sendResponse(w http.ResponseWriter, req *http.Request, resp i
slice[i] = item
}
}
- if opts.Count == "none" {
+ if opts.Count == "none" || req.URL.Path == "/arvados/v1/computed_permissions" {
delete(tmp, "items_available")
}
} else {
@@ -138,17 +138,18 @@ func (rtr *router) sendError(w http.ResponseWriter, err error) {
}
var infixMap = map[string]interface{}{
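+	// Each key is the five-character type infix found in Arvados
+	// UUIDs, e.g. the "4zz18" in "zzzzz-4zz18-0123456789abcde"
+	// identifies a collection.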
- "gj3su": arvados.APIClientAuthorization{},
+ "2x53u": arvados.VirtualMachine{},
"4zz18": arvados.Collection{},
- "xvhdp": arvados.ContainerRequest{},
+ "57u5n": arvados.Link{},
+ "7fd4e": arvados.Workflow{},
+ "bi6l4": arvados.KeepService{},
"dz642": arvados.Container{},
+ "fngyi": arvados.AuthorizedKey{},
+ "gj3su": arvados.APIClientAuthorization{},
"j7d0g": arvados.Group{},
- "8i9sb": arvados.Job{},
- "d1hrv": arvados.PipelineInstance{},
- "p5p6p": arvados.PipelineTemplate{},
- "j58dm": arvados.Specimen{},
- "q1cn2": arvados.Trait{},
- "7fd4e": arvados.Workflow{},
+ "o0j2j": arvados.Link{},
+ "tpzed": arvados.User{},
+ "xvhdp": arvados.ContainerRequest{},
}
var specialKindTransforms = map[string]string{
diff --git a/lib/controller/router/router.go b/lib/controller/router/router.go
index 054bcffaf7..271defd342 100644
--- a/lib/controller/router/router.go
+++ b/lib/controller/router/router.go
@@ -9,6 +9,8 @@ import (
"fmt"
"math"
"net/http"
+ "net/url"
+ "strconv"
"strings"
"git.arvados.org/arvados.git/lib/controller/api"
@@ -27,6 +29,9 @@ type router struct {
}
type Config struct {
+ // Services.ContainerWebServices section from cluster config.
+ ContainerWebServices *arvados.ServiceWithPortRange
+
// Return an error if request body exceeds this size. 0 means
// unlimited.
MaxRequestSize int
@@ -184,6 +189,13 @@ func (rtr *router) addRoutes() {
return rtr.backend.CollectionUntrash(ctx, *opts.(*arvados.UntrashOptions))
},
},
+ {
+ arvados.EndpointComputedPermissionList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ComputedPermissionList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
{
arvados.EndpointContainerCreate,
func() interface{} { return &arvados.CreateOptions{} },
@@ -472,41 +484,6 @@ func (rtr *router) addRoutes() {
return rtr.backend.LogDelete(ctx, *opts.(*arvados.DeleteOptions))
},
},
- {
- arvados.EndpointSpecimenCreate,
- func() interface{} { return &arvados.CreateOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.SpecimenCreate(ctx, *opts.(*arvados.CreateOptions))
- },
- },
- {
- arvados.EndpointSpecimenUpdate,
- func() interface{} { return &arvados.UpdateOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.SpecimenUpdate(ctx, *opts.(*arvados.UpdateOptions))
- },
- },
- {
- arvados.EndpointSpecimenGet,
- func() interface{} { return &arvados.GetOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.SpecimenGet(ctx, *opts.(*arvados.GetOptions))
- },
- },
- {
- arvados.EndpointSpecimenList,
- func() interface{} { return &arvados.ListOptions{Limit: -1} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.SpecimenList(ctx, *opts.(*arvados.ListOptions))
- },
- },
- {
- arvados.EndpointSpecimenDelete,
- func() interface{} { return &arvados.DeleteOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.SpecimenDelete(ctx, *opts.(*arvados.DeleteOptions))
- },
- },
{
arvados.EndpointAPIClientAuthorizationCreate,
func() interface{} { return &arvados.CreateOptions{} },
@@ -720,23 +697,7 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
ctx = arvados.ContextWithRequestID(ctx, req.Header.Get("X-Request-Id"))
req = req.WithContext(ctx)
- // Extract the token UUIDs (or a placeholder for v1 tokens)
- var tokenUUIDs []string
- for _, t := range creds.Tokens {
- if strings.HasPrefix(t, "v2/") {
- tokenParts := strings.Split(t, "/")
- if len(tokenParts) >= 3 {
- tokenUUIDs = append(tokenUUIDs, tokenParts[1])
- }
- } else {
- end := t
- if len(t) > 5 {
- end = t[len(t)-5:]
- }
- tokenUUIDs = append(tokenUUIDs, "v1 token ending in "+end)
- }
- }
- httpserver.SetResponseLogFields(ctx, logrus.Fields{"tokenUUIDs": tokenUUIDs})
+ httpserver.SetResponseLogFields(ctx, logrus.Fields{"tokenUUIDs": creds.TokenUUIDs()})
logger.WithFields(logrus.Fields{
"apiEndpoint": endpoint,
@@ -754,6 +715,9 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
}
func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if rtr.routeAsContainerHTTPProxy(w, r) {
+ return
+ }
switch strings.SplitN(strings.TrimLeft(r.URL.Path, "/"), "/", 2)[0] {
case "login", "logout", "auth":
default:
@@ -795,3 +759,92 @@ func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
rtr.mux.ServeHTTP(w, r)
}
+
+// If req is a container http proxy request, handle it and return
+// true. Otherwise, return false.
+func (rtr *router) routeAsContainerHTTPProxy(w http.ResponseWriter, req *http.Request) bool {
+ if rtr.config.ContainerWebServices == nil {
+ return false
+ }
+ configurl := url.URL(rtr.config.ContainerWebServices.ExternalURL)
+ confhostname := configurl.Hostname()
+
+	// Use req.Host (not req.URL) as the requested host. Parse it
+	// with url.URL, whose behavior differs from net.SplitHostPort:
+	// the port must be numeric, and brackets are stripped even when
+	// there is no port.
+ requrl := url.URL{Host: req.Host}
+ reqhostname := requrl.Hostname()
+ reqport := requrl.Port()
+ reqportnum, _ := strconv.Atoi(reqport)
+
+ if strings.EqualFold(confhostname, reqhostname) &&
+ rtr.config.ContainerWebServices.ExternalPortMin > 0 &&
+ rtr.config.ContainerWebServices.ExternalPortMin <= reqportnum &&
+ rtr.config.ContainerWebServices.ExternalPortMax >= reqportnum {
+ // Config uses a port range instead of a wildcard
+ // host. Pass the port number (like ":1234") as the
+ // target. The ContainerHTTPProxy API method will
+ // figure out which container it is currently assigned
+ // to.
+ rtr.serveContainerHTTPProxy(w, req, fmt.Sprintf(":%d", reqportnum))
+ return true
+ } else if !strings.HasPrefix(confhostname, "*") {
+ // Feature disabled by config
+ return false
+ }
+
+	// Check that the requested port matches the ExternalURL port.
+	// We don't know the request scheme, so when comparing implicit
+	// and explicit ways of spelling "default port for this scheme"
+	// we assume the scheme was "https".
+ confport := configurl.Port()
+ if !(reqport == confport ||
+ (reqport == "" && confport == "443") ||
+ (reqport == "443" && confport == "")) {
+ return false
+ }
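+	// Extract the wildcard part of the request host. For example,
+	// with ExternalURL host "*.containers.zzzzz.example.com", a
+	// request for host
+	// "zzzzz-dz642-0123456789abcde-8000.containers.zzzzz.example.com"
+	// yields the target "zzzzz-dz642-0123456789abcde-8000".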
+ targetlen := len(reqhostname) - len(confhostname) + 1
+ if targetlen < 1 ||
+ !strings.EqualFold(reqhostname[targetlen:], confhostname[1:]) {
+ // Request host does not match config wildcard, so
+ // this is not a container http proxy request.
+ return false
+ }
+ target := reqhostname[:targetlen]
+ rtr.serveContainerHTTPProxy(w, req, target)
+ return true
+}
+
+func (rtr *router) serveContainerHTTPProxy(w http.ResponseWriter, req *http.Request, target string) {
+ // This API bypasses the generic auth middleware in
+ // addRoute(), so here we need to load tokens into ctx, log
+ // their UUIDs, and propagate the incoming X-Request-Id.
+ ctx := req.Context()
+ if cookie, err := req.Cookie("arvados_api_token"); err == nil && len(cookie.Value) != 0 {
+ if token, err := auth.DecodeTokenCookie(cookie.Value); err == nil {
+ creds := auth.NewCredentials(string(token))
+ ctx = auth.NewContext(ctx, creds)
+ httpserver.SetResponseLogFields(ctx, logrus.Fields{"tokenUUIDs": creds.TokenUUIDs()})
+ }
+ }
+
+ ctx = arvados.ContextWithRequestID(ctx, req.Header.Get("X-Request-Id"))
+ req = req.WithContext(ctx)
+
+ // Load the NoForward value from the X-Arvados-No-Forward
+ // header, but don't pass the header through in the proxied
+ // request.
+ noForward := req.Header.Get("X-Arvados-No-Forward") != ""
+ req.Header.Del("X-Arvados-No-Forward")
+
+ handler, err := rtr.backend.ContainerHTTPProxy(req.Context(), arvados.ContainerHTTPProxyOptions{
+ Target: target,
+ Request: req,
+ NoForward: noForward,
+ })
+ if err != nil {
+ rtr.sendError(w, err)
+ return
+ }
+ handler.ServeHTTP(w, req)
+}
diff --git a/lib/controller/router/router_test.go b/lib/controller/router/router_test.go
index a8359a4400..2f7f70c10b 100644
--- a/lib/controller/router/router_test.go
+++ b/lib/controller/router/router_test.go
@@ -19,6 +19,7 @@ import (
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
"github.com/gorilla/mux"
check "gopkg.in/check.v1"
)
@@ -40,6 +41,13 @@ func (s *RouterSuite) SetUpTest(c *check.C) {
s.rtr = &router{
mux: mux.NewRouter(),
backend: &s.stub,
+ config: Config{
+ ContainerWebServices: &arvados.ServiceWithPortRange{
+ Service: arvados.Service{
+ ExternalURL: arvados.URL{Host: "*.containers.zzzzz.example.com"},
+ },
+ },
+ },
}
s.rtr.addRoutes()
}
@@ -56,6 +64,7 @@ func (s *RouterSuite) TestOptions(c *check.C) {
shouldStatus int // zero value means 200
shouldCall string
withOptions interface{}
+ checkOptions func(interface{}) // if non-nil, call instead of checking withOptions
}{
{
method: "GET",
@@ -283,6 +292,37 @@ func (s *RouterSuite) TestOptions(c *check.C) {
shouldStatus: http.StatusNotFound,
shouldCall: "",
},
+ {
+ comment: "container http proxy no_forward=true",
+ unauthenticated: true,
+ method: "POST",
+ path: "/foo/bar",
+ header: http.Header{
+ "Cookie": {"arvados_api_token=" + auth.EncodeTokenCookie([]byte(arvadostest.ActiveToken))},
+ "Host": {arvadostest.RunningContainerUUID + "-12345.containers.zzzzz.example.com"},
+ "X-Arvados-No-Forward": {"1"},
+ "X-Example-Header": {"preserved header value"},
+ },
+ shouldCall: "ContainerHTTPProxy",
+ checkOptions: func(gotOptions interface{}) {
+ opts, _ := gotOptions.(arvados.ContainerHTTPProxyOptions)
+ if !c.Check(opts, check.NotNil) {
+ return
+ }
+ c.Check(opts.Request.Method, check.Equals, "POST")
+ c.Check(opts.Request.URL.Path, check.Equals, "/foo/bar")
+ c.Check(opts.Request.Host, check.Equals, arvadostest.RunningContainerUUID+"-12345.containers.zzzzz.example.com")
+ c.Check(opts.Request.Header, check.DeepEquals, http.Header{
+ "Cookie": {"arvados_api_token=" + auth.EncodeTokenCookie([]byte(arvadostest.ActiveToken))},
+ "X-Example-Header": {"preserved header value"},
+ })
+ opts.Request = nil
+ c.Check(opts, check.DeepEquals, arvados.ContainerHTTPProxyOptions{
+ Target: arvadostest.RunningContainerUUID + "-12345",
+ NoForward: true,
+ })
+ },
+ },
} {
// Reset calls captured in previous trial
s.stub = arvadostest.APIStub{}
@@ -299,10 +339,15 @@ func (s *RouterSuite) TestOptions(c *check.C) {
calls := s.stub.Calls(nil)
if trial.shouldCall == "" {
c.Check(calls, check.HasLen, 0, comment)
- } else if len(calls) != 1 {
- c.Check(calls, check.HasLen, 1, comment)
+ continue
+ }
+ if !c.Check(calls, check.HasLen, 1, comment) {
+ continue
+ }
+ c.Check(calls[0].Method, isMethodNamed, trial.shouldCall, comment)
+ if trial.checkOptions != nil {
+ trial.checkOptions(calls[0].Options)
} else {
- c.Check(calls[0].Method, isMethodNamed, trial.shouldCall, comment)
c.Check(calls[0].Options, check.DeepEquals, trial.withOptions, comment)
}
}
@@ -318,6 +363,7 @@ func (s *RouterIntegrationSuite) SetUpTest(c *check.C) {
cluster := &arvados.Cluster{}
cluster.TLS.Insecure = true
arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+ arvadostest.SetServiceURL(&cluster.Services.ContainerWebServices.Service, "https://*.containers.zzzzz.example.com")
url, _ := url.Parse("https://" + os.Getenv("ARVADOS_TEST_API_HOST"))
s.rtr = New(rpc.NewConn("zzzzz", url, true, rpc.PassthroughTokenProvider), Config{})
}
@@ -550,6 +596,40 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
}
}
+func (s *RouterIntegrationSuite) TestIncluded(c *check.C) {
+ for _, trial := range []struct {
+ uuid string
+ expectOwnerUUID string
+ expectOwnerKind string
+ }{
+ {
+ uuid: arvadostest.ASubprojectUUID,
+ expectOwnerUUID: arvadostest.AProjectUUID,
+ expectOwnerKind: "arvados#group",
+ },
+ {
+ uuid: arvadostest.AProjectUUID,
+ expectOwnerUUID: arvadostest.ActiveUserUUID,
+ expectOwnerKind: "arvados#user",
+ },
+ } {
+ c.Logf("trial: %#v", trial)
+ token := arvadostest.ActiveTokenV2
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", `/arvados/v1/groups/contents?include=owner_uuid&filters=[["uuid","=","`+trial.uuid+`"]]`, true, nil, nil, jresp)
+ c.Check(rr.Code, check.Equals, http.StatusOK)
+
+ c.Assert(jresp["included"], check.FitsTypeOf, []interface{}{})
+ included, ok := jresp["included"].([]interface{})
+ c.Assert(ok, check.Equals, true)
+ c.Assert(included, check.HasLen, 1)
+ owner, ok := included[0].(map[string]interface{})
+ c.Assert(ok, check.Equals, true)
+ c.Check(owner["kind"], check.Equals, trial.expectOwnerKind)
+ c.Check(owner["uuid"], check.Equals, trial.expectOwnerUUID)
+ }
+}
+
func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {
_, rr := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, "HEAD", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, true, nil, nil, nil)
c.Check(rr.Code, check.Equals, http.StatusOK)
@@ -623,10 +703,30 @@ func (s *RouterIntegrationSuite) TestCORS(c *check.C) {
}
}
+func (s *RouterIntegrationSuite) TestComputedPermissionList(c *check.C) {
+ token := arvadostest.AdminToken
+
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", `/arvados/v1/computed_permissions?filters=[["user_uuid","=","`+arvadostest.ActiveUserUUID+`"],["target_uuid","=","`+arvadostest.AProjectUUID+`"]]&select=["perm_level"]`, true, nil, nil, jresp)
+ c.Check(rr.Code, check.Equals, http.StatusOK)
+ c.Check(jresp["items_available"], check.IsNil)
+ if c.Check(jresp["items"], check.HasLen, 1) {
+ item := jresp["items"].([]interface{})[0].(map[string]interface{})
+ c.Check(item, check.DeepEquals, map[string]interface{}{
+ "kind": "arvados#computedPermission",
+ "perm_level": "can_manage",
+ })
+ }
+}
+
func doRequest(c *check.C, rtr http.Handler, token, method, path string, auth bool, hdrs http.Header, body io.Reader, jresp map[string]interface{}) (*http.Request, *httptest.ResponseRecorder) {
req := httptest.NewRequest(method, path, body)
for k, v := range hdrs {
- req.Header[k] = v
+ if k == "Host" && len(v) == 1 {
+ req.Host = v[0]
+ } else {
+ req.Header[k] = v
+ }
}
if auth {
req.Header.Set("Authorization", "Bearer "+token)
diff --git a/lib/controller/rpc/conn.go b/lib/controller/rpc/conn.go
index c6be679a25..c89ef1e770 100644
--- a/lib/controller/rpc/conn.go
+++ b/lib/controller/rpc/conn.go
@@ -60,7 +60,7 @@ func NewConn(clusterID string, url *url.URL, insecure bool, tp TokenProvider) *C
// It's not safe to copy *http.DefaultTransport
// because it has a mutex (which might be locked)
// protecting a private map (which might not be nil).
- // So we build our own, using the Go 1.12 default
+ // So we build our own, using the Go 1.23 default
// values, ignoring any changes the application has
// made to http.DefaultTransport.
transport = &http.Transport{
@@ -69,6 +69,7 @@ func NewConn(clusterID string, url *url.URL, insecure bool, tp TokenProvider) *C
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
+ ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
@@ -341,6 +342,13 @@ func (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.Untrash
return resp, err
}
+func (conn *Conn) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {
+ ep := arvados.EndpointComputedPermissionList
+ var resp arvados.ComputedPermissionList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
func (conn *Conn) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
ep := arvados.EndpointContainerCreate
var resp arvados.Container
@@ -397,6 +405,10 @@ func (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOption
return resp, err
}
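+// ContainerHTTPProxy returns a handler that proxies a container web
+// service request to the remote controller, setting the
+// X-Arvados-No-Forward header, which the receiving router turns into
+// NoForward=true so the request is not forwarded again.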
+func (conn *Conn) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {
+ return conn.reverseProxy("", http.Header{"X-Arvados-No-Forward": {"1"}}), nil
+}
+
// ContainerSSH returns a connection to the out-of-band SSH server for
// a running container. If the returned error is nil, the caller is
// responsible for closing sshconn.Conn.
@@ -537,16 +549,24 @@ func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, options a
}
func (conn *Conn) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (resp http.Handler, err error) {
- proxy := &httputil.ReverseProxy{
+ return conn.reverseProxy(fmt.Sprintf("no_forward=%v", options.NoForward), nil), nil
+}
+
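+// reverseProxy returns an http.Handler that forwards the incoming
+// request to conn.baseURL, preserving the request path, optionally
+// replacing the query string and setting additional header fields.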
+func (conn *Conn) reverseProxy(setRawQuery string, setHeader http.Header) http.Handler {
+ return &httputil.ReverseProxy{
Transport: conn.httpClient.Transport,
Director: func(r *http.Request) {
u := conn.baseURL
u.Path = r.URL.Path
- u.RawQuery = fmt.Sprintf("no_forward=%v", options.NoForward)
+ if setRawQuery != "" {
+ u.RawQuery = setRawQuery
+ }
+ for k, v := range setHeader {
+ r.Header[k] = v
+ }
r.URL = &u
},
}
- return proxy, nil
}
func (conn *Conn) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {
@@ -682,41 +702,6 @@ func (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions)
return resp, err
}
-func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
- ep := arvados.EndpointSpecimenCreate
- var resp arvados.Specimen
- err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
- return resp, err
-}
-
-func (conn *Conn) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
- ep := arvados.EndpointSpecimenUpdate
- var resp arvados.Specimen
- err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
- return resp, err
-}
-
-func (conn *Conn) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
- ep := arvados.EndpointSpecimenGet
- var resp arvados.Specimen
- err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
- return resp, err
-}
-
-func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
- ep := arvados.EndpointSpecimenList
- var resp arvados.SpecimenList
- err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
- return resp, err
-}
-
-func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
- ep := arvados.EndpointSpecimenDelete
- var resp arvados.Specimen
- err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
- return resp, err
-}
-
func (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
ep := arvados.EndpointSysTrashSweep
var resp struct{}
diff --git a/lib/controller/rpc/conn_test.go b/lib/controller/rpc/conn_test.go
index 0d1200fe12..ed26e04117 100644
--- a/lib/controller/rpc/conn_test.go
+++ b/lib/controller/rpc/conn_test.go
@@ -100,23 +100,24 @@ func (s *RPCSuite) TestCollectionCreate(c *check.C) {
c.Check(coll.UUID, check.HasLen, 27)
}
-func (s *RPCSuite) TestSpecimenCRUD(c *check.C) {
+func (s *RPCSuite) TestGroupCRUD(c *check.C) {
s.setupConn(c, os.Getenv("ARVADOS_TEST_API_HOST"))
- sp, err := s.conn.SpecimenCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
- "owner_uuid": arvadostest.ActiveUserUUID,
- "properties": map[string]string{"foo": "bar"},
+ sp, err := s.conn.GroupCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "group_class": "project",
+ "owner_uuid": arvadostest.ActiveUserUUID,
+ "properties": map[string]string{"foo": "bar"},
}})
c.Check(err, check.IsNil)
c.Check(sp.UUID, check.HasLen, 27)
c.Check(sp.Properties, check.HasLen, 1)
c.Check(sp.Properties["foo"], check.Equals, "bar")
- spGet, err := s.conn.SpecimenGet(s.ctx, arvados.GetOptions{UUID: sp.UUID})
+ spGet, err := s.conn.GroupGet(s.ctx, arvados.GetOptions{UUID: sp.UUID})
c.Check(err, check.IsNil)
c.Check(spGet.UUID, check.Equals, sp.UUID)
c.Check(spGet.Properties["foo"], check.Equals, "bar")
- spList, err := s.conn.SpecimenList(s.ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
+ spList, err := s.conn.GroupList(s.ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
c.Check(err, check.IsNil)
c.Check(spList.ItemsAvailable, check.Equals, 1)
c.Assert(spList.Items, check.HasLen, 1)
@@ -124,12 +125,12 @@ func (s *RPCSuite) TestSpecimenCRUD(c *check.C) {
c.Check(spList.Items[0].Properties["foo"], check.Equals, "bar")
anonCtx := context.WithValue(context.Background(), contextKeyTestTokens, []string{arvadostest.AnonymousToken})
- spList, err = s.conn.SpecimenList(anonCtx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
+ spList, err = s.conn.GroupList(anonCtx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
c.Check(err, check.IsNil)
c.Check(spList.ItemsAvailable, check.Equals, 0)
c.Check(spList.Items, check.HasLen, 0)
- spDel, err := s.conn.SpecimenDelete(s.ctx, arvados.DeleteOptions{UUID: sp.UUID})
+ spDel, err := s.conn.GroupDelete(s.ctx, arvados.DeleteOptions{UUID: sp.UUID})
c.Check(err, check.IsNil)
c.Check(spDel.UUID, check.Equals, sp.UUID)
}
diff --git a/lib/controller/trash.go b/lib/controller/trash.go
index 99e7aec0b6..662ea26751 100644
--- a/lib/controller/trash.go
+++ b/lib/controller/trash.go
@@ -25,7 +25,7 @@ func (h *Handler) periodicWorker(workerName string, interval time.Duration, lock
return
}
defer locker.Unlock()
- for time.Sleep(interval); ctx.Err() == nil; time.Sleep(interval) {
+ for ctxSleep(ctx, interval); ctx.Err() == nil; ctxSleep(ctx, interval) {
if !locker.Check() {
// context canceled
return
@@ -46,7 +46,19 @@ func (h *Handler) trashSweepWorker() {
}
func (h *Handler) containerLogSweepWorker() {
- h.periodicWorker("container log sweep", h.Cluster.Containers.Logging.SweepInterval.Duration(), dblock.ContainerLogSweep, func(ctx context.Context) error {
+ // Since #21611 we don't expect any new log entries, so the
+ // periodic worker only runs once, then becomes a no-op.
+ //
+ // The old Containers.Logging.SweepInterval config is removed.
+ // We use TrashSweepInterval here instead, for testing
+ // reasons: it prevents the default integration-testing
+ // controller service (whose TrashSweepInterval is 0) from
+ // acquiring the dblock.
+ done := false
+ h.periodicWorker("container log sweep", h.Cluster.Collections.TrashSweepInterval.Duration(), dblock.ContainerLogSweep, func(ctx context.Context) error {
+ if done {
+ return nil
+ }
db, err := h.dbConnector.GetDB(ctx)
if err != nil {
return err
@@ -56,9 +68,7 @@ DELETE FROM logs
USING containers
WHERE logs.object_uuid=containers.uuid
AND logs.event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat', 'hoststat', 'node', 'container', 'keepstore')
- AND containers.log IS NOT NULL
- AND now() - containers.finished_at > $1::interval`,
- h.Cluster.Containers.Logging.MaxAge.String())
+ AND containers.log IS NOT NULL`)
if err != nil {
return err
}
@@ -68,7 +78,19 @@ DELETE FROM logs
logger.WithError(err).Warn("unexpected error from RowsAffected()")
} else {
logger.WithField("rows", rows).Info("deleted rows from logs table")
+ if rows == 0 {
+ done = true
+ }
}
return nil
})
}
+
+// Sleep for the given duration, but return early if ctx cancels
+// before that.
+func ctxSleep(ctx context.Context, d time.Duration) {
+ select {
+ case <-ctx.Done():
+ case <-time.After(d):
+ }
+}
diff --git a/lib/costanalyzer/cmd.go b/lib/costanalyzer/cmd.go
deleted file mode 100644
index f2a7af4933..0000000000
--- a/lib/costanalyzer/cmd.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package costanalyzer
-
-import (
- "io"
- "time"
-
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
-)
-
-var Command = command{}
-
-type command struct {
- uuids arrayFlags
- resultsDir string
- cache bool
- begin time.Time
- end time.Time
-}
-
-// RunCommand implements the subcommand "costanalyzer ..."
-func (c command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
- var err error
- logger := ctxlog.New(stderr, "text", "info")
- logger.SetFormatter(cmd.NoPrefixFormatter{})
-
- exitcode, err := c.costAnalyzer(prog, args, logger, stdout, stderr)
- if err != nil {
- logger.Error("\n" + err.Error())
- }
- return exitcode
-}
diff --git a/lib/costanalyzer/costanalyzer.go b/lib/costanalyzer/costanalyzer.go
deleted file mode 100644
index e68e2cb8c1..0000000000
--- a/lib/costanalyzer/costanalyzer.go
+++ /dev/null
@@ -1,676 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package costanalyzer
-
-import (
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "strconv"
- "strings"
- "time"
-
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
- "github.com/sirupsen/logrus"
-)
-
-const timestampFormat = "2006-01-02T15:04:05"
-
-var pagesize = 1000
-
-type nodeInfo struct {
- // Legacy (records created by Arvados Node Manager with Arvados <= 1.4.3)
- Properties struct {
- CloudNode struct {
- Price float64
- Size string
- } `json:"cloud_node"`
- }
- // Modern
- ProviderType string
- Price float64
- Preemptible bool
-}
-
-type consumption struct {
- cost float64
- duration float64
-}
-
-func (c *consumption) Add(n consumption) {
- c.cost += n.cost
- c.duration += n.duration
-}
-
-type arrayFlags []string
-
-func (i *arrayFlags) String() string {
- return ""
-}
-
-func (i *arrayFlags) Set(value string) error {
- for _, s := range strings.Split(value, ",") {
- *i = append(*i, s)
- }
- return nil
-}
-
-func (c *command) parseFlags(prog string, args []string, logger *logrus.Logger, stderr io.Writer) (ok bool, exitCode int) {
- var beginStr, endStr string
- flags := flag.NewFlagSet("", flag.ContinueOnError)
- flags.Usage = func() {
- fmt.Fprintf(flags.Output(), `
-Usage:
- %s [options ...] [UUID ...]
-
- This program analyzes the cost of Arvados container requests and calculates
- the total cost across all requests. At least one UUID or a timestamp range
- must be specified.
-
- When the '-output' option is specified, a set of CSV files with cost details
- will be written to the provided directory. Each file is a CSV report that lists
- all the containers used to fulfill the container request, together with the
- machine type and cost of each container.
-
- When supplied with the UUID of a container request, it will calculate the
- cost of that container request and all its children.
-
- When supplied with the UUID of a collection, it will see if there is a
- container_request UUID in the properties of the collection, and if so, it
- will calculate the cost of that container request and all its children.
-
- When supplied with a project UUID or when supplied with multiple container
- request or collection UUIDs, it will calculate the total cost for all
- supplied UUIDs.
-
- When supplied with a 'begin' and 'end' timestamp (format:
- %s), it will calculate the cost for all top-level container
- requests whose containers finished during the specified interval.
-
- The total cost calculation takes container reuse into account: if a container
- was reused between several container requests, its cost will only be counted
- once.
-
- Caveats:
-
- - This program uses the cost data from config.yml at the time of the
- execution of the container, stored in the 'node.json' file in its log
- collection. If the cost data was not correctly configured at the time the
- container was executed, the output from this program will be incorrect.
-
- - If a container was run on a preemptible ("spot") instance, the cost data
- reported by this program may be wildly inaccurate, because it does not have
- access to the spot pricing in effect for the node when the container ran. The
- UUID report file that is generated when the '-output' option is specified has
- a column that indicates the preemptible state of the instance that ran the
- container.
-
- - This program does not take into account overhead costs like the time spent
- starting and stopping compute nodes that run containers, the cost of the
- permanent cloud nodes that provide the Arvados services, the cost of data
- stored in Arvados, etc.
-
- - When provided with a project UUID, subprojects will not be considered.
-
- In order to get the data for the UUIDs supplied, the ARVADOS_API_HOST and
- ARVADOS_API_TOKEN environment variables must be set.
-
- This program prints the total dollar amount from the aggregate cost
- accounting across all provided UUIDs on stdout.
-
-Options:
-`, prog, timestampFormat)
- flags.PrintDefaults()
- }
- loglevel := flags.String("log-level", "info", "logging `level` (debug, info, ...)")
- flags.StringVar(&c.resultsDir, "output", "", "output `directory` for the CSV reports")
- flags.StringVar(&beginStr, "begin", "", fmt.Sprintf("timestamp `begin` for date range operation (format: %s)", timestampFormat))
- flags.StringVar(&endStr, "end", "", fmt.Sprintf("timestamp `end` for date range operation (format: %s)", timestampFormat))
- flags.BoolVar(&c.cache, "cache", true, "create and use a local disk cache of Arvados objects")
- if ok, code := cmd.ParseFlags(flags, prog, args, "[uuid ...]", stderr); !ok {
- return false, code
- }
- c.uuids = flags.Args()
-
- if (len(beginStr) != 0 && len(endStr) == 0) || (len(beginStr) == 0 && len(endStr) != 0) {
- fmt.Fprintf(stderr, "When specifying a date range, both begin and end must be specified (try -help)\n")
- return false, 2
- }
-
- if len(beginStr) != 0 {
- var errB, errE error
- c.begin, errB = time.Parse(timestampFormat, beginStr)
- c.end, errE = time.Parse(timestampFormat, endStr)
- if (errB != nil) || (errE != nil) {
- fmt.Fprintf(stderr, "When specifying a date range, both begin and end must be of the format %s %+v, %+v\n", timestampFormat, errB, errE)
- return false, 2
- }
- }
-
- if (len(c.uuids) < 1) && (len(beginStr) == 0) {
- fmt.Fprintf(stderr, "error: no uuid(s) provided (try -help)\n")
- return false, 2
- }
-
- lvl, err := logrus.ParseLevel(*loglevel)
- if err != nil {
- fmt.Fprintf(stderr, "invalid argument to -log-level: %s\n", err)
- return false, 2
- }
- logger.SetLevel(lvl)
- if !c.cache {
- logger.Debug("Caching disabled")
- }
- return true, 0
-}
-
-func ensureDirectory(logger *logrus.Logger, dir string) (err error) {
- statData, err := os.Stat(dir)
- if os.IsNotExist(err) {
- err = os.MkdirAll(dir, 0700)
- if err != nil {
- return fmt.Errorf("error creating directory %s: %s", dir, err.Error())
- }
- } else {
- if !statData.IsDir() {
- return fmt.Errorf("the path %s is not a directory", dir)
- }
- }
- return
-}
-
-func addContainerLine(logger *logrus.Logger, node nodeInfo, cr arvados.ContainerRequest, container arvados.Container) (string, consumption) {
- var csv string
- var containerConsumption consumption
- csv = cr.UUID + ","
- csv += cr.Name + ","
- csv += container.UUID + ","
- csv += string(container.State) + ","
- if container.StartedAt != nil {
- csv += container.StartedAt.String() + ","
- } else {
- csv += ","
- }
-
- var delta time.Duration
- if container.FinishedAt != nil {
- csv += container.FinishedAt.String() + ","
- delta = container.FinishedAt.Sub(*container.StartedAt)
- csv += strconv.FormatFloat(delta.Seconds(), 'f', 3, 64) + ","
- } else {
- csv += ",,"
- }
- var price float64
- var size string
- if node.Properties.CloudNode.Price != 0 {
- price = node.Properties.CloudNode.Price
- size = node.Properties.CloudNode.Size
- } else {
- price = node.Price
- size = node.ProviderType
- }
- containerConsumption.cost = delta.Seconds() / 3600 * price
- containerConsumption.duration = delta.Seconds()
- csv += size + "," + fmt.Sprintf("%+v", node.Preemptible) + "," + strconv.FormatFloat(price, 'f', 8, 64) + "," + strconv.FormatFloat(containerConsumption.cost, 'f', 8, 64) + "\n"
- return csv, containerConsumption
-}
-
-func loadCachedObject(logger *logrus.Logger, file string, uuid string, object interface{}) (reload bool) {
- reload = true
- if strings.Contains(uuid, "-j7d0g-") || strings.Contains(uuid, "-4zz18-") {
- // We do not cache projects or collections, they have no final state
- return
- }
- // See if we have a cached copy of this object
- _, err := os.Stat(file)
- if err != nil {
- return
- }
- data, err := ioutil.ReadFile(file)
- if err != nil {
- logger.Errorf("error reading %q: %s", file, err)
- return
- }
- err = json.Unmarshal(data, &object)
- if err != nil {
- logger.Errorf("failed to unmarshal json: %s: %s", data, err)
- return
- }
-
- // See if it is in a final state, if that makes sense
- switch v := object.(type) {
- case *arvados.ContainerRequest:
- if v.State == arvados.ContainerRequestStateFinal {
- reload = false
- logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
- }
- case *arvados.Container:
- if v.State == arvados.ContainerStateComplete || v.State == arvados.ContainerStateCancelled {
- reload = false
- logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
- }
- }
- return
-}
-
-// Load an Arvados object.
-func loadObject(logger *logrus.Logger, ac *arvados.Client, path string, uuid string, cache bool, object interface{}) (err error) {
- file := uuid + ".json"
-
- var reload bool
- var cacheDir string
-
- if !cache {
- reload = true
- } else {
- homeDir, err := os.UserHomeDir()
- if err != nil {
- reload = true
- logger.Info("Unable to determine current user home directory, not using cache")
- } else {
- cacheDir = homeDir + "/.cache/arvados/costanalyzer/"
- err = ensureDirectory(logger, cacheDir)
- if err != nil {
- reload = true
- logger.Infof("Unable to create cache directory at %s, not using cache: %s", cacheDir, err.Error())
- } else {
- reload = loadCachedObject(logger, cacheDir+file, uuid, object)
- }
- }
- }
- if !reload {
- return
- }
-
- if strings.Contains(uuid, "-j7d0g-") {
- err = ac.RequestAndDecode(&object, "GET", "arvados/v1/groups/"+uuid, nil, nil)
- } else if strings.Contains(uuid, "-xvhdp-") {
- err = ac.RequestAndDecode(&object, "GET", "arvados/v1/container_requests/"+uuid, nil, nil)
- } else if strings.Contains(uuid, "-dz642-") {
- err = ac.RequestAndDecode(&object, "GET", "arvados/v1/containers/"+uuid, nil, nil)
- } else if strings.Contains(uuid, "-4zz18-") {
- err = ac.RequestAndDecode(&object, "GET", "arvados/v1/collections/"+uuid, nil, nil)
- } else {
- err = fmt.Errorf("unsupported object type with UUID %q:\n %s", uuid, err)
- return
- }
- if err != nil {
- err = fmt.Errorf("error loading object with UUID %q:\n %s", uuid, err)
- return
- }
- encoded, err := json.MarshalIndent(object, "", " ")
- if err != nil {
- err = fmt.Errorf("error marshaling object with UUID %q:\n %s", uuid, err)
- return
- }
- if cacheDir != "" {
- err = ioutil.WriteFile(cacheDir+file, encoded, 0644)
- if err != nil {
- err = fmt.Errorf("error writing file %s:\n %s", file, err)
- return
- }
- }
- return
-}
-
-func getNode(arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, cr arvados.ContainerRequest) (node nodeInfo, err error) {
- if cr.LogUUID == "" {
- err = errors.New("no log collection")
- return
- }
-
- var collection arvados.Collection
- err = ac.RequestAndDecode(&collection, "GET", "arvados/v1/collections/"+cr.LogUUID, nil, nil)
- if err != nil {
- err = fmt.Errorf("error getting collection: %s", err)
- return
- }
-
- var fs arvados.CollectionFileSystem
- fs, err = collection.FileSystem(ac, kc)
- if err != nil {
- err = fmt.Errorf("error opening collection as filesystem: %s", err)
- return
- }
- var f http.File
- f, err = fs.Open("node.json")
- if err != nil {
- err = fmt.Errorf("error opening file 'node.json' in collection %s: %s", cr.LogUUID, err)
- return
- }
-
- err = json.NewDecoder(f).Decode(&node)
- if err != nil {
- err = fmt.Errorf("error reading file 'node.json' in collection %s: %s", cr.LogUUID, err)
- return
- }
- return
-}
-
-func getContainerRequests(ac *arvados.Client, filters []arvados.Filter) ([]arvados.ContainerRequest, error) {
- var allItems []arvados.ContainerRequest
- for {
- pagefilters := append([]arvados.Filter(nil), filters...)
- if len(allItems) > 0 {
- pagefilters = append(pagefilters, arvados.Filter{
- Attr: "uuid",
- Operator: ">",
- Operand: allItems[len(allItems)-1].UUID,
- })
- }
- var resp arvados.ContainerRequestList
- err := ac.RequestAndDecode(&resp, "GET", "arvados/v1/container_requests", nil, arvados.ResourceListParams{
- Filters: pagefilters,
- Limit: &pagesize,
- Order: "uuid",
- Count: "none",
- })
- if err != nil {
- return nil, fmt.Errorf("error querying container_requests: %w", err)
- }
- if len(resp.Items) == 0 {
- // no more pages
- return allItems, nil
- }
- allItems = append(allItems, resp.Items...)
- }
-}
-
-func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, resultsDir string, cache bool) (cost map[string]consumption, err error) {
- cost = make(map[string]consumption)
-
- var project arvados.Group
- err = loadObject(logger, ac, uuid, uuid, cache, &project)
- if err != nil {
- return nil, fmt.Errorf("error loading object %s: %s", uuid, err.Error())
- }
- allItems, err := getContainerRequests(ac, []arvados.Filter{
- {
- Attr: "owner_uuid",
- Operator: "=",
- Operand: project.UUID,
- },
- {
- Attr: "requesting_container_uuid",
- Operator: "=",
- Operand: nil,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
- }
- if len(allItems) == 0 {
- logger.Infof("No top level container requests found in project %s", uuid)
- return
- }
- logger.Infof("Collecting top level container requests in project %s", uuid)
- for _, cr := range allItems {
- crInfo, err := generateCrInfo(logger, cr.UUID, arv, ac, kc, resultsDir, cache)
- if err != nil {
- return nil, fmt.Errorf("error generating container_request CSV for %s: %s", cr.UUID, err)
- }
- for k, v := range crInfo {
- cost[k] = v
- }
- }
- return
-}
-
-func generateCrInfo(logger *logrus.Logger, uuid string, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, resultsDir string, cache bool) (cost map[string]consumption, err error) {
-
- cost = make(map[string]consumption)
-
- csv := "CR UUID,CR name,Container UUID,State,Started At,Finished At,Duration in seconds,Compute node type,Preemptible,Hourly node cost,Total cost\n"
- var tmpCsv string
- var total, tmpTotal consumption
- logger.Debugf("Processing %s", uuid)
-
- var crUUID = uuid
- if strings.Contains(uuid, "-4zz18-") {
- // This is a collection, find the associated container request (if any)
- var c arvados.Collection
- err = loadObject(logger, ac, uuid, uuid, cache, &c)
- if err != nil {
- return nil, fmt.Errorf("error loading collection object %s: %s", uuid, err)
- }
- value, ok := c.Properties["container_request"]
- if !ok {
- return nil, fmt.Errorf("error: collection %s does not have a 'container_request' property", uuid)
- }
- crUUID, ok = value.(string)
- if !ok {
- return nil, fmt.Errorf("error: collection %s does not have a 'container_request' property of the string type", uuid)
- }
- }
-
- // This is a container request, find the container
- var cr arvados.ContainerRequest
- err = loadObject(logger, ac, crUUID, crUUID, cache, &cr)
- if err != nil {
- return nil, fmt.Errorf("error loading cr object %s: %s", uuid, err)
- }
- if len(cr.ContainerUUID) == 0 {
- // Nothing to do! E.g. a CR in 'Uncommitted' state.
- logger.Infof("No container associated with container request %s, skipping", crUUID)
- return nil, nil
- }
- var container arvados.Container
- err = loadObject(logger, ac, crUUID, cr.ContainerUUID, cache, &container)
- if err != nil {
- return nil, fmt.Errorf("error loading container object %s: %s", cr.ContainerUUID, err)
- }
-
- topNode, err := getNode(arv, ac, kc, cr)
- if err != nil {
- logger.Errorf("Skipping container request %s: error getting node %s: %s", cr.UUID, cr.UUID, err)
- return nil, nil
- }
- tmpCsv, total = addContainerLine(logger, topNode, cr, container)
- csv += tmpCsv
- cost[container.UUID] = total
-
- // Find all container requests that have the container we
- // found above as requesting_container_uuid.
- allItems, err := getContainerRequests(ac, []arvados.Filter{{
- Attr: "requesting_container_uuid",
- Operator: "=",
- Operand: container.UUID,
- }})
- logger.Infof("Looking up %d child containers for container %s (%s)", len(allItems), container.UUID, container.FinishedAt)
- progressTicker := time.NewTicker(5 * time.Second)
- defer progressTicker.Stop()
- for i, cr2 := range allItems {
- select {
- case <-progressTicker.C:
- logger.Infof("... %d of %d", i+1, len(allItems))
- default:
- }
- node, err := getNode(arv, ac, kc, cr2)
- if err != nil {
- logger.Errorf("Skipping container request %s: error getting node %s: %s", cr2.UUID, cr2.UUID, err)
- continue
- }
- logger.Debug("Child container: " + cr2.ContainerUUID)
- var c2 arvados.Container
- err = loadObject(logger, ac, cr.UUID, cr2.ContainerUUID, cache, &c2)
- if err != nil {
- return nil, fmt.Errorf("error loading object %s: %s", cr2.ContainerUUID, err)
- }
- tmpCsv, tmpTotal = addContainerLine(logger, node, cr2, c2)
- cost[cr2.ContainerUUID] = tmpTotal
- csv += tmpCsv
- total.Add(tmpTotal)
- }
- logger.Debug("Done collecting child containers")
-
- csv += "TOTAL,,,,,," + strconv.FormatFloat(total.duration, 'f', 3, 64) + ",,,," + strconv.FormatFloat(total.cost, 'f', 2, 64) + "\n"
-
- if resultsDir != "" {
- // Write the resulting CSV file
- fName := resultsDir + "/" + crUUID + ".csv"
- err = ioutil.WriteFile(fName, []byte(csv), 0644)
- if err != nil {
- return nil, fmt.Errorf("error writing file with path %s: %s", fName, err.Error())
- }
- logger.Infof("\nUUID report in %s", fName)
- }
-
- return
-}
-
-func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger, stdout, stderr io.Writer) (exitcode int, err error) {
- var ok bool
- ok, exitcode = c.parseFlags(prog, args, logger, stderr)
- if !ok {
- return
- }
- if c.resultsDir != "" {
- err = ensureDirectory(logger, c.resultsDir)
- if err != nil {
- exitcode = 3
- return
- }
- }
-
- uuidChannel := make(chan string)
-
- // Arvados Client setup
- arv, err := arvadosclient.MakeArvadosClient()
- if err != nil {
- err = fmt.Errorf("error creating Arvados object: %s", err)
- exitcode = 1
- return
- }
- kc, err := keepclient.MakeKeepClient(arv)
- if err != nil {
- err = fmt.Errorf("error creating Keep object: %s", err)
- exitcode = 1
- return
- }
-
- ac := arvados.NewClientFromEnv()
-
- // Populate uuidChannel with the requested uuid list
- go func() {
- defer close(uuidChannel)
- for _, uuid := range c.uuids {
- uuidChannel <- uuid
- }
-
- if !c.begin.IsZero() {
- initialParams := arvados.ResourceListParams{
- Filters: []arvados.Filter{{"container.finished_at", ">=", c.begin}, {"container.finished_at", "<", c.end}, {"requesting_container_uuid", "=", nil}},
- Order: "created_at",
- }
- params := initialParams
- for {
- // This list variable must be a new one declared
- // inside the loop: otherwise, items in the API
- // response would get deep-merged into the items
- // loaded in previous iterations.
- var list arvados.ContainerRequestList
-
- err := ac.RequestAndDecode(&list, "GET", "arvados/v1/container_requests", nil, params)
- if err != nil {
- logger.Errorf("Error getting container request list from Arvados API: %s", err)
- break
- }
- if len(list.Items) == 0 {
- break
- }
-
- for _, i := range list.Items {
- uuidChannel <- i.UUID
- }
- params.Offset += len(list.Items)
- }
-
- }
- }()
-
- cost := make(map[string]consumption)
-
- for uuid := range uuidChannel {
- logger.Debugf("Considering %s", uuid)
- if strings.Contains(uuid, "-j7d0g-") {
- // This is a project (group)
- cost, err = handleProject(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
- if err != nil {
- exitcode = 1
- return
- }
- for k, v := range cost {
- cost[k] = v
- }
- } else if strings.Contains(uuid, "-xvhdp-") || strings.Contains(uuid, "-4zz18-") {
- // This is a container request or collection
- var crInfo map[string]consumption
- crInfo, err = generateCrInfo(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
- if err != nil {
- err = fmt.Errorf("error generating CSV for uuid %s: %s", uuid, err.Error())
- exitcode = 2
- return
- }
- for k, v := range crInfo {
- cost[k] = v
- }
- } else if strings.Contains(uuid, "-tpzed-") {
- // This is a user. The "Home" project for a user is not a real project.
- // It is identified by the user uuid. As such, cost analysis for the
- // "Home" project is not supported by this program. Skip this uuid, but
- // keep going.
- logger.Errorf("cost analysis is not supported for the 'Home' project: %s", uuid)
- } else {
- logger.Errorf("this argument does not look like a uuid: %s", uuid)
- exitcode = 3
- return
- }
- }
-
- if len(cost) == 0 {
- logger.Info("Nothing to do!")
- return
- }
-
- var csv string
-
- csv = "# Aggregate cost accounting for uuids:\n# UUID, Duration in seconds, Total cost\n"
- for _, uuid := range c.uuids {
- csv += "# " + uuid + "\n"
- }
-
- var total consumption
- for k, v := range cost {
- csv += k + "," + strconv.FormatFloat(v.duration, 'f', 3, 64) + "," + strconv.FormatFloat(v.cost, 'f', 8, 64) + "\n"
- total.Add(v)
- }
-
- csv += "TOTAL," + strconv.FormatFloat(total.duration, 'f', 3, 64) + "," + strconv.FormatFloat(total.cost, 'f', 2, 64) + "\n"
-
- if c.resultsDir != "" {
- // Write the resulting CSV file
- aFile := c.resultsDir + "/" + time.Now().Format("2006-01-02-15-04-05") + "-aggregate-costaccounting.csv"
- err = ioutil.WriteFile(aFile, []byte(csv), 0644)
- if err != nil {
- err = fmt.Errorf("error writing file with path %s: %s", aFile, err.Error())
- exitcode = 1
- return
- }
- logger.Infof("Aggregate cost accounting for all supplied uuids in %s", aFile)
- }
-
- // Output the total dollar amount on stdout
- fmt.Fprintf(stdout, "%s\n", strconv.FormatFloat(total.cost, 'f', 2, 64))
-
- return
-}
diff --git a/lib/costanalyzer/costanalyzer_test.go b/lib/costanalyzer/costanalyzer_test.go
deleted file mode 100644
index 1054870add..0000000000
--- a/lib/costanalyzer/costanalyzer_test.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package costanalyzer
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "os"
- "regexp"
- "testing"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
- "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) {
- check.TestingT(t)
-}
-
-var _ = check.Suite(&Suite{})
-
-type Suite struct{}
-
-func (s *Suite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the database so they don't affect subsequent tests.
- arvadostest.ResetEnv()
-}
-
-func (s *Suite) SetUpSuite(c *check.C) {
- arvadostest.StartKeep(2, true)
-
- // Use a small page size to exercise paging without adding
- // lots of fixtures
- pagesize = 2
-
- // Get the various arvados, arvadosclient, and keep client objects
- ac := arvados.NewClientFromEnv()
- arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, check.Equals, nil)
- arv.ApiToken = arvadostest.ActiveToken
- kc, err := keepclient.MakeKeepClient(arv)
- c.Assert(err, check.Equals, nil)
-
- standardE4sV3JSON := `{
- "Name": "Standard_E4s_v3",
- "ProviderType": "Standard_E4s_v3",
- "VCPUs": 4,
- "RAM": 34359738368,
- "Scratch": 64000000000,
- "IncludedScratch": 64000000000,
- "AddedScratch": 0,
- "Price": 0.292,
- "Preemptible": true
-}`
- standardD32sV3JSON := `{
- "Name": "Standard_D32s_v3",
- "ProviderType": "Standard_D32s_v3",
- "VCPUs": 32,
- "RAM": 137438953472,
- "Scratch": 256000000000,
- "IncludedScratch": 256000000000,
- "AddedScratch": 0,
- "Price": 1.76,
- "Preemptible": false
-}`
-
- standardA1V2JSON := `{
- "Name": "a1v2",
- "ProviderType": "Standard_A1_v2",
- "VCPUs": 1,
- "RAM": 2147483648,
- "Scratch": 10000000000,
- "IncludedScratch": 10000000000,
- "AddedScratch": 0,
- "Price": 0.043,
- "Preemptible": false
-}`
-
- standardA2V2JSON := `{
- "Name": "a2v2",
- "ProviderType": "Standard_A2_v2",
- "VCPUs": 2,
- "RAM": 4294967296,
- "Scratch": 20000000000,
- "IncludedScratch": 20000000000,
- "AddedScratch": 0,
- "Price": 0.091,
- "Preemptible": false
-}`
-
- legacyD1V2JSON := `{
- "properties": {
- "cloud_node": {
- "price": 0.073001,
- "size": "Standard_D1_v2"
- },
- "total_cpu_cores": 1,
- "total_ram_mb": 3418,
- "total_scratch_mb": 51170
- }
-}`
-
- // Our fixtures do not actually contain file contents. Populate the log collections we're going to use with the node.json file
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedContainerRequestUUID, arvadostest.LogCollectionUUID, standardE4sV3JSON)
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedContainerRequestUUID2, arvadostest.LogCollectionUUID2, standardD32sV3JSON)
-
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedDiagnosticsContainerRequest1UUID, arvadostest.DiagnosticsContainerRequest1LogCollectionUUID, standardA1V2JSON)
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedDiagnosticsContainerRequest2UUID, arvadostest.DiagnosticsContainerRequest2LogCollectionUUID, standardA1V2JSON)
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedDiagnosticsHasher1ContainerRequestUUID, arvadostest.Hasher1LogCollectionUUID, standardA1V2JSON)
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedDiagnosticsHasher2ContainerRequestUUID, arvadostest.Hasher2LogCollectionUUID, standardA2V2JSON)
- createNodeJSON(c, arv, ac, kc, arvadostest.CompletedDiagnosticsHasher3ContainerRequestUUID, arvadostest.Hasher3LogCollectionUUID, legacyD1V2JSON)
-}
-
-func createNodeJSON(c *check.C, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, crUUID string, logUUID string, nodeJSON string) {
- // Get the CR
- var cr arvados.ContainerRequest
- err := ac.RequestAndDecode(&cr, "GET", "arvados/v1/container_requests/"+crUUID, nil, nil)
- c.Assert(err, check.Equals, nil)
- c.Assert(cr.LogUUID, check.Equals, logUUID)
-
- // Get the log collection
- var coll arvados.Collection
- err = ac.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+cr.LogUUID, nil, nil)
- c.Assert(err, check.IsNil)
-
- // Create a node.json file -- the fixture doesn't actually contain the contents of the collection.
- fs, err := coll.FileSystem(ac, kc)
- c.Assert(err, check.IsNil)
- f, err := fs.OpenFile("node.json", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0777)
- c.Assert(err, check.IsNil)
- _, err = io.WriteString(f, nodeJSON)
- c.Assert(err, check.IsNil)
- err = f.Close()
- c.Assert(err, check.IsNil)
-
- // Flush the data to Keep
- mtxt, err := fs.MarshalManifest(".")
- c.Assert(err, check.IsNil)
- c.Assert(mtxt, check.NotNil)
-
- // Update collection record
- err = ac.RequestAndDecode(&coll, "PUT", "arvados/v1/collections/"+cr.LogUUID, nil, map[string]interface{}{
- "collection": map[string]interface{}{
- "manifest_text": mtxt,
- },
- })
- c.Assert(err, check.IsNil)
-}
-
-func (*Suite) TestUsage(c *check.C) {
- var stdout, stderr bytes.Buffer
- exitcode := Command.RunCommand("costanalyzer.test", []string{"-help", "-log-level=debug"}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Check(stdout.String(), check.Equals, "")
- c.Check(stderr.String(), check.Matches, `(?ms).*Usage:.*`)
-}
-
-func (*Suite) TestTimestampRange(c *check.C) {
- var stdout, stderr bytes.Buffer
- resultsDir := c.MkDir()
- // Run costanalyzer with a timestamp range. This should pick up two container requests in "Final" state.
- exitcode := Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, "-begin", "2020-11-02T00:00:00", "-end", "2020-11-03T23:59:00"}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedDiagnosticsContainerRequest1UUID + ".csv")
- c.Assert(err, check.IsNil)
- uuid2Report, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedDiagnosticsContainerRequest2UUID + ".csv")
- c.Assert(err, check.IsNil)
-
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,763.467,,,,0.01")
- c.Check(string(uuid2Report), check.Matches, "(?ms).*TOTAL,,,,,,488.775,,,,0.01")
- re := regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches := re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err := ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,1245.564,0.01")
-}
-
-func (*Suite) TestContainerRequestUUID(c *check.C) {
- var stdout, stderr bytes.Buffer
- resultsDir := c.MkDir()
- // Run costanalyzer with 1 container request uuid
- exitcode := Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, arvadostest.CompletedContainerRequestUUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID + ".csv")
- c.Assert(err, check.IsNil)
- // Make sure the 'preemptible' flag was picked up
- c.Check(string(uuidReport), check.Matches, "(?ms).*,Standard_E4s_v3,true,.*")
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,7.01")
- re := regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches := re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err := ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,86462.000,7.01")
-}
-
-func (*Suite) TestCollectionUUID(c *check.C) {
- var stdout, stderr bytes.Buffer
- resultsDir := c.MkDir()
-
- // Create a collection with no container_request property
- ac := arvados.NewClientFromEnv()
- var coll arvados.Collection
- err := ac.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, nil)
- c.Assert(err, check.IsNil)
-
- exitcode := Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, coll.UUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 2)
- c.Assert(stderr.String(), check.Matches, "(?ms).*does not have a 'container_request' property.*")
-
- stdout.Truncate(0)
- stderr.Truncate(0)
-
- // Add a container_request property
- err = ac.RequestAndDecode(&coll, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
- "collection": map[string]interface{}{
- "properties": map[string]interface{}{
- "container_request": arvadostest.CompletedContainerRequestUUID,
- },
- },
- })
- c.Assert(err, check.IsNil)
-
- // Re-run costanalyzer on the updated collection
- resultsDir = c.MkDir()
- exitcode = Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, coll.UUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,7.01")
- re := regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches := re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err := ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,86462.000,7.01")
-}
-
-func (*Suite) TestDoubleContainerRequestUUID(c *check.C) {
- var stdout, stderr bytes.Buffer
- resultsDir := c.MkDir()
- // Run costanalyzer with 2 container request uuids
- exitcode := Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, arvadostest.CompletedContainerRequestUUID, arvadostest.CompletedContainerRequestUUID2}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,7.01")
-
- uuidReport2, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID2 + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport2), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,42.27")
-
- re := regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches := re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err := ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,172924.000,49.28")
- stdout.Truncate(0)
- stderr.Truncate(0)
-
- // Now move both container requests into an existing project, and then re-run
- // the analysis with the project uuid. The results should be identical.
- ac := arvados.NewClientFromEnv()
- var cr arvados.ContainerRequest
- err = ac.RequestAndDecode(&cr, "PUT", "arvados/v1/container_requests/"+arvadostest.CompletedContainerRequestUUID, nil, map[string]interface{}{
- "container_request": map[string]interface{}{
- "owner_uuid": arvadostest.AProjectUUID,
- },
- })
- c.Assert(err, check.IsNil)
- err = ac.RequestAndDecode(&cr, "PUT", "arvados/v1/container_requests/"+arvadostest.CompletedContainerRequestUUID2, nil, map[string]interface{}{
- "container_request": map[string]interface{}{
- "owner_uuid": arvadostest.AProjectUUID,
- },
- })
- c.Assert(err, check.IsNil)
-
- // Run costanalyzer with the project uuid
- resultsDir = c.MkDir()
- exitcode = Command.RunCommand("costanalyzer.test", []string{"-cache=false", "-log-level", "debug", "-output", resultsDir, arvadostest.AProjectUUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err = ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,7.01")
-
- uuidReport2, err = ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedContainerRequestUUID2 + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport2), check.Matches, "(?ms).*TOTAL,,,,,,86462.000,,,,42.27")
-
- re = regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches = re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err = ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,172924.000,49.28")
-}
-
-func (*Suite) TestUncommittedContainerRequest(c *check.C) {
- var stdout, stderr bytes.Buffer
- // Run costanalyzer with 2 container request uuids, one of which is in the Uncommitted state, without output directory specified
- exitcode := Command.RunCommand("costanalyzer.test", []string{arvadostest.UncommittedContainerRequestUUID, arvadostest.CompletedDiagnosticsContainerRequest2UUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Not(check.Matches), "(?ms).*supplied uuids in .*")
- c.Assert(stderr.String(), check.Matches, "(?ms).*No container associated with container request .*")
-
- // Check that the total amount was printed to stdout
- c.Check(stdout.String(), check.Matches, "0.01\n")
-}
-
-func (*Suite) TestMultipleContainerRequestUUIDWithReuse(c *check.C) {
- var stdout, stderr bytes.Buffer
- // Run costanalyzer with 2 container request uuids, without output directory specified
- exitcode := Command.RunCommand("costanalyzer.test", []string{arvadostest.CompletedDiagnosticsContainerRequest1UUID, arvadostest.CompletedDiagnosticsContainerRequest2UUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Not(check.Matches), "(?ms).*supplied uuids in .*")
-
- // Check that the total amount was printed to stdout
- c.Check(stdout.String(), check.Matches, "0.01\n")
-
- stdout.Truncate(0)
- stderr.Truncate(0)
-
- // Run costanalyzer with 2 container request uuids
- resultsDir := c.MkDir()
- exitcode = Command.RunCommand("costanalyzer.test", []string{"-output", resultsDir, arvadostest.CompletedDiagnosticsContainerRequest1UUID, arvadostest.CompletedDiagnosticsContainerRequest2UUID}, &bytes.Buffer{}, &stdout, &stderr)
- c.Check(exitcode, check.Equals, 0)
- c.Assert(stderr.String(), check.Matches, "(?ms).*supplied uuids in .*")
-
- uuidReport, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedDiagnosticsContainerRequest1UUID + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport), check.Matches, "(?ms).*TOTAL,,,,,,763.467,,,,0.01")
-
- uuidReport2, err := ioutil.ReadFile(resultsDir + "/" + arvadostest.CompletedDiagnosticsContainerRequest2UUID + ".csv")
- c.Assert(err, check.IsNil)
- c.Check(string(uuidReport2), check.Matches, "(?ms).*TOTAL,,,,,,488.775,,,,0.01")
-
- re := regexp.MustCompile(`(?ms).*supplied uuids in (.*?)\n`)
- matches := re.FindStringSubmatch(stderr.String()) // matches[1] contains a string like 'results/2020-11-02-18-57-45-aggregate-costaccounting.csv'
-
- aggregateCostReport, err := ioutil.ReadFile(matches[1])
- c.Assert(err, check.IsNil)
-
- c.Check(string(aggregateCostReport), check.Matches, "(?ms).*TOTAL,1245.564,0.01")
-}
diff --git a/lib/crunchrun/cgroup.go b/lib/crunchrun/cgroup.go
index a722e5f142..e382ac101c 100644
--- a/lib/crunchrun/cgroup.go
+++ b/lib/crunchrun/cgroup.go
@@ -6,8 +6,14 @@ package crunchrun
import (
"bytes"
+ "errors"
"fmt"
"io/fs"
+ "os"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "sync"
)
// Return the current process's cgroup for the given subsystem.
@@ -46,3 +52,108 @@ func findCgroup(fsys fs.FS, subsystem string) (string, error) {
}
return "", fmt.Errorf("subsystem %q not found in /proc/self/cgroup", subsystem)
}
+
+var (
+ // After calling checkCgroupSupport, cgroupSupport indicates
+ // support for singularity resource limits.
+ //
+ // E.g., cgroupSupport["memory"]==true if systemd is installed
+ // and configured such that singularity can use the "memory"
+ // cgroup controller to set resource limits.
+ cgroupSupport map[string]bool
+ cgroupSupportLock sync.Mutex
+)
+
+// checkCgroupSupport should be called before looking up strings like
+// "memory" and "cpu" in cgroupSupport.
+func checkCgroupSupport(logf func(string, ...interface{})) {
+ cgroupSupportLock.Lock()
+ defer cgroupSupportLock.Unlock()
+ if cgroupSupport != nil {
+ return
+ }
+ cgroupSupport = make(map[string]bool)
+ if os.Getuid() != 0 {
+ xrd := os.Getenv("XDG_RUNTIME_DIR")
+ if xrd == "" || os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
+ logf("not running as root, and empty XDG_RUNTIME_DIR or DBUS_SESSION_BUS_ADDRESS -- singularity resource limits are not supported")
+ return
+ }
+ if fi, err := os.Stat(xrd + "/systemd"); err != nil || !fi.IsDir() {
+ logf("not running as root, and %s/systemd is not a directory -- singularity resource limits are not supported", xrd)
+ return
+ }
+ version, err := exec.Command("systemd-run", "--version").CombinedOutput()
+ if match := regexp.MustCompile(`^systemd (\d+)`).FindSubmatch(version); err != nil || match == nil {
+ logf("not running as root, and could not get systemd version -- singularity resource limits are not supported")
+ return
+ } else if v, _ := strconv.ParseInt(string(match[1]), 10, 64); v < 224 {
+ logf("not running as root, and systemd version %s < minimum 224 -- singularity resource limits are not supported", match[1])
+ return
+ }
+ }
+ mount, err := cgroupMount()
+ if err != nil {
+ if os.Getuid() == 0 && checkCgroup1Support(os.DirFS("/"), logf) {
+ // If running as root, singularity also
+ // supports cgroups v1.
+ return
+ }
+ logf("no cgroup support: %s", err)
+ return
+ }
+ cgroup, err := findCgroup(os.DirFS("/"), "")
+ if err != nil {
+ logf("cannot find cgroup: %s", err)
+ return
+ }
+ controllers, err := os.ReadFile(mount + cgroup + "/cgroup.controllers")
+ if err != nil {
+ logf("cannot read cgroup.controllers file: %s", err)
+ return
+ }
+ for _, controller := range bytes.Split(bytes.TrimRight(controllers, "\n"), []byte{' '}) {
+ cgroupSupport[string(controller)] = true
+ }
+ if !cgroupSupport["memory"] && !cgroupSupport["cpu"] && os.Getuid() == 0 {
+ // On a system running in "hybrid" mode, the
+ // controllers we need might be mounted under the v1
+ // hierarchy, in which case we will not have seen them
+ // in the cgroup2 mount, but (if running as root)
+ // singularity can use them through v1. See #22185.
+ checkCgroup1Support(os.DirFS("/"), logf)
+ }
+}
+
+// Check for legacy cgroups v1 support. Caller must have
+// cgroupSupportLock.
+func checkCgroup1Support(fsys fs.FS, logf func(string, ...interface{})) bool {
+ cgroup, err := fs.ReadFile(fsys, "proc/self/cgroup")
+ if err != nil {
+ logf("%s", err)
+ return false
+ }
+ for _, line := range bytes.Split(cgroup, []byte{'\n'}) {
+ if toks := bytes.SplitN(line, []byte{':'}, 3); len(toks) == 3 && len(toks[1]) > 0 {
+ for _, controller := range bytes.Split(toks[1], []byte{','}) {
+ cgroupSupport[string(controller)] = true
+ }
+ }
+ }
+ return true
+}
+
+// Return the cgroup2 mount point, typically "/sys/fs/cgroup".
+func cgroupMount() (string, error) {
+ mounts, err := os.ReadFile("/proc/mounts")
+ if err != nil {
+ return "", err
+ }
+ for _, mount := range bytes.Split(mounts, []byte{'\n'}) {
+ toks := bytes.Split(mount, []byte{' '})
+ if len(toks) > 2 && bytes.Equal(toks[0], []byte("cgroup2")) {
+ return string(toks[1]), nil
+ }
+ }
+ return "", errors.New("cgroup2 mount not found")
+}
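checkCgroupSupport populates the package-level cgroupSupport map exactly once and is cheap to call repeatedly, so callers can consult the map right before deciding whether to impose a resource limit. A sketch of a caller in the same package (memoryLimitSupported is a hypothetical helper, not part of this change):

```go
// memoryLimitSupported reports whether singularity can be expected to
// honor a memory limit on this host, per the detection logic above.
func memoryLimitSupported(logf func(string, ...interface{})) bool {
	checkCgroupSupport(logf) // no-op after the first call
	return cgroupSupport["memory"]
}
```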
diff --git a/lib/crunchrun/cgroup_test.go b/lib/crunchrun/cgroup_test.go
index a1acb6fb92..22cda8506c 100644
--- a/lib/crunchrun/cgroup_test.go
+++ b/lib/crunchrun/cgroup_test.go
@@ -10,6 +10,7 @@ import (
"os/exec"
"strings"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
)
@@ -72,3 +73,60 @@ func (s *CgroupSuite) TestFindCgroup(c *C) {
}
}
}
+
+func (s *CgroupSuite) TestCgroupSupport(c *C) {
+ var logbuf bytes.Buffer
+ logger := logrus.New()
+ logger.Out = &logbuf
+ checkCgroupSupport(logger.Printf)
+ c.Check(logbuf.String(), Equals, "")
+ c.Check(cgroupSupport, NotNil)
+ c.Check(cgroupSupport["memory"], Equals, true)
+ c.Check(cgroupSupport["entropy"], Equals, false)
+}
+
+func (s *CgroupSuite) TestCgroup1Support(c *C) {
+ defer func() {
+ // Reset global state. Other tests need to re-check
+ // the real system config instead of using the results
+ // from our fake /proc/self/cgroup.
+ cgroupSupport = nil
+ }()
+ tmpdir := c.MkDir()
+ err := os.MkdirAll(tmpdir+"/proc/self", 0777)
+ c.Assert(err, IsNil)
+ err = os.WriteFile(tmpdir+"/proc/self/cgroup", []byte(`12:blkio:/user.slice
+11:perf_event:/
+10:freezer:/
+9:pids:/user.slice/user-1000.slice/session-5.scope
+8:hugetlb:/
+7:rdma:/
+6:cpu,cpuacct:/user.slice
+5:devices:/user.slice
+4:memory:/user.slice/user-1000.slice/session-5.scope
+3:net_cls,net_prio:/
+2:cpuset:/
+1:name=systemd:/user.slice/user-1000.slice/session-5.scope
+0::/user.slice/user-1000.slice/session-5.scope
+`), 0777)
+ c.Assert(err, IsNil)
+ cgroupSupport = map[string]bool{}
+ ok := checkCgroup1Support(os.DirFS(tmpdir), c.Logf)
+ c.Check(ok, Equals, true)
+ c.Check(cgroupSupport, DeepEquals, map[string]bool{
+ "blkio": true,
+ "cpu": true,
+ "cpuacct": true,
+ "cpuset": true,
+ "devices": true,
+ "freezer": true,
+ "hugetlb": true,
+ "memory": true,
+ "name=systemd": true,
+ "net_cls": true,
+ "net_prio": true,
+ "perf_event": true,
+ "pids": true,
+ "rdma": true,
+ })
+}
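The test above can fake /proc/self/cgroup because checkCgroup1Support accepts an fs.FS instead of reading the real filesystem directly. A minimal standalone sketch of that testability seam, using testing/fstest in place of a temp directory (readCgroupProc is an illustrative name, not part of this change):

```go
package main

import (
	"fmt"
	"io/fs"
	"testing/fstest"
)

// readCgroupProc reads proc/self/cgroup from any fs.FS, so production
// code can pass os.DirFS("/") while tests pass a fake filesystem.
func readCgroupProc(fsys fs.FS) (string, error) {
	b, err := fs.ReadFile(fsys, "proc/self/cgroup")
	return string(b), err
}

func main() {
	fake := fstest.MapFS{
		"proc/self/cgroup": {Data: []byte("0::/user.slice\n")},
	}
	s, err := readCgroupProc(fake)
	fmt.Printf("%q %v\n", s, err)
}
```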
diff --git a/lib/crunchrun/container_gateway.go b/lib/crunchrun/container_gateway.go
index 5b68e2c50e..1ff7b142f5 100644
--- a/lib/crunchrun/container_gateway.go
+++ b/lib/crunchrun/container_gateway.go
@@ -168,6 +168,15 @@ func (gw *Gateway) Start() error {
TLSConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
},
+ // Typically the client is arvados-controller,
+ // which disables keep-alive, so we mostly
+ // don't rely on IdleTimeout. But in general,
+ // these timeouts prevent abandoned open
+ // sockets from piling up if client
+ // connections don't get terminated properly
+ // (e.g., network mishap).
+ IdleTimeout: time.Minute,
+ ReadHeaderTimeout: time.Minute,
},
Addr: net.JoinHostPort(listenHost, extPort),
}
@@ -205,6 +214,7 @@ func (gw *Gateway) Start() error {
func (gw *Gateway) maintainTunnel(addr string) {
for ; ; time.Sleep(5 * time.Second) {
err := gw.runTunnel(addr)
+ // Note: err is never nil here, see runTunnel comment.
gw.Log.Printf("runTunnel: %s", err)
}
}
@@ -212,6 +222,10 @@ func (gw *Gateway) maintainTunnel(addr string) {
// runTunnel connects to controller and sets up a tunnel through
// which controller can connect to the gateway server at the given
// addr.
+//
+// runTunnel aims to run forever (i.e., until the current process
+// exits). If it returns at all, it returns a non-nil error indicating
+// why the tunnel was shut down.
func (gw *Gateway) runTunnel(addr string) error {
ctx := auth.NewContext(context.Background(), auth.NewCredentials(gw.ArvadosClient.AuthToken))
arpc := rpc.NewConn("", &url.URL{Scheme: "https", Host: gw.ArvadosClient.APIHost}, gw.ArvadosClient.Insecure, rpc.PassthroughTokenProvider)
@@ -234,7 +248,6 @@ func (gw *Gateway) runTunnel(addr string) error {
if err != nil {
return err
}
- gw.Log.Printf("tunnel connection %d started", muxconn.StreamID())
go func() {
defer muxconn.Close()
gwconn, err := net.Dial("tcp", addr)
@@ -262,7 +275,6 @@ func (gw *Gateway) runTunnel(addr string) error {
muxconn.Close()
}()
wg.Wait()
- gw.Log.Printf("tunnel connection %d finished", muxconn.StreamID())
}()
}
}
@@ -274,7 +286,7 @@ var webdavMethod = map[string]bool{
}
func (gw *Gateway) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- w.Header().Set("Vary", "X-Arvados-Authorization, X-Arvados-Container-Gateway-Uuid, X-Webdav-Prefix, X-Webdav-Source")
+ w.Header().Set("Vary", "X-Arvados-Authorization, X-Arvados-Container-Gateway-Uuid, X-Arvados-Container-Target-Port, X-Webdav-Prefix, X-Webdav-Source")
reqUUID := req.Header.Get("X-Arvados-Container-Gateway-Uuid")
if reqUUID == "" {
// older controller versions only send UUID as query param
@@ -292,13 +304,20 @@ func (gw *Gateway) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("X-Arvados-Authorization-Response", gw.respondAuth)
switch {
case req.Method == "POST" && req.Header.Get("Upgrade") == "ssh":
+ // SSH tunnel from
+ // (*lib/controller/localdb.Conn)ContainerSSH()
gw.handleSSH(w, req)
case req.Header.Get("X-Webdav-Source") == "/log":
+ // WebDAV request for container log data
if !webdavMethod[req.Method] {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
gw.handleLogsWebDAV(w, req)
+ case req.Header.Get("X-Arvados-Container-Target-Port") != "":
+ // HTTP forwarded through
+ // (*lib/controller/localdb.Conn)ContainerHTTPProxy()
+ gw.handleForwardedHTTP(w, req)
default:
http.Error(w, "path not found", http.StatusNotFound)
}
@@ -336,6 +355,47 @@ func (gw *Gateway) webdavLogger(r *http.Request, err error) {
}
}
+func (gw *Gateway) handleForwardedHTTP(w http.ResponseWriter, reqIn *http.Request) {
+ port := reqIn.Header.Get("X-Arvados-Container-Target-Port")
+ var host string
+ var err error
+ if gw.Target != nil {
+ host, err = gw.Target.IPAddress()
+ if err != nil {
+ http.Error(w, "container has no IP address: "+err.Error(), http.StatusServiceUnavailable)
+ return
+ }
+ }
+ if host == "" {
+ http.Error(w, "container has no IP address", http.StatusServiceUnavailable)
+ return
+ }
+ client := http.Client{
+ CheckRedirect: func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse },
+ }
+ url := *reqIn.URL
+ url.Scheme = "http"
+ url.Host = net.JoinHostPort(host, port)
+ req, err := http.NewRequestWithContext(reqIn.Context(), reqIn.Method, url.String(), reqIn.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req.Host = reqIn.Host
+ req.Header = reqIn.Header
+ req.Header.Del("X-Arvados-Container-Gateway-Uuid")
+ req.Header.Del("X-Arvados-Container-Target-Port")
+ req.Header.Del("X-Arvados-Authorization")
+ req.Header.Add("Via", "HTTP/1.1 arvados-crunch-run")
+ resp, err := client.Do(req)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ defer resp.Body.Close()
+ for k, v := range resp.Header {
+ w.Header()[k] = v
+ }
+ w.WriteHeader(resp.StatusCode)
+ io.Copy(w, resp.Body)
+}
+
// handleSSH connects to an SSH server that allows the caller to run
// interactive commands as root (or any other desired user) inside the
// container. The tunnel itself can only be created by an
@@ -346,7 +406,7 @@ func (gw *Gateway) webdavLogger(r *http.Request, err error) {
//
// Connection: upgrade
// Upgrade: ssh
-// X-Arvados-Target-Uuid: uuid of container
+// X-Arvados-Container-Gateway-Uuid: uuid of container
// X-Arvados-Authorization: must match
// hmac(AuthSecret,certfingerprint) (this prevents other containers
// and shell nodes from connecting directly)
@@ -364,14 +424,9 @@ func (gw *Gateway) handleSSH(w http.ResponseWriter, req *http.Request) {
if username == "" {
username = "root"
}
- hj, ok := w.(http.Hijacker)
- if !ok {
- http.Error(w, "ResponseWriter does not support connection upgrade", http.StatusInternalServerError)
- return
- }
- netconn, _, err := hj.Hijack()
- if !ok {
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ netconn, _, err := http.NewResponseController(w).Hijack()
+ if err != nil {
+ http.Error(w, "connection upgrade failed: "+err.Error(), http.StatusInternalServerError)
return
}
defer netconn.Close()
@@ -380,6 +435,7 @@ func (gw *Gateway) handleSSH(w http.ResponseWriter, req *http.Request) {
netconn.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n"))
w.Header().Write(netconn)
netconn.Write([]byte("\r\n"))
+ httpserver.ExemptFromDeadline(req)
ctx := req.Context()
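The handleSSH change above swaps the manual http.Hijacker type assertion for http.NewResponseController (added in Go 1.20), which folds "writer doesn't support hijacking" and "hijack failed" into a single error path. A minimal sketch of that upgrade idiom outside the gateway code (handler and route names are illustrative):

```go
package main

import "net/http"

func upgradeHandler(w http.ResponseWriter, req *http.Request) {
	// NewResponseController returns an error from Hijack if the
	// underlying ResponseWriter does not support it, so no type
	// assertion is needed.
	netconn, _, err := http.NewResponseController(w).Hijack()
	if err != nil {
		http.Error(w, "connection upgrade failed: "+err.Error(), http.StatusInternalServerError)
		return
	}
	defer netconn.Close()
	netconn.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n\r\n"))
	// ...speak the upgraded protocol on netconn...
}

func main() {
	http.HandleFunc("/upgrade", upgradeHandler)
	http.ListenAndServe("127.0.0.1:0", nil)
}
```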
diff --git a/lib/crunchrun/copier.go b/lib/crunchrun/copier.go
index a081c5d325..3fc25a34aa 100644
--- a/lib/crunchrun/copier.go
+++ b/lib/crunchrun/copier.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"os"
"path/filepath"
"sort"
@@ -16,7 +17,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/keepclient"
- "git.arvados.org/arvados.git/sdk/go/manifest"
+ "github.com/bmatcuk/doublestar/v4"
)
type printfer interface {
@@ -40,8 +41,8 @@ type filetodo struct {
// copied from the local filesystem.
//
// Symlinks to mounted collections, and any collections mounted under
-// ctrOutputDir, are copied by transforming the relevant parts of the
-// existing manifests, without moving any data around.
+// ctrOutputDir, are copied by reference, without moving any data
+// around.
//
// Symlinks to other parts of the container's filesystem result in
// errors.
@@ -54,34 +55,59 @@ type copier struct {
keepClient IKeepClient
hostOutputDir string
ctrOutputDir string
+ globs []string
bindmounts map[string]bindmount
mounts map[string]arvados.Mount
secretMounts map[string]arvados.Mount
logger printfer
- dirs []string
- files []filetodo
- manifest string
+ dirs []string
+ files []filetodo
+ staged arvados.CollectionFileSystem
- manifestCache map[string]*manifest.Manifest
+ manifestCache map[string]string
+
+ // tmpfs is the filesystem representation of the source
+ // collection that was most recently handled in
+ // copyFromCollection. This improves performance slightly in
+ // the special case where many mounts reference the same
+ // source collection.
+ tmpfs arvados.CollectionFileSystem
+ tmpfsManifestText string
}
// Copy copies data as needed, and returns a new manifest.
+//
+// Copy should not be called more than once.
func (cp *copier) Copy() (string, error) {
- err := cp.walkMount("", cp.ctrOutputDir, limitFollowSymlinks, true)
+ var err error
+ cp.staged, err = (&arvados.Collection{}).FileSystem(cp.client, cp.keepClient)
+ if err != nil {
+ return "", fmt.Errorf("error creating Collection.FileSystem: %v", err)
+ }
+ err = cp.walkMount("", cp.ctrOutputDir, limitFollowSymlinks, true)
if err != nil {
return "", fmt.Errorf("error scanning files to copy to output: %v", err)
}
- fs, err := (&arvados.Collection{ManifestText: cp.manifest}).FileSystem(cp.client, cp.keepClient)
+
+ // Remove files/dirs that don't match globs (the files/dirs
+ // that were added during cp.walkMount() by copying subtree
+ // manifests into cp.staged).
+ err = cp.applyGlobsToStaged()
if err != nil {
- return "", fmt.Errorf("error creating Collection.FileSystem: %v", err)
+ return "", fmt.Errorf("error while removing non-matching files from output collection: %w", err)
}
+ // Remove files/dirs that don't match globs (the files/dirs
+ // that are stored on the local filesystem and would need to
+ // be copied in copyFile() below).
+ cp.applyGlobsToFilesAndDirs()
for _, d := range cp.dirs {
- err = fs.Mkdir(d, 0777)
+ err = cp.staged.Mkdir(d, 0777)
if err != nil && err != os.ErrExist {
return "", fmt.Errorf("error making directory %q in output collection: %v", d, err)
}
}
+
var unflushed int64
var lastparentdir string
for _, f := range cp.files {
@@ -91,20 +117,184 @@ func (cp *copier) Copy() (string, error) {
// open so f's data can be packed with it).
dir, _ := filepath.Split(f.dst)
if dir != lastparentdir || unflushed > keepclient.BLOCKSIZE {
- if err := fs.Flush("/"+lastparentdir, dir != lastparentdir); err != nil {
+ if err := cp.staged.Flush("/"+lastparentdir, dir != lastparentdir); err != nil {
return "", fmt.Errorf("error flushing output collection file data: %v", err)
}
unflushed = 0
}
lastparentdir = dir
- n, err := cp.copyFile(fs, f)
+ n, err := cp.copyFile(cp.staged, f)
if err != nil {
return "", fmt.Errorf("error copying file %q into output collection: %v", f, err)
}
unflushed += n
}
- return fs.MarshalManifest(".")
+ return cp.staged.MarshalManifest(".")
+}
+
+func (cp *copier) matchGlobs(path string, isDir bool) bool {
+ // An entry in the top level of the output directory looks
+ // like "/foo", but globs look like "foo", so we strip the
+ // leading "/" before matching.
+ path = strings.TrimLeft(path, "/")
+ for _, glob := range cp.globs {
+ if !isDir && strings.HasSuffix(glob, "/**") {
+ // doublestar.Match("f*/**", "ff") and
+ // doublestar.Match("f*/**", "ff/gg") both
+ // return true, but (to be compatible with
+ // bash shopt) "ff" should match only if it is
+ // a directory.
+ //
+ // To avoid errant matches, we add the file's
+ // basename to the end of the pattern:
+ //
+ // Match("f*/**/ff", "ff") => false
+ // Match("f*/**/gg", "ff/gg") => true
+ //
+ // Of course, we need to escape basename in
+ // case it contains *, ?, \, etc.
+ _, name := filepath.Split(path)
+ escapedName := strings.TrimSuffix(strings.Replace(name, "", "\\", -1), "\\")
+ if match, _ := doublestar.Match(glob+"/"+escapedName, path); match {
+ return true
+ }
+ } else if match, _ := doublestar.Match(glob, path); match {
+ return true
+ } else if isDir {
+ // Workaround doublestar bug (v4.6.1).
+ // "foo*/**" should match "foo", but does not,
+ // because isZeroLengthPattern does not accept
+ // "*/**" as a zero length pattern.
+ if trunc := strings.TrimSuffix(glob, "*/**"); trunc != glob {
+ if match, _ := doublestar.Match(trunc, path); match {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// Delete entries from cp.files that do not match cp.globs.
+//
+// Delete entries from cp.dirs that do not match cp.globs.
+//
+// Ensure parent/ancestor directories of remaining cp.files and
+// cp.dirs entries are still present in cp.dirs, even if they do not
+// match cp.globs themselves.
+func (cp *copier) applyGlobsToFilesAndDirs() {
+ if len(cp.globs) == 0 {
+ return
+ }
+ keepdirs := make(map[string]bool)
+ for _, path := range cp.dirs {
+ if cp.matchGlobs(path, true) {
+ keepdirs[path] = true
+ }
+ }
+ for path := range keepdirs {
+ for i, c := range path {
+ if i > 0 && c == '/' {
+ keepdirs[path[:i]] = true
+ }
+ }
+ }
+ var keepfiles []filetodo
+ for _, file := range cp.files {
+ if cp.matchGlobs(file.dst, false) {
+ keepfiles = append(keepfiles, file)
+ }
+ }
+ for _, file := range keepfiles {
+ for i, c := range file.dst {
+ if i > 0 && c == '/' {
+ keepdirs[file.dst[:i]] = true
+ }
+ }
+ }
+ cp.dirs = nil
+ for path := range keepdirs {
+ cp.dirs = append(cp.dirs, path)
+ }
+ sort.Strings(cp.dirs)
+ cp.files = keepfiles
+}
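
[Editor's note] The ancestor-reinstating loops above rely on a small trick: each '/' in a cleaned relative path marks the end of a proper prefix, i.e. one ancestor directory. A standalone sketch of the same technique (the ancestors helper is illustrative, not part of the patch):

```go
package main

import "fmt"

// ancestors returns every ancestor directory of a slash-separated
// relative path, the same way applyGlobsToFilesAndDirs repopulates
// keepdirs for kept files and dirs.
func ancestors(path string) []string {
	var out []string
	for i, c := range path {
		if i > 0 && c == '/' {
			out = append(out, path[:i])
		}
	}
	return out
}

func main() {
	fmt.Println(ancestors("dir1/dir11/file111")) // [dir1 dir1/dir11]
}
```
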
+
+// Delete files in cp.staged that do not match cp.globs. Also delete
+// directories that are empty (after deleting non-matching files) and
+// do not match cp.globs themselves.
+func (cp *copier) applyGlobsToStaged() error {
+ if len(cp.globs) == 0 {
+ return nil
+ }
+ include := make(map[string]bool)
+ err := fs.WalkDir(arvados.FS(cp.staged), "", func(path string, ent fs.DirEntry, err error) error {
+ if cp.matchGlobs(path, ent.IsDir()) {
+ for i, c := range path {
+ if i > 0 && c == '/' {
+ include[path[:i]] = true
+ }
+ }
+ include[path] = true
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ err = fs.WalkDir(arvados.FS(cp.staged), "", func(path string, ent fs.DirEntry, err error) error {
+ if err != nil || path == "" {
+ return err
+ }
+ if !include[path] {
+ err := cp.staged.RemoveAll(path)
+ if err != nil {
+ return err
+ }
+ if ent.IsDir() {
+ return fs.SkipDir
+ }
+ }
+ return nil
+ })
+ return err
+}
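
[Editor's note] The two-pass structure above (mark matches plus their ancestors, then remove and SkipDir everything unmarked) is independent of collection filesystems. A rough sketch of the same idea over a plain path list, using the same doublestar matcher:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	paths := []string{"foo", "baz", "baz/quux", "baz/parent1", "baz/parent1/item1"}
	// Pass 1: mark every match and all of its ancestors.
	include := map[string]bool{}
	for _, p := range paths {
		if ok, _ := doublestar.Match("baz/**", p); ok {
			include[p] = true
			for i, c := range p {
				if i > 0 && c == '/' {
					include[p[:i]] = true
				}
			}
		}
	}
	// Pass 2: everything unmarked would be removed.
	for _, p := range paths {
		if !include[p] {
			fmt.Println("would remove:", p) // would remove: foo
		}
	}
}
```
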
+
+// Return true if it's possible for any descendant of the given path
+// to match anything in cp.globs. Used by walkMount to avoid loading
+// collections that are mounted underneath ctrOutputPath but excluded
+// by globs.
+func (cp *copier) subtreeCouldMatch(path string) bool {
+ if len(cp.globs) == 0 {
+ return true
+ }
+ pathdepth := 1 + strings.Count(path, "/")
+ for _, glob := range cp.globs {
+ globdepth := 0
+ lastsep := 0
+ for i, c := range glob {
+ if c != '/' || !doublestar.ValidatePattern(glob[:i]) {
+ // Escaped "/", or "/" in a character
+ // class, is not a path separator.
+ continue
+ }
+ if glob[lastsep:i] == "**" {
+ return true
+ }
+ lastsep = i + 1
+ if globdepth++; globdepth == pathdepth {
+ if match, _ := doublestar.Match(glob[:i]+"/*", path+"/z"); match {
+ return true
+ }
+ break
+ }
+ }
+ if globdepth < pathdepth && glob[lastsep:] == "**" {
+ return true
+ }
+ }
+ return false
}
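
[Editor's note] The probe at the core of the loop is easiest to see in isolation: once the walk has consumed as many glob components as the mount path has, it appends "/*" to the glob prefix and a fictitious child "/z" to the path, and asks doublestar whether they match. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// Mount at "abc/def" (depth 2). The glob "*/*/*" truncated at
	// its second separator is "*/*"; could some child of the mount
	// match? Yes, so the mount's contents must be walked.
	ok, _ := doublestar.Match("*/*"+"/*", "abc/def"+"/z")
	fmt.Println(ok) // true

	// A "**" component can absorb any remaining depth, so the walk
	// short-circuits to true as soon as it reaches one.
	ok, _ = doublestar.Match("abc/**", "abc/def/any/depth")
	fmt.Println(ok) // true
}
```
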
func (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) (int64, error) {
@@ -127,7 +317,7 @@ func (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) (int64,
return n, dst.Close()
}
-// Append to cp.manifest, cp.files, and cp.dirs so as to copy src (an
+// Add to cp.staged, cp.files, and cp.dirs so as to copy src (an
// absolute path in the container's filesystem) to dest (an absolute
// path in the output collection, or "" for output root).
//
@@ -161,9 +351,8 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
// copy, relative to its mount point -- ".", "./foo.txt", ...
srcRelPath := filepath.Join(".", srcMount.Path, src[len(srcRoot):])
- // outputRelPath is the path relative in the output directory
- // that corresponds to the path in the output collection where
- // the file will go, for logging
+ // outputRelPath is the destination path relative to the
+ // output directory. Used for logging and glob matching.
var outputRelPath = ""
if strings.HasPrefix(src, cp.ctrOutputDir) {
outputRelPath = strings.TrimPrefix(src[len(cp.ctrOutputDir):], "/")
@@ -177,6 +366,9 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
switch {
case srcMount.ExcludeFromOutput:
+	case outputRelPath != "" && !cp.subtreeCouldMatch(outputRelPath):
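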
+ cp.logger.Printf("not copying %q because contents cannot match output globs", outputRelPath)
+ return nil
case srcMount.Kind == "tmp":
// Handle by walking the host filesystem.
return cp.walkHostFS(dest, src, maxSymlinks, walkMountsBelow)
@@ -188,7 +380,10 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
if err != nil {
return err
}
- cp.manifest += mft.Extract(srcRelPath, dest).Text
+ err = cp.copyFromCollection(dest, &arvados.Collection{ManifestText: mft}, srcRelPath)
+ if err != nil {
+ return err
+ }
default:
cp.logger.Printf("copying %q", outputRelPath)
hostRoot, err := cp.hostRoot(srcRoot)
@@ -205,15 +400,44 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
if err != nil {
return err
}
- mft := manifest.Manifest{Text: coll.ManifestText}
- cp.manifest += mft.Extract(srcRelPath, dest).Text
+ err = cp.copyFromCollection(dest, &coll, srcRelPath)
+ if err != nil {
+ return err
+ }
}
+ cp.tmpfs = nil
+ cp.tmpfsManifestText = ""
if walkMountsBelow {
return cp.walkMountsBelow(dest, src)
}
return nil
}
+func (cp *copier) copyFromCollection(dest string, coll *arvados.Collection, srcRelPath string) error {
+ if coll.ManifestText == "" || coll.ManifestText != cp.tmpfsManifestText {
+ tmpfs, err := coll.FileSystem(cp.client, cp.keepClient)
+ if err != nil {
+ return err
+ }
+ cp.tmpfs = tmpfs
+ cp.tmpfsManifestText = coll.ManifestText
+ }
+ snap, err := arvados.Snapshot(cp.tmpfs, srcRelPath)
+ if err != nil {
+ return err
+ }
+ // Create ancestors of dest, if necessary.
+ for i, c := range dest {
+ if i > 0 && c == '/' {
+ err = cp.staged.Mkdir(dest[:i], 0777)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ }
+ return arvados.Splice(cp.staged, dest, snap)
+}
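
[Editor's note] Snapshot and Splice are what let this patch drop the manifest-text concatenation: a subtree is captured by reference and grafted into the staging filesystem without round-tripping through manifest text. A minimal sketch using two in-memory collections (nil API and Keep clients suffice here because the manifest only references the zero-length block):

```go
package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	src, err := (&arvados.Collection{
		ManifestText: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:a 0:0:dir/b\n",
	}).FileSystem(nil, nil)
	if err != nil {
		panic(err)
	}
	dst, err := (&arvados.Collection{}).FileSystem(nil, nil)
	if err != nil {
		panic(err)
	}
	// Capture "dir" from src and graft it into dst under "out/dir".
	// As in copyFromCollection, ancestors of the destination must
	// exist before splicing.
	snap, err := arvados.Snapshot(src, "dir")
	if err != nil {
		panic(err)
	}
	if err := dst.Mkdir("out", 0777); err != nil {
		panic(err)
	}
	if err := arvados.Splice(dst, "out/dir", snap); err != nil {
		panic(err)
	}
	fi, err := dst.Stat("out/dir/b")
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Size()) // 0
}
```
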
+
func (cp *copier) walkMountsBelow(dest, src string) error {
for mnt, mntinfo := range cp.mounts {
if !strings.HasPrefix(mnt, src+"/") {
@@ -328,6 +552,8 @@ func (cp *copier) walkHostFS(dest, src string, maxSymlinks int, includeMounts bo
// (...except mount types that are
// handled as regular files.)
continue
+ } else if isMount && !cp.subtreeCouldMatch(src[len(cp.ctrOutputDir)+1:]) {
+ continue
}
err = cp.walkHostFS(dest, src, maxSymlinks, false)
if err != nil {
@@ -366,20 +592,18 @@ func (cp *copier) copyRegularFiles(m arvados.Mount) bool {
return m.Kind == "text" || m.Kind == "json" || (m.Kind == "collection" && m.Writable)
}
-func (cp *copier) getManifest(pdh string) (*manifest.Manifest, error) {
+func (cp *copier) getManifest(pdh string) (string, error) {
if mft, ok := cp.manifestCache[pdh]; ok {
return mft, nil
}
var coll arvados.Collection
err := cp.client.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+pdh, nil, nil)
if err != nil {
- return nil, fmt.Errorf("error retrieving collection record for %q: %s", pdh, err)
+ return "", fmt.Errorf("error retrieving collection record for %q: %s", pdh, err)
}
- mft := &manifest.Manifest{Text: coll.ManifestText}
if cp.manifestCache == nil {
- cp.manifestCache = map[string]*manifest.Manifest{pdh: mft}
- } else {
- cp.manifestCache[pdh] = mft
+ cp.manifestCache = make(map[string]string)
}
- return mft, nil
+ cp.manifestCache[pdh] = coll.ManifestText
+ return coll.ManifestText, nil
}
diff --git a/lib/crunchrun/copier_test.go b/lib/crunchrun/copier_test.go
index c8936d1a9f..db64116bc2 100644
--- a/lib/crunchrun/copier_test.go
+++ b/lib/crunchrun/copier_test.go
@@ -6,13 +6,20 @@ package crunchrun
import (
"bytes"
+ "encoding/json"
+ "fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
+ "path"
+ "runtime"
+ "sort"
"syscall"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
@@ -27,8 +34,17 @@ type copierSuite struct {
func (s *copierSuite) SetUpTest(c *check.C) {
tmpdir := c.MkDir()
s.log = bytes.Buffer{}
+
+ cl, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, check.IsNil)
+ kc, err := keepclient.MakeKeepClient(cl)
+ c.Assert(err, check.IsNil)
+ collfs, err := (&arvados.Collection{}).FileSystem(arvados.NewClientFromEnv(), kc)
+ c.Assert(err, check.IsNil)
+
s.cp = copier{
client: arvados.NewClientFromEnv(),
+ keepClient: kc,
hostOutputDir: tmpdir,
ctrOutputDir: "/ctr/outdir",
mounts: map[string]arvados.Mount{
@@ -38,6 +54,7 @@ func (s *copierSuite) SetUpTest(c *check.C) {
"/secret_text": {Kind: "text", Content: "xyzzy"},
},
logger: &logrus.Logger{Out: &s.log, Formatter: &logrus.TextFormatter{}, Level: logrus.InfoLevel},
+ staged: collfs,
}
}
@@ -48,6 +65,104 @@ func (s *copierSuite) TestEmptyOutput(c *check.C) {
c.Check(len(s.cp.files), check.Equals, 0)
}
+func (s *copierSuite) TestEmptyWritableMount(c *check.C) {
+ s.writeFileInOutputDir(c, ".arvados#collection", `{"manifest_text":""}`)
+ s.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{
+ Kind: "collection",
+ Writable: true,
+ }
+
+ err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+ c.Assert(err, check.IsNil)
+ c.Check(s.cp.dirs, check.DeepEquals, []string(nil))
+ c.Check(len(s.cp.files), check.Equals, 0)
+ rootdir, err := s.cp.staged.Open(".")
+ c.Assert(err, check.IsNil)
+ defer rootdir.Close()
+ fis, err := rootdir.Readdir(-1)
+ c.Assert(err, check.IsNil)
+ c.Check(fis, check.HasLen, 0)
+}
+
+func (s *copierSuite) TestOutputCollectionWithOnlySubmounts(c *check.C) {
+ s.writeFileInOutputDir(c, ".arvados#collection", `{"manifest_text":""}`)
+ s.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{
+ Kind: "collection",
+ Writable: true,
+ }
+ s.cp.mounts[path.Join(s.cp.ctrOutputDir, "foo")] = arvados.Mount{
+ Kind: "collection",
+ Path: "foo",
+ PortableDataHash: arvadostest.FooCollectionPDH,
+ }
+
+ err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+ c.Assert(err, check.IsNil)
+
+ // s.cp.dirs and s.cp.files are empty, because nothing needs
+ // to be copied from disk.
+ c.Check(s.cp.dirs, check.DeepEquals, []string(nil))
+ c.Check(len(s.cp.files), check.Equals, 0)
+
+ // The "foo" file has already been copied from FooCollection
+ // to s.cp.staged via Snapshot+Splice.
+ rootdir, err := s.cp.staged.Open(".")
+ c.Assert(err, check.IsNil)
+ defer rootdir.Close()
+ fis, err := rootdir.Readdir(-1)
+ c.Assert(err, check.IsNil)
+ c.Assert(fis, check.HasLen, 1)
+ c.Check(fis[0].Size(), check.Equals, int64(3))
+}
+
+func (s *copierSuite) TestRepetitiveMountsInOutputDir(c *check.C) {
+ var memstats0 runtime.MemStats
+ runtime.ReadMemStats(&memstats0)
+
+ s.writeFileInOutputDir(c, ".arvados#collection", `{"manifest_text":""}`)
+ s.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{
+ Kind: "collection",
+ Writable: true,
+ }
+ nmounts := 200
+ ncollections := 1
+ pdh := make([]string, ncollections)
+ s.cp.manifestCache = make(map[string]string)
+ for i := 0; i < ncollections; i++ {
+ mtxt := arvadostest.FakeManifest(1, nmounts, 2, 4<<20)
+ pdh[i] = arvados.PortableDataHash(mtxt)
+ s.cp.manifestCache[pdh[i]] = mtxt
+ }
+ for i := 0; i < nmounts; i++ {
+ filename := fmt.Sprintf("file%d", i)
+ s.cp.mounts[path.Join(s.cp.ctrOutputDir, filename)] = arvados.Mount{
+ Kind: "collection",
+ Path: fmt.Sprintf("dir0/dir%d/file%d", i, i),
+ PortableDataHash: pdh[i%ncollections],
+ }
+ }
+ err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+ c.Assert(err, check.IsNil)
+
+ // Files mounted under output dir have been copied from the
+ // fake collections to s.cp.staged via Snapshot+Splice.
+ rootdir, err := s.cp.staged.Open(".")
+ c.Assert(err, check.IsNil)
+ defer rootdir.Close()
+ fis, err := rootdir.Readdir(-1)
+ c.Assert(err, check.IsNil)
+ c.Assert(fis, check.HasLen, nmounts)
+
+	// nmounts -- Δalloc before -> Δalloc after fixing #22827
+ // 500 -- 1542 MB -> 15 MB
+ // 200 -- 254 MB -> 5 MB
+ var memstats runtime.MemStats
+ runtime.ReadMemStats(&memstats)
+ delta := (int64(memstats.Alloc) - int64(memstats0.Alloc)) / 1000000
+	c.Logf("Δalloc %d MB", delta)
+	c.Check(delta < 40, check.Equals, true, check.Commentf("Δalloc %d MB is suspiciously high, expect ~ 5 MB", delta))
+}
+
func (s *copierSuite) TestRegularFilesAndDirs(c *check.C) {
err := os.MkdirAll(s.cp.hostOutputDir+"/dir1/dir2/dir3", 0755)
c.Assert(err, check.IsNil)
@@ -115,9 +230,7 @@ func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
}
// simulate mounted writable collection
- bindtmp, err := ioutil.TempDir("", "crunch-run.test.")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(bindtmp)
+ bindtmp := c.MkDir()
f, err := os.OpenFile(bindtmp+"/.arvados#collection", os.O_CREATE|os.O_WRONLY, 0644)
c.Assert(err, check.IsNil)
_, err = io.WriteString(f, `{"manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"}`)
@@ -138,7 +251,16 @@ func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
c.Check(err, check.IsNil)
- c.Check(s.cp.manifest, check.Matches, `(?ms)\./l_dir acbd\S+ 0:3:foo\n\. acbd\S+ 0:3:l_file\n\. 37b5\S+ 0:3:l_file_w\n`)
+ s.checkStagedFile(c, "l_dir/foo", 3)
+ s.checkStagedFile(c, "l_file", 3)
+ s.checkStagedFile(c, "l_file_w", 3)
+}
+
+func (s *copierSuite) checkStagedFile(c *check.C, path string, size int64) {
+ fi, err := s.cp.staged.Stat(path)
+ if c.Check(err, check.IsNil) {
+ c.Check(fi.Size(), check.Equals, size)
+ }
}
func (s *copierSuite) TestSymlink(c *check.C) {
@@ -215,6 +337,196 @@ func (s *copierSuite) TestWritableMountBelow(c *check.C) {
})
}
+// Check some glob-matching edge cases. In particular, check that
+// patterns like "foo/**" do not match regular files named "foo"
+// (unless of course they are inside a directory named "foo").
+func (s *copierSuite) TestMatchGlobs(c *check.C) {
+ s.cp.globs = []string{"foo*/**"}
+ c.Check(s.cp.matchGlobs("foo", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("food", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("foo", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("food", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("foo/bar", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("food/bar", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("foo/bar", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("food/bar", true), check.Equals, true)
+
+ s.cp.globs = []string{"ba[!/]/foo*/**"}
+ c.Check(s.cp.matchGlobs("bar/foo", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("bar/food", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("bar/foo", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("bar/food", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("bar/foo/z\\[", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("bar/food/z\\[", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("bar/foo/z\\[", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("bar/food/z\\[", false), check.Equals, true)
+
+ s.cp.globs = []string{"waz/**/foo*/**"}
+ c.Check(s.cp.matchGlobs("waz/quux/foo", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("waz/quux/food", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("waz/quux/foo", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("waz/quux/food", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("waz/quux/foo/foo", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("waz/quux/food/foo", true), check.Equals, true)
+ c.Check(s.cp.matchGlobs("waz/quux/foo/foo", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("waz/quux/food/foo", false), check.Equals, true)
+
+ s.cp.globs = []string{"foo/**/*"}
+ c.Check(s.cp.matchGlobs("foo", false), check.Equals, false)
+ c.Check(s.cp.matchGlobs("foo/bar", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("foo/bar/baz", false), check.Equals, true)
+ c.Check(s.cp.matchGlobs("foo/bar/baz/waz", false), check.Equals, true)
+}
+
+func (s *copierSuite) TestSubtreeCouldMatch(c *check.C) {
+ for _, trial := range []struct {
+ mount string // relative to output dir
+ glob string
+ could bool
+ }{
+ {mount: "abc", glob: "*"},
+ {mount: "abc", glob: "abc/*", could: true},
+ {mount: "abc", glob: "a*/**", could: true},
+ {mount: "abc", glob: "**", could: true},
+ {mount: "abc", glob: "*/*", could: true},
+ {mount: "abc", glob: "**/*.txt", could: true},
+ {mount: "abc/def", glob: "*"},
+ {mount: "abc/def", glob: "*/*"},
+ {mount: "abc/def", glob: "*/*.txt"},
+ {mount: "abc/def", glob: "*/*/*", could: true},
+ {mount: "abc/def", glob: "**", could: true},
+ {mount: "abc/def", glob: "**/bar", could: true},
+ {mount: "abc/def", glob: "abc/**", could: true},
+ {mount: "abc/def/ghi", glob: "*c/**/bar", could: true},
+ {mount: "abc/def/ghi", glob: "*c/*f/bar"},
+ {mount: "abc/def/ghi", glob: "abc/d[^/]f/ghi/*", could: true},
+ } {
+ c.Logf("=== %+v", trial)
+ got := (&copier{
+ globs: []string{trial.glob},
+ }).subtreeCouldMatch(trial.mount)
+ c.Check(got, check.Equals, trial.could)
+ }
+}
+
+func (s *copierSuite) TestCopyFromLargeCollection_Readonly(c *check.C) {
+ s.testCopyFromLargeCollection(c, false)
+}
+
+func (s *copierSuite) TestCopyFromLargeCollection_Writable(c *check.C) {
+ s.testCopyFromLargeCollection(c, true)
+}
+
+func (s *copierSuite) testCopyFromLargeCollection(c *check.C, writable bool) {
+ bindtmp := c.MkDir()
+ mtxt := arvadostest.FakeManifest(100, 100, 2, 4<<20)
+ pdh := arvados.PortableDataHash(mtxt)
+ json, err := json.Marshal(arvados.Collection{ManifestText: mtxt, PortableDataHash: pdh})
+ c.Assert(err, check.IsNil)
+	err = os.WriteFile(bindtmp+"/.arvados#collection", json, 0644)
+	c.Assert(err, check.IsNil)
+ // This symlink tricks walkHostFS into calling walkMount on
+ // the fakecollection dir. If we did the obvious thing instead
+ // (i.e., mount a collection under the output dir) walkMount
+ // would see that our fakecollection dir is actually a regular
+ // directory, conclude that the mount has been deleted and
+ // replaced by a regular directory tree, and process the tree
+ // as regular files, bypassing the manifest-copying code path
+ // we're trying to test.
+ err = os.Symlink("/fakecollection", s.cp.hostOutputDir+"/fakecollection")
+ c.Assert(err, check.IsNil)
+ s.cp.mounts["/fakecollection"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: pdh,
+ Writable: writable,
+ }
+ s.cp.bindmounts = map[string]bindmount{
+ "/fakecollection": bindmount{HostPath: bindtmp, ReadOnly: !writable},
+ }
+ s.cp.manifestCache = map[string]string{pdh: mtxt}
+ err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+ c.Check(err, check.IsNil)
+ c.Log(s.log.String())
+
+ // Check some files to ensure they were copied properly.
+ // Specifically, arbitrarily check every 17th file in every
+ // 13th dir. (This is better than checking all of the files
+ // only in that it's less likely to show up as a distracting
+ // signal in CPU profiling.)
+ for i := 0; i < 100; i += 13 {
+ for j := 0; j < 100; j += 17 {
+ fnm := fmt.Sprintf("/fakecollection/dir%d/dir%d/file%d", i, j, j)
+ _, err := s.cp.staged.Stat(fnm)
+ c.Assert(err, check.IsNil, check.Commentf("%s", fnm))
+ }
+ }
+}
+
+func (s *copierSuite) TestMountBelowExcludedByGlob(c *check.C) {
+ bindtmp := c.MkDir()
+ s.cp.mounts["/ctr/outdir/include/includer"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: arvadostest.FooCollectionPDH,
+ }
+ s.cp.mounts["/ctr/outdir/include/includew"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: arvadostest.FooCollectionPDH,
+ Writable: true,
+ }
+ s.cp.mounts["/ctr/outdir/exclude/excluder"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: arvadostest.FooCollectionPDH,
+ }
+ s.cp.mounts["/ctr/outdir/exclude/excludew"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: arvadostest.FooCollectionPDH,
+ Writable: true,
+ }
+ s.cp.mounts["/ctr/outdir/nonexistent/collection"] = arvados.Mount{
+ // As extra assurance, plant a collection that will
+ // fail if copier attempts to load its manifest. (For
+ // performance reasons it's important that copier
+ // doesn't try to load the manifest before deciding
+ // not to copy the contents.)
+ Kind: "collection",
+ PortableDataHash: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+ }
+ s.cp.globs = []string{
+ "?ncl*/*r/*",
+ "*/?ncl*/**",
+ }
+ c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/include/includer", 0755), check.IsNil)
+ c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/include/includew", 0755), check.IsNil)
+ c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/exclude/excluder", 0755), check.IsNil)
+ c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/exclude/excludew", 0755), check.IsNil)
+ s.writeFileInOutputDir(c, "include/includew/foo", "foo")
+ s.writeFileInOutputDir(c, "exclude/excludew/foo", "foo")
+	s.cp.bindmounts = map[string]bindmount{
+		"/ctr/outdir/include/includew": bindmount{HostPath: bindtmp, ReadOnly: false},
+		"/ctr/outdir/exclude/excludew": bindmount{HostPath: bindtmp, ReadOnly: false},
+	}
+
+ err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+ c.Check(err, check.IsNil)
+ c.Log(s.log.String())
+
+ // Note it's OK that "/exclude" is not excluded by walkMount:
+ // it is just a local filesystem directory, not a mount point
+ // that's expensive to walk. In real-life usage, it will be
+ // removed from cp.dirs before any copying happens.
+ c.Check(s.cp.dirs, check.DeepEquals, []string{"/exclude", "/include", "/include/includew"})
+ c.Check(s.cp.files, check.DeepEquals, []filetodo{
+ {src: s.cp.hostOutputDir + "/include/includew/foo", dst: "/include/includew/foo", size: 3},
+ })
+ manifest, err := s.cp.staged.MarshalManifest(".")
+ c.Assert(err, check.IsNil)
+ c.Check(manifest, check.Matches, `(?ms).*\./include/includer .*`)
+ c.Check(manifest, check.Not(check.Matches), `(?ms).*exclude.*`)
+ c.Check(s.log.String(), check.Matches, `(?ms).*not copying \\"exclude/excluder\\".*`)
+ c.Check(s.log.String(), check.Matches, `(?ms).*not copying \\"nonexistent/collection\\".*`)
+}
+
func (s *copierSuite) writeFileInOutputDir(c *check.C, path, data string) {
f, err := os.OpenFile(s.cp.hostOutputDir+"/"+path, os.O_CREATE|os.O_WRONLY, 0644)
c.Assert(err, check.IsNil)
@@ -222,3 +534,184 @@ func (s *copierSuite) writeFileInOutputDir(c *check.C, path, data string) {
c.Assert(err, check.IsNil)
c.Assert(f.Close(), check.IsNil)
}
+
+// applyGlobsToFilesAndDirs uses the same glob-matching code as
+// applyGlobsToStaged, so we don't need to repeat all of the
+// glob-matching behavior covered in TestApplyGlobsToCollectionFS. We
+// do need to check that (a) the globs are actually being used to
+// filter out files, and (b) non-matching dirs are still included if
+// and only if they are ancestors of matching files or dirs.
+func (s *copierSuite) TestApplyGlobsToFilesAndDirs(c *check.C) {
+ dirs := []string{"dir1", "dir1/dir11", "dir1/dir12", "dir2"}
+ files := []string{"dir1/file11", "dir1/dir11/file111", "dir2/file2"}
+ for _, trial := range []struct {
+ globs []string
+ dirs []string
+ files []string
+ }{
+ {
+ globs: []string{},
+ dirs: append([]string{}, dirs...),
+ files: append([]string{}, files...),
+ },
+ {
+ globs: []string{"**"},
+ dirs: append([]string{}, dirs...),
+ files: append([]string{}, files...),
+ },
+ {
+ globs: []string{"**/file111"},
+ dirs: []string{"dir1", "dir1/dir11"},
+ files: []string{"dir1/dir11/file111"},
+ },
+ {
+ globs: []string{"nothing"},
+ dirs: nil,
+ files: nil,
+ },
+ {
+ globs: []string{"**/dir12"},
+ dirs: []string{"dir1", "dir1/dir12"},
+ files: nil,
+ },
+ {
+ globs: []string{"**/file*"},
+ dirs: []string{"dir1", "dir1/dir11", "dir2"},
+ files: append([]string{}, files...),
+ },
+ {
+ globs: []string{"**/dir1[12]"},
+ dirs: []string{"dir1", "dir1/dir11", "dir1/dir12"},
+ files: nil,
+ },
+ {
+ globs: []string{"**/dir1[^2]"},
+ dirs: []string{"dir1", "dir1/dir11"},
+ files: nil,
+ },
+ {
+ globs: []string{"dir1/**"},
+ dirs: []string{"dir1", "dir1/dir11", "dir1/dir12"},
+ files: []string{"dir1/file11", "dir1/dir11/file111"},
+ },
+ } {
+ c.Logf("=== globs: %q", trial.globs)
+ cp := copier{
+ globs: trial.globs,
+ dirs: dirs,
+ }
+ for _, path := range files {
+ cp.files = append(cp.files, filetodo{dst: path})
+ }
+ cp.applyGlobsToFilesAndDirs()
+ var gotFiles []string
+ for _, file := range cp.files {
+ gotFiles = append(gotFiles, file.dst)
+ }
+ c.Check(cp.dirs, check.DeepEquals, trial.dirs)
+ c.Check(gotFiles, check.DeepEquals, trial.files)
+ }
+}
+
+func (s *copierSuite) TestApplyGlobsToCollectionFS(c *check.C) {
+ for _, trial := range []struct {
+ globs []string
+ expect []string
+ }{
+ {
+ globs: nil,
+ expect: []string{"foo", "bar", "baz/quux", "baz/parent1/item1"},
+ },
+ {
+ globs: []string{"foo"},
+ expect: []string{"foo"},
+ },
+ {
+ globs: []string{"baz/parent1/item1"},
+ expect: []string{"baz/parent1/item1"},
+ },
+ {
+ globs: []string{"**"},
+ expect: []string{"foo", "bar", "baz/quux", "baz/parent1/item1"},
+ },
+ {
+ globs: []string{"**/*"},
+ expect: []string{"foo", "bar", "baz/quux", "baz/parent1/item1"},
+ },
+ {
+ globs: []string{"*"},
+ expect: []string{"foo", "bar"},
+ },
+ {
+ globs: []string{"baz"},
+ expect: nil,
+ },
+ {
+ globs: []string{"b*/**"},
+ expect: []string{"baz/quux", "baz/parent1/item1"},
+ },
+ {
+ globs: []string{"baz"},
+ expect: nil,
+ },
+ {
+ globs: []string{"baz/**"},
+ expect: []string{"baz/quux", "baz/parent1/item1"},
+ },
+ {
+ globs: []string{"baz/*"},
+ expect: []string{"baz/quux"},
+ },
+ {
+ globs: []string{"baz/**/*uu?"},
+ expect: []string{"baz/quux"},
+ },
+ {
+ globs: []string{"**/*m1"},
+ expect: []string{"baz/parent1/item1"},
+ },
+ {
+ globs: []string{"*/*/*/**/*1"},
+ expect: nil,
+ },
+ {
+ globs: []string{"f*", "**/q*"},
+ expect: []string{"foo", "baz/quux"},
+ },
+ {
+ globs: []string{"\\"}, // invalid pattern matches nothing
+ expect: nil,
+ },
+ {
+ globs: []string{"\\", "foo"},
+ expect: []string{"foo"},
+ },
+ {
+ globs: []string{"foo/**"},
+ expect: nil,
+ },
+ {
+ globs: []string{"foo*/**"},
+ expect: nil,
+ },
+ } {
+ c.Logf("=== globs: %q", trial.globs)
+ collfs, err := (&arvados.Collection{ManifestText: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:bar 0:0:baz/quux 0:0:baz/parent1/item1\n"}).FileSystem(nil, nil)
+ c.Assert(err, check.IsNil)
+ cp := copier{globs: trial.globs, staged: collfs}
+ err = cp.applyGlobsToStaged()
+ if !c.Check(err, check.IsNil) {
+ continue
+ }
+ var got []string
+ fs.WalkDir(arvados.FS(collfs), "", func(path string, ent fs.DirEntry, err error) error {
+ if !ent.IsDir() {
+ got = append(got, path)
+ }
+ return nil
+ })
+ sort.Strings(got)
+ sort.Strings(trial.expect)
+ c.Check(got, check.DeepEquals, trial.expect)
+ }
+}
diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 556a3bfe13..a0d5f1aa69 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -40,7 +40,6 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
- "git.arvados.org/arvados.git/sdk/go/manifest"
"golang.org/x/sys/unix"
)
@@ -74,16 +73,13 @@ var ErrCancelled = errors.New("Cancelled")
// IKeepClient is the minimal Keep API methods used by crunch-run.
type IKeepClient interface {
+ BlockRead(context.Context, arvados.BlockReadOptions) (int, error)
BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
ReadAt(locator string, p []byte, off int) (int, error)
- ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
LocalLocator(locator string) (string, error)
SetStorageClasses(sc []string)
}
-// NewLogWriter is a factory function to create a new log writer.
-type NewLogWriter func(name string) (io.WriteCloser, error)
-
type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)
type MkTempDir func(string, string) (string, error)
@@ -125,12 +121,12 @@ type ContainerRunner struct {
Container arvados.Container
token string
ExitCode *int
- NewLogWriter NewLogWriter
- CrunchLog *ThrottledLogger
+ CrunchLog *logWriter
logUUID string
+ logPDH string
logMtx sync.Mutex
LogCollection arvados.CollectionFileSystem
- LogsPDH *string
+ logPDHFinal *string
RunArvMount RunArvMount
MkTempDir MkTempDir
ArvMount *exec.Cmd
@@ -168,7 +164,7 @@ type ContainerRunner struct {
enableNetwork string // one of "default" or "always"
networkMode string // "none", "host", or "" -- passed through to executor
brokenNodeHook string // script to run if node appears to be broken
- arvMountLog *ThrottledLogger
+ arvMountLog io.WriteCloser
containerWatchdogInterval time.Duration
@@ -213,6 +209,7 @@ var errorBlacklist = []string{
"(?ms).*[Cc]annot connect to the Docker daemon.*",
"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
"(?ms).*grpc: the connection is unavailable.*",
+ "(?ms).*no space left on device.*",
}
func (runner *ContainerRunner) runBrokenNodeHook() {
@@ -303,11 +300,10 @@ func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *e
}
c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
- w, err := runner.NewLogWriter("arv-mount")
+ runner.arvMountLog, err = runner.openLogFile("arv-mount")
if err != nil {
return nil, err
}
- runner.arvMountLog = NewThrottledLogger(w)
scanner := logScanner{
Patterns: []string{
"Keep write error",
@@ -321,8 +317,8 @@ func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *e
})
},
}
- c.Stdout = runner.arvMountLog
- c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
+ c.Stdout = newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr))
+ c.Stderr = io.MultiWriter(&scanner, newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr)))
runner.CrunchLog.Printf("Running %v", c.Args)
@@ -626,17 +622,6 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
// OutputPath is a staging directory.
bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
}
-
- case mnt.Kind == "git_tree":
- tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
- if err != nil {
- return nil, fmt.Errorf("creating temp dir: %v", err)
- }
- err = gitMount(mnt).extractTree(runner.containerClient, tmpdir, token)
- if err != nil {
- return nil, err
- }
- bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
}
}
@@ -746,13 +731,13 @@ func (runner *ContainerRunner) stopHoststat() error {
}
func (runner *ContainerRunner) startHoststat() error {
- w, err := runner.NewLogWriter("hoststat")
+ var err error
+ runner.hoststatLogger, err = runner.openLogFile("hoststat")
if err != nil {
return err
}
- runner.hoststatLogger = NewThrottledLogger(w)
runner.hoststatReporter = &crunchstat.Reporter{
- Logger: log.New(runner.hoststatLogger, "", 0),
+ Logger: newLogWriter(newTimestamper(runner.hoststatLogger)),
// Our own cgroup is the "host" cgroup, in the sense
// that it accounts for resource usage outside the
// container. It doesn't count _all_ resource usage on
@@ -770,15 +755,15 @@ func (runner *ContainerRunner) startHoststat() error {
}
func (runner *ContainerRunner) startCrunchstat() error {
- w, err := runner.NewLogWriter("crunchstat")
+ var err error
+ runner.statLogger, err = runner.openLogFile("crunchstat")
if err != nil {
return err
}
- runner.statLogger = NewThrottledLogger(w)
runner.statReporter = &crunchstat.Reporter{
Pid: runner.executor.Pid,
FS: runner.crunchstatFakeFS,
- Logger: log.New(runner.statLogger, "", 0),
+ Logger: newLogWriter(newTimestamper(runner.statLogger)),
MemThresholds: map[string][]crunchstat.Threshold{
"rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
},
@@ -801,7 +786,7 @@ type infoCommand struct {
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
- w, err := runner.NewLogWriter("node-info")
+ w, err := runner.openLogFile("node-info")
if err != nil {
return
}
@@ -852,62 +837,40 @@ func (runner *ContainerRunner) LogHostInfo() (err error) {
// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
- logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+ logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}})
if !logged && err == nil {
err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
}
return err
}
-// LogNodeRecord logs the current host's InstanceType config entry (or
-// the arvados#node record, if running via crunch-dispatch-slurm).
+// LogNodeRecord logs the current host's InstanceType config entry, if
+// running via arvados-dispatch-cloud.
func (runner *ContainerRunner) LogNodeRecord() error {
- if it := os.Getenv("InstanceType"); it != "" {
- // Dispatched via arvados-dispatch-cloud. Save
- // InstanceType config fragment received from
- // dispatcher on stdin.
- w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
- if err != nil {
- return err
- }
- defer w.Close()
- _, err = io.WriteString(w, it)
- if err != nil {
- return err
- }
- return w.Close()
+ it := os.Getenv("InstanceType")
+ if it == "" {
+ // Not dispatched by arvados-dispatch-cloud.
+ return nil
}
- // Dispatched via crunch-dispatch-slurm. Look up
- // apiserver's node record corresponding to
- // $SLURMD_NODENAME.
- hostname := os.Getenv("SLURMD_NODENAME")
- if hostname == "" {
- hostname, _ = os.Hostname()
+ // Save InstanceType config fragment received from dispatcher
+ // on stdin.
+ w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return err
}
- _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
- // The "info" field has admin-only info when
- // obtained with a privileged token, and
- // should not be logged.
- node, ok := resp.(map[string]interface{})
- if ok {
- delete(node, "info")
- }
- })
- return err
+ defer w.Close()
+ _, err = io.WriteString(w, it)
+ if err != nil {
+ return err
+ }
+ return w.Close()
}
-func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}) (logged bool, err error) {
writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return false, err
}
- w := &ArvLogWriter{
- ArvClient: runner.DispatcherArvClient,
- UUID: runner.Container.UUID,
- loggingStream: label,
- writeCloser: writer,
- }
-
reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
if err != nil {
return false, fmt.Errorf("error getting %s record: %v", label, err)
@@ -926,16 +889,13 @@ func (runner *ContainerRunner) logAPIResponse(label, path string, params map[str
} else if len(items) < 1 {
return false, nil
}
- if munge != nil {
- munge(items[0])
- }
// Re-encode it using indentation to improve readability
- enc := json.NewEncoder(w)
+ enc := json.NewEncoder(writer)
enc.SetIndent("", " ")
if err = enc.Encode(items[0]); err != nil {
return false, fmt.Errorf("error logging %s record: %v", label, err)
}
- err = w.Close()
+ err = writer.Close()
if err != nil {
return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
}
@@ -969,7 +929,7 @@ func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
- var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+ var stdin io.Reader
if mnt, ok := runner.Container.Mounts["stdin"]; ok {
switch mnt.Kind {
case "collection":
@@ -985,28 +945,35 @@ func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[st
return err
}
stdin = f
+ runner.executorStdin = f
case "json":
j, err := json.Marshal(mnt.Content)
if err != nil {
return fmt.Errorf("error encoding stdin json data: %v", err)
}
- stdin = ioutil.NopCloser(bytes.NewReader(j))
+ stdin = bytes.NewReader(j)
+ runner.executorStdin = io.NopCloser(nil)
default:
return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
}
+ } else {
+ stdin = bytes.NewReader(nil)
+		runner.executorStdin = io.NopCloser(nil)
}
- var stdout, stderr io.WriteCloser
+ var stdout, stderr io.Writer
if mnt, ok := runner.Container.Mounts["stdout"]; ok {
f, err := runner.getStdoutFile(mnt.Path)
if err != nil {
return err
}
stdout = f
- } else if w, err := runner.NewLogWriter("stdout"); err != nil {
+ runner.executorStdout = f
+ } else if w, err := runner.openLogFile("stdout"); err != nil {
return err
} else {
- stdout = NewThrottledLogger(w)
+ stdout = newTimestamper(w)
+ runner.executorStdout = w
}
if mnt, ok := runner.Container.Mounts["stderr"]; ok {
@@ -1015,10 +982,12 @@ func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[st
return err
}
stderr = f
- } else if w, err := runner.NewLogWriter("stderr"); err != nil {
+ runner.executorStderr = f
+ } else if w, err := runner.openLogFile("stderr"); err != nil {
return err
} else {
- stderr = NewThrottledLogger(w)
+ stderr = newTimestamper(w)
+ runner.executorStderr = w
}
env := runner.Container.Environment
@@ -1047,29 +1016,27 @@ func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[st
if !runner.enableMemoryLimit {
ram = 0
}
- runner.executorStdin = stdin
- runner.executorStdout = stdout
- runner.executorStderr = stderr
- if runner.Container.RuntimeConstraints.CUDA.DeviceCount > 0 {
+ if runner.Container.RuntimeConstraints.GPU.Stack == "cuda" {
nvidiaModprobe(runner.CrunchLog)
}
return runner.executor.Create(containerSpec{
- Image: imageID,
- VCPUs: runner.Container.RuntimeConstraints.VCPUs,
- RAM: ram,
- WorkingDir: workdir,
- Env: env,
- BindMounts: bindmounts,
- Command: runner.Container.Command,
- EnableNetwork: enableNetwork,
- CUDADeviceCount: runner.Container.RuntimeConstraints.CUDA.DeviceCount,
- NetworkMode: runner.networkMode,
- CgroupParent: runner.setCgroupParent,
- Stdin: stdin,
- Stdout: stdout,
- Stderr: stderr,
+ Image: imageID,
+ VCPUs: runner.Container.RuntimeConstraints.VCPUs,
+ RAM: ram,
+ WorkingDir: workdir,
+ Env: env,
+ BindMounts: bindmounts,
+ Command: runner.Container.Command,
+ EnableNetwork: enableNetwork,
+ GPUStack: runner.Container.RuntimeConstraints.GPU.Stack,
+ GPUDeviceCount: runner.Container.RuntimeConstraints.GPU.DeviceCount,
+ NetworkMode: runner.networkMode,
+ CgroupParent: runner.setCgroupParent,
+ Stdin: stdin,
+ Stdout: stdout,
+ Stderr: stderr,
})
}
@@ -1193,7 +1160,7 @@ func (runner *ContainerRunner) updateLogs() {
saveAtTime = time.Now()
}
runner.logMtx.Lock()
- done := runner.LogsPDH != nil
+ done := runner.logPDHFinal != nil
runner.logMtx.Unlock()
if done {
return
@@ -1204,23 +1171,11 @@ func (runner *ContainerRunner) updateLogs() {
}
saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
- saved, err := runner.saveLogCollection(false)
+ err := runner.saveLogCollection(false)
if err != nil {
runner.CrunchLog.Printf("error updating log collection: %s", err)
continue
}
-
- err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
- "select": []string{"uuid"},
- "container": arvadosclient.Dict{
- "log": saved.PortableDataHash,
- },
- }, nil)
- if err != nil {
- runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
- continue
- }
-
savedSize = size
}
}
@@ -1235,7 +1190,7 @@ func (runner *ContainerRunner) checkSpotInterruptionNotices() {
Action string `json:"action"`
Time time.Time `json:"time"`
}
- runner.CrunchLog.Printf("Checking for spot interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
+ runner.CrunchLog.Printf("Checking for spot instance interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
var metadata ec2metadata
var token string
var tokenExp time.Time
@@ -1273,7 +1228,6 @@ func (runner *ContainerRunner) checkSpotInterruptionNotices() {
return err
}
defer resp.Body.Close()
- metadata = ec2metadata{}
switch resp.StatusCode {
case http.StatusOK:
break
@@ -1284,6 +1238,7 @@ func (runner *ContainerRunner) checkSpotInterruptionNotices() {
// instance-action is not present in the
// instance metadata and you receive an HTTP
// 404 error when you try to retrieve it."
+ metadata = ec2metadata{}
return nil
case http.StatusUnauthorized:
token = ""
@@ -1291,10 +1246,12 @@ func (runner *ContainerRunner) checkSpotInterruptionNotices() {
default:
return fmt.Errorf("%s", resp.Status)
}
- err = json.NewDecoder(resp.Body).Decode(&metadata)
+ nextmetadata := ec2metadata{}
+ err = json.NewDecoder(resp.Body).Decode(&nextmetadata)
if err != nil {
return err
}
+ metadata = nextmetadata
return nil
}
failures := 0
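
[Editor's note] Decoding into nextmetadata rather than metadata is a defensive pattern: encoding/json can partially populate the target before returning an error (for example, a type mismatch on a later field), so decoding straight into shared state could clobber the last good value. A small demonstration with a struct shaped like ec2metadata:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

type meta struct {
	Action string    `json:"action"`
	Time   time.Time `json:"time"`
}

func main() {
	last := meta{Action: "known-good"}
	// "action" decodes fine; "time" then fails with a type error.
	bad := strings.NewReader(`{"action":"terminate","time":12345}`)

	next := meta{}
	if err := json.NewDecoder(bad).Decode(&next); err != nil {
		// next.Action is already "terminate", but last is intact.
		fmt.Println("decode failed, keeping:", last.Action)
		return
	}
	last = next
	fmt.Println("updated:", last.Action)
}
```
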
@@ -1302,16 +1259,17 @@ func (runner *ContainerRunner) checkSpotInterruptionNotices() {
for range time.NewTicker(spotInterruptionCheckInterval).C {
err := check()
if err != nil {
- runner.CrunchLog.Printf("Error checking spot interruptions: %s", err)
- failures++
- if failures > 5 {
- runner.CrunchLog.Printf("Giving up on checking spot interruptions after too many consecutive failures")
+ message := fmt.Sprintf("Spot instance interruption check was inconclusive: %s", err)
+ if failures++; failures > 5 {
+ runner.CrunchLog.Printf("%s -- now giving up after too many consecutive errors", message)
return
+ } else {
+ runner.CrunchLog.Printf("%s -- will retry in %v", message, spotInterruptionCheckInterval)
+ continue
}
- continue
}
failures = 0
- if metadata != lastmetadata {
+ if metadata.Action != "" && metadata != lastmetadata {
lastmetadata = metadata
text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
runner.CrunchLog.Printf("%s", text)
@@ -1365,6 +1323,7 @@ func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) er
keepClient: runner.ContainerKeepClient,
hostOutputDir: runner.HostOutputDir,
ctrOutputDir: runner.Container.OutputPath,
+ globs: runner.Container.OutputGlob,
bindmounts: bindmounts,
mounts: runner.Container.Mounts,
secretMounts: runner.SecretMounts,
@@ -1470,19 +1429,11 @@ func (runner *ContainerRunner) CommitLogs() error {
if runner.arvMountLog != nil {
runner.arvMountLog.Close()
}
- runner.CrunchLog.Close()
-
- // Closing CrunchLog above allows them to be committed to Keep at this
- // point, but re-open crunch log with ArvClient in case there are any
- // other further errors (such as failing to write the log to Keep!)
- // while shutting down
- runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
- ArvClient: runner.DispatcherArvClient,
- UUID: runner.Container.UUID,
- loggingStream: "crunch-run",
- writeCloser: nil,
- })
- runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+
+		// From now on, just log to stderr, in case there are
+		// any further errors (such as failing to write the log
+		// to Keep!) while shutting down.
+ runner.CrunchLog = newLogWriter(newTimestamper(newStringPrefixer(os.Stderr, runner.Container.UUID+" ")))
}()
if runner.keepstoreLogger != nil {
@@ -1495,8 +1446,8 @@ func (runner *ContainerRunner) CommitLogs() error {
runner.keepstoreLogger = nil
}
- if runner.LogsPDH != nil {
- // If we have already assigned something to LogsPDH,
+ if runner.logPDHFinal != nil {
+ // If we have already assigned something to logPDHFinal,
// we must be closing the re-opened log, which won't
// end up getting attached to the container record and
// therefore doesn't need to be saved as a collection
@@ -1504,30 +1455,28 @@ func (runner *ContainerRunner) CommitLogs() error {
return nil
}
- saved, err := runner.saveLogCollection(true)
- if err != nil {
- return fmt.Errorf("error saving log collection: %s", err)
- }
- runner.logMtx.Lock()
- defer runner.logMtx.Unlock()
- runner.LogsPDH = &saved.PortableDataHash
- return nil
+ return runner.saveLogCollection(true)
}
-// Create/update the log collection. Return value has UUID and
-// PortableDataHash fields populated, but others may be blank.
-func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
+// Flush buffered logs to Keep and create/update the log collection.
+//
+// Also update the container record with the updated log PDH -- except
+// this part is skipped if (a) the container hasn't entered Running
+// state yet, meaning we can't assign a log value, or (b) final==true,
+// meaning the caller will immediately update the container record to
+// Complete state and update the log PDH in the same API call.
+func (runner *ContainerRunner) saveLogCollection(final bool) error {
runner.logMtx.Lock()
defer runner.logMtx.Unlock()
- if runner.LogsPDH != nil {
+ if runner.logPDHFinal != nil {
// Already finalized.
- return
+ return nil
}
updates := arvadosclient.Dict{
"name": "logs for " + runner.Container.UUID,
}
- mt, err1 := runner.LogCollection.MarshalManifest(".")
- if err1 == nil {
+ mt, errFlush := runner.LogCollection.MarshalManifest(".")
+ if errFlush == nil {
// Only send updated manifest text if there was no
// error.
updates["manifest_text"] = mt
@@ -1554,43 +1503,65 @@ func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.C
"select": []string{"uuid", "portable_data_hash"},
"collection": updates,
}
- var err2 error
+ var saved arvados.Collection
+ var errUpdate error
if runner.logUUID == "" {
reqBody["ensure_unique_name"] = true
- err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
+ errUpdate = runner.DispatcherArvClient.Create("collections", reqBody, &saved)
} else {
- err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
+ errUpdate = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &saved)
}
- if err2 == nil {
- runner.logUUID = response.UUID
+ if errUpdate == nil {
+ runner.logUUID = saved.UUID
+ runner.logPDH = saved.PortableDataHash
}
- if err1 != nil || err2 != nil {
- err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
+ if errFlush != nil || errUpdate != nil {
+ return fmt.Errorf("error recording logs: %q, %q", errFlush, errUpdate)
}
- return
+ if final {
+ runner.logPDHFinal = &saved.PortableDataHash
+ }
+ if final || runner.finalState == "Queued" {
+ // If final, the caller (Run -> CommitLogs) will
+ // immediately update the log attribute to logPDHFinal
+ // while setting state to Complete, so it would be
+ // redundant to do it here.
+ //
+ // If runner.finalState=="Queued", the container state
+ // has not changed to "Running", so updating the log
+ // attribute is not allowed.
+ return nil
+ }
+ return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{
+ "log": saved.PortableDataHash,
+ },
+ }, nil)
}
// UpdateContainerRunning updates the container state to "Running"
-func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
+func (runner *ContainerRunner) UpdateContainerRunning() error {
+ runner.logMtx.Lock()
+ logPDH := runner.logPDH
+ runner.logMtx.Unlock()
+
runner.cStateLock.Lock()
defer runner.cStateLock.Unlock()
if runner.cCancelled {
return ErrCancelled
}
- updates := arvadosclient.Dict{
- "gateway_address": runner.gateway.Address,
- "state": "Running",
- }
- if logId != "" {
- updates["log"] = logId
- }
return runner.DispatcherArvClient.Update(
"containers",
runner.Container.UUID,
arvadosclient.Dict{
- "select": []string{"uuid"},
- "container": updates,
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{
+ "gateway_address": runner.gateway.Address,
+ "state": "Running",
+ "log": logPDH,
+ },
},
nil,
)
@@ -1617,8 +1588,8 @@ func (runner *ContainerRunner) ContainerToken() (string, error) {
func (runner *ContainerRunner) UpdateContainerFinal() error {
update := arvadosclient.Dict{}
update["state"] = runner.finalState
- if runner.LogsPDH != nil {
- update["log"] = *runner.LogsPDH
+ if runner.logPDHFinal != nil {
+ update["log"] = *runner.logPDHFinal
}
if runner.ExitCode != nil {
update["exit_code"] = *runner.ExitCode
@@ -1642,18 +1613,8 @@ func (runner *ContainerRunner) IsCancelled() bool {
return runner.cCancelled
}
-// NewArvLogWriter creates an ArvLogWriter
-func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
- writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
- if err != nil {
- return nil, err
- }
- return &ArvLogWriter{
- ArvClient: runner.DispatcherArvClient,
- UUID: runner.Container.UUID,
- loggingStream: name,
- writeCloser: writer,
- }, nil
+func (runner *ContainerRunner) openLogFile(name string) (io.WriteCloser, error) {
+ return runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
}
// Run the full container lifecycle.
@@ -1683,9 +1644,7 @@ func (runner *ContainerRunner) Run() (err error) {
defer func() {
runner.CleanupDirs()
-
runner.CrunchLog.Printf("crunch-run finished")
- runner.CrunchLog.Close()
}()
err = runner.fetchContainerRecord()
@@ -1732,6 +1691,11 @@ func (runner *ContainerRunner) Run() (err error) {
}
if bindmounts != nil {
+ if errSave := runner.saveLogCollection(false); errSave != nil {
+ // This doesn't merit failing the
+ // container, but should be logged.
+ runner.CrunchLog.Printf("error saving log collection: %v", errSave)
+ }
checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
}
checkErr("stopHoststat", runner.stopHoststat())
@@ -1765,7 +1729,7 @@ func (runner *ContainerRunner) Run() (err error) {
// condition, probably user error.
runner.finalState = "Cancelled"
}
- err = fmt.Errorf("While loading container image: %v", err)
+ err = fmt.Errorf("failed to load container image: %v", err)
return
}
@@ -1790,14 +1754,11 @@ func (runner *ContainerRunner) Run() (err error) {
return
}
- logCollection, err := runner.saveLogCollection(false)
- var logId string
- if err == nil {
- logId = logCollection.PortableDataHash
- } else {
- runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
+ err = runner.saveLogCollection(false)
+ if err != nil {
+ return
}
- err = runner.UpdateContainerRunning(logId)
+ err = runner.UpdateContainerRunning()
if err != nil {
return
}
@@ -1879,7 +1840,6 @@ func NewContainerRunner(dispatcherClient *arvados.Client,
DispatcherArvClient: dispatcherArvClient,
DispatcherKeepClient: dispatcherKeepClient,
}
- cr.NewLogWriter = cr.NewArvLogWriter
cr.RunArvMount = cr.ArvMountCmd
cr.MkTempDir = ioutil.TempDir
cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
@@ -1887,11 +1847,13 @@ func NewContainerRunner(dispatcherClient *arvados.Client,
if err != nil {
return nil, nil, nil, err
}
+ cl.Retries = 10
cl.ApiToken = token
kc, err := keepclient.MakeKeepClient(cl)
if err != nil {
return nil, nil, nil, err
}
+ kc.Retries = 10
c2 := arvados.NewClientFromEnv()
c2.AuthToken = token
return cl, kc, c2, nil
@@ -1902,14 +1864,12 @@ func NewContainerRunner(dispatcherClient *arvados.Client,
return nil, err
}
cr.Container.UUID = containerUUID
- w, err := cr.NewLogWriter("crunch-run")
+ f, err := cr.openLogFile("crunch-run")
if err != nil {
return nil, err
}
- cr.CrunchLog = NewThrottledLogger(w)
- cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
+ cr.CrunchLog = newLogWriter(newTimestamper(io.MultiWriter(f, newStringPrefixer(os.Stderr, cr.Container.UUID+" "))))
- loadLogThrottleParams(dispatcherArvClient)
go cr.updateLogs()
return cr, nil
@@ -2032,7 +1992,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
log.Printf("%s: %v", containerUUID, err)
return 1
}
- kc.Retries = 4
+ kc.Retries = 10
cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
if err != nil {
@@ -2054,12 +2014,11 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
keepstoreLogbuf.SetWriter(io.Discard)
} else {
cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
- logwriter, err := cr.NewLogWriter("keepstore")
+ cr.keepstoreLogger, err = cr.openLogFile("keepstore")
if err != nil {
log.Print(err)
return 1
}
- cr.keepstoreLogger = NewThrottledLogger(logwriter)
var writer io.WriteCloser = cr.keepstoreLogger
if logWhat == "errors" {
@@ -2085,13 +2044,11 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
default:
cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
- cr.CrunchLog.Close()
return 1
}
if err != nil {
cr.CrunchLog.Printf("%s: %v", containerUUID, err)
cr.checkBrokenNode(err)
- cr.CrunchLog.Close()
return 1
}
defer cr.executor.Close()
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 276dd36661..ec58a9aa4e 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -12,8 +12,8 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"io/ioutil"
- "log"
"math/rand"
"net/http"
"net/http/httptest"
@@ -36,11 +36,8 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/manifest"
. "gopkg.in/check.v1"
- git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
- git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)
// Gocheck boilerplate
@@ -113,6 +110,7 @@ func (s *TestSuite) SetUpTest(c *C) {
err = ioutil.WriteFile(s.keepmount+"/by_id/"+fakeInputCollectionPDH+"/input.json", []byte(`{"input":true}`), 0644)
c.Assert(err, IsNil)
s.runner.ArvMountPoint = s.keepmount
+ os.Setenv("InstanceType", `{"ProviderType":"a1.2xlarge","Price":1.2}`)
}
type ArvTestClient struct {
@@ -121,7 +119,6 @@ type ArvTestClient struct {
Content []arvadosclient.Dict
arvados.Container
secretMounts []byte
- Logs map[string]*bytes.Buffer
sync.Mutex
WasSetRunning bool
callraw bool
@@ -129,8 +126,8 @@ type ArvTestClient struct {
type KeepTestClient struct {
Called bool
- Content []byte
StorageClasses []string
+ blocks sync.Map
}
type stubExecutor struct {
@@ -207,14 +204,7 @@ func (client *ArvTestClient) Create(resourceType string,
client.Content = append(client.Content, parameters)
if resourceType == "logs" {
- et := parameters["log"].(arvadosclient.Dict)["event_type"].(string)
- if client.Logs == nil {
- client.Logs = make(map[string]*bytes.Buffer)
- }
- if client.Logs[et] == nil {
- client.Logs[et] = &bytes.Buffer{}
- }
- client.Logs[et].Write([]byte(parameters["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"]))
+ panic("logs.create called")
}
if resourceType == "collections" && output != nil {
@@ -311,20 +301,14 @@ func (client *ArvTestClient) Update(resourceType string, uuid string, parameters
} else if resourceType == "collections" && output != nil {
mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
output.(*arvados.Collection).UUID = uuid
- output.(*arvados.Collection).PortableDataHash = fmt.Sprintf("%x", md5.Sum([]byte(mt)))
+ output.(*arvados.Collection).PortableDataHash = arvados.PortableDataHash(mt)
}
return nil
}
var discoveryMap = map[string]interface{}{
- "defaultTrashLifetime": float64(1209600),
- "crunchLimitLogBytesPerJob": float64(67108864),
- "crunchLogThrottleBytes": float64(65536),
- "crunchLogThrottlePeriod": float64(60),
- "crunchLogThrottleLines": float64(1024),
- "crunchLogPartialLineThrottlePeriod": float64(5),
- "crunchLogBytesPerEvent": float64(4096),
- "crunchLogSecondsBetweenEvents": float64(1),
+ "crunchLogUpdateSize": float64(crunchLogUpdateSize),
+ "crunchLogUpdatePeriod": float64(crunchLogUpdatePeriod.Seconds()),
}
func (client *ArvTestClient) Discovery(key string) (interface{}, error) {
@@ -358,18 +342,39 @@ func (client *KeepTestClient) LocalLocator(locator string) (string, error) {
}
func (client *KeepTestClient) BlockWrite(_ context.Context, opts arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
- client.Content = opts.Data
+ locator := fmt.Sprintf("%x+%d", md5.Sum(opts.Data), len(opts.Data))
+ client.blocks.Store(locator, append([]byte(nil), opts.Data...))
return arvados.BlockWriteResponse{
- Locator: fmt.Sprintf("%x+%d", md5.Sum(opts.Data), len(opts.Data)),
+ Locator: locator,
}, nil
}
-func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
- return 0, errors.New("not implemented")
+func (client *KeepTestClient) BlockRead(_ context.Context, opts arvados.BlockReadOptions) (int, error) {
+ loaded, ok := client.blocks.Load(opts.Locator)
+ if !ok {
+ return 0, os.ErrNotExist
+ }
+ n, err := io.Copy(opts.WriteTo, bytes.NewReader(loaded.([]byte)))
+ return int(n), err
+}
+
+func (client *KeepTestClient) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ loaded, ok := client.blocks.Load(locator)
+ if !ok {
+ return 0, os.ErrNotExist
+ }
+ data := loaded.([]byte)
+ if offset >= len(data) {
+ return 0, io.EOF
+ }
+ return copy(dst, data[offset:]), nil
}
func (client *KeepTestClient) Close() {
- client.Content = nil
+ client.blocks.Range(func(locator, value interface{}) bool {
+ client.blocks.Delete(locator)
+ return true
+ })
}
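// Editor's note: a minimal usage sketch, not part of the diff, showing the
// round trip supported by the sync.Map-backed stub above. Block locators
// follow Keep's "<md5 hex>+<size>" convention, so writing "foo" yields
// "acbd18db4cc2f85cedef654fccc4a4d8+3":
//
//	var kc KeepTestClient
//	resp, _ := kc.BlockWrite(context.Background(),
//		arvados.BlockWriteOptions{Data: []byte("foo")})
//	var buf bytes.Buffer
//	kc.BlockRead(context.Background(),
//		arvados.BlockReadOptions{Locator: resp.Locator, WriteTo: &buf})
//	// buf.String() == "foo"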
func (client *KeepTestClient) SetStorageClasses(sc []string) {
@@ -417,26 +422,12 @@ func (fw FileWrapper) Splice(*arvados.Subtree) error {
return errors.New("not implemented")
}
-func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
- if filename == hwImageID+".tar" {
- rdr := ioutil.NopCloser(&bytes.Buffer{})
- client.Called = true
- return FileWrapper{rdr, 1321984}, nil
- } else if filename == "/file1_in_main.txt" {
- rdr := ioutil.NopCloser(strings.NewReader("foo"))
- client.Called = true
- return FileWrapper{rdr, 3}, nil
- }
- return nil, nil
-}
-
type apiStubServer struct {
server *httptest.Server
proxy *httputil.ReverseProxy
intercept func(http.ResponseWriter, *http.Request) bool
container arvados.Container
- logs map[string]string
}
func apiStub() (*arvados.Client, *apiStubServer) {
@@ -455,19 +446,6 @@ func (apistub *apiStubServer) ServeHTTP(w http.ResponseWriter, r *http.Request)
if apistub.intercept != nil && apistub.intercept(w, r) {
return
}
- if r.Method == "POST" && r.URL.Path == "/arvados/v1/logs" {
- var body struct {
- Log struct {
- EventType string `json:"event_type"`
- Properties struct {
- Text string
- }
- }
- }
- json.NewDecoder(r.Body).Decode(&body)
- apistub.logs[body.Log.EventType] += body.Log.Properties.Text
- return
- }
if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+hwPDH {
json.NewEncoder(w).Encode(arvados.Collection{ManifestText: hwManifest})
return
@@ -559,10 +537,6 @@ type KeepErrorTestClient struct {
KeepTestClient
}
-func (*KeepErrorTestClient) ManifestFileReader(manifest.Manifest, string) (arvados.File, error) {
- return nil, errors.New("KeepError")
-}
-
func (*KeepErrorTestClient) BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
return arvados.BlockWriteResponse{}, errors.New("KeepError")
}
@@ -579,45 +553,6 @@ func (*KeepReadErrorTestClient) ReadAt(string, []byte, int) (int, error) {
return 0, errors.New("KeepError")
}
-type ErrorReader struct {
- FileWrapper
-}
-
-func (ErrorReader) Read(p []byte) (n int, err error) {
- return 0, errors.New("ErrorReader")
-}
-
-func (ErrorReader) Seek(int64, int) (int64, error) {
- return 0, errors.New("ErrorReader")
-}
-
-func (KeepReadErrorTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
- return ErrorReader{}, nil
-}
-
-type ClosableBuffer struct {
- bytes.Buffer
-}
-
-func (*ClosableBuffer) Close() error {
- return nil
-}
-
-type TestLogs struct {
- Stdout ClosableBuffer
- Stderr ClosableBuffer
-}
-
-func (tl *TestLogs) NewTestLoggingWriter(logstr string) (io.WriteCloser, error) {
- if logstr == "stdout" {
- return &tl.Stdout, nil
- }
- if logstr == "stderr" {
- return &tl.Stderr, nil
- }
- return nil, errors.New("???")
-}
-
func dockerLog(fd byte, msg string) []byte {
by := []byte(msg)
header := make([]byte, 8+len(by))
@@ -633,8 +568,6 @@ func (s *TestSuite) TestRunContainer(c *C) {
return 0
}
- var logs TestLogs
- s.runner.NewLogWriter = logs.NewTestLoggingWriter
s.runner.Container.ContainerImage = arvadostest.DockerImage112PDH
s.runner.Container.Command = []string{"./hw"}
s.runner.Container.OutputStorageClasses = []string{"default"}
@@ -651,8 +584,8 @@ func (s *TestSuite) TestRunContainer(c *C) {
err = s.runner.WaitFinish()
c.Assert(err, IsNil)
- c.Check(logs.Stdout.String(), Matches, ".*Hello world\n")
- c.Check(logs.Stderr.String(), Equals, "")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, `2\S+Z Hello world\n`)
+ c.Check(logFileContent(c, s.runner, "stderr.txt"), Matches, ``)
}
func (s *TestSuite) TestCommitLogs(c *C) {
@@ -661,7 +594,9 @@ func (s *TestSuite) TestCommitLogs(c *C) {
defer kc.Close()
cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+ f, err := cr.openLogFile("crunch-run")
+ c.Assert(err, IsNil)
+ cr.CrunchLog = newLogWriter(newTestTimestamper(f))
cr.CrunchLog.Print("Hello world!")
cr.CrunchLog.Print("Goodbye")
@@ -670,11 +605,11 @@ func (s *TestSuite) TestCommitLogs(c *C) {
err = cr.CommitLogs()
c.Check(err, IsNil)
- c.Check(api.Calls, Equals, 2)
- c.Check(api.Content[1]["ensure_unique_name"], Equals, true)
- c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
- c.Check(*cr.LogsPDH, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
+ c.Check(api.Calls, Equals, 1)
+ c.Check(api.Content[0]["ensure_unique_name"], Equals, true)
+ c.Check(api.Content[0]["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ c.Check(api.Content[0]["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
+ c.Check(*cr.logPDHFinal, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
}
func (s *TestSuite) TestUpdateContainerRunning(c *C) {
@@ -684,7 +619,7 @@ func (s *TestSuite) TestUpdateContainerRunning(c *C) {
cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- err = cr.UpdateContainerRunning("")
+ err = cr.UpdateContainerRunning()
c.Check(err, IsNil)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
@@ -697,8 +632,8 @@ func (s *TestSuite) TestUpdateContainerComplete(c *C) {
cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- cr.LogsPDH = new(string)
- *cr.LogsPDH = "d3a229d2fe3690c2c3e75a71a153c6a3+60"
+ cr.logPDHFinal = new(string)
+ *cr.logPDHFinal = "d3a229d2fe3690c2c3e75a71a153c6a3+60"
cr.ExitCode = new(int)
*cr.ExitCode = 42
@@ -707,7 +642,7 @@ func (s *TestSuite) TestUpdateContainerComplete(c *C) {
err = cr.UpdateContainerFinal()
c.Check(err, IsNil)
- c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
+ c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.logPDHFinal)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], Equals, *cr.ExitCode)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
}
@@ -794,10 +729,7 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, fn
}
if err != nil {
- for k, v := range s.api.Logs {
- c.Log(k)
- c.Log(v.String())
- }
+ dumpAllLogFiles(c, s.runner)
}
return s.api, s.runner, realTemp
@@ -825,14 +757,14 @@ func (s *TestSuite) TestFullRunHello(c *C) {
c.Check(s.executor.created.RAM, Equals, int64(1000000))
c.Check(s.executor.created.NetworkMode, Equals, "default")
c.Check(s.executor.created.EnableNetwork, Equals, false)
- c.Check(s.executor.created.CUDADeviceCount, Equals, 0)
+ c.Check(s.executor.created.GPUDeviceCount, Equals, 0)
fmt.Fprintln(s.executor.created.Stdout, "hello world")
return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, ".*hello world\n")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, `2\S+Z hello world\n`)
c.Check(s.testDispatcherKeepClient.StorageClasses, DeepEquals, []string{"default"})
c.Check(s.testContainerKeepClient.StorageClasses, DeepEquals, []string{"default"})
}
@@ -905,6 +837,48 @@ func (s *TestSuite) testSpotInterruptionNotice(c *C, failureRate float64) {
spotInterruptionCheckInterval = time.Second / 8
ec2MetadataBaseURL = stub.URL
+ checkedLogs := false
+ checkLogs := func() {
+ checkedLogs = true
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*Checking for spot instance interruptions every 125ms using instance metadata at http://.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*Spot instance interruption check was inconclusive: 503 Service Unavailable -- will retry in 125ms.*`)
+ if failureRate == 1 {
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*Spot instance interruption check was inconclusive: 503 Service Unavailable -- now giving up after too many consecutive errors.*`)
+ return
+ }
+ text := `Cloud provider scheduled instance stop at ` + stoptime.Load().(time.Time).Format(time.RFC3339)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*`+text+`.*`)
+ c.Check(s.api.CalledWith("container.runtime_status.warning", "preemption notice"), NotNil)
+ c.Check(s.api.CalledWith("container.runtime_status.warningDetail", text), NotNil)
+ c.Check(s.api.CalledWith("container.runtime_status.preemptionNotice", text), NotNil)
+
+ // Check that the log collection was saved, and the
+ // container record updated with the new PDH,
+ // immediately after the preemption notice was
+ // received -- i.e., while the container is still
+ // running.
+ lastpdh := ""
+ saved := make(map[string]string) // pdh => manifest_text
+ for _, call := range s.api.Content {
+ if ctr, ok := call["container"].(arvadosclient.Dict); ok {
+ if pdh, ok := ctr["log"].(string); ok {
+ lastpdh = pdh
+ }
+ }
+ if coll, ok := call["collection"].(arvadosclient.Dict); ok {
+ mt, _ := coll["manifest_text"].(string)
+ if strings.Contains(mt, ":crunch-run.txt") {
+ saved[arvados.PortableDataHash(mt)] = mt
+ }
+ }
+ }
+ logfs, err := (&arvados.Collection{ManifestText: saved[lastpdh]}).FileSystem(s.runner.dispatcherClient, s.runner.DispatcherKeepClient)
+ c.Assert(err, IsNil)
+ log, err := fs.ReadFile(arvados.FS(logfs), "crunch-run.txt")
+ c.Check(err, IsNil)
+ c.Check(string(log), Matches, `(?ms).*\Q`+text+`\E.*`)
+ }
+
go s.runner.checkSpotInterruptionNotices()
s.fullRunHelper(c, `{
"command": ["sleep", "3"],
@@ -921,19 +895,10 @@ func (s *TestSuite) testSpotInterruptionNotice(c *C, failureRate float64) {
stoptime.Store(time.Now().Add(time.Minute).UTC())
token = "different-fake-ec2-metadata-token"
time.Sleep(time.Second)
+ checkLogs()
return 0
})
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Checking for spot interruptions every 125ms using instance metadata at http://.*`)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Error checking spot interruptions: 503 Service Unavailable.*`)
- if failureRate == 1 {
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Giving up on checking spot interruptions after too many consecutive failures.*`)
- } else {
- text := `Cloud provider scheduled instance stop at ` + stoptime.Load().(time.Time).Format(time.RFC3339)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*`+text+`.*`)
- c.Check(s.api.CalledWith("container.runtime_status.warning", "preemption notice"), NotNil)
- c.Check(s.api.CalledWith("container.runtime_status.warningDetail", text), NotNil)
- c.Check(s.api.CalledWith("container.runtime_status.preemptionNotice", text), NotNil)
- }
+ c.Check(checkedLogs, Equals, true)
}
func (s *TestSuite) TestRunTimeExceeded(c *C) {
@@ -954,7 +919,7 @@ func (s *TestSuite) TestRunTimeExceeded(c *C) {
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*maximum run time exceeded.*")
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, "(?ms).*maximum run time exceeded.*")
}
func (s *TestSuite) TestContainerWaitFails(c *C) {
@@ -972,7 +937,7 @@ func (s *TestSuite) TestContainerWaitFails(c *C) {
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Container is not running.*")
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, "(?ms).*Container is not running.*")
}
func (s *TestSuite) TestCrunchstat(c *C) {
@@ -995,17 +960,16 @@ func (s *TestSuite) TestCrunchstat(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Assert(s.api.Logs["crunchstat"], NotNil)
- c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*mem \d+ swap \d+ pgmajfault \d+ rss.*`)
+ c.Check(logFileContent(c, s.runner, "crunchstat.txt"), Matches, `(?ms).*`+reTimestamp+` mem \d+ swap \d+ pgmajfault \d+ rss.*`)
+ c.Check(logFileContent(c, s.runner, "hoststat.txt"), Matches, `(?ms).*`+reTimestamp+` mem \d+ swap \d+ pgmajfault \d+ rss.*`)
// Check that we called (*crunchstat.Reporter)Stop().
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Maximum crunch-run memory rss usage was \d+ bytes\n.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*`+reTimestamp+` Maximum crunch-run memory rss usage was \d+ bytes\n.*`)
}
func (s *TestSuite) TestNodeInfoLog(c *C) {
- os.Setenv("SLURMD_NODENAME", "compute2")
s.fullRunHelper(c, `{
- "command": ["sleep", "1"],
+ "command": ["true"],
"container_image": "`+arvadostest.DockerImage112PDH+`",
"cwd": ".",
"environment": {},
@@ -1015,26 +979,22 @@ func (s *TestSuite) TestNodeInfoLog(c *C) {
"runtime_constraints": {},
"state": "Locked"
}`, nil, func() int {
- time.Sleep(time.Second)
return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Assert(s.api.Logs["node"], NotNil)
- json := s.api.Logs["node"].String()
- c.Check(json, Matches, `(?ms).*"uuid": *"zzzzz-7ekkf-2z3mc76g2q73aio".*`)
- c.Check(json, Matches, `(?ms).*"total_cpu_cores": *16.*`)
- c.Check(json, Not(Matches), `(?ms).*"info":.*`)
+ json := logFileContent(c, s.runner, "node.json")
+ c.Check(json, Matches, `(?ms).*"ProviderType": *"a1\.2xlarge".*`)
+ c.Check(json, Matches, `(?ms).*"Price": *1\.2.*`)
- c.Assert(s.api.Logs["node-info"], NotNil)
- json = s.api.Logs["node-info"].String()
- c.Check(json, Matches, `(?ms).*Host Information.*`)
- c.Check(json, Matches, `(?ms).*CPU Information.*`)
- c.Check(json, Matches, `(?ms).*Memory Information.*`)
- c.Check(json, Matches, `(?ms).*Disk Space.*`)
- c.Check(json, Matches, `(?ms).*Disk INodes.*`)
+ nodeinfo := logFileContent(c, s.runner, "node-info.txt")
+ c.Check(nodeinfo, Matches, `(?ms).*Host Information.*`)
+ c.Check(nodeinfo, Matches, `(?ms).*CPU Information.*`)
+ c.Check(nodeinfo, Matches, `(?ms).*Memory Information.*`)
+ c.Check(nodeinfo, Matches, `(?ms).*Disk Space.*`)
+ c.Check(nodeinfo, Matches, `(?ms).*Disk INodes.*`)
}
func (s *TestSuite) TestLogVersionAndRuntime(c *C) {
@@ -1052,11 +1012,10 @@ func (s *TestSuite) TestLogVersionAndRuntime(c *C) {
return 0
})
- c.Assert(s.api.Logs["crunch-run"], NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*crunch-run \S+ \(go\S+\) start.*`)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*crunch-run process has uid=\d+\(.+\) gid=\d+\(.+\) groups=\d+\(.+\)(,\d+\(.+\))*\n.*`)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Executing container: zzzzz-zzzzz-zzzzzzzzzzzzzzz.*`)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Using container runtime: stub.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*crunch-run \S+ \(go\S+\) start.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*crunch-run process has uid=\d+\(.+\) gid=\d+\(.+\) groups=\d+\(.+\)(,\d+\(.+\))*\n.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*Executing container: zzzzz-zzzzz-zzzzzzzzzzzzzzz.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*Using container runtime: stub.*`)
}
func (s *TestSuite) testLogRSSThresholds(c *C, ram int64, expected []int, notExpected int) {
@@ -1072,8 +1031,9 @@ func (s *TestSuite) testLogRSSThresholds(c *C, ram int64, expected []int, notExp
"runtime_constraints": {"ram": `+fmt.Sprintf("%d", ram)+`},
"state": "Locked"
}`, nil, func() int { return 0 })
- c.Logf("=== crunchstat logs\n%s\n", s.api.Logs["crunchstat"].String())
- logs := s.api.Logs["crunch-run"].String()
+ logs := logFileContent(c, s.runner, "crunch-run.txt")
+ c.Log("=== crunchstat logs")
+ c.Log(logs)
pattern := logLineStart + `Container using over %d%% of memory \(rss %d/%d bytes\)`
var threshold int
for _, threshold = range expected {
@@ -1111,7 +1071,7 @@ func (s *TestSuite) TestLogMaximaAfterRun(c *C) {
"runtime_constraints": {"ram": `+fmt.Sprintf("%d", s.debian12MemoryCurrent*10)+`},
"state": "Locked"
}`, nil, func() int { return 0 })
- logs := s.api.Logs["crunch-run"].String()
+ logs := logFileContent(c, s.runner, "crunch-run.txt")
for _, expected := range []string{
`Maximum disk usage was \d+%, \d+/\d+ bytes`,
fmt.Sprintf(`Maximum container memory swap usage was %d bytes`, s.debian12SwapCurrent),
@@ -1179,8 +1139,7 @@ func (s *TestSuite) TestContainerRecordLog(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Assert(s.api.Logs["container"], NotNil)
- c.Check(s.api.Logs["container"].String(), Matches, `(?ms).*container_image.*`)
+ c.Check(logFileContent(c, s.runner, "container.json"), Matches, `(?ms).*container_image.*`)
}
func (s *TestSuite) TestFullRunStderr(c *C) {
@@ -1205,8 +1164,8 @@ func (s *TestSuite) TestFullRunStderr(c *C) {
c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, ".*hello\n")
- c.Check(s.api.Logs["stderr"].String(), Matches, ".*world\n")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, ".*hello\n")
+ c.Check(logFileContent(c, s.runner, "stderr.txt"), Matches, ".*world\n")
}
func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
@@ -1227,8 +1186,7 @@ func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Log(s.api.Logs["stdout"])
- c.Check(s.api.Logs["stdout"].String(), Matches, `.*workdir=""\n`)
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, `.*workdir=""`)
}
func (s *TestSuite) TestFullRunSetCwd(c *C) {
@@ -1249,7 +1207,7 @@ func (s *TestSuite) TestFullRunSetCwd(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, ".*/bin\n")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, ".*/bin\n")
}
func (s *TestSuite) TestFullRunSetOutputStorageClasses(c *C) {
@@ -1271,7 +1229,7 @@ func (s *TestSuite) TestFullRunSetOutputStorageClasses(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, ".*/bin\n")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, ".*/bin\n")
c.Check(s.testDispatcherKeepClient.StorageClasses, DeepEquals, []string{"foo", "bar"})
c.Check(s.testContainerKeepClient.StorageClasses, DeepEquals, []string{"foo", "bar"})
}
@@ -1285,33 +1243,15 @@ func (s *TestSuite) TestEnableCUDADeviceCount(c *C) {
"mounts": {"/tmp": {"kind": "tmp"} },
"output_path": "/tmp",
"priority": 1,
- "runtime_constraints": {"cuda": {"device_count": 2}},
- "state": "Locked",
- "output_storage_classes": ["foo", "bar"]
-}`, nil, func() int {
- fmt.Fprintln(s.executor.created.Stdout, "ok")
- return 0
- })
- c.Check(s.executor.created.CUDADeviceCount, Equals, 2)
-}
-
-func (s *TestSuite) TestEnableCUDAHardwareCapability(c *C) {
- s.fullRunHelper(c, `{
- "command": ["pwd"],
- "container_image": "`+arvadostest.DockerImage112PDH+`",
- "cwd": "/bin",
- "environment": {},
- "mounts": {"/tmp": {"kind": "tmp"} },
- "output_path": "/tmp",
- "priority": 1,
- "runtime_constraints": {"cuda": {"hardware_capability": "foo"}},
+ "runtime_constraints": {"gpu": {"device_count": 2, "stack": "cuda", "hardware_target": ["9.0"], "driver_version": "11.0", "vram": 8000000000}},
"state": "Locked",
"output_storage_classes": ["foo", "bar"]
}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, "ok")
return 0
})
- c.Check(s.executor.created.CUDADeviceCount, Equals, 0)
+ c.Check(s.executor.created.GPUDeviceCount, Equals, 2)
+ c.Check(s.executor.created.GPUStack, Equals, "cuda")
}
func (s *TestSuite) TestStopOnSignal(c *C) {
@@ -1368,14 +1308,11 @@ func (s *TestSuite) testStopContainer(c *C) {
case err = <-done:
c.Check(err, IsNil)
}
- for k, v := range s.api.Logs {
- c.Log(k)
- c.Log(v.String(), "\n")
- }
+ dumpAllLogFiles(c, s.runner)
c.Check(s.api.CalledWith("container.log", nil), NotNil)
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, "(?ms).*foo\n$")
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, "(?ms).*foo\n$")
}
func (s *TestSuite) TestFullRunSetEnv(c *C) {
@@ -1396,7 +1333,7 @@ func (s *TestSuite) TestFullRunSetEnv(c *C) {
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["stdout"].String(), Matches, `.*map\[FROBIZ:bilbo\]\n`)
+ c.Check(logFileContent(c, s.runner, "stdout.txt"), Matches, `.*map\[FROBIZ:bilbo\]`)
}
type ArvMountCmdLine struct {
@@ -1740,54 +1677,6 @@ func (s *TestSuite) TestSetupMounts(c *C) {
cr.CleanupDirs()
checkEmpty()
}
-
- // git_tree mounts
- {
- i = 0
- cr.ArvMountPoint = ""
- git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
- cr.token = arvadostest.ActiveToken
- cr.Container.Mounts = make(map[string]arvados.Mount)
- cr.Container.Mounts = map[string]arvados.Mount{
- "/tip": {
- Kind: "git_tree",
- UUID: arvadostest.Repository2UUID,
- Commit: "fd3531f42995344f36c30b79f55f27b502f3d344",
- Path: "/",
- },
- "/non-tip": {
- Kind: "git_tree",
- UUID: arvadostest.Repository2UUID,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- Path: "/",
- },
- }
- cr.Container.OutputPath = "/tmp"
-
- bindmounts, err := cr.SetupMounts()
- c.Check(err, IsNil)
-
- for path, mount := range bindmounts {
- c.Check(mount.ReadOnly, Equals, !cr.Container.Mounts[path].Writable, Commentf("%s %#v", path, mount))
- }
-
- data, err := ioutil.ReadFile(bindmounts["/tip"].HostPath + "/dir1/dir2/file with mode 0644")
- c.Check(err, IsNil)
- c.Check(string(data), Equals, "\000\001\002\003")
- _, err = ioutil.ReadFile(bindmounts["/tip"].HostPath + "/file only on testbranch")
- c.Check(err, FitsTypeOf, &os.PathError{})
- c.Check(os.IsNotExist(err), Equals, true)
-
- data, err = ioutil.ReadFile(bindmounts["/non-tip"].HostPath + "/dir1/dir2/file with mode 0644")
- c.Check(err, IsNil)
- c.Check(string(data), Equals, "\000\001\002\003")
- data, err = ioutil.ReadFile(bindmounts["/non-tip"].HostPath + "/file only on testbranch")
- c.Check(err, IsNil)
- c.Check(string(data), Equals, "testfile\n")
-
- cr.CleanupDirs()
- checkEmpty()
- }
}
func (s *TestSuite) TestStdout(c *C) {
@@ -1869,7 +1758,7 @@ func (s *TestSuite) TestFullRunWithAPI(c *C) {
})
c.Check(s.api.CalledWith("container.exit_code", 3), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*status code 3\n.*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*`+reTimestamp+` Container exited with status code 3\n.*`)
}
func (s *TestSuite) TestFullRunSetOutput(c *C) {
@@ -1924,7 +1813,8 @@ func (s *TestSuite) TestArvMountRuntimeStatusWarning(c *C) {
c.Check(s.api.CalledWith("container.runtime_status.warning", "arv-mount: Keep write error"), NotNil)
c.Check(s.api.CalledWith("container.runtime_status.warningDetail", "Test: Keep write error: I am a teapot"), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Container exited with status code 137 \(signal 9, SIGKILL\).*`)
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, `(?ms).*`+reTimestamp+` Container exited with status code 137 \(signal 9, SIGKILL\).*`)
+ c.Check(logFileContent(c, s.runner, "arv-mount.txt"), Matches, reTimestamp+` Test: Keep write error: I am a teapot\n`)
}
func (s *TestSuite) TestStdoutWithExcludeFromOutputMountPointUnderOutputDir(c *C) {
@@ -2234,13 +2124,14 @@ func (s *TestSuite) TestFullBrokenDocker(c *C) {
"state": "Locked"
}`, nil, func() int { return 0 })
c.Check(s.api.CalledWith("container.state", nextState), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
+ logs := logFileContent(c, s.runner, "crunch-run.txt")
+ c.Check(logs, Matches, "(?ms).*unable to run containers.*")
if s.runner.brokenNodeHook != "" {
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Running broken node hook.*")
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*killme.*")
- c.Check(s.api.Logs["crunch-run"].String(), Not(Matches), "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*")
+ c.Check(logs, Matches, "(?ms).*Running broken node hook.*")
+ c.Check(logs, Matches, "(?ms).*killme.*")
+ c.Check(logs, Not(Matches), "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*")
} else {
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*")
+ c.Check(logs, Matches, "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*")
}
}
}
@@ -2265,7 +2156,7 @@ func (s *TestSuite) TestBadCommand(c *C) {
"state": "Locked"
}`, nil, func() int { return 0 })
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
- c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+ c.Check(logFileContent(c, s.runner, "crunch-run.txt"), Matches, "(?ms).*Possible causes:.*is missing.*")
}
}
@@ -2375,7 +2266,7 @@ func (s *TestSuite) TestCalculateCost(c *C) {
cr := s.runner
cr.costStartTime = now.Add(-time.Hour)
var logbuf bytes.Buffer
- cr.CrunchLog.Immediate = log.New(&logbuf, "", 0)
+ cr.CrunchLog = newLogWriter(&logbuf)
// if there's no InstanceType env var, cost is calculated as 0
os.Unsetenv("InstanceType")
@@ -2386,7 +2277,6 @@ func (s *TestSuite) TestCalculateCost(c *C) {
// hasn't found any data), cost is calculated based on
// InstanceType env var
os.Setenv("InstanceType", `{"Price":1.2}`)
- defer os.Unsetenv("InstanceType")
cost = cr.calculateCost(now)
c.Check(cost, Equals, 1.2)
@@ -2432,7 +2322,6 @@ func (s *TestSuite) TestSIGUSR2CostUpdate(c *C) {
c.Assert(err, IsNil)
os.Setenv("InstanceType", `{"Price":2.2}`)
- defer os.Unsetenv("InstanceType")
defer func(s string) { lockdir = s }(lockdir)
lockdir = c.MkDir()
@@ -2503,3 +2392,20 @@ type FakeProcess struct {
func (fp FakeProcess) CmdlineSlice() ([]string, error) {
return fp.cmdLine, nil
}
+
+func logFileContent(c *C, cr *ContainerRunner, fnm string) string {
+ buf, err := fs.ReadFile(arvados.FS(cr.LogCollection), fnm)
+ c.Assert(err, IsNil)
+ return string(buf)
+}
+
+func dumpAllLogFiles(c *C, cr *ContainerRunner) {
+ d, err := cr.LogCollection.OpenFile("/", os.O_RDONLY, 0)
+ c.Assert(err, IsNil)
+ fis, err := d.Readdir(-1)
+ c.Assert(err, IsNil)
+ for _, fi := range fis {
+ c.Logf("=== %s", fi.Name())
+ c.Log(logFileContent(c, cr, fi.Name()))
+ }
+}
diff --git a/lib/crunchrun/cuda.go b/lib/crunchrun/cuda.go
index c693dbcb96..f91a5c62cd 100644
--- a/lib/crunchrun/cuda.go
+++ b/lib/crunchrun/cuda.go
@@ -5,13 +5,15 @@
package crunchrun
import (
+ "fmt"
+ "io"
"os/exec"
)
// nvidiaModprobe makes sure all the nvidia kernel modules and devices
// are set up. If we don't have all the modules/devices set up we get
// "CUDA_ERROR_UNKNOWN".
-func nvidiaModprobe(writer *ThrottledLogger) {
+func nvidiaModprobe(writer io.Writer) {
// The underlying problem is that when normally running
// directly on the host, the CUDA SDK will automatically
// detect and set up the devices on demand. However, when
@@ -42,7 +44,7 @@ func nvidiaModprobe(writer *ThrottledLogger) {
nvidiaSmi.Stderr = writer
err := nvidiaSmi.Run()
if err != nil {
- writer.Printf("Warning %v: %v", nvidiaSmi.Args, err)
+ fmt.Fprintf(writer, "Warning %v: %v\n", nvidiaSmi.Args, err)
}
// Load the kernel modules & devices associated with
@@ -63,7 +65,7 @@ func nvidiaModprobe(writer *ThrottledLogger) {
nvmodprobe.Stderr = writer
err = nvmodprobe.Run()
if err != nil {
- writer.Printf("Warning %v: %v", nvmodprobe.Args, err)
+ fmt.Fprintf(writer, "Warning %v: %v\n", nvmodprobe.Args, err)
}
}
}
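// Editor's note: a hedged sketch, not part of the diff. Widening the
// parameter from *ThrottledLogger to io.Writer means any writer can
// capture the warnings, e.g. a plain buffer in a test:
//
//	var buf bytes.Buffer
//	nvidiaModprobe(&buf) // runs nvidia-smi and nvidia-modprobe, logging warnings to buf
//	fmt.Print(buf.String())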
diff --git a/lib/crunchrun/docker.go b/lib/crunchrun/docker.go
index 4f449133f3..b094018e23 100644
--- a/lib/crunchrun/docker.go
+++ b/lib/crunchrun/docker.go
@@ -5,19 +5,22 @@ package crunchrun
import (
"context"
+ "encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
+ "slices"
+ "strconv"
"strings"
"sync/atomic"
+ "syscall"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerclient "github.com/docker/docker/client"
+ "github.com/docker/docker/pkg/jsonmessage"
)
// Docker daemon won't let you set a limit less than ~10 MiB
@@ -92,11 +95,19 @@ func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, cont
defer f.Close()
resp, err := e.dockerclient.ImageLoad(context.TODO(), f, true)
if err != nil {
- return fmt.Errorf("While loading container image into Docker: %v", err)
+ return fmt.Errorf("ImageLoad: %w", err)
}
defer resp.Body.Close()
- buf, _ := ioutil.ReadAll(resp.Body)
- e.logf("loaded image: response %s", buf)
+ var message jsonmessage.JSONMessage
+ err = json.NewDecoder(resp.Body).Decode(&message)
+ if err != nil {
+ return fmt.Errorf("could not parse Docker response: %w", err)
+ }
+ if message.Error != nil {
+ return fmt.Errorf("ImageLoad: %w", message.Error)
+ }
+ // message.Stream is typically "Loaded image: hello-world:latest\n"
+ e.logf("%s", strings.TrimSuffix(message.Stream, "\n"))
return nil
}
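// Editor's note: a hedged sketch, not part of the diff. Docker's image-load
// response body is a stream of jsonmessage.JSONMessage values; the code
// above decodes only the first message, which suffices for a single-image
// tarball. A variant that drains the whole stream might look like:
func drainImageLoadResponse(body io.Reader) error {
	dec := json.NewDecoder(body)
	for {
		var m jsonmessage.JSONMessage
		err := dec.Decode(&m)
		if err == io.EOF {
			return nil // stream exhausted without an error message
		} else if err != nil {
			return fmt.Errorf("could not parse Docker response: %w", err)
		}
		if m.Error != nil {
			return fmt.Errorf("ImageLoad: %w", m.Error)
		}
	}
}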
@@ -135,7 +146,7 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
KernelMemory: spec.RAM, // kernel portion
},
}
- if spec.CUDADeviceCount != 0 {
+ if spec.GPUStack == "cuda" && spec.GPUDeviceCount > 0 {
var deviceIds []string
if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
// If a resource manager such as slurm or LSF told
@@ -143,7 +154,7 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
deviceIds = strings.Split(cudaVisibleDevices, ",")
}
- deviceCount := spec.CUDADeviceCount
+ deviceCount := spec.GPUDeviceCount
if len(deviceIds) > 0 {
// Docker won't accept both non-empty
// DeviceIDs and a non-zero Count
@@ -172,6 +183,70 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
Capabilities: [][]string{[]string{"gpu", "nvidia", "compute", "utility"}},
})
}
+ if spec.GPUStack == "rocm" && spec.GPUDeviceCount > 0 {
+ // There is no container toolkit or builtin Docker
+ // support for ROCm, so we provide the devices to
+ // the container ourselves.
+
+ // Fortunately, the minimal shell equivalent seems to be:
+ // rendergroup=$(getent group render | cut -d: -f3)
+ // videogroup=$(getent group video | cut -d: -f3)
+ // docker run -it --device=/dev/kfd --device=/dev/dri/renderD128 --user $(id -u) --group-add $videogroup --group-add $rendergroup "$@"
+
+ hostCfg.Devices = append(hostCfg.Devices, dockercontainer.DeviceMapping{
+ PathInContainer: "/dev/kfd",
+ PathOnHost: "/dev/kfd",
+ CgroupPermissions: "rwm",
+ })
+ // Guard against Stat failure, e.g. when /dev/kfd is absent.
+ if info, err := os.Stat("/dev/kfd"); err == nil {
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ // Make sure the container has access
+ // to the group id that allows it to
+ // access the device.
+ hostCfg.GroupAdd = append(hostCfg.GroupAdd, fmt.Sprintf("%v", stat.Gid))
+ }
+ }
+
+ var deviceIndexes []int
+ if amdVisibleDevices := os.Getenv("AMD_VISIBLE_DEVICES"); amdVisibleDevices != "" {
+ // If a resource manager/dispatcher told us to
+ // select specific devices, we need to
+ // propagate that choice.
+ for _, dev := range strings.Split(amdVisibleDevices, ",") {
+ intDev, err := strconv.Atoi(dev)
+ if err != nil {
+ continue
+ }
+ deviceIndexes = append(deviceIndexes, intDev)
+ }
+ } else {
+ // Try every device; we check below to see
+ // which ones actually exist.
+ for i := 0; i < 128; i++ {
+ deviceIndexes = append(deviceIndexes, i)
+ }
+ }
+ for _, intDev := range deviceIndexes {
+ devPath := fmt.Sprintf("/dev/dri/renderD%v", 128+intDev)
+ info, err := os.Stat(devPath)
+ if err != nil {
+ continue
+ }
+ hostCfg.Devices = append(hostCfg.Devices, dockercontainer.DeviceMapping{
+ PathInContainer: devPath,
+ PathOnHost: devPath,
+ CgroupPermissions: "rwm",
+ })
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ // Make sure the container has access
+ // to the group id that allows it to
+ // access the device.
+ if !slices.Contains(hostCfg.GroupAdd, fmt.Sprintf("%v", stat.Gid)) {
+ hostCfg.GroupAdd = append(hostCfg.GroupAdd, fmt.Sprintf("%v", stat.Gid))
+ }
+ }
+ }
+ }
+
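// Editor's note: a hedged sketch, not part of the diff, of the device-path
// convention used above: the kernel exposes AMD GPU index i as
// /dev/dri/renderD<128+i>, so AMD_VISIBLE_DEVICES="0,2" selects
// /dev/dri/renderD128 and /dev/dri/renderD130:
//
//	for _, dev := range strings.Split(os.Getenv("AMD_VISIBLE_DEVICES"), ",") {
//		if i, err := strconv.Atoi(dev); err == nil {
//			fmt.Printf("/dev/dri/renderD%d\n", 128+i)
//		}
//	}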
for path, mount := range spec.BindMounts {
bind := mount.HostPath + ":" + path
if mount.ReadOnly {
@@ -207,11 +282,11 @@ func (e *dockerExecutor) Pid() int {
}
func (e *dockerExecutor) Start() error {
- return e.dockerclient.ContainerStart(context.TODO(), e.containerID, dockertypes.ContainerStartOptions{})
+ return e.dockerclient.ContainerStart(context.TODO(), e.containerID, dockercontainer.StartOptions{})
}
func (e *dockerExecutor) Stop() error {
- err := e.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockertypes.ContainerRemoveOptions{Force: true})
+ err := e.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockercontainer.RemoveOptions{Force: true})
if err != nil && strings.Contains(err.Error(), "No such container: "+e.containerID) {
err = nil
}
@@ -277,7 +352,7 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
}
func (e *dockerExecutor) startIO(stdin io.Reader, stdout, stderr io.Writer) error {
- resp, err := e.dockerclient.ContainerAttach(context.TODO(), e.containerID, dockertypes.ContainerAttachOptions{
+ resp, err := e.dockerclient.ContainerAttach(context.TODO(), e.containerID, dockercontainer.AttachOptions{
Stream: true,
Stdin: stdin != nil,
Stdout: true,
@@ -340,7 +415,7 @@ func (e *dockerExecutor) handleStdoutStderr(stdout, stderr io.Writer, reader io.
}
func (e *dockerExecutor) Close() {
- e.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockertypes.ContainerRemoveOptions{Force: true})
+ e.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockercontainer.RemoveOptions{Force: true})
}
func (e *dockerExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
diff --git a/lib/crunchrun/docker_test.go b/lib/crunchrun/docker_test.go
index 53201b8d51..5aac6a5b9f 100644
--- a/lib/crunchrun/docker_test.go
+++ b/lib/crunchrun/docker_test.go
@@ -5,9 +5,11 @@
package crunchrun
import (
+ "io/ioutil"
"os/exec"
"time"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
dockercontainer "github.com/docker/docker/api/types/container"
. "gopkg.in/check.v1"
)
@@ -31,6 +33,14 @@ func (s *dockerSuite) SetUpSuite(c *C) {
}
}
+func (s *dockerSuite) TestLoadImageError(c *C) {
+ imagefile := c.MkDir() + "/bogus-image.tar"
+ err := ioutil.WriteFile(imagefile, []byte("this is not a docker image"), 0777)
+ c.Assert(err, IsNil)
+ err = s.executor.LoadImage("", imagefile, arvados.Container{}, "", nil)
+ c.Assert(err, ErrorMatches, "ImageLoad: unexpected EOF")
+}
+
var _ = Suite(&dockerStubSuite{})
// dockerStubSuite tests don't really connect to the docker service,
@@ -41,13 +51,14 @@ func (s *dockerStubSuite) TestDockerContainerConfig(c *C) {
e, err := newDockerExecutor("zzzzz-zzzzz-zzzzzzzzzzzzzzz", c.Logf, time.Second/2)
c.Assert(err, IsNil)
cfg, hostCfg := e.config(containerSpec{
- VCPUs: 4,
- RAM: 123123123,
- WorkingDir: "/WorkingDir",
- Env: map[string]string{"FOO": "bar"},
- BindMounts: map[string]bindmount{"/mnt": {HostPath: "/hostpath", ReadOnly: true}},
- EnableNetwork: false,
- CUDADeviceCount: 3,
+ VCPUs: 4,
+ RAM: 123123123,
+ WorkingDir: "/WorkingDir",
+ Env: map[string]string{"FOO": "bar"},
+ BindMounts: map[string]bindmount{"/mnt": {HostPath: "/hostpath", ReadOnly: true}},
+ EnableNetwork: false,
+ GPUStack: "cuda",
+ GPUDeviceCount: 3,
})
c.Check(cfg.WorkingDir, Equals, "/WorkingDir")
c.Check(cfg.Env, DeepEquals, []string{"FOO=bar"})
diff --git a/lib/crunchrun/executor.go b/lib/crunchrun/executor.go
index 308b05cdeb..259498d855 100644
--- a/lib/crunchrun/executor.go
+++ b/lib/crunchrun/executor.go
@@ -16,20 +16,21 @@ type bindmount struct {
}
type containerSpec struct {
- Image string
- VCPUs int
- RAM int64
- WorkingDir string
- Env map[string]string
- BindMounts map[string]bindmount
- Command []string
- EnableNetwork bool
- CUDADeviceCount int
- NetworkMode string // docker network mode, normally "default"
- CgroupParent string
- Stdin io.Reader
- Stdout io.Writer
- Stderr io.Writer
+ Image string
+ VCPUs int
+ RAM int64
+ WorkingDir string
+ Env map[string]string
+ BindMounts map[string]bindmount
+ Command []string
+ EnableNetwork bool
+ GPUStack string
+ GPUDeviceCount int
+ NetworkMode string // docker network mode, normally "default"
+ CgroupParent string
+ Stdin io.Reader
+ Stdout io.Writer
+ Stderr io.Writer
}
// containerExecutor is an interface to a container runtime
diff --git a/lib/crunchrun/executor_test.go b/lib/crunchrun/executor_test.go
index 3a91c78641..134ca560ce 100644
--- a/lib/crunchrun/executor_test.go
+++ b/lib/crunchrun/executor_test.go
@@ -12,7 +12,9 @@ import (
"io/ioutil"
"net"
"net/http"
+ "net/netip"
"os"
+ "regexp"
"strings"
"time"
@@ -172,7 +174,7 @@ func (s *executorSuite) TestExecStdoutStderr(c *C) {
c.Check(s.stderr.String(), Equals, "barwaz\n")
}
-func (s *executorSuite) TestIPAddress(c *C) {
+func (s *executorSuite) TestEnableNetwork_Listen(c *C) {
// Listen on an available port on the host.
ln, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", "0"))
c.Assert(err, IsNil)
@@ -191,24 +193,37 @@ func (s *executorSuite) TestIPAddress(c *C) {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
defer cancel()
- for ctx.Err() == nil {
+ for {
time.Sleep(time.Second / 10)
- _, err := s.executor.IPAddress()
- if err == nil {
+ if ctx.Err() != nil {
+ c.Error("timed out")
break
}
- }
- // When we connect to the port using s.executor.IPAddress(),
- // we should reach the nc process running inside the
- // container, not the net.Listen() running outside the
- // container, even though both listen on the same port.
- ip, err := s.executor.IPAddress()
- if c.Check(err, IsNil) && c.Check(ip, Not(Equals), "") {
- req, err := http.NewRequest("BREW", "http://"+net.JoinHostPort(ip, port), nil)
+
+ ip, err := s.executor.IPAddress()
+ if err != nil {
+ c.Logf("s.executor.IPAddress: %s", err)
+ continue
+ }
+ c.Assert(ip, Not(Equals), "")
+
+ // When we connect to the port using
+ // s.executor.IPAddress(), we should reach the nc
+ // process running inside the container, not the
+ // net.Listen() running outside the container, even
+ // though both listen on the same port.
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(time.Second))
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, "BREW", "http://"+net.JoinHostPort(ip, port), nil)
c.Assert(err, IsNil)
resp, err := http.DefaultClient.Do(req)
- c.Assert(err, IsNil)
+ if err != nil {
+ c.Logf("%s (retrying...)", err)
+ continue
+ }
c.Check(resp.StatusCode, Equals, http.StatusTeapot)
+ c.Logf("%s %q: %s", req.Method, req.URL, resp.Status)
+ break
}
s.executor.Stop()
@@ -220,6 +235,29 @@ func (s *executorSuite) TestIPAddress(c *C) {
c.Logf("stderr:\n%s\n\n", s.stderr.String())
}
+func (s *executorSuite) TestEnableNetwork_IPAddress(c *C) {
+ s.spec.Command = []string{"ip", "ad"}
+ s.spec.EnableNetwork = true
+ c.Assert(s.executor.Create(s.spec), IsNil)
+ c.Assert(s.executor.Start(), IsNil)
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
+ defer cancel()
+ code, _ := s.executor.Wait(ctx)
+ c.Check(code, Equals, 0)
+ c.Logf("stdout:\n%s\n\n", s.stdout.String())
+ c.Logf("stderr:\n%s\n\n", s.stderr.String())
+
+ found := false
+ for _, m := range regexp.MustCompile(` inet (.+?)/`).FindAllStringSubmatch(s.stdout.String(), -1) {
+ if addr, err := netip.ParseAddr(m[1]); err == nil && !addr.IsLoopback() {
+ found = true
+ c.Logf("found non-loopback IP address %q", m[1])
+ break
+ }
+ }
+ c.Check(found, Equals, true, Commentf("container does not appear to have a non-loopback IP address"))
+}
+
func (s *executorSuite) TestInject(c *C) {
hostdir := c.MkDir()
c.Assert(os.WriteFile(hostdir+"/testfile", []byte("first tube"), 0777), IsNil)
diff --git a/lib/crunchrun/git_mount.go b/lib/crunchrun/git_mount.go
deleted file mode 100644
index 561ea18de4..0000000000
--- a/lib/crunchrun/git_mount.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package crunchrun
-
-import (
- "fmt"
- "net/url"
- "os"
- "path/filepath"
- "regexp"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "gopkg.in/src-d/go-billy.v4/osfs"
- git "gopkg.in/src-d/go-git.v4"
- git_config "gopkg.in/src-d/go-git.v4/config"
- git_plumbing "gopkg.in/src-d/go-git.v4/plumbing"
- git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-)
-
-type gitMount arvados.Mount
-
-var (
- sha1re = regexp.MustCompile(`^[0-9a-f]{40}$`)
- repoUUIDre = regexp.MustCompile(`^[0-9a-z]{5}-s0uqq-[0-9a-z]{15}$`)
-)
-
-func (gm gitMount) validate() error {
- if gm.Path != "" && gm.Path != "/" {
- return fmt.Errorf("cannot mount git_tree with path %q -- only \"/\" is supported", gm.Path)
- }
- if !sha1re.MatchString(gm.Commit) {
- return fmt.Errorf("cannot mount git_tree with commit %q -- must be a 40-char SHA1", gm.Commit)
- }
- if gm.RepositoryName != "" || gm.GitURL != "" {
- return fmt.Errorf("cannot mount git_tree -- repository_name and git_url must be empty")
- }
- if !repoUUIDre.MatchString(gm.UUID) {
- return fmt.Errorf("cannot mount git_tree with uuid %q -- must be a repository UUID", gm.UUID)
- }
- if gm.Writable {
- return fmt.Errorf("writable git_tree mount is not supported")
- }
- return nil
-}
-
-// ExtractTree extracts the specified tree into dir, which is an
-// existing empty local directory.
-func (gm gitMount) extractTree(ac *arvados.Client, dir string, token string) error {
- err := gm.validate()
- if err != nil {
- return err
- }
- dd, err := ac.DiscoveryDocument()
- if err != nil {
- return fmt.Errorf("error getting discovery document: %w", err)
- }
- u, err := url.Parse(dd.GitURL)
- if err != nil {
- return fmt.Errorf("parse gitUrl %q: %s", dd.GitURL, err)
- }
- u, err = u.Parse("/" + gm.UUID + ".git")
- if err != nil {
- return fmt.Errorf("build git url from %q, %q: %s", dd.GitURL, gm.UUID, err)
- }
- store := memory.NewStorage()
- repo, err := git.Init(store, osfs.New(dir))
- if err != nil {
- return fmt.Errorf("init repo: %s", err)
- }
- _, err = repo.CreateRemote(&git_config.RemoteConfig{
- Name: "origin",
- URLs: []string{u.String()},
- })
- if err != nil {
- return fmt.Errorf("create remote %q: %s", u.String(), err)
- }
- err = repo.Fetch(&git.FetchOptions{
- RemoteName: "origin",
- Auth: &git_http.BasicAuth{
- Username: "none",
- Password: token,
- },
- })
- if err != nil {
- return fmt.Errorf("git fetch %q: %s", u.String(), err)
- }
- wt, err := repo.Worktree()
- if err != nil {
- return fmt.Errorf("worktree failed: %s", err)
- }
- err = wt.Checkout(&git.CheckoutOptions{
- Hash: git_plumbing.NewHash(gm.Commit),
- })
- if err != nil {
- return fmt.Errorf("checkout failed: %s", err)
- }
- err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- // copy user rx bits to group and other, in case
- // prevailing umask is more restrictive than 022
- mode := info.Mode()
- mode = mode | ((mode >> 3) & 050) | ((mode >> 6) & 5)
- return os.Chmod(path, mode)
- })
- if err != nil {
- return fmt.Errorf("chmod -R %q: %s", dir, err)
- }
- return nil
-}
diff --git a/lib/crunchrun/git_mount_test.go b/lib/crunchrun/git_mount_test.go
deleted file mode 100644
index ac98dcc480..0000000000
--- a/lib/crunchrun/git_mount_test.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package crunchrun
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- check "gopkg.in/check.v1"
- git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
- git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
-)
-
-type GitMountSuite struct {
- tmpdir string
-}
-
-var _ = check.Suite(&GitMountSuite{})
-
-func (s *GitMountSuite) SetUpTest(c *check.C) {
- var err error
- s.tmpdir, err = ioutil.TempDir("", "")
- c.Assert(err, check.IsNil)
- git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
-}
-
-func (s *GitMountSuite) TearDownTest(c *check.C) {
- err := os.RemoveAll(s.tmpdir)
- c.Check(err, check.IsNil)
-}
-
-// Commit fd3531f is crunch-run-tree-test
-func (s *GitMountSuite) TestExtractTree(c *check.C) {
- gm := gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- Commit: "fd3531f42995344f36c30b79f55f27b502f3d344",
- }
- ac := arvados.NewClientFromEnv()
- err := gm.extractTree(ac, s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.IsNil)
-
- fnm := filepath.Join(s.tmpdir, "dir1/dir2/file with mode 0644")
- data, err := ioutil.ReadFile(fnm)
- c.Check(err, check.IsNil)
- c.Check(data, check.DeepEquals, []byte{0, 1, 2, 3})
- fi, err := os.Stat(fnm)
- c.Check(err, check.IsNil)
- if err == nil {
- c.Check(fi.Mode(), check.Equals, os.FileMode(0644))
- }
-
- fnm = filepath.Join(s.tmpdir, "dir1/dir2/file with mode 0755")
- data, err = ioutil.ReadFile(fnm)
- c.Check(err, check.IsNil)
- c.Check(string(data), check.DeepEquals, "#!/bin/sh\nexec echo OK\n")
- fi, err = os.Stat(fnm)
- c.Check(err, check.IsNil)
- if err == nil {
- c.Check(fi.Mode(), check.Equals, os.FileMode(0755))
- }
-
- // Ensure there's no extra stuff like a ".git" dir
- s.checkTmpdirContents(c, []string{"dir1"})
-
- // Ensure tmpdir is world-readable and world-executable so the
- // UID inside the container can use it.
- fi, err = os.Stat(s.tmpdir)
- c.Check(err, check.IsNil)
- c.Check(fi.Mode()&os.ModePerm, check.Equals, os.FileMode(0755))
-}
-
-// Commit 5ebfab0 is not the tip of any branch or tag, but is
-// reachable in branch "crunch-run-non-tip-test".
-func (s *GitMountSuite) TestExtractNonTipCommit(c *check.C) {
- gm := gitMount{
- UUID: arvadostest.Repository2UUID,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- }
- err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.IsNil)
-
- fnm := filepath.Join(s.tmpdir, "file only on testbranch")
- data, err := ioutil.ReadFile(fnm)
- c.Check(err, check.IsNil)
- c.Check(string(data), check.DeepEquals, "testfile\n")
-}
-
-func (s *GitMountSuite) TestNonexistentRepository(c *check.C) {
- gm := gitMount{
- Path: "/",
- UUID: "zzzzz-s0uqq-nonexistentrepo",
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- }
- err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.NotNil)
- c.Check(err, check.ErrorMatches, ".*repository not found.*")
-
- s.checkTmpdirContents(c, []string{})
-}
-
-func (s *GitMountSuite) TestNonexistentCommit(c *check.C) {
- gm := gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- Commit: "bb66b6bb6b6bbb6b6b6b66b6b6b6b6b6b6b6b66b",
- }
- err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.NotNil)
- c.Check(err, check.ErrorMatches, ".*object not found.*")
-
- s.checkTmpdirContents(c, []string{})
-}
-
-func (s *GitMountSuite) TestGitUrlDiscoveryFails(c *check.C) {
- delete(discoveryMap, "gitUrl")
- gm := gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- }
- err := gm.extractTree(&arvados.Client{}, s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.ErrorMatches, ".*error getting discovery doc.*")
-}
-
-func (s *GitMountSuite) TestInvalid(c *check.C) {
- for _, trial := range []struct {
- gm gitMount
- matcher string
- }{
- {
- gm: gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- Commit: "abc123",
- },
- matcher: ".*SHA1.*",
- },
- {
- gm: gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- RepositoryName: arvadostest.Repository2Name,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- },
- matcher: ".*repository_name.*",
- },
- {
- gm: gitMount{
- Path: "/",
- GitURL: "https://localhost:0/" + arvadostest.Repository2Name + ".git",
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- },
- matcher: ".*git_url.*",
- },
- {
- gm: gitMount{
- Path: "/dir1/",
- UUID: arvadostest.Repository2UUID,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- },
- matcher: ".*path.*",
- },
- {
- gm: gitMount{
- Path: "/",
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- },
- matcher: ".*UUID.*",
- },
- {
- gm: gitMount{
- Path: "/",
- UUID: arvadostest.Repository2UUID,
- Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
- Writable: true,
- },
- matcher: ".*writable.*",
- },
- } {
- err := trial.gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.NotNil)
- s.checkTmpdirContents(c, []string{})
-
- err = trial.gm.validate()
- c.Check(err, check.ErrorMatches, trial.matcher)
- }
-}
-
-func (s *GitMountSuite) checkTmpdirContents(c *check.C, expect []string) {
- f, err := os.Open(s.tmpdir)
- c.Check(err, check.IsNil)
- names, err := f.Readdirnames(-1)
- c.Check(err, check.IsNil)
- c.Check(names, check.DeepEquals, expect)
-}
diff --git a/lib/crunchrun/integration_test.go b/lib/crunchrun/integration_test.go
index ef5cc567db..d1dc62d27b 100644
--- a/lib/crunchrun/integration_test.go
+++ b/lib/crunchrun/integration_test.go
@@ -148,6 +148,7 @@ func (s *integrationSuite) setup(c *C) {
"state": s.cr.State,
"command": s.cr.Command,
"output_path": s.cr.OutputPath,
+ "output_glob": s.cr.OutputGlob,
"container_image": s.cr.ContainerImage,
"mounts": s.cr.Mounts,
"runtime_constraints": s.cr.RuntimeConstraints,
@@ -168,7 +169,7 @@ func (s *integrationSuite) TestRunTrivialContainerWithDocker(c *C) {
func (s *integrationSuite) TestRunTrivialContainerWithSingularity(c *C) {
s.engine = "singularity"
s.testRunTrivialContainer(c)
- c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*Using container runtime: singularity.* version 3\.\d+.*`)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*Using container runtime: singularity.* version [34]\.\d+.*`)
}
func (s *integrationSuite) TestRunTrivialContainerWithLocalKeepstore(c *C) {
@@ -274,6 +275,19 @@ func (s *integrationSuite) TestRunTrivialContainerWithNoLocalKeepstore(c *C) {
c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*loaded config file \Q`+os.Getenv("ARVADOS_CONFIG")+`\E\n.*`)
}
+func (s *integrationSuite) TestRunTrivialContainerWithOutputGlob(c *C) {
+ s.cr.OutputGlob = []string{"js?n"}
+ s.testRunTrivialContainer(c)
+ fs, err := s.outputCollection.FileSystem(s.client, s.kc)
+ c.Assert(err, IsNil)
+ _, err = fs.Stat("json")
+ c.Check(err, IsNil)
+ _, err = fs.Stat("inputfile")
+ c.Check(err, Equals, os.ErrNotExist)
+ _, err = fs.Stat("emptydir")
+ c.Check(err, Equals, os.ErrNotExist)
+}
+
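// Editor's note: a hedged sketch, not part of the diff. output_glob entries
// are shell-style patterns, so "js?n" keeps the "json" file while
// "inputfile" and "emptydir" are excluded from the output collection.
// filepath.Match implements the same ?/*/[...] syntax (crunch-run's matcher
// may additionally support extensions such as "**"):
//
//	ok, _ := filepath.Match("js?n", "json")     // true
//	ok, _ = filepath.Match("js?n", "inputfile") // false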
func (s *integrationSuite) testRunTrivialContainer(c *C) {
if err := exec.Command("which", s.engine).Run(); err != nil {
c.Skip(fmt.Sprintf("%s: %s", s.engine, err))
@@ -298,8 +312,44 @@ func (s *integrationSuite) testRunTrivialContainer(c *C) {
c.Assert(err, IsNil)
c.Logf("Finished container request: %#v", s.cr)
+ s.loadLogAndOutputCollections(c)
+
+ if len(s.cr.OutputGlob) == 0 {
+ fs, err := s.outputCollection.FileSystem(s.client, s.kc)
+ c.Assert(err, IsNil)
+ if f, err := fs.Open("inputfile"); c.Check(err, IsNil) {
+ defer f.Close()
+ buf, err := ioutil.ReadAll(f)
+ c.Check(err, IsNil)
+ c.Check(string(buf), Equals, "inputdata")
+ }
+ if f, err := fs.Open("json"); c.Check(err, IsNil) {
+ defer f.Close()
+ buf, err := ioutil.ReadAll(f)
+ c.Check(err, IsNil)
+ c.Check(string(buf), Equals, `["foo",{"foo":"bar"},null]`)
+ }
+ if fi, err := fs.Stat("emptydir"); c.Check(err, IsNil) {
+ c.Check(fi.IsDir(), Equals, true)
+ }
+ if d, err := fs.Open("emptydir"); c.Check(err, IsNil) {
+ defer d.Close()
+ fis, err := d.Readdir(-1)
+ c.Assert(err, IsNil)
+ // crunch-run still saves a ".keep" file to preserve
+ // empty dirs even though that shouldn't be
+ // necessary. Ideally we would do:
+ // c.Check(fis, HasLen, 0)
+ for _, fi := range fis {
+ c.Check(fi.Name(), Equals, ".keep")
+ }
+ }
+ }
+}
+
+func (s *integrationSuite) loadLogAndOutputCollections(c *C) {
var log arvados.Collection
- err = s.client.RequestAndDecode(&log, "GET", "arvados/v1/collections/"+s.cr.LogUUID, nil, nil)
+ err := s.client.RequestAndDecode(&log, "GET", "arvados/v1/collections/"+s.cr.LogUUID, nil, nil)
c.Assert(err, IsNil)
fs, err := log.FileSystem(s.client, s.kc)
c.Assert(err, IsNil)
@@ -323,34 +373,61 @@ func (s *integrationSuite) testRunTrivialContainer(c *C) {
var output arvados.Collection
err = s.client.RequestAndDecode(&output, "GET", "arvados/v1/collections/"+s.cr.OutputUUID, nil, nil)
c.Assert(err, IsNil)
- fs, err = output.FileSystem(s.client, s.kc)
+ s.outputCollection = output
+}
+
+func (s *integrationSuite) TestRunContainer_CopyManyFiles(c *C) {
+ biginput := arvados.Collection{}
+ fs, err := biginput.FileSystem(s.client, s.kc)
c.Assert(err, IsNil)
- if f, err := fs.Open("inputfile"); c.Check(err, IsNil) {
- defer f.Close()
- buf, err := ioutil.ReadAll(f)
- c.Check(err, IsNil)
- c.Check(string(buf), Equals, "inputdata")
+ for i := 0; i < 1000; i++ {
+ f, err := fs.OpenFile(fmt.Sprintf("file%d", i), os.O_CREATE|os.O_WRONLY, 0755)
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte{'a'})
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
}
- if f, err := fs.Open("json"); c.Check(err, IsNil) {
- defer f.Close()
- buf, err := ioutil.ReadAll(f)
- c.Check(err, IsNil)
- c.Check(string(buf), Equals, `["foo",{"foo":"bar"},null]`)
+ biginput.ManifestText, err = fs.MarshalManifest(".")
+ c.Assert(err, IsNil)
+ err = s.client.RequestAndDecode(&biginput, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "ensure_unique_name": true,
+ "collection": map[string]interface{}{
+ "manifest_text": biginput.ManifestText,
+ },
+ })
+ c.Assert(err, IsNil)
+ s.cr.Mounts["/mnt/out/in"] = arvados.Mount{
+ Kind: "collection",
+ PortableDataHash: biginput.PortableDataHash,
}
- if fi, err := fs.Stat("emptydir"); c.Check(err, IsNil) {
- c.Check(fi.IsDir(), Equals, true)
+ s.testRunContainer_ShellCommand(c, "set -e; cd /mnt/out/in; ls | while read f; do cp $f ../out-$f; done; cd /mnt/out; ls -R | wc -l")
+ s.loadLogAndOutputCollections(c)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*\Qcopying "in" from `+biginput.PortableDataHash+`/.\E\n.*`)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*\Qcopying "out-file999" (1 bytes)\E\n.*`)
+ c.Check(s.logFiles["stdout.txt"], Matches, `.* 2004\n`)
+}
+
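// Editor's note, not part of the diff: the expected "2004" in stdout.txt
// appears to break down as follows for a piped "ls -R" in /mnt/out --
// a ".:" header, 1001 entries (the "in" dir plus out-file0..out-file999),
// a blank line, an "./in:" header, and 1000 input files:
// 1 + 1001 + 1 + 1 + 1000 = 2004.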
+func (s *integrationSuite) testRunContainer_ShellCommand(c *C, cmdline string) {
+ if err := exec.Command("which", s.engine).Run(); err != nil {
+ c.Skip(fmt.Sprintf("%s: %s", s.engine, err))
}
- if d, err := fs.Open("emptydir"); c.Check(err, IsNil) {
- defer d.Close()
- fis, err := d.Readdir(-1)
- c.Assert(err, IsNil)
- // crunch-run still saves a ".keep" file to preserve
- // empty dirs even though that shouldn't be
- // necessary. Ideally we would do:
- // c.Check(fis, HasLen, 0)
- for _, fi := range fis {
- c.Check(fi.Name(), Equals, ".keep")
- }
+ s.cr.Command = []string{"sh", "-c", cmdline}
+ s.setup(c)
+ args := []string{
+ "-runtime-engine=" + s.engine,
+ "-enable-memory-limit=false",
}
- s.outputCollection = output
+ if s.stdin.Len() > 0 {
+ args = append(args, "-stdin-config=true")
+ }
+ args = append(args, s.args...)
+ args = append(args, s.cr.ContainerUUID)
+ code := command{}.RunCommand("crunch-run", args, &s.stdin, io.MultiWriter(&s.stdout, os.Stderr), io.MultiWriter(&s.stderr, os.Stderr))
+ c.Logf("\n===== stdout =====\n%s", s.stdout.String())
+ c.Logf("\n===== stderr =====\n%s", s.stderr.String())
+ c.Check(code, Equals, 0)
+ err := s.client.RequestAndDecode(&s.cr, "GET", "arvados/v1/container_requests/"+s.cr.UUID, nil, nil)
+ c.Assert(err, IsNil)
+ c.Logf("Finished container request: %#v", s.cr)
}
diff --git a/lib/crunchrun/logging.go b/lib/crunchrun/logging.go
index 91a1b77cf4..35f9678a31 100644
--- a/lib/crunchrun/logging.go
+++ b/lib/crunchrun/logging.go
@@ -5,373 +5,82 @@
package crunchrun
import (
- "bufio"
"bytes"
"encoding/json"
- "fmt"
"io"
"log"
- "regexp"
- "strings"
- "sync"
"time"
-
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
)
-// Timestamper is the signature for a function that takes a timestamp and
-// return a formated string value.
-type Timestamper func(t time.Time) string
-
-// Logging plumbing:
-//
-// ThrottledLogger.Logger -> ThrottledLogger.Write ->
-// ThrottledLogger.buf -> ThrottledLogger.flusher ->
-// ArvLogWriter.Write -> CollectionFileWriter.Write | Api.Create
-//
-// For stdout/stderr ReadWriteLines additionally runs as a goroutine to pull
-// data from the stdout/stderr Reader and send to the Logger.
+// rfc3339NanoFixed is a fixed-width version of time.RFC3339Nano.
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-// ThrottledLogger accepts writes, prepends a timestamp to each line of the
-// write, and periodically flushes to a downstream writer. It supports the
-// "Logger" and "WriteCloser" interfaces.
-type ThrottledLogger struct {
- *log.Logger
- buf *bytes.Buffer
- sync.Mutex
- writer io.WriteCloser
- flush chan struct{}
- stopped chan struct{}
- stopping chan struct{}
- Timestamper
- Immediate *log.Logger
- pendingFlush bool
+// prefixer wraps an io.Writer, inserting a string returned by
+// prefixFunc at the beginning of each line.
+type prefixer struct {
+ writer io.Writer
+ prefixFunc func() string
+ unfinished bool // true if the most recent write ended with a non-newline char
}
-// RFC3339NanoFixed is a fixed-width version of time.RFC3339Nano.
-const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-
-// RFC3339Timestamp formats t as RFC3339NanoFixed.
-func RFC3339Timestamp(t time.Time) string {
- return t.Format(RFC3339NanoFixed)
-}
-
-// Write prepends a timestamp to each line of the input data and
-// appends to the internal buffer. Each line is also logged to
-// tl.Immediate, if tl.Immediate is not nil.
-func (tl *ThrottledLogger) Write(p []byte) (n int, err error) {
- tl.Mutex.Lock()
- defer tl.Mutex.Unlock()
-
- if tl.buf == nil {
- tl.buf = &bytes.Buffer{}
- }
-
- now := tl.Timestamper(time.Now().UTC())
- sc := bufio.NewScanner(bytes.NewBuffer(p))
- for err == nil && sc.Scan() {
- out := fmt.Sprintf("%s %s\n", now, sc.Bytes())
- if tl.Immediate != nil {
- tl.Immediate.Print(out[:len(out)-1])
- }
- _, err = io.WriteString(tl.buf, out)
- }
- if err == nil {
- err = sc.Err()
- if err == nil {
- n = len(p)
- }
- }
-
- if int64(tl.buf.Len()) >= crunchLogBytesPerEvent {
- // Non-blocking send. Try send a flush if it is ready to
- // accept it. Otherwise do nothing because a flush is already
- // pending.
- select {
- case tl.flush <- struct{}{}:
- default:
- }
+// newTimestamper wraps an io.Writer, inserting an rfc3339NanoFixed
+// timestamp at the beginning of each line.
+func newTimestamper(w io.Writer) *prefixer {
+ return &prefixer{
+ writer: w,
+ prefixFunc: func() string { return time.Now().UTC().Format(rfc3339NanoFixed + " ") },
}
-
- return
}
-// Periodically check the current buffer; if not empty, send it on the
-// channel to the goWriter goroutine.
-func (tl *ThrottledLogger) flusher() {
- ticker := time.NewTicker(time.Duration(crunchLogSecondsBetweenEvents))
- defer ticker.Stop()
- for stopping := false; !stopping; {
- select {
- case <-tl.stopping:
- // flush tl.buf and exit the loop
- stopping = true
- case <-tl.flush:
- case <-ticker.C:
- }
-
- var ready *bytes.Buffer
-
- tl.Mutex.Lock()
- ready, tl.buf = tl.buf, &bytes.Buffer{}
- tl.Mutex.Unlock()
-
- if ready != nil && ready.Len() > 0 {
- tl.writer.Write(ready.Bytes())
- }
- }
- close(tl.stopped)
-}
-
-// Close the flusher goroutine and wait for it to complete, then close the
-// underlying Writer.
-func (tl *ThrottledLogger) Close() error {
- select {
- case <-tl.stopping:
- // already stopped
- default:
- close(tl.stopping)
+// newStringPrefixer wraps an io.Writer, inserting the given string at
+// the beginning of each line. The given string should include a
+// trailing space for readability.
+func newStringPrefixer(w io.Writer, s string) *prefixer {
+ return &prefixer{
+ writer: w,
+ prefixFunc: func() string { return s },
}
- <-tl.stopped
- return tl.writer.Close()
}
-const (
- // MaxLogLine is the maximum length of stdout/stderr lines before they are split.
- MaxLogLine = 1 << 12
-)
-
-// ReadWriteLines reads lines from a reader and writes to a Writer, with long
-// line splitting.
-func ReadWriteLines(in io.Reader, writer io.Writer, done chan<- bool) {
- reader := bufio.NewReaderSize(in, MaxLogLine)
- var prefix string
- for {
- line, isPrefix, err := reader.ReadLine()
- if err == io.EOF {
- break
- } else if err != nil {
- writer.Write([]byte(fmt.Sprintln("error reading container log:", err)))
- }
- var suffix string
- if isPrefix {
- suffix = "[...]\n"
- }
-
- if prefix == "" && suffix == "" {
- writer.Write(line)
- } else {
- writer.Write([]byte(fmt.Sprint(prefix, string(line), suffix)))
+func (tp *prefixer) Write(p []byte) (n int, err error) {
+ for len(p) > 0 && err == nil {
+ if !tp.unfinished {
+ _, err = io.WriteString(tp.writer, tp.prefixFunc())
+ if err != nil {
+ return
+ }
}
-
- // Set up prefix for following line
- if isPrefix {
- prefix = "[...]"
+ newline := bytes.IndexRune(p, '\n')
+ var nn int
+ if newline < 0 {
+ tp.unfinished = true
+ nn, err = tp.writer.Write(p)
+ p = nil
} else {
- prefix = ""
+ tp.unfinished = false
+ nn, err = tp.writer.Write(p[:newline+1])
+ p = p[nn:]
}
+ n += nn
}
- done <- true
-}
-
-// NewThrottledLogger creates a new thottled logger that
-// - prepends timestamps to each line, and
-// - batches log messages and only calls the underlying Writer
-// at most once per "crunchLogSecondsBetweenEvents" seconds.
-func NewThrottledLogger(writer io.WriteCloser) *ThrottledLogger {
- tl := &ThrottledLogger{}
- tl.flush = make(chan struct{}, 1)
- tl.stopped = make(chan struct{})
- tl.stopping = make(chan struct{})
- tl.writer = writer
- tl.Logger = log.New(tl, "", 0)
- tl.Timestamper = RFC3339Timestamp
- go tl.flusher()
- return tl
-}
-
-// Log throttling rate limiting config parameters
-var crunchLimitLogBytesPerJob int64 = 67108864
-var crunchLogThrottleBytes int64 = 65536
-var crunchLogThrottlePeriod time.Duration = time.Second * 60
-var crunchLogThrottleLines int64 = 1024
-var crunchLogPartialLineThrottlePeriod time.Duration = time.Second * 5
-var crunchLogBytesPerEvent int64 = 4096
-var crunchLogSecondsBetweenEvents = time.Second
-var crunchLogUpdatePeriod = time.Hour / 2
-var crunchLogUpdateSize = int64(1 << 25)
-
-// ArvLogWriter is an io.WriteCloser that processes each write by
-// writing it through to another io.WriteCloser (typically a
-// CollectionFileWriter) and creating an Arvados log entry.
-type ArvLogWriter struct {
- ArvClient IArvadosClient
- UUID string
- loggingStream string
- writeCloser io.WriteCloser
-
- // for rate limiting
- bytesLogged int64
- logThrottleResetTime time.Time
- logThrottleLinesSoFar int64
- logThrottleBytesSoFar int64
- logThrottleBytesSkipped int64
- logThrottleIsOpen bool
- logThrottlePartialLineNextAt time.Time
- logThrottleFirstPartialLine bool
- bufToFlush bytes.Buffer
- bufFlushedAt time.Time
- closing bool
+ return
}
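+
+// Illustrative example (not part of the package API): given a
+// prefixer whose prefixFunc returns "ts ", writing "foo" and then
+// "bar\nbaz" produces "ts foobar\nts baz" -- a prefix is inserted
+// only where a new line begins.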
-func (arvlog *ArvLogWriter) Write(p []byte) (int, error) {
- // Write to the next writer in the chain (a file in Keep)
- var err1 error
- if arvlog.writeCloser != nil {
- _, err1 = arvlog.writeCloser.Write(p)
- }
-
- // write to API after checking rate limit
- now := time.Now()
-
- if now.After(arvlog.logThrottleResetTime) {
- // It has been more than throttle_period seconds since the last
- // checkpoint; so reset the throttle
- if arvlog.logThrottleBytesSkipped > 0 {
- arvlog.bufToFlush.WriteString(fmt.Sprintf("%s Skipped %d bytes of log\n", RFC3339Timestamp(now.UTC()), arvlog.logThrottleBytesSkipped))
- }
-
- arvlog.logThrottleResetTime = now.Add(crunchLogThrottlePeriod)
- arvlog.logThrottleBytesSoFar = 0
- arvlog.logThrottleLinesSoFar = 0
- arvlog.logThrottleBytesSkipped = 0
- arvlog.logThrottleIsOpen = true
- }
-
- lines := bytes.Split(p, []byte("\n"))
-
- for _, line := range lines {
- // Short circuit the counting code if we're just going to throw
- // away the data anyway.
- if !arvlog.logThrottleIsOpen {
- arvlog.logThrottleBytesSkipped += int64(len(line))
- continue
- } else if len(line) == 0 {
- continue
- }
-
- // check rateLimit
- logOpen, msg := arvlog.rateLimit(line, now)
- if logOpen {
- arvlog.bufToFlush.WriteString(string(msg) + "\n")
- }
- }
-
- if (int64(arvlog.bufToFlush.Len()) >= crunchLogBytesPerEvent ||
- (now.Sub(arvlog.bufFlushedAt) >= crunchLogSecondsBetweenEvents) ||
- arvlog.closing) && (arvlog.bufToFlush.Len() > 0) {
- // write to API
- lr := arvadosclient.Dict{"log": arvadosclient.Dict{
- "object_uuid": arvlog.UUID,
- "event_type": arvlog.loggingStream,
- "properties": map[string]string{"text": arvlog.bufToFlush.String()}}}
- err2 := arvlog.ArvClient.Create("logs", lr, nil)
-
- arvlog.bufToFlush = bytes.Buffer{}
- arvlog.bufFlushedAt = now
-
- if err1 != nil || err2 != nil {
- return 0, fmt.Errorf("%s ; %s", err1, err2)
- }
- }
-
- return len(p), nil
+// logWriter adds log.Logger methods to an io.Writer.
+type logWriter struct {
+ io.Writer
+ *log.Logger
}
-// Close the underlying writer
-func (arvlog *ArvLogWriter) Close() (err error) {
- arvlog.closing = true
- arvlog.Write([]byte{})
- if arvlog.writeCloser != nil {
- err = arvlog.writeCloser.Close()
- arvlog.writeCloser = nil
+func newLogWriter(w io.Writer) *logWriter {
+ return &logWriter{
+ Writer: w,
+ Logger: log.New(w, "", 0),
}
- return err
}
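+
+// Typical use (cf. the tests): wrap a log file in a timestamper,
+// then in a logWriter, so that Print/Printf calls produce
+// timestamped lines:
+//
+//	cr.CrunchLog = newLogWriter(newTimestamper(f))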
-var lineRegexp = regexp.MustCompile(`^\S+ (.*)`)
-
-// Test for hard cap on total output and for log throttling. Returns whether
-// the log line should go to output or not. Returns message if limit exceeded.
-func (arvlog *ArvLogWriter) rateLimit(line []byte, now time.Time) (bool, []byte) {
- message := ""
- lineSize := int64(len(line))
-
- if arvlog.logThrottleIsOpen {
- matches := lineRegexp.FindStringSubmatch(string(line))
-
- if len(matches) == 2 && strings.HasPrefix(matches[1], "[...]") && strings.HasSuffix(matches[1], "[...]") {
- // This is a partial line.
-
- if arvlog.logThrottleFirstPartialLine {
- // Partial should be suppressed. First time this is happening for this line so provide a message instead.
- arvlog.logThrottleFirstPartialLine = false
- arvlog.logThrottlePartialLineNextAt = now.Add(crunchLogPartialLineThrottlePeriod)
- arvlog.logThrottleBytesSkipped += lineSize
- return true, []byte(fmt.Sprintf("%s Rate-limiting partial segments of long lines to one every %d seconds.",
- RFC3339Timestamp(now.UTC()), crunchLogPartialLineThrottlePeriod/time.Second))
- } else if now.After(arvlog.logThrottlePartialLineNextAt) {
- // The throttle period has passed. Update timestamp and let it through.
- arvlog.logThrottlePartialLineNextAt = now.Add(crunchLogPartialLineThrottlePeriod)
- } else {
- // Suppress line.
- arvlog.logThrottleBytesSkipped += lineSize
- return false, line
- }
- } else {
- // Not a partial line so reset.
- arvlog.logThrottlePartialLineNextAt = time.Time{}
- arvlog.logThrottleFirstPartialLine = true
- }
-
- arvlog.bytesLogged += lineSize
- arvlog.logThrottleBytesSoFar += lineSize
- arvlog.logThrottleLinesSoFar++
-
- if arvlog.bytesLogged > crunchLimitLogBytesPerJob {
- message = fmt.Sprintf("%s Exceeded log limit %d bytes (crunch_limit_log_bytes_per_job). Log will be truncated.",
- RFC3339Timestamp(now.UTC()), crunchLimitLogBytesPerJob)
- arvlog.logThrottleResetTime = now.Add(time.Duration(365 * 24 * time.Hour))
- arvlog.logThrottleIsOpen = false
-
- } else if arvlog.logThrottleBytesSoFar > crunchLogThrottleBytes {
- remainingTime := arvlog.logThrottleResetTime.Sub(now)
- message = fmt.Sprintf("%s Exceeded rate %d bytes per %d seconds (crunch_log_throttle_bytes). Logging will be silenced for the next %d seconds.",
- RFC3339Timestamp(now.UTC()), crunchLogThrottleBytes, crunchLogThrottlePeriod/time.Second, remainingTime/time.Second)
- arvlog.logThrottleIsOpen = false
-
- } else if arvlog.logThrottleLinesSoFar > crunchLogThrottleLines {
- remainingTime := arvlog.logThrottleResetTime.Sub(now)
- message = fmt.Sprintf("%s Exceeded rate %d lines per %d seconds (crunch_log_throttle_lines), logging will be silenced for the next %d seconds.",
- RFC3339Timestamp(now.UTC()), crunchLogThrottleLines, crunchLogThrottlePeriod/time.Second, remainingTime/time.Second)
- arvlog.logThrottleIsOpen = false
-
- }
- }
-
- if !arvlog.logThrottleIsOpen {
- // Don't log anything if any limit has been exceeded. Just count lossage.
- arvlog.logThrottleBytesSkipped += lineSize
- }
-
- if message != "" {
- // Yes, write to logs, but use our "rate exceeded" message
- // instead of the log message that exceeded the limit.
- message += " A complete log is still being written to Keep, and will be available when the job finishes."
- return true, []byte(message)
- }
- return arvlog.logThrottleIsOpen, line
-}
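+// Thresholds for periodically saving the log collection: elapsed
+// time and accumulated log bytes (see TestLogUpdate).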
+var crunchLogUpdatePeriod = time.Hour / 2
+var crunchLogUpdateSize = int64(1 << 25)
// load the rate limit discovery config parameters
func loadLogThrottleParams(clnt IArvadosClient) {
@@ -394,13 +103,6 @@ func loadLogThrottleParams(clnt IArvadosClient) {
}
}
- loadInt64(&crunchLimitLogBytesPerJob, "crunchLimitLogBytesPerJob")
- loadInt64(&crunchLogThrottleBytes, "crunchLogThrottleBytes")
- loadDuration(&crunchLogThrottlePeriod, "crunchLogThrottlePeriod")
- loadInt64(&crunchLogThrottleLines, "crunchLogThrottleLines")
- loadDuration(&crunchLogPartialLineThrottlePeriod, "crunchLogPartialLineThrottlePeriod")
- loadInt64(&crunchLogBytesPerEvent, "crunchLogBytesPerEvent")
- loadDuration(&crunchLogSecondsBetweenEvents, "crunchLogSecondsBetweenEvents")
loadInt64(&crunchLogUpdateSize, "crunchLogUpdateSize")
loadDuration(&crunchLogUpdatePeriod, "crunchLogUpdatePeriod")
@@ -412,7 +114,6 @@ type filterKeepstoreErrorsOnly struct {
}
func (f *filterKeepstoreErrorsOnly) Write(p []byte) (int, error) {
- log.Printf("filterKeepstoreErrorsOnly: write %q", p)
f.buf = append(f.buf, p...)
start := 0
for i := len(f.buf) - len(p); i < len(f.buf); i++ {
diff --git a/lib/crunchrun/logging_test.go b/lib/crunchrun/logging_test.go
index 42f165fd75..29313a45f5 100644
--- a/lib/crunchrun/logging_test.go
+++ b/lib/crunchrun/logging_test.go
@@ -13,26 +13,27 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
. "gopkg.in/check.v1"
check "gopkg.in/check.v1"
)
-type LoggingTestSuite struct {
- client *arvados.Client
-}
-
-type TestTimestamper struct {
- count int
+const reTimestamp = `....-..-..T..:..:..\..........Z`
+
+// newTestTimestamper wraps an io.Writer, inserting a predictable
+// rfc3339NanoFixed-format timestamp at the beginning of each line.
+func newTestTimestamper(w io.Writer) *prefixer {
+ count := 0
+ return &prefixer{
+ writer: w,
+ prefixFunc: func() string {
+ count++
+ return fmt.Sprintf("2015-12-29T15:51:45.%09dZ ", count)
+ },
+ }
}
-func (stamper *TestTimestamper) Timestamp(t time.Time) string {
- stamper.count++
- t, err := time.ParseInLocation(time.RFC3339Nano, fmt.Sprintf("2015-12-29T15:51:45.%09dZ", stamper.count), t.Location())
- if err != nil {
- panic(err)
- }
- return RFC3339Timestamp(t)
+type LoggingTestSuite struct {
+ client *arvados.Client
}
// Gocheck boilerplate
@@ -48,26 +49,20 @@ func (s *LoggingTestSuite) TestWriteLogs(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
defer kc.Close()
- cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzzzzzzzzzzzz")
+ cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-dz642-zzzzzzzzzzzzzzz")
+ c.Assert(err, IsNil)
+ f, err := cr.openLogFile("crunch-run")
c.Assert(err, IsNil)
- cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+ cr.CrunchLog = newLogWriter(newTestTimestamper(f))
cr.CrunchLog.Print("Hello world!")
cr.CrunchLog.Print("Goodbye")
- cr.CrunchLog.Close()
-
- c.Check(api.Calls, Equals, 1)
-
- mt, err := cr.LogCollection.MarshalManifest(".")
- c.Check(err, IsNil)
- c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
- logtext := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
- "2015-12-29T15:51:45.000000002Z Goodbye\n"
+ c.Check(api.Calls, Equals, 0)
- c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
- c.Check(api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext)
- c.Check(string(kc.Content), Equals, logtext)
+ logs := logFileContent(c, cr, "crunch-run.txt")
+ c.Check(logs, Matches, reTimestamp+` Hello world!\n`+
+ reTimestamp+` Goodbye\n`)
}
func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
@@ -79,59 +74,34 @@ func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
defer kc.Close()
cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
- cr.CrunchLog.Immediate = nil
-
+ f, err := cr.openLogFile("crunch-run")
+ c.Assert(err, IsNil)
+ cr.CrunchLog = newLogWriter(newTestTimestamper(f))
for i := 0; i < 2000000; i++ {
cr.CrunchLog.Printf("Hello %d", i)
}
cr.CrunchLog.Print("Goodbye")
- cr.CrunchLog.Close()
-
- c.Check(api.Calls > 0, Equals, true)
- c.Check(api.Calls < 2000000, Equals, true)
-
- mt, err := cr.LogCollection.MarshalManifest(".")
- c.Check(err, IsNil)
- c.Check(mt, Equals, ". 9c2c05d1fae6aaa8af85113ba725716d+67108864 80b821383a07266c2a66a4566835e26e+21780065 0:88888929:crunch-run.txt\n")
-}
-
-func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
- api := &ArvTestClient{}
- kc := &KeepTestClient{}
- defer kc.Close()
- cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzzzzzzzzzzzz")
- c.Assert(err, IsNil)
- ts := &TestTimestamper{}
- cr.CrunchLog.Timestamper = ts.Timestamp
- w, err := cr.NewLogWriter("stdout")
- c.Assert(err, IsNil)
- stdout := NewThrottledLogger(w)
- stdout.Timestamper = ts.Timestamp
-
- cr.CrunchLog.Print("Hello world!")
- stdout.Print("Doing stuff")
- cr.CrunchLog.Print("Goodbye")
- stdout.Print("Blurb")
- cr.CrunchLog.Close()
- stdout.Close()
- logText := make(map[string]string)
- for _, content := range api.Content {
- log := content["log"].(arvadosclient.Dict)
- logText[log["event_type"].(string)] += log["properties"].(map[string]string)["text"]
- }
-
- c.Check(logText["crunch-run"], Equals, `2015-12-29T15:51:45.000000001Z Hello world!
-2015-12-29T15:51:45.000000003Z Goodbye
-`)
- c.Check(logText["stdout"], Equals, `2015-12-29T15:51:45.000000002Z Doing stuff
-2015-12-29T15:51:45.000000004Z Blurb
+ logs := logFileContent(c, cr, "crunch-run.txt")
+ c.Check(strings.Count(logs, "\n"), Equals, 2000001)
+ // Redact most of the logs except the start/end for the regexp
+ // match -- otherwise, when the regexp fails, gocheck spams
+ // the test logs with tens of megabytes of quoted strings.
+ c.Assert(len(logs) > 10000, Equals, true)
+ c.Check(logs[:500]+"\n...\n"+logs[len(logs)-500:], Matches, `(?ms)2015-12-29T15:51:45.000000001Z Hello 0
+2015-12-29T15:51:45.000000002Z Hello 1
+2015-12-29T15:51:45.000000003Z Hello 2
+2015-12-29T15:51:45.000000004Z Hello 3
+.*
+2015-12-29T15:51:45.001999998Z Hello 1999997
+2015-12-29T15:51:45.001999999Z Hello 1999998
+2015-12-29T15:51:45.002000000Z Hello 1999999
+2015-12-29T15:51:45.002000001Z Goodbye
`)
mt, err := cr.LogCollection.MarshalManifest(".")
c.Check(err, IsNil)
- c.Check(mt, Equals, ". 48f9023dc683a850b1c9b482b14c4b97+163 0:83:crunch-run.txt 83:80:stdout.txt\n")
+ c.Check(mt, Equals, ". 9c2c05d1fae6aaa8af85113ba725716d+67108864 80b821383a07266c2a66a4566835e26e+21780065 0:88888929:crunch-run.txt\n")
}
func (s *LoggingTestSuite) TestLogUpdate(c *C) {
@@ -149,14 +119,13 @@ func (s *LoggingTestSuite) TestLogUpdate(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
defer kc.Close()
- cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzzzzzzzzzzzz")
+ cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-dz642-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- ts := &TestTimestamper{}
- cr.CrunchLog.Timestamper = ts.Timestamp
- w, err := cr.NewLogWriter("stdout")
+ f, err := cr.openLogFile("crunch-run")
+ c.Assert(err, IsNil)
+ cr.CrunchLog = newLogWriter(newTestTimestamper(f))
+ stdout, err := cr.openLogFile("stdout")
c.Assert(err, IsNil)
- stdout := NewThrottledLogger(w)
- stdout.Timestamper = ts.Timestamp
c.Check(cr.logUUID, Equals, "")
cr.CrunchLog.Printf("Hello %1000s", "space")
@@ -165,68 +134,18 @@ func (s *LoggingTestSuite) TestLogUpdate(c *C) {
}
c.Check(cr.logUUID, Not(Equals), "")
cr.CrunchLog.Print("Goodbye")
- fmt.Fprint(stdout, "Goodbye\n")
- cr.CrunchLog.Close()
- stdout.Close()
- w.Close()
+ fmt.Fprintln(stdout, "Goodbye")
+
+	c.Check(logFileContent(c, cr, "crunch-run.txt"), Matches, reTimestamp+` Hello {996}space\n`+
+ reTimestamp+` Goodbye\n`)
+ c.Check(logFileContent(c, cr, "stdout.txt"), Matches, `Goodbye\n`)
mt, err := cr.LogCollection.MarshalManifest(".")
c.Check(err, IsNil)
- // Block packing depends on whether there's an update
- // between the two Goodbyes -- either way the first
- // block will be 4dc76.
- c.Check(mt, Matches, `. 4dc76e0a212bfa30c39d76d8c16da0c0\+1038 (afc503bc1b9a828b4bb543cb629e936c\+78|90699dc22545cd74a0664303f70bc05a\+39 276b49339fd5203d15a93ff3de11bfb9\+39) 0:1077:crunch-run.txt 1077:39:stdout.txt\n`)
+ c.Check(mt, Matches, `. 4dc76e0a212bfa30c39d76d8c16da0c0\+1038 5be52044a8c51e7b62dd62be07872968\+47 0:1077:crunch-run.txt 1077:8:stdout.txt\n`)
}
}
-func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytes(c *C) {
- s.testWriteLogsWithRateLimit(c, "crunchLogThrottleBytes", 50, 65536, "Exceeded rate 50 bytes per 60 seconds")
-}
-
-func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleLines(c *C) {
- s.testWriteLogsWithRateLimit(c, "crunchLogThrottleLines", 1, 1024, "Exceeded rate 1 lines per 60 seconds")
-}
-
-func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytesPerEvent(c *C) {
- s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 50, 67108864, "Exceeded log limit 50 bytes (crunch_limit_log_bytes_per_job)")
-}
-
-func (s *LoggingTestSuite) TestWriteLogsWithZeroBytesPerJob(c *C) {
- s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 0, 67108864, "Exceeded log limit 0 bytes (crunch_limit_log_bytes_per_job)")
-}
-
-func (s *LoggingTestSuite) testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, throttleDefault int, expected string) {
- discoveryMap[throttleParam] = float64(throttleValue)
- defer func() {
- discoveryMap[throttleParam] = float64(throttleDefault)
- }()
-
- api := &ArvTestClient{}
- kc := &KeepTestClient{}
- defer kc.Close()
- cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzzzzzzzzzzzz")
- c.Assert(err, IsNil)
- cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
-
- cr.CrunchLog.Print("Hello world!")
- cr.CrunchLog.Print("Goodbye")
- cr.CrunchLog.Close()
-
- c.Check(api.Calls, Equals, 1)
-
- mt, err := cr.LogCollection.MarshalManifest(".")
- c.Check(err, IsNil)
- c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
-
- logtext := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
- "2015-12-29T15:51:45.000000002Z Goodbye\n"
-
- c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
- stderrLog := api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"]
- c.Check(true, Equals, strings.Contains(stderrLog, expected))
- c.Check(string(kc.Content), Equals, logtext)
-}
-
type filterSuite struct{}
var _ = Suite(&filterSuite{})
diff --git a/lib/crunchrun/singularity.go b/lib/crunchrun/singularity.go
index fd26297713..f78c3a627b 100644
--- a/lib/crunchrun/singularity.go
+++ b/lib/crunchrun/singularity.go
@@ -7,6 +7,7 @@ package crunchrun
import (
"bytes"
"context"
+ "encoding/json"
"errors"
"fmt"
"io/ioutil"
@@ -14,6 +15,7 @@ import (
"os"
"os/exec"
"os/user"
+ "path"
"regexp"
"sort"
"strconv"
@@ -26,7 +28,7 @@ import (
type singularityExecutor struct {
logf func(string, ...interface{})
- fakeroot bool // use --fakeroot flag, allow --network=bridge when non-root (currently only used by tests)
+ sudo bool // use sudo to run singularity (only used by tests)
spec containerSpec
tmpdir string
child *exec.Cmd
@@ -87,166 +89,227 @@ func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string,
return &rgroup, nil
}
-func (e *singularityExecutor) checkImageCache(dockerImageID string, container arvados.Container, arvMountPoint string,
- containerClient *arvados.Client) (collection *arvados.Collection, err error) {
-
- // Cache the image to keep
- cacheGroup, err := e.getOrCreateProject(container.RuntimeUserUUID, ".cache", containerClient)
+func (e *singularityExecutor) getImageCacheProject(userUUID string, containerClient *arvados.Client) (*arvados.Group, error) {
+ cacheProject, err := e.getOrCreateProject(userUUID, ".cache", containerClient)
if err != nil {
return nil, fmt.Errorf("error getting '.cache' project: %v", err)
}
- imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", containerClient)
+ imageProject, err := e.getOrCreateProject(cacheProject.UUID, "auto-generated singularity images", containerClient)
if err != nil {
return nil, fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
}
+ return imageProject, nil
+}
+
+func (e *singularityExecutor) imageCacheExp() time.Time {
+ return time.Now().Add(e.imageCacheTTL()).UTC()
+}
- collectionName := fmt.Sprintf("singularity image for %v", dockerImageID)
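+// imageCacheTTL is the time-to-live for cached singularity images:
+// two weeks, measured from the most recent (re)use -- see the
+// trash_at extension in getCacheCollection.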
+func (e *singularityExecutor) imageCacheTTL() time.Duration {
+ return 24 * 7 * 2 * time.Hour
+}
+
+// getCacheCollection returns an existing collection with a cached
+// singularity image with the given name, or nil if none exists.
+//
+// Note that if there is no existing collection, this is not
+// considered an error -- all return values will be nil/empty.
+func (e *singularityExecutor) getCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group, arvMountPoint string) (collection *arvados.Collection, imageFile string, err error) {
var cl arvados.CollectionList
err = containerClient.RequestAndDecode(&cl,
arvados.EndpointCollectionList.Method,
arvados.EndpointCollectionList.Path,
nil, arvados.ListOptions{Filters: []arvados.Filter{
- arvados.Filter{"owner_uuid", "=", imageGroup.UUID},
+ arvados.Filter{"owner_uuid", "=", cacheProject.UUID},
arvados.Filter{"name", "=", collectionName},
},
Limit: 1})
if err != nil {
- return nil, fmt.Errorf("error querying for collection '%v': %v", collectionName, err)
- }
- var imageCollection arvados.Collection
- if len(cl.Items) == 1 {
- imageCollection = cl.Items[0]
- } else {
- collectionName := "converting " + collectionName
- exp := time.Now().Add(24 * 7 * 2 * time.Hour)
- err = containerClient.RequestAndDecode(&imageCollection,
- arvados.EndpointCollectionCreate.Method,
- arvados.EndpointCollectionCreate.Path,
+ return nil, "", fmt.Errorf("error querying for collection %q in project %s: %w", collectionName, cacheProject.UUID, err)
+ }
+ if len(cl.Items) == 0 {
+ // Successfully discovered that there's no cached
+ // image collection.
+ return nil, "", nil
+ }
+ // Check that the collection actually contains an "image.sif"
+ // file. If not, we can't use it, and trying to create a new
+ // cache collection will probably fail too, so the caller
+ // should not bother trying.
+ coll := cl.Items[0]
+ sifFile := path.Join(arvMountPoint, "by_id", coll.PortableDataHash, "image.sif")
+ _, err = os.Stat(sifFile)
+ if err != nil {
+ return nil, "", fmt.Errorf("found collection %s (%s), but it did not contain an image file: %s", coll.UUID, coll.PortableDataHash, err)
+ }
+ if coll.TrashAt != nil && coll.TrashAt.Sub(time.Now()) < e.imageCacheTTL()*9/10 {
+ // If the remaining TTL is less than 90% of our target
+ // TTL, extend trash_at. This avoids prematurely
+ // trashing and re-converting images that are being
+ // used regularly.
+ err = containerClient.RequestAndDecode(nil,
+ arvados.EndpointCollectionUpdate.Method,
+ "arvados/v1/collections/"+coll.UUID,
nil, map[string]interface{}{
"collection": map[string]string{
- "owner_uuid": imageGroup.UUID,
- "name": collectionName,
- "trash_at": exp.UTC().Format(time.RFC3339),
+ "trash_at": e.imageCacheExp().Format(time.RFC3339),
},
- "ensure_unique_name": true,
})
if err != nil {
- return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
+ e.logf("could not update expiry time of cached image collection (proceeding anyway): %s", err)
}
-
}
-
- return &imageCollection, nil
+ return &coll, sifFile, nil
}
-// LoadImage will satisfy ContainerExecuter interface transforming
-// containerImage into a sif file for later use.
-func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
- containerClient *arvados.Client) error {
+func (e *singularityExecutor) createCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group) (*arvados.Collection, error) {
+ var coll arvados.Collection
+ err := containerClient.RequestAndDecode(&coll,
+ arvados.EndpointCollectionCreate.Method,
+ arvados.EndpointCollectionCreate.Path,
+ nil, map[string]interface{}{
+ "collection": map[string]string{
+ "owner_uuid": cacheProject.UUID,
+ "name": collectionName,
+ "trash_at": e.imageCacheExp().Format(time.RFC3339),
+ },
+ "ensure_unique_name": true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
+ }
+ return &coll, nil
+}
- var imageFilename string
- var sifCollection *arvados.Collection
- var err error
- if containerClient != nil {
- sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
- if err != nil {
- return err
- }
- imageFilename = fmt.Sprintf("%s/by_uuid/%s/image.sif", arvMountPoint, sifCollection.UUID)
- } else {
- imageFilename = e.tmpdir + "/image.sif"
+func (e *singularityExecutor) convertDockerImage(srcPath, dstPath string) error {
+ // Make sure the docker image is readable.
+ if _, err := os.Stat(srcPath); err != nil {
+ return err
}
- if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
- // Make sure the docker image is readable, and error
- // out if not.
- if _, err := os.Stat(imageTarballPath); err != nil {
- return err
- }
+ e.logf("building singularity image")
+ // "singularity build" does not accept a
+ // docker-archive://... filename containing a ":" character,
+ // as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
+ // symlink that doesn't have ":" chars.
+ err := os.Symlink(srcPath, e.tmpdir+"/image.tar")
+ if err != nil {
+ return err
+ }
- e.logf("building singularity image")
- // "singularity build" does not accept a
- // docker-archive://... filename containing a ":" character,
- // as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
- // symlink that doesn't have ":" chars.
- err := os.Symlink(imageTarballPath, e.tmpdir+"/image.tar")
- if err != nil {
- return err
- }
+ // Set up a cache and tmp dir for singularity build
+ err = os.Mkdir(e.tmpdir+"/cache", 0700)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(e.tmpdir + "/cache")
+ err = os.Mkdir(e.tmpdir+"/tmp", 0700)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(e.tmpdir + "/tmp")
+
+ build := exec.Command("singularity", "build", dstPath, "docker-archive://"+e.tmpdir+"/image.tar")
+ build.Env = os.Environ()
+ build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
+ build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
+ e.logf("%v", build.Args)
+ out, err := build.CombinedOutput()
+ // INFO: Starting build...
+ // Getting image source signatures
+ // Copying blob ab15617702de done
+ // Copying config 651e02b8a2 done
+ // Writing manifest to image destination
+ // Storing signatures
+ // 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
+ // INFO: Creating SIF file...
+ // INFO: Build complete: arvados-jobs.latest.sif
+ e.logf("%s", out)
+ return err
+}
- // Set up a cache and tmp dir for singularity build
- err = os.Mkdir(e.tmpdir+"/cache", 0700)
- if err != nil {
- return err
- }
- defer os.RemoveAll(e.tmpdir + "/cache")
- err = os.Mkdir(e.tmpdir+"/tmp", 0700)
- if err != nil {
- return err
- }
- defer os.RemoveAll(e.tmpdir + "/tmp")
-
- build := exec.Command("singularity", "build", imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
- build.Env = os.Environ()
- build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
- build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
- e.logf("%v", build.Args)
- out, err := build.CombinedOutput()
- // INFO: Starting build...
- // Getting image source signatures
- // Copying blob ab15617702de done
- // Copying config 651e02b8a2 done
- // Writing manifest to image destination
- // Storing signatures
- // 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
- // INFO: Creating SIF file...
- // INFO: Build complete: arvados-jobs.latest.sif
- e.logf("%s", out)
+// LoadImage converts the given docker image to a singularity
+// image.
+//
+// If containerClient is not nil, LoadImage first tries to use an
+// existing image (in Home -> .cache -> auto-generated singularity
+// images) and, if none was found there and the image was converted on
+// the fly, tries to save the converted image to the cache so it can
+// be reused next time.
+//
+// If containerClient is nil or a cache project/collection cannot be
+// found or created, LoadImage converts the image on the fly and
+// writes it to the local filesystem instead.
+func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string, containerClient *arvados.Client) error {
+ convertWithoutCache := func(err error) error {
if err != nil {
- return err
+ e.logf("cannot use singularity image cache: %s", err)
}
+ e.imageFilename = path.Join(e.tmpdir, "image.sif")
+ return e.convertDockerImage(imageTarballPath, e.imageFilename)
}
if containerClient == nil {
- e.imageFilename = imageFilename
- return nil
+ return convertWithoutCache(nil)
}
-
- // update TTL to now + two weeks
- exp := time.Now().Add(24 * 7 * 2 * time.Hour)
-
- uuidPath, err := containerClient.PathForUUID("update", sifCollection.UUID)
+ cacheProject, err := e.getImageCacheProject(container.RuntimeUserUUID, containerClient)
if err != nil {
- e.logf("error PathForUUID: %v", err)
- return nil
+ return convertWithoutCache(err)
}
- var imageCollection arvados.Collection
- err = containerClient.RequestAndDecode(&imageCollection,
- arvados.EndpointCollectionUpdate.Method,
- uuidPath,
- nil, map[string]interface{}{
- "collection": map[string]string{
- "name": fmt.Sprintf("singularity image for %v", dockerImageID),
- "trash_at": exp.UTC().Format(time.RFC3339),
- },
- })
- if err == nil {
- // If we just wrote the image to the cache, the
- // response also returns the updated PDH
- e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, imageCollection.PortableDataHash)
+ cacheCollectionName := fmt.Sprintf("singularity image for %s", dockerImageID)
+ existingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint)
+ if err != nil {
+ return convertWithoutCache(err)
+ }
+ if existingCollection != nil {
+ e.imageFilename = sifFile
return nil
}
- e.logf("error updating/renaming collection for cached sif image: %v", err)
- // Failed to update but maybe it lost a race and there is
- // another cached collection in the same place, so check the cache
- // again
- sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
+ newCollection, err := e.createCacheCollection("converting "+cacheCollectionName, containerClient, cacheProject)
+ if err != nil {
+ return convertWithoutCache(err)
+ }
+ dstDir := path.Join(arvMountPoint, "by_uuid", newCollection.UUID)
+ dstFile := path.Join(dstDir, "image.sif")
+ err = e.convertDockerImage(imageTarballPath, dstFile)
if err != nil {
return err
}
- e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, sifCollection.PortableDataHash)
+ buf, err := os.ReadFile(path.Join(dstDir, ".arvados#collection"))
+ if err != nil {
+ return fmt.Errorf("could not sync image collection: %w", err)
+ }
+ var synced arvados.Collection
+ err = json.Unmarshal(buf, &synced)
+ if err != nil {
+ return fmt.Errorf("could not parse .arvados#collection: %w", err)
+ }
+ e.logf("saved converted image in %s with PDH %s", newCollection.UUID, synced.PortableDataHash)
+ e.imageFilename = path.Join(arvMountPoint, "by_id", synced.PortableDataHash, "image.sif")
+ if errRename := containerClient.RequestAndDecode(nil,
+ arvados.EndpointCollectionUpdate.Method,
+ "arvados/v1/collections/"+newCollection.UUID,
+ nil, map[string]interface{}{
+ "collection": map[string]string{
+ "name": cacheCollectionName,
+ },
+ }); errRename != nil {
+		// Error is probably a name collision caused by
+		// another crunch-run process converting the same
+		// image concurrently. In that case, we prefer to use
+ // the one that won the race -- the resulting images
+ // should be equivalent, but if they do differ at all,
+ // it's better if all containers use the same
+ // conversion.
+ if existingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint); err == nil {
+ e.logf("lost race -- abandoning our conversion in %s (%s) and using image from %s (%s) instead", newCollection.UUID, synced.PortableDataHash, existingCollection.UUID, existingCollection.PortableDataHash)
+ e.imageFilename = sifFile
+ } else {
+ e.logf("using newly converted image anyway, despite error renaming collection: %v", errRename)
+ }
+ }
return nil
}
@@ -257,24 +320,62 @@ func (e *singularityExecutor) Create(spec containerSpec) error {
func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
args := []string{path, "exec", "--containall", "--cleanenv", "--pwd=" + e.spec.WorkingDir}
- if e.fakeroot {
- args = append(args, "--fakeroot")
- }
if !e.spec.EnableNetwork {
args = append(args, "--net", "--network=none")
- } else if u, err := user.Current(); err == nil && u.Uid == "0" || e.fakeroot {
- // Specifying --network=bridge fails unless (a) we are
- // root, (b) we are using --fakeroot, or (c)
- // singularity has been configured to allow our
- // uid/gid to use it like so:
+ } else if u, err := user.Current(); err == nil && u.Uid == "0" || e.sudo {
+ // Specifying --network=bridge fails unless
+ // singularity is running as root.
+ //
+ // Note this used to be possible with --fakeroot, or
+ // configuring singularity like so:
//
// singularity config global --set 'allow net networks' bridge
// singularity config global --set 'allow net groups' mygroup
+ //
+ // However, these options no longer work (as of debian
+ // bookworm) because iptables now refuses to run in a
+ // setuid environment.
args = append(args, "--net", "--network=bridge")
+ } else {
+ // If we don't pass a --net argument at all, the
+ // container will be in the same network namespace as
+ // the host.
+ //
+ // Note this allows the container to listen on the
+ // host's external ports.
}
- if e.spec.CUDADeviceCount != 0 {
+ if e.spec.GPUStack == "cuda" && e.spec.GPUDeviceCount > 0 {
args = append(args, "--nv")
}
+ if e.spec.GPUStack == "rocm" && e.spec.GPUDeviceCount > 0 {
+ args = append(args, "--rocm")
+ }
+
+ // If we ask for resource limits that aren't supported,
+ // singularity will not run the container at all. So we probe
+ // for support first, and only apply the limits that appear to
+ // be supported.
+ //
+ // Default debian configuration lets non-root users set memory
+ // limits but not CPU limits, so we enable/disable those
+ // limits independently.
+ //
+ // https://rootlesscontaine.rs/getting-started/common/cgroup2/
+ checkCgroupSupport(e.logf)
+ if e.spec.VCPUs > 0 {
+ if cgroupSupport["cpu"] {
+ args = append(args, "--cpus", fmt.Sprintf("%d", e.spec.VCPUs))
+ } else {
+			e.logf("cpu limits are not supported by current systemd/cgroup configuration, not setting --cpus %d", e.spec.VCPUs)
+ }
+ }
+ if e.spec.RAM > 0 {
+ if cgroupSupport["memory"] {
+ args = append(args, "--memory", fmt.Sprintf("%d", e.spec.RAM))
+ } else {
+ e.logf("memory limits are not supported by current systemd/cgroup configuration, not setting --memory %d", e.spec.RAM)
+ }
+ }
readonlyflag := map[bool]string{
false: "rw",
@@ -326,6 +427,17 @@ func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
// and https://dev.arvados.org/issues/19081
env = append(env, "SINGULARITY_NO_EVAL=1")
+ // If we don't propagate XDG_RUNTIME_DIR and
+ // DBUS_SESSION_BUS_ADDRESS, singularity resource limits fail
+ // with "FATAL: container creation failed: while applying
+ // cgroups config: system configuration does not support
+ // cgroup management" or "FATAL: container creation failed:
+ // while applying cgroups config: rootless cgroups require a
+ // D-Bus session - check that XDG_RUNTIME_DIR and
+ // DBUS_SESSION_BUS_ADDRESS are set".
+ env = append(env, "XDG_RUNTIME_DIR="+os.Getenv("XDG_RUNTIME_DIR"))
+ env = append(env, "DBUS_SESSION_BUS_ADDRESS="+os.Getenv("DBUS_SESSION_BUS_ADDRESS"))
+
args = append(args, e.imageFilename)
args = append(args, e.spec.Command...)
@@ -345,6 +457,13 @@ func (e *singularityExecutor) Start() error {
return err
}
child := e.execCmd(path)
+ if e.sudo {
+ child.Args = append([]string{child.Path}, child.Args...)
+ child.Path, err = exec.LookPath("sudo")
+ if err != nil {
+ return err
+ }
+ }
err = child.Start()
if err != nil {
return err
@@ -354,11 +473,18 @@ func (e *singularityExecutor) Start() error {
}
func (e *singularityExecutor) Pid() int {
- // see https://dev.arvados.org/issues/17244#note-21
- return 0
+ childproc, err := e.containedProcess()
+ if err != nil {
+ return 0
+ }
+ return childproc
}
func (e *singularityExecutor) Stop() error {
+ if e.child == nil || e.child.Process == nil {
+ // no process started, or Wait already called
+ return nil
+ }
if err := e.child.Process.Signal(syscall.Signal(0)); err != nil {
// process already exited
return nil
@@ -462,7 +588,11 @@ func (e *singularityExecutor) containedProcess() (int, error) {
if e.child == nil || e.child.Process == nil {
return 0, errContainerNotStarted
}
- lsns, err := exec.Command("lsns").CombinedOutput()
+ cmd := exec.Command("lsns")
+ if e.sudo {
+ cmd = exec.Command("sudo", "lsns")
+ }
+ lsns, err := cmd.CombinedOutput()
if err != nil {
return 0, fmt.Errorf("lsns: %w", err)
}
diff --git a/lib/crunchrun/singularity_test.go b/lib/crunchrun/singularity_test.go
index e4c7cdb308..d39ccdc77d 100644
--- a/lib/crunchrun/singularity_test.go
+++ b/lib/crunchrun/singularity_test.go
@@ -5,11 +5,16 @@
package crunchrun
import (
+ "fmt"
"os"
"os/exec"
+ "strings"
+ "sync"
+ "time"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
. "gopkg.in/check.v1"
- check "gopkg.in/check.v1"
)
var _ = Suite(&singularitySuite{})
@@ -28,6 +33,7 @@ func (s *singularitySuite) SetUpSuite(c *C) {
s.executor, err = newSingularityExecutor(c.Logf)
c.Assert(err, IsNil)
}
+ arvadostest.StartKeep(2, true)
}
func (s *singularitySuite) TearDownSuite(c *C) {
@@ -36,18 +42,21 @@ func (s *singularitySuite) TearDownSuite(c *C) {
}
}
-func (s *singularitySuite) TestIPAddress(c *C) {
- // In production, executor will choose --network=bridge
- // because uid=0 under arvados-dispatch-cloud. But in test
- // cases, uid!=0, which means --network=bridge is conditional
- // on --fakeroot.
- uuc, err := os.ReadFile("/proc/sys/kernel/unprivileged_userns_clone")
- c.Check(err, check.IsNil)
- if string(uuc) == "0\n" {
- c.Skip("insufficient privileges to run this test case -- `singularity exec --fakeroot` requires /proc/sys/kernel/unprivileged_userns_clone = 1")
+func (s *singularitySuite) TestEnableNetwork_Listen(c *C) {
+ // With modern iptables, singularity (as of 4.2.1) cannot
+ // enable networking when invoked by a regular user. Under
+ // arvados-dispatch-cloud, crunch-run runs as root, so it's
+ // OK. For testing, assuming tests are not running as root, we
+ // use sudo -- but only if requested via environment variable.
+ if os.Getuid() == 0 {
+ // already root
+ } else if os.Getenv("ARVADOS_TEST_PRIVESC") == "sudo" {
+ c.Logf("ARVADOS_TEST_PRIVESC is 'sudo', invoking 'sudo singularity ...'")
+ s.executor.(*singularityExecutor).sudo = true
+ } else {
+ c.Skip("test case needs to run singularity as root -- set ARVADOS_TEST_PRIVESC=sudo to enable this test")
}
- s.executor.(*singularityExecutor).fakeroot = true
- s.executorSuite.TestIPAddress(c)
+ s.executorSuite.TestEnableNetwork_Listen(c)
}
func (s *singularitySuite) TestInject(c *C) {
@@ -68,15 +77,208 @@ func (s *singularityStubSuite) TestSingularityExecArgs(c *C) {
e, err := newSingularityExecutor(c.Logf)
c.Assert(err, IsNil)
err = e.Create(containerSpec{
- WorkingDir: "/WorkingDir",
- Env: map[string]string{"FOO": "bar"},
- BindMounts: map[string]bindmount{"/mnt": {HostPath: "/hostpath", ReadOnly: true}},
- EnableNetwork: false,
- CUDADeviceCount: 3,
+ WorkingDir: "/WorkingDir",
+ Env: map[string]string{"FOO": "bar"},
+ BindMounts: map[string]bindmount{"/mnt": {HostPath: "/hostpath", ReadOnly: true}},
+ EnableNetwork: false,
+ GPUStack: "cuda",
+ GPUDeviceCount: 3,
+ VCPUs: 2,
+ RAM: 12345678,
})
c.Check(err, IsNil)
e.imageFilename = "/fake/image.sif"
cmd := e.execCmd("./singularity")
- c.Check(cmd.Args, DeepEquals, []string{"./singularity", "exec", "--containall", "--cleanenv", "--pwd=/WorkingDir", "--net", "--network=none", "--nv", "--bind", "/hostpath:/mnt:ro", "/fake/image.sif"})
- c.Check(cmd.Env, DeepEquals, []string{"SINGULARITYENV_FOO=bar", "SINGULARITY_NO_EVAL=1"})
+ expectArgs := []string{"./singularity", "exec", "--containall", "--cleanenv", "--pwd=/WorkingDir", "--net", "--network=none", "--nv"}
+ if cgroupSupport["cpu"] {
+ expectArgs = append(expectArgs, "--cpus", "2")
+ }
+ if cgroupSupport["memory"] {
+ expectArgs = append(expectArgs, "--memory", "12345678")
+ }
+ expectArgs = append(expectArgs, "--bind", "/hostpath:/mnt:ro", "/fake/image.sif")
+ c.Check(cmd.Args, DeepEquals, expectArgs)
+ c.Check(cmd.Env, DeepEquals, []string{
+ "SINGULARITYENV_FOO=bar",
+ "SINGULARITY_NO_EVAL=1",
+ "XDG_RUNTIME_DIR=" + os.Getenv("XDG_RUNTIME_DIR"),
+ "DBUS_SESSION_BUS_ADDRESS=" + os.Getenv("DBUS_SESSION_BUS_ADDRESS"),
+ })
+}
+
+func (s *singularitySuite) setupMount(c *C) (mountdir string) {
+ mountdir = c.MkDir()
+ cmd := exec.Command("arv-mount",
+ "--foreground", "--read-write",
+ "--storage-classes", "default",
+ "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid",
+ "--disable-event-listening",
+ mountdir)
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ err := cmd.Start()
+ c.Assert(err, IsNil)
+ return
+}
+
+func (s *singularitySuite) teardownMount(c *C, mountdir string) {
+ exec.Command("arv-mount", "--unmount", mountdir).Run()
+}
+
+type singularitySuiteLoadTestSetup struct {
+ containerClient *arvados.Client
+ imageCacheProject *arvados.Group
+ dockerImageID string
+ collectionName string
+}
+
+func (s *singularitySuite) setupLoadTest(c *C, e *singularityExecutor) (setup singularitySuiteLoadTestSetup) {
+ // remove symlink and converted image already written by
+ // (executorSuite)SetupTest
+ os.Remove(e.tmpdir + "/image.tar")
+ os.Remove(e.tmpdir + "/image.sif")
+
+ setup.containerClient = arvados.NewClientFromEnv()
+ setup.containerClient.AuthToken = arvadostest.ActiveTokenV2
+
+ var err error
+ setup.imageCacheProject, err = e.getImageCacheProject(arvadostest.ActiveUserUUID, setup.containerClient)
+ c.Assert(err, IsNil)
+
+ setup.dockerImageID = "sha256:388056c9a6838deea3792e8f00705b35b439cf57b3c9c2634fb4e95cfc896de6"
+ setup.collectionName = fmt.Sprintf("singularity image for %s", setup.dockerImageID)
+
+ // Remove existing cache entry, if any.
+ var cl arvados.CollectionList
+ err = setup.containerClient.RequestAndDecode(&cl,
+ arvados.EndpointCollectionList.Method,
+ arvados.EndpointCollectionList.Path,
+ nil, arvados.ListOptions{Filters: []arvados.Filter{
+ arvados.Filter{"owner_uuid", "=", setup.imageCacheProject.UUID},
+ arvados.Filter{"name", "=", setup.collectionName},
+ },
+ Limit: 1})
+ c.Assert(err, IsNil)
+ if len(cl.Items) == 1 {
+ setup.containerClient.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+cl.Items[0].UUID, nil, nil)
+ }
+
+ return
+}
+
+func (s *singularitySuite) checkCacheCollectionExists(c *C, setup singularitySuiteLoadTestSetup) {
+ var cl arvados.CollectionList
+ err := setup.containerClient.RequestAndDecode(&cl,
+ arvados.EndpointCollectionList.Method,
+ arvados.EndpointCollectionList.Path,
+ nil, arvados.ListOptions{Filters: []arvados.Filter{
+ arvados.Filter{"owner_uuid", "=", setup.imageCacheProject.UUID},
+ arvados.Filter{"name", "=", setup.collectionName},
+ },
+ Limit: 1})
+ c.Assert(err, IsNil)
+ if !c.Check(cl.Items, HasLen, 1) {
+ return
+ }
+ c.Check(cl.Items[0].PortableDataHash, Not(Equals), "d41d8cd98f00b204e9800998ecf8427e+0")
+}
+
+func (s *singularitySuite) TestImageCache_New(c *C) {
+ mountdir := s.setupMount(c)
+ defer s.teardownMount(c, mountdir)
+ e, err := newSingularityExecutor(c.Logf)
+ c.Assert(err, IsNil)
+ setup := s.setupLoadTest(c, e)
+ err = e.LoadImage(setup.dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdir, setup.containerClient)
+ c.Check(err, IsNil)
+ _, err = os.Stat(e.tmpdir + "/image.sif")
+ c.Check(err, NotNil)
+ c.Check(os.IsNotExist(err), Equals, true)
+ s.checkCacheCollectionExists(c, setup)
+}
+
+func (s *singularitySuite) TestImageCache_SkipEmpty(c *C) {
+ mountdir := s.setupMount(c)
+ defer s.teardownMount(c, mountdir)
+ e, err := newSingularityExecutor(c.Logf)
+ c.Assert(err, IsNil)
+ setup := s.setupLoadTest(c, e)
+
+ var emptyCollection arvados.Collection
+ exp := time.Now().Add(24 * 7 * 2 * time.Hour)
+ err = setup.containerClient.RequestAndDecode(&emptyCollection,
+ arvados.EndpointCollectionCreate.Method,
+ arvados.EndpointCollectionCreate.Path,
+ nil, map[string]interface{}{
+ "collection": map[string]string{
+ "owner_uuid": setup.imageCacheProject.UUID,
+ "name": setup.collectionName,
+ "trash_at": exp.UTC().Format(time.RFC3339),
+ },
+ })
+ c.Assert(err, IsNil)
+
+ err = e.LoadImage(setup.dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdir, setup.containerClient)
+ c.Check(err, IsNil)
+ c.Check(e.imageFilename, Equals, e.tmpdir+"/image.sif")
+
+ // tmpdir should contain symlink to docker image archive.
+ tarListing, err := exec.Command("tar", "tvf", e.tmpdir+"/image.tar").CombinedOutput()
+ c.Check(err, IsNil)
+ c.Check(string(tarListing), Matches, `(?ms).*/layer.tar.*`)
+
+ // converted singularity image should be non-empty.
+ fi, err := os.Stat(e.imageFilename)
+ if c.Check(err, IsNil) {
+ c.Check(int(fi.Size()), Not(Equals), 0)
+ }
+}
+
+func (s *singularitySuite) TestImageCache_Concurrency_1(c *C) {
+ s.testImageCache(c, 1)
+}
+
+func (s *singularitySuite) TestImageCache_Concurrency_2(c *C) {
+ s.testImageCache(c, 2)
+}
+
+func (s *singularitySuite) TestImageCache_Concurrency_10(c *C) {
+ s.testImageCache(c, 10)
+}
+
+func (s *singularitySuite) testImageCache(c *C, concurrency int) {
+ mountdirs := make([]string, concurrency)
+ execs := make([]*singularityExecutor, concurrency)
+ setups := make([]singularitySuiteLoadTestSetup, concurrency)
+ for i := range execs {
+ mountdirs[i] = s.setupMount(c)
+ defer s.teardownMount(c, mountdirs[i])
+ e, err := newSingularityExecutor(c.Logf)
+ c.Assert(err, IsNil)
+ defer e.Close()
+ execs[i] = e
+ setups[i] = s.setupLoadTest(c, e)
+ }
+
+ var wg sync.WaitGroup
+ for i, e := range execs {
+ i, e := i, e
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := e.LoadImage(setups[i].dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdirs[i], setups[i].containerClient)
+ c.Check(err, IsNil)
+ }()
+ }
+ wg.Wait()
+
+ for i, e := range execs {
+ fusepath := strings.TrimPrefix(e.imageFilename, mountdirs[i])
+ // imageFilename should be in the fuse mount, not
+ // e.tmpdir.
+		c.Check(fusepath, Not(Equals), e.imageFilename)
+ // Below fuse mountpoint, paths should all be equal.
+ fusepath0 := strings.TrimPrefix(execs[0].imageFilename, mountdirs[0])
+ c.Check(fusepath, Equals, fusepath0)
+ }
}
diff --git a/lib/ctrlctx/auth.go b/lib/ctrlctx/auth.go
index 31746b64cc..d366ed952b 100644
--- a/lib/ctrlctx/auth.go
+++ b/lib/ctrlctx/auth.go
@@ -173,7 +173,7 @@ select aca.uuid, aca.expires_at, aca.api_token, aca.scopes, users.uuid, users.is
from api_client_authorizations aca
left join users on aca.user_id = users.id
where `+cond+`
- and (expires_at is null or expires_at > current_timestamp at time zone 'UTC')`, args...).Scan(
+ and (least(expires_at, refreshes_at) is null or least(expires_at, refreshes_at) > current_timestamp at time zone 'UTC')`, args...).Scan(
&aca.UUID, &expiresAt, &aca.APIToken, &scopesYAML,
&user.UUID, &user.IsActive, &user.IsAdmin)
if err == sql.ErrNoRows {
diff --git a/lib/deduplicationreport/report.go b/lib/deduplicationreport/report.go
index 2f9521c65d..a99b8e6a7e 100644
--- a/lib/deduplicationreport/report.go
+++ b/lib/deduplicationreport/report.go
@@ -5,6 +5,7 @@
package deduplicationreport
import (
+ "bytes"
"flag"
"fmt"
"io"
@@ -13,7 +14,7 @@ import (
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/manifest"
+ "git.arvados.org/arvados.git/sdk/go/blockdigest"
"github.com/dustin/go-humanize"
"github.com/sirupsen/logrus"
@@ -91,10 +92,11 @@ Options:
func blockList(collection arvados.Collection) (blocks map[string]int) {
blocks = make(map[string]int)
- m := manifest.Manifest{Text: collection.ManifestText}
- blockChannel := m.BlockIterWithDuplicates()
- for b := range blockChannel {
- blocks[b.Digest.String()] = b.Size
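+	// Manifest tokens are separated by spaces; every token that
+	// parses as a block locator ("hash+size[+hints]") is counted,
+	// keyed by digest so duplicate blocks collapse into one entry.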
+ for _, token := range bytes.Split([]byte(collection.ManifestText), []byte{' '}) {
+ if blockdigest.IsBlockLocator(string(token)) {
+ loc, _ := blockdigest.ParseBlockLocator(string(token))
+ blocks[loc.Digest.String()] = loc.Size
+ }
}
return
}
diff --git a/lib/diagnostics/cmd.go b/lib/diagnostics/cmd.go
index 0fd3b3eca2..e004ecb9af 100644
--- a/lib/diagnostics/cmd.go
+++ b/lib/diagnostics/cmd.go
@@ -253,25 +253,28 @@ func (diag *diagnoser) runtests() {
// TODO: detect routing errors here, like finding wb2 at the
// wb1 address.
- for i, svc := range []*arvados.Service{
- &cluster.Services.Keepproxy,
- &cluster.Services.WebDAV,
- &cluster.Services.WebDAVDownload,
- &cluster.Services.Websocket,
- &cluster.Services.Workbench1,
- &cluster.Services.Workbench2,
+ for i, svc := range []struct {
+ name string
+ config *arvados.Service
+ }{
+ {"Keepproxy", &cluster.Services.Keepproxy},
+ {"WebDAV", &cluster.Services.WebDAV},
+ {"WebDAVDownload", &cluster.Services.WebDAVDownload},
+ {"Websocket", &cluster.Services.Websocket},
+ {"Workbench1", &cluster.Services.Workbench1},
+ {"Workbench2", &cluster.Services.Workbench2},
} {
- diag.dotest(40+i, fmt.Sprintf("connecting to service endpoint %s", svc.ExternalURL), func() error {
+ u := url.URL(svc.config.ExternalURL)
+ diag.dotest(40+i, fmt.Sprintf("connecting to %s endpoint %s", svc.name, u.String()), func() error {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
defer cancel()
- u := svc.ExternalURL
if strings.HasPrefix(u.Scheme, "ws") {
// We can do a real websocket test elsewhere,
// but for now we'll just check the https
// connection.
u.Scheme = "http" + u.Scheme[2:]
}
- if svc == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, "*") {
+ if svc.config == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, "*") {
u.Host = "d41d8cd98f00b204e9800998ecf8427e-0" + u.Host[1:]
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
@@ -651,11 +654,15 @@ func (diag *diagnoser) runtests() {
diag.dotest(150, "connecting to webshell service", func() error {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
defer cancel()
+ u := cluster.Services.WebShell.ExternalURL
+ if u == (arvados.URL{}) {
+ diag.infof("skipping, webshell not configured")
+ return nil
+ }
if vm.UUID == "" {
diag.warnf("skipping, no vm available")
return nil
}
- u := cluster.Services.WebShell.ExternalURL
webshellurl := u.String() + vm.Hostname + "?"
if strings.HasPrefix(u.Host, "*") {
u.Host = vm.Hostname + u.Host[1:]
diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
index 8d8b7ff9af..a4843675cd 100644
--- a/lib/dispatchcloud/container/queue.go
+++ b/lib/dispatchcloud/container/queue.go
@@ -7,6 +7,7 @@ package container
import (
"errors"
"io"
+ "strings"
"sync"
"time"
@@ -250,6 +251,10 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
})
if err != nil {
logger.WithError(err).Warn("error getting mounts")
+ if strings.Contains(err.Error(), "json: cannot unmarshal") {
+ // see https://dev.arvados.org/issues/21314
+ go cq.cancelUnsatisfiableContainer(ctr, "error getting mounts from container record: "+err.Error())
+ }
return
}
types, err := cq.chooseType(&ctr)
@@ -262,46 +267,8 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
// We assume here that any chooseType error is a hard
// error: it wouldn't help to try again, or to leave
// it for a different dispatcher process to attempt.
- errorString := err.Error()
logger.WithError(err).Warn("cancel container with no suitable instance type")
- go func() {
- if ctr.State == arvados.ContainerStateQueued {
- // Can't set runtime error without
- // locking first.
- err := cq.Lock(ctr.UUID)
- if err != nil {
- logger.WithError(err).Warn("lock failed")
- return
- // ...and try again on the
- // next Update, if the problem
- // still exists.
- }
- }
- var err error
- defer func() {
- if err == nil {
- return
- }
- // On failure, check current container
- // state, and don't log the error if
- // the failure came from losing a
- // race.
- var latest arvados.Container
- cq.client.RequestAndDecode(&latest, "GET", "arvados/v1/containers/"+ctr.UUID, nil, map[string][]string{"select": {"state"}})
- if latest.State == arvados.ContainerStateCancelled {
- return
- }
- logger.WithError(err).Warn("error while trying to cancel unsatisfiable container")
- }()
- err = cq.setRuntimeError(ctr.UUID, errorString)
- if err != nil {
- return
- }
- err = cq.Cancel(ctr.UUID)
- if err != nil {
- return
- }
- }()
+ go cq.cancelUnsatisfiableContainer(ctr, err.Error())
return
}
typeNames := ""
@@ -320,6 +287,43 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
cq.current[uuid] = QueueEnt{Container: ctr, InstanceTypes: types, FirstSeenAt: time.Now()}
}
+func (cq *Queue) cancelUnsatisfiableContainer(ctr arvados.Container, errorString string) {
+ logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
+ if ctr.State == arvados.ContainerStateQueued {
+ // Can't set runtime error without locking first.
+ err := cq.Lock(ctr.UUID)
+ if err != nil {
+ logger.WithError(err).Warn("lock failed")
+ return
+ // ...and try again on the next Update, if the
+ // problem still exists.
+ }
+ }
+ var err error
+ defer func() {
+ if err == nil {
+ return
+ }
+ // On failure, check current container state, and
+ // don't log the error if the failure came from losing
+ // a race.
+ var latest arvados.Container
+ cq.client.RequestAndDecode(&latest, "GET", "arvados/v1/containers/"+ctr.UUID, nil, map[string][]string{"select": {"state"}})
+ if latest.State == arvados.ContainerStateCancelled {
+ return
+ }
+ logger.WithError(err).Warn("error while trying to cancel unsatisfiable container")
+ }()
+ err = cq.setRuntimeError(ctr.UUID, errorString)
+ if err != nil {
+ return
+ }
+ err = cq.Cancel(ctr.UUID)
+ if err != nil {
+ return
+ }
+}
+
// Lock acquires the dispatch lock for the given container.
func (cq *Queue) Lock(uuid string) error {
return cq.apiUpdate(uuid, "lock")
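Not part of the patch: the substring matched by the new check in addEnt is the stock prefix of encoding/json type-mismatch errors. A sketch, assuming encoding/json and the arvados.Mount type (message text approximate):

	var mounts map[string]arvados.Mount
	err := json.Unmarshal([]byte(`{"stdin":["bork"]}`), &mounts)
	fmt.Println(err) // json: cannot unmarshal array into Go value of type arvados.Mount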
diff --git a/lib/dispatchcloud/container/queue_test.go b/lib/dispatchcloud/container/queue_test.go
index 928c6dd8c8..646e3db5a8 100644
--- a/lib/dispatchcloud/container/queue_test.go
+++ b/lib/dispatchcloud/container/queue_test.go
@@ -5,12 +5,15 @@
package container
import (
+ "context"
"errors"
"os"
+ "path/filepath"
"sync"
"testing"
"time"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"github.com/sirupsen/logrus"
@@ -108,7 +111,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
wg.Wait()
}
-func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
+func (suite *IntegrationSuite) TestCancel_NoInstanceType(c *check.C) {
errorTypeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
// Make sure the relevant container fields are
// actually populated.
@@ -123,22 +126,7 @@ func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
client := arvados.NewClientFromEnv()
cq := NewQueue(logger(), nil, errorTypeChooser, client)
- ch := cq.Subscribe()
- go func() {
- defer cq.Unsubscribe(ch)
- for range ch {
- // Container should never be added to
- // queue. Note that polling the queue this way
- // doesn't guarantee a bug (container being
- // incorrectly added to the queue) will cause
- // a test failure.
- _, ok := cq.Get(arvadostest.QueuedContainerUUID)
- if !c.Check(ok, check.Equals, false) {
- // Don't spam the log with more failures
- break
- }
- }
- }()
+ go failIfContainerAppearsInQueue(c, cq, arvadostest.QueuedContainerUUID)
var ctr arvados.Container
err := client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, nil)
@@ -156,6 +144,60 @@ func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
c.Check(ctr.RuntimeStatus["error"], check.Equals, `no suitable instance type`)
}
+func (suite *IntegrationSuite) TestCancel_InvalidMountsField(c *check.C) {
+ cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+ c.Assert(err, check.IsNil)
+ cc, err := cfg.GetCluster("zzzzz")
+ c.Assert(err, check.IsNil)
+ db, err := (&ctrlctx.DBConnector{PostgreSQL: cc.PostgreSQL}).GetDB(context.Background())
+ c.Assert(err, check.IsNil)
+ _, err = db.Exec(`update containers set mounts=$1 where uuid=$2`, `{"stdin":["bork"]}`, arvadostest.QueuedContainerUUID)
+ c.Assert(err, check.IsNil)
+ // Note this setup gets cleaned up by the database reset in
+ // TearDownTest.
+
+ typeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
+ return []arvados.InstanceType{}, nil
+ }
+ client := arvados.NewClientFromEnv()
+ cq := NewQueue(logger(), nil, typeChooser, client)
+
+ go failIfContainerAppearsInQueue(c, cq, arvadostest.QueuedContainerUUID)
+
+ var ctr arvados.Container
+ err = client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, arvados.GetOptions{Select: []string{"state"}})
+ c.Check(err, check.IsNil)
+ c.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)
+
+ go cq.Update()
+
+ // Wait for the cancel operation to take effect. Container
+ // will have state=Cancelled or just disappear from the queue.
+ suite.waitfor(c, time.Second, func() bool {
+ err := client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, arvados.GetOptions{Select: []string{"state", "runtime_status"}})
+ return err == nil && ctr.State == arvados.ContainerStateCancelled
+ })
+ c.Logf("runtime_status: %v", ctr.RuntimeStatus)
+ c.Check(ctr.RuntimeStatus["error"], check.Matches, `error getting mounts from container record: json: cannot unmarshal .*`)
+}
+
+func failIfContainerAppearsInQueue(c *check.C, cq *Queue, uuid string) {
+ ch := cq.Subscribe()
+ defer cq.Unsubscribe(ch)
+ for range ch {
+ // Container should never be added to
+ // queue. Note that polling the queue this way
+ // doesn't guarantee a bug (container being
+ // incorrectly added to the queue) will cause
+ // a test failure.
+ _, ok := cq.Get(uuid)
+ if !c.Check(ok, check.Equals, false) {
+ // Don't spam the log with more failures
+ break
+ }
+ }
+}
+
func (suite *IntegrationSuite) waitfor(c *check.C, timeout time.Duration, fn func() bool) {
defer func() {
c.Check(fn(), check.Equals, true)
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
index d651e73a67..542c2789bd 100644
--- a/lib/dispatchcloud/dispatcher_test.go
+++ b/lib/dispatchcloud/dispatcher_test.go
@@ -434,24 +434,24 @@ func (s *DispatcherSuite) TestManagementAPI_Containers(c *check.C) {
expect := `
0 zzzzz-dz642-000000000000000 (Running) ""
1 zzzzz-dz642-000000000000001 (Running) ""
- 2 zzzzz-dz642-000000000000002 (Locked) "waiting for suitable instance type to become available: queue position 1"
- 3 zzzzz-dz642-000000000000003 (Locked) "waiting for suitable instance type to become available: queue position 2"
- 4 zzzzz-dz642-000000000000004 (Queued) "waiting while cluster is running at capacity: queue position 3"
- 5 zzzzz-dz642-000000000000005 (Queued) "waiting while cluster is running at capacity: queue position 4"
- 6 zzzzz-dz642-000000000000006 (Queued) "waiting while cluster is running at capacity: queue position 5"
- 7 zzzzz-dz642-000000000000007 (Queued) "waiting while cluster is running at capacity: queue position 6"
- 8 zzzzz-dz642-000000000000008 (Queued) "waiting while cluster is running at capacity: queue position 7"
- 9 zzzzz-dz642-000000000000009 (Queued) "waiting while cluster is running at capacity: queue position 8"
- 10 zzzzz-dz642-000000000000010 (Queued) "waiting while cluster is running at capacity: queue position 9"
- 11 zzzzz-dz642-000000000000011 (Queued) "waiting while cluster is running at capacity: queue position 10"
- 12 zzzzz-dz642-000000000000012 (Queued) "waiting while cluster is running at capacity: queue position 11"
- 13 zzzzz-dz642-000000000000013 (Queued) "waiting while cluster is running at capacity: queue position 12"
- 14 zzzzz-dz642-000000000000014 (Queued) "waiting while cluster is running at capacity: queue position 13"
- 15 zzzzz-dz642-000000000000015 (Queued) "waiting while cluster is running at capacity: queue position 14"
- 16 zzzzz-dz642-000000000000016 (Queued) "waiting while cluster is running at capacity: queue position 15"
- 17 zzzzz-dz642-000000000000017 (Queued) "waiting while cluster is running at capacity: queue position 16"
- 18 zzzzz-dz642-000000000000018 (Queued) "waiting while cluster is running at capacity: queue position 17"
- 19 zzzzz-dz642-000000000000019 (Queued) "waiting while cluster is running at capacity: queue position 18"
+ 2 zzzzz-dz642-000000000000002 (Locked) "Waiting in queue at position 1. Cluster is at capacity for all eligible instance types (type4, type6) and cannot start a new instance right now."
+ 3 zzzzz-dz642-000000000000003 (Locked) "Waiting in queue at position 2. Cluster is at capacity for all eligible instance types (type4, type6) and cannot start a new instance right now."
+ 4 zzzzz-dz642-000000000000004 (Queued) "Waiting in queue at position 3. Cluster is at capacity and cannot start any new instances right now."
+ 5 zzzzz-dz642-000000000000005 (Queued) "Waiting in queue at position 4. Cluster is at capacity and cannot start any new instances right now."
+ 6 zzzzz-dz642-000000000000006 (Queued) "Waiting in queue at position 5. Cluster is at capacity and cannot start any new instances right now."
+ 7 zzzzz-dz642-000000000000007 (Queued) "Waiting in queue at position 6. Cluster is at capacity and cannot start any new instances right now."
+ 8 zzzzz-dz642-000000000000008 (Queued) "Waiting in queue at position 7. Cluster is at capacity and cannot start any new instances right now."
+ 9 zzzzz-dz642-000000000000009 (Queued) "Waiting in queue at position 8. Cluster is at capacity and cannot start any new instances right now."
+ 10 zzzzz-dz642-000000000000010 (Queued) "Waiting in queue at position 9. Cluster is at capacity and cannot start any new instances right now."
+ 11 zzzzz-dz642-000000000000011 (Queued) "Waiting in queue at position 10. Cluster is at capacity and cannot start any new instances right now."
+ 12 zzzzz-dz642-000000000000012 (Queued) "Waiting in queue at position 11. Cluster is at capacity and cannot start any new instances right now."
+ 13 zzzzz-dz642-000000000000013 (Queued) "Waiting in queue at position 12. Cluster is at capacity and cannot start any new instances right now."
+ 14 zzzzz-dz642-000000000000014 (Queued) "Waiting in queue at position 13. Cluster is at capacity and cannot start any new instances right now."
+ 15 zzzzz-dz642-000000000000015 (Queued) "Waiting in queue at position 14. Cluster is at capacity and cannot start any new instances right now."
+ 16 zzzzz-dz642-000000000000016 (Queued) "Waiting in queue at position 15. Cluster is at capacity and cannot start any new instances right now."
+ 17 zzzzz-dz642-000000000000017 (Queued) "Waiting in queue at position 16. Cluster is at capacity and cannot start any new instances right now."
+ 18 zzzzz-dz642-000000000000018 (Queued) "Waiting in queue at position 17. Cluster is at capacity and cannot start any new instances right now."
+ 19 zzzzz-dz642-000000000000019 (Queued) "Waiting in queue at position 18. Cluster is at capacity and cannot start any new instances right now."
`
sequence := make(map[string][]string)
var summary string
diff --git a/lib/dispatchcloud/node_size.go b/lib/dispatchcloud/node_size.go
index 802bc65c28..f794bdd934 100644
--- a/lib/dispatchcloud/node_size.go
+++ b/lib/dispatchcloud/node_size.go
@@ -6,8 +6,10 @@ package dispatchcloud
import (
"errors"
+ "fmt"
"math"
"regexp"
+ "slices"
"sort"
"strconv"
@@ -130,8 +132,38 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) ([]arvados.
var types []arvados.InstanceType
var maxPrice float64
for _, it := range cc.InstanceTypes {
- driverInsuff, driverErr := versionLess(it.CUDA.DriverVersion, ctr.RuntimeConstraints.CUDA.DriverVersion)
- capabilityInsuff, capabilityErr := versionLess(it.CUDA.HardwareCapability, ctr.RuntimeConstraints.CUDA.HardwareCapability)
+ driverInsuff, driverErr := versionLess(it.GPU.DriverVersion, ctr.RuntimeConstraints.GPU.DriverVersion)
+
+ var capabilityInsuff bool
+ var capabilityErr error
+ if ctr.RuntimeConstraints.GPU.Stack == "" {
+		// No GPU requested: nothing to check.
+ } else if ctr.RuntimeConstraints.GPU.Stack == "cuda" {
+ if len(ctr.RuntimeConstraints.GPU.HardwareTarget) > 1 {
+			// Check whether the node's capability
+			// exactly matches any of the
+			// requested capabilities. For CUDA,
+			// each is a hardware capability in
+			// X.Y format.
+ capabilityInsuff = !slices.Contains(ctr.RuntimeConstraints.GPU.HardwareTarget, it.GPU.HardwareTarget)
+ } else if len(ctr.RuntimeConstraints.GPU.HardwareTarget) == 1 {
+			// A single requested capability is
+			// treated as a minimum: do a version
+			// compare.
+ capabilityInsuff, capabilityErr = versionLess(it.GPU.HardwareTarget, ctr.RuntimeConstraints.GPU.HardwareTarget[0])
+ } else {
+ capabilityInsuff = true
+ }
+ } else if ctr.RuntimeConstraints.GPU.Stack == "rocm" {
+ // Check if the node's hardware matches any of
+ // the requested hardware. For rocm, this is
+ // a gfxXXXX LLVM target.
+ capabilityInsuff = !slices.Contains(ctr.RuntimeConstraints.GPU.HardwareTarget, it.GPU.HardwareTarget)
+ } else {
+		// Not blank, "cuda", or "rocm", so that's an error.
+ return nil, ConstraintsNotSatisfiableError{
+			fmt.Errorf("Invalid GPU stack %q, expected to be blank or one of 'cuda' or 'rocm'", ctr.RuntimeConstraints.GPU.Stack),
+ []arvados.InstanceType{},
+ }
+ }
switch {
// reasons to reject a node
@@ -140,9 +172,11 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) ([]arvados.
case int64(it.RAM) < needRAM: // insufficient RAM
case it.VCPUs < needVCPUs: // insufficient VCPUs
case it.Preemptible != ctr.SchedulingParameters.Preemptible: // wrong preemptable setting
- case it.CUDA.DeviceCount < ctr.RuntimeConstraints.CUDA.DeviceCount: // insufficient CUDA devices
- case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (driverInsuff || driverErr != nil): // insufficient driver version
- case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (capabilityInsuff || capabilityErr != nil): // insufficient hardware capability
+ case it.GPU.Stack != ctr.RuntimeConstraints.GPU.Stack: // incompatible GPU software stack (or none available)
+ case it.GPU.DeviceCount < ctr.RuntimeConstraints.GPU.DeviceCount: // insufficient GPU devices
+ case it.GPU.VRAM > 0 && int64(it.GPU.VRAM) < ctr.RuntimeConstraints.GPU.VRAM: // insufficient VRAM per GPU
+ case ctr.RuntimeConstraints.GPU.DeviceCount > 0 && (driverInsuff || driverErr != nil): // insufficient driver version
+ case ctr.RuntimeConstraints.GPU.DeviceCount > 0 && (capabilityInsuff || capabilityErr != nil): // insufficient hardware capability
// Don't select this node
default:
// Didn't reject the node, so select it
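Not part of the patch: a sketch of the three matching modes added above, with illustrative values (slices is imported by this file):

	node := arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1,
		DriverVersion: "11.4", HardwareTarget: "9.0"}

	// Multiple requested CUDA targets: exact match required.
	fmt.Println(slices.Contains([]string{"8.0", "9.0"}, node.HardwareTarget)) // true

	// A single CUDA target is a minimum version (versionLess compare),
	// so a container asking for "8.6" accepts this "9.0" node.
	//
	// ROCm targets (e.g. gfx1100) are always matched exactly.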
diff --git a/lib/dispatchcloud/node_size_test.go b/lib/dispatchcloud/node_size_test.go
index 5d2713e982..813e54f988 100644
--- a/lib/dispatchcloud/node_size_test.go
+++ b/lib/dispatchcloud/node_size_test.go
@@ -217,65 +217,141 @@ func (*NodeSizeSuite) TestScratchForDockerImage(c *check.C) {
func (*NodeSizeSuite) TestChooseGPU(c *check.C) {
menu := map[string]arvados.InstanceType{
- "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly", CUDA: arvados.CUDAFeatures{DeviceCount: 2, HardwareCapability: "9.0", DriverVersion: "11.0"}},
- "low_capability": {Price: 2.1, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "low_capability", CUDA: arvados.CUDAFeatures{DeviceCount: 1, HardwareCapability: "8.0", DriverVersion: "11.0"}},
- "best": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best", CUDA: arvados.CUDAFeatures{DeviceCount: 1, HardwareCapability: "9.0", DriverVersion: "11.0"}},
- "low_driver": {Price: 2.1, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "low_driver", CUDA: arvados.CUDAFeatures{DeviceCount: 1, HardwareCapability: "9.0", DriverVersion: "10.0"}},
- "cheap_gpu": {Price: 2.0, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "cheap_gpu", CUDA: arvados.CUDAFeatures{DeviceCount: 1, HardwareCapability: "8.0", DriverVersion: "10.0"}},
- "invalid_gpu": {Price: 1.9, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "invalid_gpu", CUDA: arvados.CUDAFeatures{DeviceCount: 1, HardwareCapability: "12.0.12", DriverVersion: "12.0.12"}},
- "non_gpu": {Price: 1.1, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "non_gpu"},
+ "costly": {Price: 4.4, RAM: 4 * GiB, VCPUs: 8, Scratch: 2 * GiB, Name: "costly",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 2, HardwareTarget: "9.0", DriverVersion: "11.0", VRAM: 2 * GiB}},
+
+ "low_capability": {Price: 2.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "low_capability",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "8.0", DriverVersion: "11.0", VRAM: 2 * GiB}},
+
+ "best": {Price: 2.2, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "best",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "9.0", DriverVersion: "11.0", VRAM: 2 * GiB}},
+
+ "low_driver": {Price: 2.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "low_driver",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "9.0", DriverVersion: "10.0", VRAM: 2 * GiB}},
+
+ "cheap_gpu": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "cheap_gpu",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "8.0", DriverVersion: "10.0", VRAM: 2 * GiB}},
+
+ "more_vram": {Price: 2.3, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "more_vram",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "8.0", DriverVersion: "10.0", VRAM: 8 * GiB}},
+
+ "invalid_gpu": {Price: 1.9, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "invalid_gpu",
+ GPU: arvados.GPUFeatures{Stack: "cuda", DeviceCount: 1, HardwareTarget: "12.0.12", DriverVersion: "12.0.12", VRAM: 2 * GiB}},
+
+ "gpu_rocm": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "gpu_rocm",
+ GPU: arvados.GPUFeatures{Stack: "rocm", DeviceCount: 1, HardwareTarget: "gfx1100", DriverVersion: "6.2", VRAM: 20 * GiB}},
+
+ "cheap_gpu_rocm": {Price: 1.9, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "cheap_gpu_rocm",
+ GPU: arvados.GPUFeatures{Stack: "rocm", DeviceCount: 1, HardwareTarget: "gfx1103", DriverVersion: "6.2", VRAM: 8 * GiB}},
+
+ "unspecified_vram": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "unspecified_vram",
+ GPU: arvados.GPUFeatures{Stack: "rocm", DeviceCount: 1, HardwareTarget: "gfx1104", DriverVersion: "6.2", VRAM: 0}},
+
+ "non_gpu": {Price: 1.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: "non_gpu"},
}
type GPUTestCase struct {
- CUDA arvados.CUDARuntimeConstraints
+ GPU arvados.GPURuntimeConstraints
SelectedInstance string
}
cases := []GPUTestCase{
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 1,
- HardwareCapability: "9.0",
- DriverVersion: "11.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ HardwareTarget: []string{"9.0"},
+ DriverVersion: "11.0",
+ VRAM: 2000000000,
},
SelectedInstance: "best",
},
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 2,
- HardwareCapability: "9.0",
- DriverVersion: "11.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 2,
+ HardwareTarget: []string{"9.0"},
+ DriverVersion: "11.0",
+ VRAM: 2000000000,
},
SelectedInstance: "costly",
},
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 1,
- HardwareCapability: "8.0",
- DriverVersion: "11.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ HardwareTarget: []string{"8.0"},
+ DriverVersion: "11.0",
+ VRAM: 2000000000,
},
SelectedInstance: "low_capability",
},
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 1,
- HardwareCapability: "9.0",
- DriverVersion: "10.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ HardwareTarget: []string{"9.0"},
+ DriverVersion: "10.0",
+ VRAM: 2000000000,
},
SelectedInstance: "low_driver",
},
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 1,
- HardwareCapability: "",
- DriverVersion: "10.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ HardwareTarget: []string{"8.0"},
+ DriverVersion: "11.0",
+ VRAM: 8000000000,
+ },
+ SelectedInstance: "more_vram",
+ },
+ GPUTestCase{
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ HardwareTarget: []string{},
+ DriverVersion: "10.0",
+ VRAM: 2000000000,
},
SelectedInstance: "",
},
GPUTestCase{
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 0,
- HardwareCapability: "9.0",
- DriverVersion: "11.0",
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "rocm",
+ DeviceCount: 1,
+ HardwareTarget: []string{"gfx1100"},
+ DriverVersion: "6.2",
+ VRAM: 2000000000,
+ },
+ SelectedInstance: "gpu_rocm",
+ },
+ GPUTestCase{
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "rocm",
+ DeviceCount: 1,
+ HardwareTarget: []string{"gfx1100", "gfx1103"},
+ DriverVersion: "6.2",
+ VRAM: 2000000000,
+ },
+ SelectedInstance: "cheap_gpu_rocm",
+ },
+ GPUTestCase{
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "rocm",
+ DeviceCount: 1,
+ HardwareTarget: []string{"gfx1104"},
+ DriverVersion: "6.2",
+ VRAM: 2000000000,
+ },
+ SelectedInstance: "unspecified_vram",
+ },
+ GPUTestCase{
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "",
+ DeviceCount: 0,
+ HardwareTarget: []string{""},
+ DriverVersion: "",
+ VRAM: 0,
},
SelectedInstance: "non_gpu",
},
@@ -290,7 +366,7 @@ func (*NodeSizeSuite) TestChooseGPU(c *check.C) {
VCPUs: 2,
RAM: 987654321,
KeepCacheRAM: 123456789,
- CUDA: tc.CUDA,
+ GPU: tc.GPU,
},
})
if len(best) > 0 {
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index d270972295..4561a29975 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -7,6 +7,7 @@ package scheduler
import (
"fmt"
"sort"
+ "strings"
"time"
"git.arvados.org/arvados.git/lib/dispatchcloud/container"
@@ -25,14 +26,14 @@ type QueueEnt struct {
}
const (
- schedStatusPreparingRuntimeEnvironment = "preparing runtime environment"
- schedStatusPriorityZero = "not scheduling: priority 0" // ", state X" appended at runtime
- schedStatusContainerLimitReached = "not starting: supervisor container limit has been reached"
- schedStatusWaitingForPreviousAttempt = "waiting for previous attempt to exit"
- schedStatusWaitingNewInstance = "waiting for new instance to be ready"
- schedStatusWaitingInstanceType = "waiting for suitable instance type to become available" // ": queue position X" appended at runtime
- schedStatusWaitingCloudResources = "waiting for cloud resources"
- schedStatusWaitingClusterCapacity = "waiting while cluster is running at capacity" // ": queue position X" appended at runtime
+ schedStatusPreparingRuntimeEnvironment = "Container is allocated to an instance and preparing to run."
+ schedStatusPriorityZero = "This container will not be scheduled to run because its priority is 0 and state is %v."
+ schedStatusSupervisorLimitReached = "Waiting in workflow queue at position %v. Cluster is at capacity and cannot start any new workflows right now."
+ schedStatusWaitingForPreviousAttempt = "Waiting for previous container attempt to exit."
+ schedStatusWaitingNewInstance = "Waiting for a %v instance to boot and be ready to accept work."
+ schedStatusWaitingInstanceType = "Waiting in queue at position %v. Cluster is at capacity for all eligible instance types (%v) and cannot start a new instance right now."
+ schedStatusWaitingCloudResources = "Waiting in queue at position %v. Cluster is at cloud account limits and cannot start any new instances right now."
+ schedStatusWaitingClusterCapacity = "Waiting in queue at position %v. Cluster is at capacity and cannot start any new instances right now."
)
// Queue returns the sorted queue from the last scheduling iteration.
@@ -204,12 +205,12 @@ tryrun:
continue
}
if ctr.Priority < 1 {
- sorted[i].SchedulingStatus = schedStatusPriorityZero + ", state " + string(ctr.State)
+ sorted[i].SchedulingStatus = fmt.Sprintf(schedStatusPriorityZero, string(ctr.State))
continue
}
if ctr.SchedulingParameters.Supervisor && maxSupervisors > 0 && supervisors > maxSupervisors {
overmaxsuper = append(overmaxsuper, sorted[i])
- sorted[i].SchedulingStatus = schedStatusContainerLimitReached
+ sorted[i].SchedulingStatus = fmt.Sprintf(schedStatusSupervisorLimitReached, len(overmaxsuper))
continue
}
// If we have unalloc instances of any of the eligible
@@ -287,7 +288,7 @@ tryrun:
sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
logger.Trace("StartContainer => true")
} else {
- sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
+ sorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, unallocType.Name)
logger.Trace("StartContainer => false")
containerAllocatedWorkerBootingCount += 1
dontstart[unallocType] = true
@@ -318,7 +319,11 @@ tryrun:
// runQueue(), rather than run
// container B now.
qpos++
- sorted[i].SchedulingStatus = schedStatusWaitingInstanceType + fmt.Sprintf(": queue position %d", qpos)
+ var typenames []string
+ for _, tp := range types {
+ typenames = append(typenames, tp.Name)
+ }
+ sorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingInstanceType, qpos, strings.Join(typenames, ", "))
logger.Trace("all eligible types at capacity")
continue
}
@@ -333,7 +338,7 @@ tryrun:
// asynchronously and does its own logging
// about the eventual outcome, so we don't
// need to.)
- sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
+ sorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, availableType.Name)
logger.Info("creating new instance")
// Don't bother trying to start the container
// yet -- obviously the instance will take
@@ -355,7 +360,7 @@ tryrun:
for i, ent := range sorted {
if ent.SchedulingStatus == "" && (ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {
qpos++
- sorted[i].SchedulingStatus = fmt.Sprintf("%s: queue position %d", qreason, qpos)
+ sorted[i].SchedulingStatus = fmt.Sprintf(qreason, qpos)
}
}
sch.lastQueue.Store(sorted)
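Not part of the patch: the scheduling-status constants are now fmt.Sprintf templates instead of fixed strings with suffixes appended at the call site, e.g.:

	msg := fmt.Sprintf(schedStatusWaitingInstanceType, 1, "type4, type6")
	// msg == "Waiting in queue at position 1. Cluster is at capacity
	// for all eligible instance types (type4, type6) and cannot start
	// a new instance right now."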
diff --git a/lib/dispatchcloud/test/stub_driver.go b/lib/dispatchcloud/test/stub_driver.go
index 2265be6e16..63097a6447 100644
--- a/lib/dispatchcloud/test/stub_driver.go
+++ b/lib/dispatchcloud/test/stub_driver.go
@@ -178,6 +178,13 @@ func (sis *StubInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, err
return r, nil
}
+// InstanceQuotaGroup returns the first character of the given
+// instance's ProviderType. Use ProviderTypes like "a1", "a2", "b1",
+// "b2" to test instance quota group behaviors.
+func (sis *StubInstanceSet) InstanceQuotaGroup(it arvados.InstanceType) cloud.InstanceQuotaGroup {
+ return cloud.InstanceQuotaGroup(it.ProviderType[:1])
+}
+
func (sis *StubInstanceSet) Stop() {
sis.mtx.Lock()
defer sis.mtx.Unlock()
@@ -201,11 +208,19 @@ type RateLimitError struct{ Retry time.Time }
func (e RateLimitError) Error() string { return fmt.Sprintf("rate limited until %s", e.Retry) }
func (e RateLimitError) EarliestRetry() time.Time { return e.Retry }
-type CapacityError struct{ InstanceTypeSpecific bool }
+var _ = cloud.RateLimitError(RateLimitError{}) // assert the interface is satisfied
+
+type CapacityError struct {
+ InstanceTypeSpecific bool
+ InstanceQuotaGroupSpecific bool
+}
+
+func (e CapacityError) Error() string { return "insufficient capacity" }
+func (e CapacityError) IsCapacityError() bool { return true }
+func (e CapacityError) IsInstanceTypeSpecific() bool { return e.InstanceTypeSpecific }
+func (e CapacityError) IsInstanceQuotaGroupSpecific() bool { return e.InstanceQuotaGroupSpecific }
-func (e CapacityError) Error() string { return "insufficient capacity" }
-func (e CapacityError) IsCapacityError() bool { return true }
-func (e CapacityError) IsInstanceTypeSpecific() bool { return e.InstanceTypeSpecific }
+var _ = cloud.CapacityError(CapacityError{}) // assert the interface is satisfied
// StubVM is a fake server that runs an SSH service. It represents a
// VM running in a fake cloud.
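Not part of the patch: hypothetical usage of the stub convention above. ProviderTypes "a1" and "a2" share quota group "a", while "b3" is in group "b":

	sis := &StubInstanceSet{} // hypothetical; tests get one from StubDriver
	a1 := sis.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "a1"})
	a2 := sis.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "a2"})
	b3 := sis.InstanceQuotaGroup(arvados.InstanceType{ProviderType: "b3"})
	fmt.Println(a1 == a2, a1 == b3) // true false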
diff --git a/lib/dispatchcloud/worker/pool.go b/lib/dispatchcloud/worker/pool.go
index 13c369d0c6..bd80ac3c57 100644
--- a/lib/dispatchcloud/worker/pool.go
+++ b/lib/dispatchcloud/worker/pool.go
@@ -111,6 +111,7 @@ func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *promethe
bootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
instanceInitCommand: cloud.InitCommand(cluster.Containers.CloudVMs.InstanceInitCommand),
runnerSource: cluster.Containers.CloudVMs.DeployRunnerBinary,
+ runnerDeployDirectory: cluster.Containers.CloudVMs.DeployRunnerDirectory,
imageID: cloud.ImageID(cluster.Containers.CloudVMs.ImageID),
instanceTypes: cluster.InstanceTypes,
maxProbesPerSecond: cluster.Containers.CloudVMs.MaxProbesPerSecond,
@@ -155,6 +156,7 @@ type Pool struct {
bootProbeCommand string
instanceInitCommand cloud.InitCommand
runnerSource string
+ runnerDeployDirectory string
imageID cloud.ImageID
instanceTypes map[string]arvados.InstanceType
syncInterval time.Duration
@@ -184,7 +186,7 @@ type Pool struct {
atQuotaUntilFewerInstances int
atQuotaUntil time.Time
atQuotaErr cloud.QuotaError
- atCapacityUntil map[string]time.Time
+ atCapacityUntil map[interface{}]time.Time
stop chan bool
mtx sync.RWMutex
setupOnce sync.Once
@@ -383,14 +385,16 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
}
}
if err, ok := err.(cloud.CapacityError); ok && err.IsCapacityError() {
- capKey := it.ProviderType
- if !err.IsInstanceTypeSpecific() {
- // set capacity flag for all
- // instance types
+			var capKey interface{}
+ if err.IsInstanceTypeSpecific() {
+ capKey = it.ProviderType
+ } else if err.IsInstanceQuotaGroupSpecific() {
+ capKey = wp.instanceSet.InstanceQuotaGroup(it)
+ } else {
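+				// Neither type- nor group-specific:
+				// set capacity flag for all instance
+				// types.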
capKey = ""
}
if wp.atCapacityUntil == nil {
- wp.atCapacityUntil = map[string]time.Time{}
+ wp.atCapacityUntil = map[interface{}]time.Time{}
}
wp.atCapacityUntil[capKey] = time.Now().Add(capacityErrorTTL)
time.AfterFunc(capacityErrorTTL, wp.notify)
@@ -412,13 +416,14 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
func (wp *Pool) AtCapacity(it arvados.InstanceType) bool {
wp.mtx.Lock()
defer wp.mtx.Unlock()
- if t, ok := wp.atCapacityUntil[it.ProviderType]; ok && time.Now().Before(t) {
- // at capacity for this instance type
- return true
- }
- if t, ok := wp.atCapacityUntil[""]; ok && time.Now().Before(t) {
- // at capacity for all instance types
- return true
+ for _, capKey := range []interface{}{
+ "", // all instance types
+ wp.instanceSet.InstanceQuotaGroup(it), // instance quota group
+ it.ProviderType, // just this instance type
+ } {
+ if t, ok := wp.atCapacityUntil[capKey]; ok && time.Now().Before(t) {
+ return true
+ }
}
return false
}
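Not part of the patch: a sketch of the three key shapes now stored in wp.atCapacityUntil, which AtCapacity above checks broadest-first:

	// ""                             : whole provider at capacity
	// cloud.InstanceQuotaGroup("a")  : one quota group (e.g. instance family)
	// "a1.small"                     : one specific ProviderType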
@@ -1004,7 +1009,7 @@ func (wp *Pool) loadRunnerData() error {
}
wp.runnerData = buf
wp.runnerMD5 = md5.Sum(buf)
- wp.runnerCmd = fmt.Sprintf("/tmp/arvados-crunch-run/crunch-run~%x", wp.runnerMD5)
+ wp.runnerCmd = fmt.Sprintf("%s/crunch-run~%x", wp.runnerDeployDirectory, wp.runnerMD5)
return nil
}
diff --git a/lib/dispatchcloud/worker/pool_test.go b/lib/dispatchcloud/worker/pool_test.go
index 8d2ba09ebe..b1f0c59b2c 100644
--- a/lib/dispatchcloud/worker/pool_test.go
+++ b/lib/dispatchcloud/worker/pool_test.go
@@ -80,6 +80,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
instanceSetID := cloud.InstanceSetID("test-instance-set-id")
is, err := driver.InstanceSet(nil, instanceSetID, nil, suite.logger, nil)
c.Assert(err, check.IsNil)
+ defer is.Stop()
newExecutor := func(cloud.Instance) Executor {
return &stubExecutor{
@@ -159,6 +160,7 @@ func (suite *PoolSuite) TestDrain(c *check.C) {
driver := test.StubDriver{}
instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
+ defer instanceSet.Stop()
ac := arvados.NewClientFromEnv()
@@ -212,6 +214,7 @@ func (suite *PoolSuite) TestNodeCreateThrottle(c *check.C) {
driver := test.StubDriver{HoldCloudOps: true}
instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
+ defer instanceSet.Stop()
type1 := test.InstanceType(1)
pool := &Pool{
@@ -252,6 +255,7 @@ func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
driver := test.StubDriver{HoldCloudOps: true}
instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
+ defer instanceSet.Stop()
type1 := arvados.InstanceType{Name: "a1s", ProviderType: "a1.small", VCPUs: 1, RAM: 1 * GiB, Price: .01}
type2 := arvados.InstanceType{Name: "a2m", ProviderType: "a2.medium", VCPUs: 2, RAM: 2 * GiB, Price: .02}
@@ -377,6 +381,67 @@ func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
})
}
+func (suite *PoolSuite) TestInstanceQuotaGroup(c *check.C) {
+ driver := test.StubDriver{}
+ instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
+ c.Assert(err, check.IsNil)
+ defer instanceSet.Stop()
+
+ // Note the stub driver uses the first character of
+ // ProviderType as the instance family, so we have two
+ // instance families here, "a" and "b".
+ typeA1 := test.InstanceType(1)
+ typeA1.ProviderType = "a1"
+ typeA2 := test.InstanceType(2)
+ typeA2.ProviderType = "a2"
+ typeB3 := test.InstanceType(3)
+ typeB3.ProviderType = "b3"
+ typeB4 := test.InstanceType(4)
+ typeB4.ProviderType = "b4"
+
+ pool := &Pool{
+ logger: suite.logger,
+ newExecutor: func(cloud.Instance) Executor { return &stubExecutor{} },
+ cluster: suite.testCluster,
+ instanceSet: &throttledInstanceSet{InstanceSet: instanceSet},
+ instanceTypes: arvados.InstanceTypeMap{
+ typeA1.Name: typeA1,
+ typeA2.Name: typeA2,
+ typeB3.Name: typeB3,
+ typeB4.Name: typeB4,
+ },
+ }
+
+ // Arrange for the driver to fail when the pool calls
+ // instanceSet.Create().
+ driver.SetupVM = func(*test.StubVM) error { return test.CapacityError{InstanceQuotaGroupSpecific: true} }
+ // pool.Create() returns true when it starts a goroutine to
+ // call instanceSet.Create() in the background.
+ c.Check(pool.Create(typeA1), check.Equals, true)
+ // Wait for the pool to start reporting that the provider is
+ // at capacity for instance type A1.
+ for deadline := time.Now().Add(time.Second); !pool.AtCapacity(typeA1); time.Sleep(time.Millisecond) {
+ if time.Now().After(deadline) {
+ c.Fatal("timed out waiting for pool to report quota")
+ }
+ }
+
+ // The pool should now report AtCapacity for the affected
+ // instance family (A1, A2) and refuse to call
+ // instanceSet.Create() for those types -- but other types
+ // (B3, B4) are still usable.
+ driver.SetupVM = func(*test.StubVM) error { return nil }
+ c.Check(pool.AtCapacity(typeA1), check.Equals, true)
+ c.Check(pool.AtCapacity(typeA2), check.Equals, true)
+ c.Check(pool.AtCapacity(typeB3), check.Equals, false)
+ c.Check(pool.AtCapacity(typeB4), check.Equals, false)
+ c.Check(pool.Create(typeA2), check.Equals, false)
+ c.Check(pool.Create(typeB3), check.Equals, true)
+ c.Check(pool.Create(typeB4), check.Equals, true)
+ c.Check(pool.Create(typeA2), check.Equals, false)
+ c.Check(pool.Create(typeA1), check.Equals, false)
+}
+
func (suite *PoolSuite) instancesByType(pool *Pool, it arvados.InstanceType) []InstanceView {
var ivs []InstanceView
for _, iv := range pool.Instances() {
diff --git a/lib/install/arvados.service b/lib/install/arvados.service
index f536001f77..4877e00b69 100644
--- a/lib/install/arvados.service
+++ b/lib/install/arvados.service
@@ -17,7 +17,6 @@ ExecReload=/usr/bin/arvados-server config-check
ExecReload=kill -HUP $MAINPID
Restart=always
RestartSec=1
-LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
diff --git a/lib/install/deps.go b/lib/install/deps.go
index 9720a30d26..2b2b09e491 100644
--- a/lib/install/deps.go
+++ b/lib/install/deps.go
@@ -19,6 +19,7 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "slices"
"strconv"
"strings"
"syscall"
@@ -31,11 +32,11 @@ import (
var Command cmd.Handler = &installCommand{}
-const goversion = "1.20.6"
+const goversion = "1.24.1"
const (
defaultRubyVersion = "3.2.2"
- defaultBundlerVersion = "2.2.19"
+ defaultBundlerVersion = "~> 2.4.0"
defaultSingularityVersion = "3.10.4"
pjsversion = "1.9.8"
geckoversion = "0.24.0"
@@ -57,6 +58,7 @@ type installCommand struct {
SingularityVersion string
NodejsVersion string
EatMyData bool
+ UserAccount string
}
func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
@@ -84,6 +86,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
flags.StringVar(&inst.SingularityVersion, "singularity-version", defaultSingularityVersion, "Singularity `version` to install (do not override in production mode)")
flags.StringVar(&inst.NodejsVersion, "nodejs-version", defaultNodejsVersion, "Nodejs `version` to install (not applicable in production mode)")
flags.BoolVar(&inst.EatMyData, "eatmydata", false, "use eatmydata to speed up install")
+ flags.StringVar(&inst.UserAccount, "user-account", "", "Account to add to the docker group so it can run the test suite (not applicable in production mode)")
if ok, code := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
return code
@@ -123,7 +126,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
fmt.Fprintf(stderr, "invalid argument %q for -ruby-version\n", inst.RubyVersion)
return 2
}
- if ok, _ := regexp.MatchString(`^\d`, inst.BundlerVersion); !ok {
+ if ok, _ := regexp.MatchString(`^ *(|~>|[<>!=]=) *\d`, inst.BundlerVersion); !ok {
fmt.Fprintf(stderr, "invalid argument %q for -bundler-version\n", inst.BundlerVersion)
return 2
}
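Not part of the patch: the widened -bundler-version check now accepts gem requirement operators as well as bare versions, e.g.:

	re := regexp.MustCompile(`^ *(|~>|[<>!=]=) *\d`)
	for _, v := range []string{"2.2.19", "~> 2.4.0", ">= 2.4", "bogus"} {
		fmt.Println(v, re.MatchString(v)) // true, true, true, false
	}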
@@ -189,11 +192,10 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
"default-jdk-headless",
"default-jre-headless",
"gettext",
- "libattr1-dev",
+ "jq",
"libffi-dev",
"libfuse-dev",
"libgbm1", // cypress / workbench2 tests
- "libgnutls28-dev",
"libpam-dev",
"libpcre3-dev",
"libpq-dev",
@@ -203,10 +205,10 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
"libxslt1-dev",
"libyaml-dev",
"linkchecker",
+ "locales",
"lsof",
"make",
"net-tools",
- "pandoc",
"pkg-config",
"postgresql",
"postgresql-contrib",
@@ -222,6 +224,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
"r-cran-xml",
"rsync",
"sudo",
+ "unzip",
"uuid-dev",
"wget",
"xvfb",
@@ -235,10 +238,6 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
} else {
pkgs = append(pkgs, "firefox")
}
- if osv.Debian && osv.Major >= 11 {
- // not available in Debian <11
- pkgs = append(pkgs, "s3cmd")
- }
}
if dev || test {
pkgs = append(pkgs,
@@ -248,11 +247,11 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
"gnupg") // docker install recipe
}
switch {
- case osv.Debian && osv.Major >= 10,
- osv.Ubuntu && osv.Major >= 22:
+ case osv.Debian && osv.Major < 13,
+ osv.Ubuntu && osv.Major < 24:
pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev")
case osv.Debian || osv.Ubuntu:
- pkgs = append(pkgs, "g++", "libcurl3", "libcurl3-openssl-dev")
+ pkgs = append(pkgs, "g++", "libcurl4t64", "libcurl4-openssl-dev")
case osv.RedHat:
pkgs = append(pkgs, "gcc", "gcc-c++", "libcurl-devel", "postgresql-devel")
}
@@ -315,6 +314,34 @@ fi
err = fmt.Errorf("couldn't set fs.inotify.max_user_watches value. (Is this a docker container? Fix this on the docker host by adding fs.inotify.max_user_watches=524288 to /etc/sysctl.conf and running `sysctl -p`)")
return 1
}
+
+ if inst.UserAccount != "" {
+ dockergroup, err2 := user.LookupGroup("docker")
+ if err2 != nil {
+ err = fmt.Errorf("docker group lookup failed: %w", err2)
+ return 1
+ }
+ user, err2 := user.Lookup(inst.UserAccount)
+ if err2 != nil {
+ err = fmt.Errorf("user lookup failed: %q: %w", inst.UserAccount, err2)
+ return 1
+ }
+ gids, err2 := user.GroupIds()
+ if err2 != nil {
+ err = fmt.Errorf("group lookup for user %q failed: %w", inst.UserAccount, err2)
+ return 1
+ }
+ if slices.Index(gids, dockergroup.Gid) >= 0 {
+ logger.Printf("user %s (%s) is already a member of the docker group (%s)", inst.UserAccount, user.Uid, dockergroup.Gid)
+ } else {
+ logger.Printf("adding user %s (%s) to the docker group (%s)", inst.UserAccount, user.Uid, dockergroup.Gid)
+ out, err2 := exec.Command("adduser", inst.UserAccount, "docker").CombinedOutput()
+ if err2 != nil {
+ err = fmt.Errorf("error adding user %q to docker group: %w, %q", inst.UserAccount, err2, out)
+ return 1
+ }
+ }
+ }
}
os.Mkdir("/var/lib/arvados", 0755)
@@ -351,7 +378,7 @@ make install
if [[ "$rubyversion" > "3" ]]; then
/var/lib/arvados/bin/gem update --no-document --system 3.4.21
fi
-/var/lib/arvados/bin/gem install bundler:`+inst.BundlerVersion+` --no-document
+/var/lib/arvados/bin/gem install --conservative --no-document --version '`+inst.BundlerVersion+`' bundler
`, stdout, stderr)
if err != nil {
return 1
@@ -397,12 +424,11 @@ rm ${zip}
} else if dev || test {
err = inst.runBash(`
S=`+inst.SingularityVersion+`
-tmp=/var/lib/arvados/tmp/singularity
-trap "rm -r ${tmp}" ERR EXIT
-cd /var/lib/arvados/tmp
-git clone --recurse-submodules https://github.com/sylabs/singularity
-cd singularity
-git checkout v${S}
+tmp="$(mktemp --directory /var/lib/arvados/tmp/singularity-build.XXXXXX)"
+trap 'cd; rm -r "$tmp"' ERR EXIT
+cd "$tmp"
+curl -fL "https://github.com/sylabs/singularity/releases/download/v$S/singularity-ce-$S.tar.gz" |
+ tar -xz --strip-components=1
./mconfig --prefix=/var/lib/arvados
make -C ./builddir
make -C ./builddir install
@@ -772,7 +798,6 @@ rsync -a --delete-after "$tmp/build/" "$dst/"
{"lib/python/bin/arv-get"},
{"lib/python/bin/arv-keepdocker"},
{"lib/python/bin/arv-ls"},
- {"lib/python/bin/arv-migrate-docker19"},
{"lib/python/bin/arv-normalize"},
{"lib/python/bin/arv-put"},
{"lib/python/bin/arv-ws"},
@@ -901,8 +926,6 @@ func prodpkgs(osv osversion) []string {
"curl",
"fuse",
"git",
- "gitolite3",
- "graphviz",
"haveged",
"libcurl3-gnutls",
"libxslt1.1",
@@ -911,11 +934,6 @@ func prodpkgs(osv osversion) []string {
"sudo",
}
if osv.Debian || osv.Ubuntu {
- if osv.Debian && osv.Major == 8 {
- pkgs = append(pkgs, "libgnutls-deb0-28") // sdk/cwl
- } else if osv.Debian && osv.Major >= 10 || osv.Ubuntu && osv.Major >= 16 {
- pkgs = append(pkgs, "python3-distutils") // sdk/cwl
- }
return append(pkgs,
"mime-support", // keep-web
)
diff --git a/lib/install/deps_test.go b/lib/install/deps_test.go
index b9274b425c..9ab030f05a 100644
--- a/lib/install/deps_test.go
+++ b/lib/install/deps_test.go
@@ -25,13 +25,13 @@ tmp="` + tmp + `"
sourcepath="$(realpath ../..)"
(cd ${sourcepath} && go build -o ${tmp} ./cmd/arvados-server)
docker run -i --rm --workdir /arvados \
- -v ${tmp}/arvados-server:/arvados-server:ro \
- -v ${sourcepath}:/arvados:ro \
- -v /arvados/services/api/.bundle \
- -v /arvados/services/api/tmp \
+ --mount type=bind,src="${tmp}/arvados-server",dst=/arvados-server,readonly \
+ --mount type=bind,src="${sourcepath}",dst=/arvados,readonly \
+ --mount type=tmpfs,dst=/arvados/services/api/.bundle \
+ --mount type=tmpfs,dst=/arvados/services/api/tmp \
--env http_proxy \
--env https_proxy \
- debian:11 \
+ debian:bookworm \
bash -c "/arvados-server install -type test &&
git config --global --add safe.directory /arvados &&
/arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -shutdown -timeout 9m"
diff --git a/lib/install/init.go b/lib/install/init.go
index d9b74f6a06..12ffdd7af3 100644
--- a/lib/install/init.go
+++ b/lib/install/init.go
@@ -230,10 +230,6 @@ func (initcmd *initCommand) RunCommand(prog string, args []string, stdin io.Read
Keepbalance:
InternalURLs:
"http://0.0.0.0:9019/": {}
- GitHTTP:
- InternalURLs:
- "http://0.0.0.0:9005/": {}
- ExternalURL: {{printf "%q" ( print "https://" .Domain ":4445/" ) }}
DispatchCloud:
InternalURLs:
"http://0.0.0.0:9006/": {}
diff --git a/lib/lsf/dispatch.go b/lib/lsf/dispatch.go
index 897e5803f2..af5dc593b0 100644
--- a/lib/lsf/dispatch.go
+++ b/lib/lsf/dispatch.go
@@ -321,15 +321,15 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
"%M": fmt.Sprintf("%d", mem),
"%T": fmt.Sprintf("%d", tmp),
"%U": container.UUID,
- "%G": fmt.Sprintf("%d", container.RuntimeConstraints.CUDA.DeviceCount),
+ "%G": fmt.Sprintf("%d", container.RuntimeConstraints.GPU.DeviceCount),
"%W": fmt.Sprintf("%d", maxrunminutes),
}
re := regexp.MustCompile(`%.`)
var substitutionErrors string
argumentTemplate := disp.Cluster.Containers.LSF.BsubArgumentsList
- if container.RuntimeConstraints.CUDA.DeviceCount > 0 {
- argumentTemplate = append(argumentTemplate, disp.Cluster.Containers.LSF.BsubCUDAArguments...)
+ if container.RuntimeConstraints.GPU.DeviceCount > 0 {
+ argumentTemplate = append(argumentTemplate, disp.Cluster.Containers.LSF.BsubGPUArguments...)
}
for idx, a := range argumentTemplate {
if idx > 0 && (argumentTemplate[idx-1] == "-W" || argumentTemplate[idx-1] == "-We") && a == "%W" && maxrunminutes == 0 {
diff --git a/lib/lsf/dispatch_test.go b/lib/lsf/dispatch_test.go
index e1e0bcae31..547b017a70 100644
--- a/lib/lsf/dispatch_test.go
+++ b/lib/lsf/dispatch_test.go
@@ -38,10 +38,12 @@ type suite struct {
}
func (s *suite) TearDownTest(c *check.C) {
- arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil)
+ arvadostest.ResetDB(c)
}
func (s *suite) SetUpTest(c *check.C) {
+ arvadostest.ResetDB(c)
+
cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
c.Assert(err, check.IsNil)
cluster, err := cfg.GetCluster("")
@@ -55,8 +57,21 @@ func (s *suite) SetUpTest(c *check.C) {
VCPUs: 4,
IncludedScratch: 100 << 30,
Scratch: 100 << 30,
+ },
+ "biggest_available_node_with_gpu": arvados.InstanceType{
+ RAM: 100 << 30, // 100 GiB
+ VCPUs: 4,
+ IncludedScratch: 100 << 30,
+ Scratch: 100 << 30,
+ GPU: arvados.GPUFeatures{
+ Stack: "cuda",
+ DriverVersion: "11.0",
+ HardwareTarget: "8.0",
+ DeviceCount: 2,
+ VRAM: 8000000000,
+ },
}}
- s.disp = newHandler(context.Background(), cluster, arvadostest.Dispatch1Token, prometheus.NewRegistry()).(*dispatcher)
+ s.disp = newHandler(context.Background(), cluster, arvadostest.SystemRootToken, prometheus.NewRegistry()).(*dispatcher)
s.disp.lsfcli.stubCommand = func(string, ...string) *exec.Cmd {
return exec.Command("bash", "-c", "echo >&2 unimplemented stub; false")
}
@@ -98,12 +113,14 @@ func (s *suite) SetUpTest(c *check.C) {
err = arvados.NewClientFromEnv().RequestAndDecode(&s.crCUDARequest, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
"container_request": map[string]interface{}{
"runtime_constraints": arvados.RuntimeConstraints{
- RAM: 16000000,
- VCPUs: 1,
- CUDA: arvados.CUDARuntimeConstraints{
- DeviceCount: 1,
- DriverVersion: "11.0",
- HardwareCapability: "8.0",
+ RAM: 16000000000,
+ VCPUs: 4,
+ GPU: arvados.GPURuntimeConstraints{
+ Stack: "cuda",
+ DeviceCount: 1,
+ DriverVersion: "11.0",
+ HardwareTarget: []string{"8.0"},
+ VRAM: 8000000000,
},
},
"container_image": arvadostest.DockerImage112PDH,
@@ -208,12 +225,12 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
case s.crCUDARequest.ContainerUUID:
c.Check(args, check.DeepEquals, []string{
"-J", s.crCUDARequest.ContainerUUID,
- "-n", "1",
- "-D", "528MB",
- "-R", "rusage[mem=528MB:tmp=256MB] span[hosts=1]",
- "-R", "select[mem>=528MB]",
- "-R", "select[tmp>=256MB]",
- "-R", "select[ncpus>=1]",
+ "-n", "4",
+ "-D", "15515MB",
+ "-R", "rusage[mem=15515MB:tmp=15515MB] span[hosts=1]",
+ "-R", "select[mem>=15515MB]",
+ "-R", "select[tmp>=15515MB]",
+ "-R", "select[ncpus>=4]",
"-gpu", "num=1"})
mtx.Lock()
fakejobq[nextjobid] = args[1]
diff --git a/lib/mount/fs.go b/lib/mount/fs.go
index dece44d25d..9ee9663ae9 100644
--- a/lib/mount/fs.go
+++ b/lib/mount/fs.go
@@ -94,6 +94,28 @@ func (fs *keepFS) Create(path string, flags int, mode uint32) (errc int, fh uint
return 0, fs.newFH(f)
}
+func (fs *keepFS) Mknod(path string, mode uint32, dev uint64) int {
+ defer fs.debugPanics()
+ fs.debugOp("Mknod", path)
+ if filetype := mode & uint32(^os.ModePerm); filetype != 0 && filetype != uint32(fuse.S_IFREG) {
+ return -fuse.ENOSYS
+ }
+ if fs.ReadOnly {
+		if _, err := fs.root.Stat(path); err == nil {
+			return -fuse.EEXIST
+		}
+		return -fuse.EROFS
+ }
+ f, err := fs.root.OpenFile(path, os.O_CREATE|os.O_EXCL, os.FileMode(mode)&os.ModePerm)
+ if err != nil {
+ return fs.errCode("Mknod", path, err)
+ }
+ f.Close()
+ return 0
+}
+
func (fs *keepFS) Open(path string, flags int) (errc int, fh uint64) {
defer fs.debugPanics()
fs.debugOp("Open", path)
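Not part of the patch: the filetype mask in Mknod above keeps only the type bits of the mode, and a zero filetype counts as a regular file per mknod(2):

	mode := uint32(syscall.S_IFREG | 0o644)
	fmt.Println(mode&uint32(^os.ModePerm) == uint32(syscall.S_IFREG)) // true: allowed
	fmt.Println(uint32(syscall.S_IFIFO)&uint32(^os.ModePerm) != 0)    // true: FIFO, rejected with ENOSYS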
diff --git a/lib/mount/fs_test.go b/lib/mount/fs_test.go
index 442af7a998..15324aa35a 100644
--- a/lib/mount/fs_test.go
+++ b/lib/mount/fs_test.go
@@ -5,47 +5,98 @@
package mount
import (
+ "os"
+ "syscall"
"testing"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/arvados/cgofuse/fuse"
- check "gopkg.in/check.v1"
+ . "gopkg.in/check.v1"
)
// Gocheck boilerplate
func Test(t *testing.T) {
- check.TestingT(t)
+ TestingT(t)
}
-var _ = check.Suite(&FSSuite{})
+var _ = Suite(&FSSuite{})
-type FSSuite struct{}
-
-func (*FSSuite) TestFuseInterface(c *check.C) {
- var _ fuse.FileSystemInterface = &keepFS{}
+type FSSuite struct {
+ fs *keepFS
}
-func (*FSSuite) TestOpendir(c *check.C) {
+func (s *FSSuite) SetUpTest(c *C) {
client := arvados.NewClientFromEnv()
ac, err := arvadosclient.New(client)
- c.Assert(err, check.IsNil)
+ c.Assert(err, IsNil)
kc, err := keepclient.MakeKeepClient(ac)
- c.Assert(err, check.IsNil)
-
- var fs fuse.FileSystemInterface = &keepFS{
+ c.Assert(err, IsNil)
+ s.fs = &keepFS{
Client: client,
KeepClient: kc,
Logger: ctxlog.TestLogger(c),
}
- fs.Init()
- errc, fh := fs.Opendir("/by_id")
- c.Check(errc, check.Equals, 0)
- c.Check(fh, check.Not(check.Equals), uint64(0))
- c.Check(fh, check.Not(check.Equals), invalidFH)
- errc, fh = fs.Opendir("/bogus")
- c.Check(errc, check.Equals, -fuse.ENOENT)
- c.Check(fh, check.Equals, invalidFH)
+ s.fs.Init()
+}
+
+func (s *FSSuite) TestFuseInterface(c *C) {
+ var _ fuse.FileSystemInterface = s.fs
+}
+
+func (s *FSSuite) TestOpendir(c *C) {
+ errc, fh := s.fs.Opendir("/by_id")
+ c.Check(errc, Equals, 0)
+ c.Check(fh, Not(Equals), uint64(0))
+ c.Check(fh, Not(Equals), invalidFH)
+ errc, fh = s.fs.Opendir("/bogus")
+ c.Check(errc, Equals, -fuse.ENOENT)
+ c.Check(fh, Equals, invalidFH)
+}
+
+func (s *FSSuite) TestMknod_ReadOnly(c *C) {
+ s.fs.ReadOnly = true
+ path := "/by_id/" + arvadostest.FooCollection + "/z"
+ errc := s.fs.Mknod(path, syscall.S_IFREG, 0)
+ c.Check(errc, Equals, -fuse.EROFS)
+}
+
+func (s *FSSuite) TestMknod(c *C) {
+ path := "/by_id/" + arvadostest.FooCollection + "/z"
+ _, err := s.fs.root.Stat(path)
+ c.Assert(err, Equals, os.ErrNotExist)
+
+ // Should return error if mode indicates unsupported file type
+ for _, mode := range []uint32{
+ syscall.S_IFCHR,
+ syscall.S_IFBLK,
+ syscall.S_IFIFO,
+ syscall.S_IFSOCK,
+ } {
+ errc := s.fs.Mknod(path, mode, 0)
+ c.Check(errc, Equals, -fuse.ENOSYS)
+ _, err := s.fs.root.Stat(path)
+ c.Check(err, Equals, os.ErrNotExist)
+ }
+
+ // Should create file and return 0 if mode indicates regular
+ // file
+ errc := s.fs.Mknod(path, syscall.S_IFREG|0o644, 0)
+ c.Check(errc, Equals, 0)
+ _, err = s.fs.root.Stat(path)
+ c.Check(err, IsNil)
+
+ // Special case: "Zero file type is equivalent to type
+ // S_IFREG." cf. mknod(2)
+ errc = s.fs.Mknod(path+"2", 0o644, 0)
+ c.Check(errc, Equals, 0)
+ _, err = s.fs.root.Stat(path + "2")
+ c.Check(err, IsNil)
+
+ // Should return error if target exists
+ errc = s.fs.Mknod(path, syscall.S_IFREG|0o644, 0)
+ c.Check(errc, Equals, -fuse.EEXIST)
}
diff --git a/lib/pam/docker_test.go b/lib/pam/docker_test.go
index 196cb97174..6ce596df2d 100644
--- a/lib/pam/docker_test.go
+++ b/lib/pam/docker_test.go
@@ -111,10 +111,10 @@ func (s *DockerSuite) runTestClient(c *check.C, args ...string) (stdout, stderr
"run", "--rm",
"--hostname", "testvm2.shell",
"--add-host", "zzzzz.arvadosapi.com:" + s.hostip,
- "-v", s.tmpdir + "/pam_arvados.so:/usr/lib/pam_arvados.so:ro",
- "-v", s.tmpdir + "/conffile:/usr/share/pam-configs/arvados:ro",
- "-v", s.tmpdir + "/testclient:/testclient:ro",
- "debian:bullseye",
+ "--mount", "type=bind,src=" + s.tmpdir + "/pam_arvados.so,dst=/usr/lib/pam_arvados.so,readonly",
+ "--mount", "type=bind,src=" + s.tmpdir + "/conffile,dst=/usr/share/pam-configs/arvados,readonly",
+ "--mount", "type=bind,src=" + s.tmpdir + "/testclient,dst=/testclient,readonly",
+ "debian:bookworm",
"/testclient"}, args...)...)
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
diff --git a/lib/service/cmd.go b/lib/service/cmd.go
index 82e95fe0b4..0e418e3564 100644
--- a/lib/service/cmd.go
+++ b/lib/service/cmd.go
@@ -80,9 +80,9 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
loader := config.NewLoader(stdin, log)
loader.SetupFlags(flags)
- // prog is [keepstore, keep-web, git-httpd, ...] but the
+ // prog is [keepstore, keep-web, ...] but the
// legacy config flags are [-legacy-keepstore-config,
- // -legacy-keepweb-config, -legacy-git-httpd-config, ...]
+ // -legacy-keepweb-config, ...]
legacyFlag := "-legacy-" + strings.Replace(prog, "keep-", "keep", 1) + "-config"
args = loader.MungeLegacyConfigArgs(log, args, legacyFlag)
@@ -126,6 +126,24 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
})
ctx := ctxlog.Context(c.ctx, logger)
+	// Check whether the caller is attempting to use environment
+	// variables to override cluster configuration, and warn that
+	// they won't be used.
+ {
+ envhost := os.Getenv("ARVADOS_API_HOST")
+ if envhost != "" && envhost != cluster.Services.Controller.ExternalURL.Host {
+ logger.Warn("ARVADOS_API_HOST environment variable is present, but will not be used")
+ }
+ envins := os.Getenv("ARVADOS_API_HOST_INSECURE")
+ if envins != "" && (envins != "0") != cluster.TLS.Insecure {
+ logger.Warn("ARVADOS_API_HOST_INSECURE environment variable is present, but will not be used")
+ }
+ envtoken := os.Getenv("ARVADOS_API_TOKEN")
+ if envtoken != "" && envtoken != cluster.SystemRootToken {
+ logger.Warn("ARVADOS_API_TOKEN environment variable is present, but will not be used")
+ }
+ }
+
listenURL, internalURL, err := getListenAddr(cluster.Services, c.svcName, log)
if err != nil {
return 1
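Not part of the patch: the new startup check only warns; it does not change behavior. Services take their identity from the cluster config, so client-style environment variables are ignored, e.g.:

	// Warns at startup (value present and differs from config):
	//   ARVADOS_API_HOST=zzzzz.example.com arvados-server controller ...
	// Stays quiet (unset, or consistent with config):
	//   ARVADOS_API_HOST_INSECURE=1 with cluster.TLS.Insecure: true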
@@ -333,11 +351,6 @@ func (c *command) requestPriority(req *http.Request, queued time.Time) int64 {
// to send feedback to dispatchcloud ASAP to stop
// bringing up new containers.
return httpserver.MinPriority
- case req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, "/arvados/v1/logs"):
- // "Create log entry" is the most harmless kind of
- // request to drop. Negative priority is called "low"
- // in aggregate metrics.
- return -1
case req.Header.Get("Origin") != "":
// Handle interactive requests first. Positive
// priority is called "high" in aggregate metrics.
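
The insecure-flag check added above is terse: any value of ARVADOS_API_HOST_INSECURE other than "0" counts as enabling insecure mode, and the warning fires only when that boolean disagrees with cluster.TLS.Insecure. A standalone sketch of the same predicate (envInsecureMismatch is an illustrative name, not part of the source):

```go
package main

import "fmt"

// envInsecureMismatch reports whether the ARVADOS_API_HOST_INSECURE
// environment value disagrees with the cluster's TLS.Insecure setting.
// Any non-empty value other than "0" counts as "insecure enabled".
func envInsecureMismatch(env string, clusterInsecure bool) bool {
	return env != "" && (env != "0") != clusterInsecure
}

func main() {
	fmt.Println(envInsecureMismatch("1", false)) // true: env enables insecure, config does not -> warn
	fmt.Println(envInsecureMismatch("0", false)) // false: consistent, no warning
	fmt.Println(envInsecureMismatch("", true))   // false: unset variable is never a mismatch
}
```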
diff --git a/lib/webdavfs/fs.go b/lib/webdavfs/fs.go
index eaa1a5a0c7..befcd3fac7 100644
--- a/lib/webdavfs/fs.go
+++ b/lib/webdavfs/fs.go
@@ -132,7 +132,7 @@ func (readEOF) Read(p []byte) (int, error) {
// First, it allows keep-web to use one locker for all collections
// even though coll1.vhost/foo and coll2.vhost/foo have the same path
// but represent different resources. Additionally, it returns valid
-// tokens (rfc2518 specifies that tokens are represented as URIs and
+// tokens (RFC 2518 specifies that tokens are represented as URIs and
// are unique across all resources for all time), which might improve
// client compatibility.
//
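
For readers unfamiliar with the tokens-as-URIs requirement, one common realization is the opaquelocktoken URI scheme defined alongside it in RFC 2518, wrapping a random UUID. A hedged sketch of that form; it is illustrative and not necessarily the exact token format keep-web emits:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

// newLockToken returns an RFC 2518-style lock token: a URI unique
// across all resources for all time, here an opaquelocktoken URI
// wrapping a random version-4 UUID.
func newLockToken() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // UUID version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("opaquelocktoken:%x-%x-%x-%x-%x",
		b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}

func main() {
	tok, err := newLockToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(tok)
}
```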
diff --git a/sdk/R/.gitignore b/sdk/R/.gitignore
new file mode 100644
index 0000000000..bb6235e92e
--- /dev/null
+++ b/sdk/R/.gitignore
@@ -0,0 +1,3 @@
+ArvadosR_*.tar.*
+man/
+R/Arvados.R
diff --git a/sdk/R/DESCRIPTION b/sdk/R/DESCRIPTION
index c6c01adebd..02bad411ce 100644
--- a/sdk/R/DESCRIPTION
+++ b/sdk/R/DESCRIPTION
@@ -1,16 +1,18 @@
Package: ArvadosR
Type: Package
Title: Arvados R SDK
-Version: 2.6.0
+Version: 3.1.2
Authors@R: c(person("Fuad", "Muhic", role = c("aut", "ctr"), email = "fmuhic@capeannenterprises.com"),
person("Peter", "Amstutz", role = c("cre"), email = "peter.amstutz@curii.com"),
person("Piotr", "Nowosielski", role = c("aut"), email = "piotr.nowosielski@contractors.roche.com"),
- person("Aneta", "Stanczyk", role = c("aut"), email = "aneta.stanczyk@contractors.roche.com"))
+ person("Aneta", "Stanczyk", role = c("aut"), email = "aneta.stanczyk@contractors.roche.com"),
+ person("Brett", "Smith", role = c("aut"), email = "brett.smith@curii.com"))
Description: This is the Arvados R SDK
URL: http://doc.arvados.org
License: Apache-2.0
Encoding: UTF-8
LazyData: true
+Roxygen: list(markdown = TRUE)
RoxygenNote: 7.2.3
Imports:
R6,
diff --git a/sdk/R/Makefile b/sdk/R/Makefile
new file mode 100644
index 0000000000..20d4b11871
--- /dev/null
+++ b/sdk/R/Makefile
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# NOTE: `R CMD check` (and by extension, the Arvados test suite)
+# will carp at you if you use GNU extensions.
+#
+
+ALL=R/Arvados.R man
+SDK_VERSION!=awk '($$1 == "Version:"){v=$$2} END {print v}' DESCRIPTION
+
+all: $(ALL)
+
+.PHONY: api
+api: R/Arvados.R
+R/Arvados.R: arvados-v1-discovery.json generateApi.R
+ Rscript --vanilla generateApi.R
+
+# Used by arvados/doc/Rakefile.
+# Check whether we can load libraries necessary to build the package.
+.PHONY: can_run
+can_run:
+ Rscript --vanilla -e "library(jsonlite); library(roxygen2);"
+
+.PHONY: clean
+clean:
+ rm -rf $(ALL) "ArvadosR_$(SDK_VERSION).tar.gz"
+
+.PHONY: install
+install:
+ R CMD INSTALL .
+
+man: R/Arvados.R R/*.R
+ Rscript --vanilla -e "library(roxygen2); roxygen2::roxygenize(clean=TRUE)"
+
+.PHONY: package
+package: ArvadosR_$(SDK_VERSION).tar.gz
+ArvadosR_$(SDK_VERSION).tar.gz: $(ALL) [A-Z]* *.R tests/*.R tests/testthat/*.R tests/testthat/fakes/*.R
+ R CMD build .
+
+.PHONY: test
+test: $(ALL)
+ Rscript --vanilla run_test.R
diff --git a/sdk/R/NAMESPACE b/sdk/R/NAMESPACE
index 1cc676875a..2835b5091f 100644
--- a/sdk/R/NAMESPACE
+++ b/sdk/R/NAMESPACE
@@ -7,5 +7,4 @@ export(Arvados)
export(ArvadosFile)
export(Collection)
export(Subcollection)
-export(generateAPI)
export(listAll)
diff --git a/sdk/R/R/Arvados.R b/sdk/R/R/Arvados.R
deleted file mode 100644
index ed65d1fc4c..0000000000
--- a/sdk/R/R/Arvados.R
+++ /dev/null
@@ -1,3322 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-#' R6 Class Representing an Arvados Client
-#'
-#' @description
-#' The Arvados class gives users the ability to access the Arvados REST API. It also allows the user to manipulate collections and projects.
-
-#' @export Arvados
-Arvados <- R6::R6Class(
-
- "Arvados",
-
- public = list(
-
- #' @description
- #' Initialize a new environment.
- #' @param authToken ARVADOS_API_TOKEN from 'Get API Token' on Arvados.
- #' @param hostName ARVADOS_API_HOST from 'Get API Token' on Arvados.
- #' @param numRetries Specify number of times to retry failed service requests.
- #' @return A new `Arvados` object.
- #' @examples
- #' arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
- initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
- {
- if(!is.null(hostName))
- Sys.setenv(ARVADOS_API_HOST = hostName)
-
- if(!is.null(authToken))
- Sys.setenv(ARVADOS_API_TOKEN = authToken)
-
- hostName <- Sys.getenv("ARVADOS_API_HOST")
- token <- Sys.getenv("ARVADOS_API_TOKEN")
-
- if(hostName == "" | token == "")
- stop(paste("Please provide host name and authentification token",
- "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
- "environment variables."))
-
- private$token <- token
- private$host <- paste0("https://", hostName, "/arvados/v1/")
- private$numRetries <- numRetries
- private$REST <- RESTService$new(token, hostName,
- HttpRequest$new(), HttpParser$new(),
- numRetries)
-
- },
-
- #' @description
- #' project_exist checks whether a project with the given UUID exists.
- #' @param uuid The UUID of a project or a file.
- #' @examples
- #' \dontrun{
- #' arv$project_exist(uuid = "projectUUID")
- #' }
- project_exist = function(uuid)
- {
- proj <- self$project_list(list(list("uuid", '=', uuid)))
- value <- length(proj$items)
-
- if (value == 1){
- cat(format('TRUE'))
- } else {
- cat(format('FALSE'))
- }
- },
-
- #' @description
- #' project_get returns the requested project.
- #' @param uuid The UUID of the Group in question.
- #' @examples
- #' \dontrun{
- #' project <- arv$project_get(uuid = 'projectUUID')
- #' }
- project_get = function(uuid)
- {
- self$groups_get(uuid)
- },
-
- #' @description
- #' project_create creates a new project of a given name and description.
- #' @param name Name of the project.
- #' @param description Description of the project.
- #' @param ownerUUID The UUID of the parent project in which to create the new one.
- #' @param properties List of the properties of the project.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @examples
- #' \dontrun{
- #' Properties <- list() # should contain a list of new properties to be added
- #' new_project <- arv$project_create(name = "project name", description = "project description", ownerUUID = "projectUUID", properties = NULL, ensureUniqueName = "false")
- #' }
- project_create = function(name, description, ownerUUID, properties = NULL, ensureUniqueName = "false")
- {
- group <- list(name = name, description = description, owner_uuid = ownerUUID, properties = properties)
- group <- c("group_class" = "project", group)
- self$groups_create(group, ensureUniqueName = ensureUniqueName)
- },
-
- #' @description
- #' project_properties_set is a method defined in Arvados class that sets project properties, overwriting any that are already set.
- #' @param listProperties List of new properties.
- #' @param uuid The UUID of a project or a file.
- #' @examples
- #' \dontrun{
- #' Properties <- list() # should contain a list of new properties to be added
- #' arv$project_properties_set(Properties, uuid)
- #' }
- project_properties_set = function(listProperties, uuid)
- {
- group <- c("group_class" = "project", list("properties" = listProperties))
- self$groups_update(group, uuid)
-
- },
-
- #' @description
- #' project_properties_append is a method defined in Arvados class that appends new properties to the existing ones.
- #' @param properties List of new properties.
- #' @param uuid The UUID of a project or a file.
- #' @examples
- #' \dontrun{
- #' newProperties <- list() # should contain a list of new properties to be added
- #' arv$project_properties_append(properties = newProperties, uuid)
- #' }
- project_properties_append = function(properties, uuid)
- {
- proj <- self$project_list(list(list('uuid', '=', uuid)))
- projProp <- proj$items[[1]]$properties
-
- newListOfProperties <- c(projProp, properties)
- uniqueProperties <- unique(unlist(newListOfProperties))
- newListOfProperties <- suppressWarnings(newListOfProperties[which(newListOfProperties == uniqueProperties)])
-
- group <- c("group_class" = "project", list("properties" = newListOfProperties))
- self$groups_update(group, uuid);
-
- },
-
- #' @description
- #' project_properties_get is a method defined in Arvados class that returns properties.
- #' @param uuid The UUID of a project or a file.
- #' @examples
- #' \dontrun{
- #' arv$project_properties_get(projectUUID)
- #' }
- project_properties_get = function(uuid)
- {
- proj <- self$project_list(list(list('uuid', '=', uuid)))
- proj$items[[1]]$properties
- },
-
- #' @description
- #' project_properties_delete is a method defined in Arvados class that deletes the given property.
- #' @param oneProp Property to be deleted.
- #' @param uuid The UUID of a project or a file.
- #' @examples
- #' \dontrun{
- #' Properties <- list() # should contain the property to be deleted
- #' arv$project_properties_delete(Properties, projectUUID)
- #' }
- project_properties_delete = function(oneProp, uuid)
- {
- proj <- self$project_list(list(list('uuid', '=', uuid))) # find project
- projProp <- proj$items[[1]]$properties
- for (i in 1:length(projProp)){
- solution <- identical(projProp[i],oneProp)
- if (solution == TRUE) {
- projProp <- projProp[names(projProp) != names(oneProp)]
- self$project_properties_set(projProp, uuid)
- }
- }
- },
-
- #' @description
- #' project_update updates a project. A new name, description, and properties may be given.
- #' @param ... Fields to be updated (name, description, properties).
- #' @param uuid The UUID of a project in question.
- #' @examples
- #' \dontrun{
- #' newProperties <- list() # should contain a list of new properties to be added
- #' arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
- #' }
- project_update = function(..., uuid) {
- vec <- list(...)
- for (i in 1:length(vec))
- {
- if (names(vec[i]) == 'properties') {
- solution <- self$project_properties_append(vec$properties, uuid = uuid)
- }
- }
- vecNew <- vec[names(vec) != "properties"]
- vecNew <- c("group_class" = "project", vecNew)
- z <- self$groups_update(vecNew, uuid)
- },
-
- #' @description
- #' project_list lists projects by name, UUID, properties, or permissions.
- #' @param filters
- #' @param where
- #' @param order
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param includeTrash Include items whose is_trashed attribute is true.
- #' @param uuid The UUID of a project in question.
- #' @param recursive Include contents from child groups recursively.
- #' @examples
- #' \dontrun{
- #' listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # Sample query which shows projects within the project of a given UUID
- #' }
- project_list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- includeTrash = NULL)
- {
- filters[[length(filters) + 1]] <- list("group_class", "=", "project")
- self$groups_list(filters, where, order, select, distinct,
- limit, offset, count, includeTrash)
- },
-
- #' @description
- #' project_delete trashes the project with the given UUID. It can be restored from the trash or deleted permanently.
- #' @param uuid The UUID of the Group in question.
- #' @examples
- #' \dontrun{
- #' arv$project_delete(uuid = 'projectUUID')
- #' }
- project_delete = function(uuid)
- {
- self$groups_delete(uuid)
- },
-
- #' @description
- #' api_clients_get is a method defined in Arvados class.
- #' @param uuid The UUID of the apiClient in question.
- api_clients_get = function(uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_clients_create is a method defined in Arvados class.
- #' @param apiClient apiClient object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- api_clients_create = function(apiClient,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("api_clients")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(apiClient) > 0)
- body <- jsonlite::toJSON(list(apiClient = apiClient),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_clients_update is a method defined in Arvados class.
- #' @param apiClient apiClient object.
- #' @param uuid The UUID of the apiClient in question.
- api_clients_update = function(apiClient, uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(apiClient) > 0)
- body <- jsonlite::toJSON(list(apiClient = apiClient),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_clients_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the apiClient in question.
- api_clients_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_clients_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- api_clients_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("api_clients")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_get is a method defined in Arvados class.
- #' @param uuid The UUID of the apiClientAuthorization in question.
- api_client_authorizations_get = function(uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_create is a method defined in Arvados class.
- #' @param apiClientAuthorization apiClientAuthorization object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error on (ownerUUID, name) collision.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- api_client_authorizations_create = function(apiClientAuthorization,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(apiClientAuthorization) > 0)
- body <- jsonlite::toJSON(list(apiClientAuthorization = apiClientAuthorization),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_update is a method defined in Arvados class.
- #' @param apiClientAuthorization apiClientAuthorization object.
- #' @param uuid The UUID of the apiClientAuthorization in question.
- api_client_authorizations_update = function(apiClientAuthorization, uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(apiClientAuthorization) > 0)
- body <- jsonlite::toJSON(list(apiClientAuthorization = apiClientAuthorization),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the apiClientAuthorization in question.
- api_client_authorizations_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_create_system_auth is a method defined in Arvados class.
- #' @param apiClientID
- #' @param scopes
- api_client_authorizations_create_system_auth = function(apiClientID = NULL, scopes = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/create_system_auth")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(apiClientID = apiClientID,
- scopes = scopes)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_current is a method defined in Arvados class.
- api_client_authorizations_current = function()
- {
- endPoint <- stringr::str_interp("api_client_authorizations/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' api_client_authorizations_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- api_client_authorizations_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' authorized_keys_get is a method defined in Arvados class.
- #' @param uuid The UUID of the authorizedKey in question.
- authorized_keys_get = function(uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' authorized_keys_create is a method defined in Arvados class.
- #' @param authorizedKey authorizedKey object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- authorized_keys_create = function(authorizedKey,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("authorized_keys")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(authorizedKey) > 0)
- body <- jsonlite::toJSON(list(authorizedKey = authorizedKey),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' authorized_keys_update is a method defined in Arvados class.
- #' @param authorizedKey authorizedKey object.
- #' @param uuid The UUID of the authorizedKey in question.
- authorized_keys_update = function(authorizedKey, uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(authorizedKey) > 0)
- body <- jsonlite::toJSON(list(authorizedKey = authorizedKey),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' authorized_keys_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the authorizedKey in question.
- authorized_keys_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' authorized_keys_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- authorized_keys_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("authorized_keys")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' collection <- arv$collections_get(uuid = collectionUUID)
- #' }
- collections_get = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_create is a method defined in Arvados class that creates a new collection.
- #' @param name Name of the collection.
- #' @param description Description of the collection.
- #' @param ownerUUID UUID of the parent project in which to create the new one.
- #' @param properties Properties of the collection.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- #' @examples
- #' \dontrun{
- #' Properties <- list() # should contain a list of new properties to be added
- #' arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
- #' }
- collections_create = function(name, description, ownerUUID = NULL, properties = NULL, # name and description are obligatory
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("collections")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- collection <- list(name = name, description = description, owner_uuid = ownerUUID, properties = properties)
- if(length(collection) > 0)
- body <- jsonlite::toJSON(list(collection = collection),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors)){
- if(identical(sub('Entity:.*',"", resource$errors), "//railsapi.internal/arvados/v1/collections: 422 Unprocessable ")){
- resource <- cat(format("A collection with the given name already exists in this projects. If you want to update it use collections_update() instead"))
- }else{
- stop(resource$errors)
- }
- }
-
- resource
- },
-
- #' @description
- #' collections_update is a method defined in Arvados class.
- #' @param name New name of the collection.
- #' @param description New description of the collection.
- #' @param ownerUUID UUID of the parent project in which to create the new one.
- #' @param properties New list of properties of the collection.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
- #' }
- collections_update = function(name, description, ownerUUID = NULL, properties = NULL, uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- collection <- list(name = name, description = description, owner_uuid = ownerUUID, properties = properties)
- if(length(collection) > 0)
- body <- jsonlite::toJSON(list(collection = collection),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' arv$collections_delete(collectionUUID)
- #' }
- collections_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_provenance is a method defined in Arvados class; it returns the provenance of the collection with the given UUID.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' collection <- arv$collections_provenance(collectionUUID)
- #' }
- collections_provenance = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/provenance")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_used_by is a method defined in Arvados class; it returns collections by portable_data_hash.
- #' @param uuid The UUID of the Collection in question.
- collections_used_by = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/used_by")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_trash is a method defined in Arvados class; it moves a collection to the trash.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' arv$collections_trash(collectionUUID)
- #' }
- collections_trash = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/trash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_untrash is a method defined in Arvados class; it moves a collection out of the trash, back to its project.
- #' @param uuid The UUID of the Collection in question.
- #' @examples
- #' \dontrun{
- #' arv$collections_untrash(collectionUUID)
- #' }
- collections_untrash = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/untrash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' collections_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- #' @param includeTrash Include collections whose is_trashed attribute is true.
- #' @param includeOldVersions Include past collection versions.
- #' @examples
- #' \dontrun{
- #' collectionList <- arv$collections_list(list(list("name", "=", "Example")))
- #' }
- collections_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL,
- includeTrash = NULL, includeOldVersions = NULL)
- {
- endPoint <- stringr::str_interp("collections")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation,
- includeTrash = includeTrash, includeOldVersions = includeOldVersions)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_get = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_create is a method defined in Arvados class.
- #' @param container Container object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- containers_create = function(container, ensureUniqueName = "false",
- clusterID = NULL)
- {
- endPoint <- stringr::str_interp("containers")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(container) > 0)
- body <- jsonlite::toJSON(list(container = container),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_update is a method defined in Arvados class.
- #' @param container Container object.
- #' @param uuid The UUID of the Container in question.
- containers_update = function(container, uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(container) > 0)
- body <- jsonlite::toJSON(list(container = container),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_auth is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_auth = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/auth")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_lock is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_lock = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/lock")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_unlock is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_unlock = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/unlock")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_secret_mounts is a method defined in Arvados class.
- #' @param uuid The UUID of the Container in question.
- containers_secret_mounts = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/secret_mounts")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_current is a method defined in Arvados class.
- containers_current = function()
- {
- endPoint <- stringr::str_interp("containers/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' containers_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- containers_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("containers")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' container_requests_get is a method defined in Arvados class.
- #' @param uuid The UUID of the containerRequest in question.
- container_requests_get = function(uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' container_requests_create is a method defined in Arvados class.
- #' @param containerRequest containerRequest object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- container_requests_create = function(containerRequest,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("container_requests")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(containerRequest) > 0)
- body <- jsonlite::toJSON(list(containerRequest = containerRequest),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' container_requests_update is a method defined in Arvados class.
- #' @param containerRequest containerRequest object.
- #' @param uuid The UUID of the containerRequest in question.
- container_requests_update = function(containerRequest, uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(containerRequest) > 0)
- body <- jsonlite::toJSON(list(containerRequest = containerRequest),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' container_requests_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the containerRequest in question.
- container_requests_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' container_requests_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- #' @param includeTrash Include container requests whose owner project is trashed.
- container_requests_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL,
- includeTrash = NULL)
- {
- endPoint <- stringr::str_interp("container_requests")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation,
- includeTrash = includeTrash)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Group in question.
- groups_get = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_create is a method defined in Arvados class that supports project creation.
- #' @param group Group object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- #' @param async Defer permissions update.
- groups_create = function(group, ensureUniqueName = "false",
- clusterID = NULL, async = "false")
- {
- endPoint <- stringr::str_interp("groups")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID, async = async)
-
- if(length(group) > 0)
- body <- jsonlite::toJSON(list(group = group),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
-
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors)){
- if (identical(sub('#.*', "", resource$errors), "//railsapi.internal/arvados/v1/groups: 422 Unprocessable Entity: ")) {
- #if (identical(sub('P.*', "", resource$errors), "//railsapi.internal/arvados/v1/groups: 422 Unprocessable Entity: #\u003cActiveRecord::RecordNotUnique: ")) {
- resource <- cat(format("Project of that name already exist. If you want to update it use project_update() instead"))
- }else{
- stop(resource$errors)
- }
- }
-
- return(resource)
- },
-
- #' @description
- #' groups_update is a method defined in Arvados class.
- #' @param group Group object.
- #' @param uuid The UUID of the Group in question.
- #' @param async Defer permissions update.
- groups_update = function(group, uuid, async = "false")
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- list(async = async)
-
- if(length(group) > 0)
- body <- jsonlite::toJSON(list(group = group),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Group in question.
- groups_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- dateTime <- gsub("T.*", "", resource$delete_at)
- cat("The content will be deleted permanently at", dateTime)
-
- resource
- },
-
- #' @description
- #' groups_contents is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- #' @param includeTrash Include items whose is_trashed attribute is true.
- #' @param uuid
- #' @param recursive Include contents from child groups recursively.
- #' @param include Include objects referred to by the listed field in "included" (only ownerUUID).
- groups_contents = function(filters = NULL,
- where = NULL, order = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- clusterID = NULL, bypassFederation = NULL,
- includeTrash = NULL, uuid = NULL, recursive = NULL,
- include = NULL)
- {
- endPoint <- stringr::str_interp("groups/contents")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- list(filters = filters, where = where,
- order = order, distinct = distinct, limit = limit,
- offset = offset, count = count, clusterID = clusterID,
- bypassFederation = bypassFederation, includeTrash = includeTrash,
- uuid = uuid, recursive = recursive, include = include)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_shared is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- #' @param includeTrash Include items whose is_trashed attribute is true.
- #' @param include
- groups_shared = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL,
- includeTrash = NULL, include = NULL)
- {
- endPoint <- stringr::str_interp("groups/shared")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation,
- includeTrash = includeTrash, include = include)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_trash is a method defined in Arvados class.
- #' @param uuid The UUID of the Group in question.
- groups_trash = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}/trash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_untrash is a method defined in Arvados class.
- #' @param uuid The UUID of the Group in question.
- groups_untrash = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}/untrash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' groups_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- #' @param includeTrash Include items whose is_trashed attribute is true.
- groups_list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- clusterID = NULL, bypassFederation = NULL,
- includeTrash = NULL)
- {
- endPoint <- stringr::str_interp("groups")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
-
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation,
- includeTrash = includeTrash)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_get is a method defined in Arvados class.
- #' @param uuid The UUID of the keepService in question.
- keep_services_get = function(uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_create is a method defined in Arvados class.
- #' @param keepService keepService object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- keep_services_create = function(keepService,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("keep_services")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(keepService) > 0)
- body <- jsonlite::toJSON(list(keepService = keepService),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_update is a method defined in Arvados class.
- #' @param keepService keepService object.
- #' @param uuid The UUID of the keepService in question.
- keep_services_update = function(keepService, uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(keepService) > 0)
- body <- jsonlite::toJSON(list(keepService = keepService),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the keepService in question.
- keep_services_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_accessible is a method defined in Arvados class.
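- #' @examples
- #' \dontrun{
- #' # Minimal sketch: lists the Keep services reachable from this client.
- #' keepServices <- arv$keep_services_accessible()
- #' }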
- keep_services_accessible = function()
- {
- endPoint <- stringr::str_interp("keep_services/accessible")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' keep_services_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- keep_services_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("keep_services")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' project_permission_give is a method defined in Arvados class that enables sharing projects or files with other users.
- #' @param type Possible options are can_read, can_write, or can_manage.
- #' @param uuid The UUID of a project or a file.
- #' @param user The UUID of the person who receives the permission.
- #' @examples
- #' \dontrun{
- #' arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
- #' }
- project_permission_give = function(type, uuid, user)
- {
- endPoint <- stringr::str_interp("links")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- # NOTE: this could alternatively take a prebuilt link list as a single argument instead of three parameters.
- link <- list("link_class" = "permission", "name" = type, "head_uuid" = uuid, "tail_uuid" = user)
-
- if(length(link) > 0)
- body <- jsonlite::toJSON(list(link = link),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' project_permission_refuse is a method defined in Arvados class that revokes a permission previously granted to another user.
- #' @param type Possible options are can_read, can_write, or can_manage.
- #' @param uuid The UUID of a project or a file.
- #' @param user The UUID of the person whose permission is being revoked.
- #' @examples
- #' \dontrun{
- #' arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
- #' }
- project_permission_refuse = function(type, uuid, user)
- {
- links <- self$links_list(list(list("head_uuid", "=", uuid)))$items
-
- matchingUser <- links[which(sapply(links, "[[", "tail_uuid") == user)]
- matchingType <- matchingUser[which(sapply(matchingUser, "[[", "name") == type)]
- solution <- matchingType[which(sapply(matchingType, "[[", "link_class") == 'permission')]
-
- if (length(solution) == 0) {
- cat("No permission granted\n")
- } else {
- self$links_delete(solution[[1]]$uuid)
- }
-
- },
-
- #' @description
- #' project_permission_update is a method defined in Arvados class that enables updating permissions.
- #' @param typeNew The new permission level: can_read, can_write, or can_manage.
- #' @param typeOld The current permission level to be replaced.
- #' @param uuid The UUID of a project or a file.
- #' @param user The UUID of the person whose permission is being updated.
- #' @examples
- #' \dontrun{
- #' arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
- #' }
- project_permission_update = function(typeOld, typeNew, uuid, user)
- {
- link <- list("name" = typeNew)
-
- links <- self$links_list(list(list("head_uuid", "=", uuid)))$items
-
- matchingUser <- links[which(sapply(links, "[[", "tail_uuid") == user)]
- matchingType <- matchingUser[which(sapply(matchingUser, "[[", "name") == typeOld)]
- solution <- matchingType[which(sapply(matchingType, "[[", "link_class") == 'permission')]
-
- if (length(solution) == 0) {
- cat("No permission granted\n")
- } else {
- self$links_update(link, solution[[1]]$uuid)
- }
- },
-
- #' @description
- #' project_permission_check is a method defined in Arvados class that enables checking file permissions.
- #' @param uuid The UUID of a project or a file.
- #' @param user The UUID of the person whose permissions are being checked.
- #' @param type Possible options are can_read, can_write, or can_manage.
- #' @examples
- #' \dontrun{
- #' arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
- #' }
- project_permission_check = function(uuid, user, type = NULL)
- {
- links <- self$links_list(list(list("head_uuid", "=", uuid)))$items
-
- matchingUser <- links[which(sapply(links, "[[", "tail_uuid") == user)]
-
- if (length(type) == 0) {
- matchingUser
- } else {
- matchingType <- matchingUser[which(sapply(matchingUser, "[[", "name") == type)]
- permissions <- matchingType[which(sapply(matchingType, "[[", "link_class") == 'permission')]
- print(permissions[[1]]$name)
- }
- },
-
- #' @description
- #' links_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Link in question.
- links_get = function(uuid)
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' links_create is a method defined in Arvados class.
- #' @param link Link object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
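- #' @examples
- #' \dontrun{
- #' # Assumption-laden sketch: objectUUID and userUUID are placeholders for real UUIDs.
- #' link <- list("link_class" = "permission", "name" = "can_read",
- #'              "head_uuid" = objectUUID, "tail_uuid" = userUUID)
- #' arv$links_create(link)
- #' }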
- links_create = function(link, ensureUniqueName = "false",
- clusterID = NULL)
- {
- endPoint <- stringr::str_interp("links")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(link) > 0)
- body <- jsonlite::toJSON(list(link = link),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' links_update is a method defined in Arvados class.
- #' @param link Link object.
- #' @param uuid The UUID of the Link in question.
- links_update = function(link, uuid, async = "false")
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(async = async)
-
- if(length(link) > 0)
- body <- jsonlite::toJSON(list(link = link),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' links_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Link in question.
- links_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' links_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- links_list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("links")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' links_get_permissions is a method defined in Arvados class.
- #' @param uuid The UUID of the Link in question.
- links_get_permissions = function(uuid)
- {
- endPoint <- stringr::str_interp("permissions/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' logs_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Log in question.
- logs_get = function(uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' logs_create is a method defined in Arvados class.
- #' @param log Log object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- logs_create = function(log, ensureUniqueName = "false",
- clusterID = NULL)
- {
- endPoint <- stringr::str_interp("logs")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(log) > 0)
- body <- jsonlite::toJSON(list(log = log),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' logs_update is a method defined in Arvados class.
- #' @param log Log object.
- #' @param uuid The UUID of the Log in question.
- logs_update = function(log, uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(log) > 0)
- body <- jsonlite::toJSON(list(log = log),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' logs_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Log in question.
- logs_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' logs_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- logs_list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("logs")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_get is a method defined in Arvados class.
- #' @param uuid The UUID of the User in question.
- users_get = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_create is a method defined in Arvados class.
- #' @param user User object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- users_create = function(user, ensureUniqueName = "false",
- clusterID = NULL)
- {
- endPoint <- stringr::str_interp("users")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(user) > 0)
- body <- jsonlite::toJSON(list(user = user),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_update is a method defined in Arvados class.
- #' @param user User object.
- #' @param uuid The UUID of the User in question.
- #' @param bypassFederation
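- #' @examples
- #' \dontrun{
- #' # Sketch with a hypothetical attribute change; userUUID is an assumed placeholder.
- #' arv$users_update(list("first_name" = "Ada"), userUUID)
- #' }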
- users_update = function(user, uuid, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(bypassFederation = bypassFederation)
-
- if(length(user) > 0)
- body <- jsonlite::toJSON(list(user = user),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the User in question.
- users_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_current is a method defined in Arvados class.
- users_current = function()
- {
- endPoint <- stringr::str_interp("users/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_system is a method defined in Arvados class.
- users_system = function()
- {
- endPoint <- stringr::str_interp("users/system")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_activate is a method defined in Arvados class.
- #' @param uuid The UUID of the User in question.
- users_activate = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}/activate")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_setup is a method defined in Arvados class.
- #' @param uuid
- #' @param user
- #' @param repo_name
- #' @param vm_uuid
- #' @param send_notification_email
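- #' @examples
- #' \dontrun{
- #' # Hedged sketch: finishes setup of an existing account; userUUID is an assumed placeholder.
- #' arv$users_setup(uuid = userUUID, send_notification_email = "true")
- #' }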
- users_setup = function(uuid = NULL, user = NULL,
- repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
- {
- endPoint <- stringr::str_interp("users/setup")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(uuid = uuid, user = user,
- repo_name = repo_name, vm_uuid = vm_uuid,
- send_notification_email = send_notification_email)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_unsetup is a method defined in Arvados class.
- #' @param uuid The UUID of the User in question.
- users_unsetup = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}/unsetup")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_merge is a method defined in Arvados class.
- #' @param newOwnerUUID
- #' @param newUserToken
- #' @param redirectToNewUser
- #' @param oldUserUUID
- #' @param newUserUUID
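- #' @examples
- #' \dontrun{
- #' # Illustrative only; every UUID below is an assumed placeholder, and the old user's
- #' # owned objects are reassigned to newOwnerUUID.
- #' arv$users_merge(newOwnerUUID = newUserUUID, oldUserUUID = oldUserUUID,
- #'                 newUserUUID = newUserUUID, redirectToNewUser = "true")
- #' }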
- users_merge = function(newOwnerUUID, newUserToken = NULL,
- redirectToNewUser = NULL, oldUserUUID = NULL,
- newUserUUID = NULL)
- {
- endPoint <- stringr::str_interp("users/merge")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(newOwnerUUID = newOwnerUUID,
- newUserToken = newUserToken, redirectToNewUser = redirectToNewUser,
- oldUserUUID = oldUserUUID, newUserUUID = newUserUUID)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' users_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- users_list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("users")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Repository in question.
- repositories_get = function(uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_create is a method defined in Arvados class.
- #' @param repository Repository object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- repositories_create = function(repository,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("repositories")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(repository) > 0)
- body <- jsonlite::toJSON(list(repository = repository),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_update is a method defined in Arvados class.
- #' @param repository Repository object.
- #' @param uuid The UUID of the Repository in question.
- repositories_update = function(repository, uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(repository) > 0)
- body <- jsonlite::toJSON(list(repository = repository),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Repository in question.
- repositories_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_get_all_permissions is a method defined in Arvados class.
- repositories_get_all_permissions = function()
- {
- endPoint <- stringr::str_interp("repositories/get_all_permissions")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' repositories_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- repositories_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("repositories")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_get is a method defined in Arvados class.
- #' @param uuid The UUID of the virtualMachine in question.
- virtual_machines_get = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_create is a method defined in Arvados class.
- #' @param virtualMachine virtualMachine object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- virtual_machines_create = function(virtualMachine,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("virtual_machines")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(virtualMachine) > 0)
- body <- jsonlite::toJSON(list(virtualMachine = virtualMachine),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_update is a method defined in Arvados class.
- #' @param virtualMachine virtualMachine object.
- #' @param uuid The UUID of the virtualMachine in question.
- virtual_machines_update = function(virtualMachine, uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(virtualMachine) > 0)
- body <- jsonlite::toJSON(list(virtualMachine = virtualMachine),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the virtualMachine in question.
- virtual_machines_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_logins is a method defined in Arvados class.
- #' @param uuid The UUID of the virtualMachine in question.
- virtual_machines_logins = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}/logins")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_get_all_logins is a method defined in Arvados class.
- virtual_machines_get_all_logins = function()
- {
- endPoint <- stringr::str_interp("virtual_machines/get_all_logins")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' virtual_machines_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- virtual_machines_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("virtual_machines")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' workflows_get is a method defined in Arvados class.
- #' @param uuid The UUID of the Workflow in question.
- workflows_get = function(uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' workflows_create is a method defined in Arvados class.
- #' @param workflow Workflow object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- workflows_create = function(workflow, ensureUniqueName = "false",
- clusterID = NULL)
- {
- endPoint <- stringr::str_interp("workflows")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(workflow) > 0)
- body <- jsonlite::toJSON(list(workflow = workflow),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' workflows_update is a method defined in Arvados class.
- #' @param workflow Workflow object.
- #' @param uuid The UUID of the Workflow in question.
- workflows_update = function(workflow, uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(workflow) > 0)
- body <- jsonlite::toJSON(list(workflow = workflow),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' workflows_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the Workflow in question.
- workflows_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' workflows_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
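- #' @examples
- #' \dontrun{
- #' # Sketch of a filtered listing; the filter field and pattern are illustrative.
- #' workflows <- arv$workflows_list(filters = list(list("name", "like", "%demo%")))
- #' }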
- workflows_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("workflows")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_get is a method defined in Arvados class.
- #' @param uuid The UUID of the userAgreement in question.
- user_agreements_get = function(uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_create is a method defined in Arvados class.
- #' @param userAgreement userAgreement object.
- #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
- #' @param clusterID Create object on a remote federated cluster instead of the current one.
- user_agreements_create = function(userAgreement,
- ensureUniqueName = "false", clusterID = NULL)
- {
- endPoint <- stringr::str_interp("user_agreements")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensureUniqueName = ensureUniqueName,
- clusterID = clusterID)
-
- if(length(userAgreement) > 0)
- body <- jsonlite::toJSON(list(userAgreement = userAgreement),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_update is a method defined in Arvados class.
- #' @param userAgreement userAgreement object.
- #' @param uuid The UUID of the userAgreement in question.
- user_agreements_update = function(userAgreement, uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(userAgreement) > 0)
- body <- jsonlite::toJSON(list(userAgreement = userAgreement),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_delete is a method defined in Arvados class.
- #' @param uuid The UUID of the userAgreement in question.
- user_agreements_delete = function(uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_signatures is a method defined in Arvados class.
- user_agreements_signatures = function()
- {
- endPoint <- stringr::str_interp("user_agreements/signatures")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_sign is a method defined in Arvados class.
- user_agreements_sign = function()
- {
- endPoint <- stringr::str_interp("user_agreements/sign")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_list is a method defined in Arvados class.
- #' @param filters
- #' @param where
- #' @param order
- #' @param select
- #' @param distinct
- #' @param limit
- #' @param offset
- #' @param count
- #' @param clusterID List objects on a remote federated cluster instead of the current one.
- #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
- user_agreements_list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", clusterID = NULL, bypassFederation = NULL)
- {
- endPoint <- stringr::str_interp("user_agreements")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- clusterID = clusterID, bypassFederation = bypassFederation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' user_agreements_new is a method defined in Arvados class.
- user_agreements_new = function()
- {
- endPoint <- stringr::str_interp("user_agreements/new")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- #' @description
- #' configs_get is a method defined in Arvados class.
- configs_get = function()
- {
- endPoint <- stringr::str_interp("config")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- getHostName = function() private$host,
- getToken = function() private$token,
- setRESTService = function(newREST) private$REST <- newREST,
- getRESTService = function() private$REST
- ),
-
- private = list(
-
- token = NULL,
- host = NULL,
- REST = NULL,
- numRetries = NULL
- ),
-
- cloneable = FALSE
-)
-
-
diff --git a/sdk/R/R/ArvadosR.R b/sdk/R/R/ArvadosR.R
index 00b068c28a..34594382cf 100644
--- a/sdk/R/R/ArvadosR.R
+++ b/sdk/R/R/ArvadosR.R
@@ -14,7 +14,8 @@
#' \item Fuad Muhic
#' \item Peter Amstutz
#' \item Aneta Stanczyk
-#' \item Piotr Nowosielski}
+#' \item Piotr Nowosielski
+#' \item Brett Smith}
#'
#' @seealso \itemize{
#' \item https://arvados.org
diff --git a/sdk/R/R/RESTService.R b/sdk/R/R/RESTService.R
index 5cbcb65f75..cd6157e08d 100644
--- a/sdk/R/R/RESTService.R
+++ b/sdk/R/R/RESTService.R
@@ -63,7 +63,7 @@ RESTService <- R6::R6Class(
{
fileURL <- paste0(self$getWebDavHostName(), "c=",
uuid, "/", relativePath);
- headers <- list(Authorization = paste("OAuth2", self$token))
+ headers <- list(Authorization = paste("Bearer", self$token))
serverResponse <- self$http$exec("DELETE", fileURL, headers,
retryTimes = self$numRetries)
@@ -80,7 +80,7 @@ RESTService <- R6::R6Class(
fromURL <- paste0(collectionURL, from)
toURL <- paste0(collectionURL, trimFromStart(to, "/"))
- headers <- list("Authorization" = paste("OAuth2", self$token),
+ headers <- list("Authorization" = paste("Bearer", self$token),
"Destination" = toURL)
serverResponse <- self$http$exec("MOVE", fromURL, headers,
@@ -98,7 +98,7 @@ RESTService <- R6::R6Class(
fromURL <- paste0(collectionURL, from)
toURL <- paste0(collectionURL, trimFromStart(to, "/"))
- headers <- list("Authorization" = paste("OAuth2", self$token),
+ headers <- list("Authorization" = paste("Bearer", self$token),
"Destination" = toURL)
serverResponse <- self$http$exec("COPY", fromURL, headers,
@@ -137,7 +137,7 @@ RESTService <- R6::R6Class(
subcollectionURL <- paste0(collectionURL, "/", relativePath);
- headers <- list("Authorization" = paste("OAuth2", self$token))
+ headers <- list("Authorization" = paste("Bearer", self$token))
response <- self$http$exec("PROPFIND", subcollectionURL, headers,
retryTimes = self$numRetries)
@@ -165,11 +165,11 @@ RESTService <- R6::R6Class(
if(offset == 0 && length == 0)
{
- headers <- list(Authorization = paste("OAuth2", self$token))
+ headers <- list(Authorization = paste("Bearer", self$token))
}
else
{
- headers <- list(Authorization = paste("OAuth2", self$token),
+ headers <- list(Authorization = paste("Bearer", self$token),
Range = range)
}
@@ -189,7 +189,7 @@ RESTService <- R6::R6Class(
{
fileURL <- paste0(self$getWebDavHostName(),
"c=", uuid, "/", relativePath);
- headers <- list(Authorization = paste("OAuth2", self$token),
+ headers <- list(Authorization = paste("Bearer", self$token),
"Content-Type" = contentType)
body <- content
@@ -206,7 +206,7 @@ RESTService <- R6::R6Class(
{
fileURL <- paste0(self$getWebDavHostName(),
"c=", uuid, "/", relativePath);
- headers <- list(Authorization = paste("OAuth2", self$token))
+ headers <- list(Authorization = paste("Bearer", self$token))
conn <- self$http$getConnection(fileURL, headers, openMode)
}
@@ -221,7 +221,7 @@ RESTService <- R6::R6Class(
{
fileURL <- paste0(self$getWebDavHostName(), "c=",
uuid, "/", relativePath)
- headers <- list(Authorization = paste("OAuth2", self$token),
+ headers <- list(Authorization = paste("Bearer", self$token),
"Content-Type" = contentType)
body <- NULL
diff --git a/sdk/R/R/autoGenAPI.R b/sdk/R/R/autoGenAPI.R
deleted file mode 100644
index fbf58c2f51..0000000000
--- a/sdk/R/R/autoGenAPI.R
+++ /dev/null
@@ -1,587 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-getAPIDocument <- function(){
- url <- "https://jutro.arvadosapi.com/discovery/v1/apis/arvados/v1/rest"
- serverResponse <- httr::RETRY("GET", url = url)
-
- httr::content(serverResponse, as = "parsed", type = "application/json")
-}
-
-#' generateAPI
-#'
-#' Autogenerate classes to interact with Arvados from the Arvados discovery document.
-#'
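-#' @examples
-#' \dontrun{
-#' # Regenerates R/Arvados.R from the live discovery document (network access assumed).
-#' generateAPI()
-#' }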
-#' @export
-generateAPI <- function()
-{
- #TODO: Consider passing discovery document URL as parameter.
- #TODO: Consider passing location where to create new files.
- discoveryDocument <- getAPIDocument()
-
- methodResources <- discoveryDocument$resources
-
- # Don't emit deprecated APIs
- methodResources <- methodResources[!(names(methodResources) %in% c("jobs", "job_tasks", "pipeline_templates", "pipeline_instances",
- "keep_disks", "nodes", "humans", "traits", "specimens"))]
- resourceNames <- names(methodResources)
-
- methodDoc <- genMethodsDoc(methodResources, resourceNames)
- classDoc <- genAPIClassDoc(methodResources, resourceNames)
- arvadosAPIHeader <- genAPIClassHeader()
- arvadosProjectMethods <- genProjectMethods()
- arvadosClassMethods <- genClassContent(methodResources, resourceNames)
- arvadosAPIFooter <- genAPIClassFooter()
-
- arvadosClass <- c(methodDoc,
- classDoc,
- arvadosAPIHeader,
- arvadosProjectMethods,
- arvadosClassMethods,
- arvadosAPIFooter)
-
- fileConn <- file("./R/Arvados.R", "w")
- writeLines(c(
- "# Copyright (C) The Arvados Authors. All rights reserved.",
- "#",
- "# SPDX-License-Identifier: Apache-2.0", ""), fileConn)
- writeLines(unlist(arvadosClass), fileConn)
- close(fileConn)
- NULL
-}
-
-genAPIClassHeader <- function()
-{
- c("Arvados <- R6::R6Class(",
- "",
- "\t\"Arvados\",",
- "",
- "\tpublic = list(",
- "",
- "\t\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)",
- "\t\t{",
- "\t\t\tif(!is.null(hostName))",
- "\t\t\t\tSys.setenv(ARVADOS_API_HOST = hostName)",
- "",
- "\t\t\tif(!is.null(authToken))",
- "\t\t\t\tSys.setenv(ARVADOS_API_TOKEN = authToken)",
- "",
- "\t\t\thostName <- Sys.getenv(\"ARVADOS_API_HOST\")",
- "\t\t\ttoken <- Sys.getenv(\"ARVADOS_API_TOKEN\")",
- "",
- "\t\t\tif(hostName == \"\" | token == \"\")",
-				"\t\t\t\tstop(paste(\"Please provide host name and authentication token\",",
- "\t\t\t\t\t\t \"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\",",
- "\t\t\t\t\t\t \"environment variables.\"))",
- "",
- "\t\t\tprivate$token <- token",
- "\t\t\tprivate$host <- paste0(\"https://\", hostName, \"/arvados/v1/\")",
- "\t\t\tprivate$numRetries <- numRetries",
- "\t\t\tprivate$REST <- RESTService$new(token, hostName,",
- "\t\t\t HttpRequest$new(), HttpParser$new(),",
- "\t\t\t numRetries)",
- "",
- "\t\t},\n")
-}
-
-genProjectMethods <- function()
-{
- c("\t\tprojects.get = function(uuid)",
- "\t\t{",
- "\t\t\tself$groups.get(uuid)",
- "\t\t},",
- "",
- "\t\tprojects.create = function(group, ensure_unique_name = \"false\")",
- "\t\t{",
- "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
- "\t\t\tself$groups.create(group, ensure_unique_name)",
- "\t\t},",
- "",
- "\t\tprojects.update = function(group, uuid)",
- "\t\t{",
- "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
- "\t\t\tself$groups.update(group, uuid)",
- "\t\t},",
- "",
- "\t\tprojects.list = function(filters = NULL, where = NULL,",
- "\t\t\torder = NULL, select = NULL, distinct = NULL,",
- "\t\t\tlimit = \"100\", offset = \"0\", count = \"exact\",",
- "\t\t\tinclude_trash = NULL)",
- "\t\t{",
- "\t\t\tfilters[[length(filters) + 1]] <- list(\"group_class\", \"=\", \"project\")",
- "\t\t\tself$groups.list(filters, where, order, select, distinct,",
- "\t\t\t limit, offset, count, include_trash)",
- "\t\t},",
- "",
- "\t\tprojects.delete = function(uuid)",
- "\t\t{",
- "\t\t\tself$groups.delete(uuid)",
- "\t\t},",
- "")
-}
-
-genClassContent <- function(methodResources, resourceNames)
-{
- arvadosMethods <- Map(function(resource, resourceName)
- {
- methodNames <- names(resource$methods)
-
- functions <- Map(function(methodMetaData, methodName)
- {
- #NOTE: Index, show and destroy are aliases for the preferred names
-            # "list", "get" and "delete". Until they are removed from the
-            # discovery document, we will filter them here.
- if(methodName %in% c("index", "show", "destroy"))
- return(NULL)
-
- methodName <- paste0(resourceName, ".", methodName)
- createMethod(methodName, methodMetaData)
-
- }, resource$methods, methodNames)
-
- unlist(unname(functions))
-
- }, methodResources, resourceNames)
-
- arvadosMethods
-}
-
-genAPIClassFooter <- function()
-{
- c("\t\tgetHostName = function() private$host,",
- "\t\tgetToken = function() private$token,",
- "\t\tsetRESTService = function(newREST) private$REST <- newREST,",
- "\t\tgetRESTService = function() private$REST",
- "\t),",
- "",
- "\tprivate = list(",
- "",
- "\t\ttoken = NULL,",
- "\t\thost = NULL,",
- "\t\tREST = NULL,",
- "\t\tnumRetries = NULL",
- "\t),",
- "",
- "\tcloneable = FALSE",
- ")")
-}
-
-createMethod <- function(name, methodMetaData)
-{
- args <- getMethodArguments(methodMetaData)
- signature <- getMethodSignature(name, args)
- body <- getMethodBody(methodMetaData)
-
- c(signature,
- "\t\t{",
- body,
- "\t\t},\n")
-}
-
-getMethodArguments <- function(methodMetaData)
-{
- request <- methodMetaData$request
- requestArgs <- NULL
-
- if(!is.null(request))
- {
- resourceName <- tolower(request$properties[[1]][[1]])
-
- if(request$required)
- requestArgs <- resourceName
- else
- requestArgs <- paste(resourceName, "=", "NULL")
- }
-
- argNames <- names(methodMetaData$parameters)
-
- args <- sapply(argNames, function(argName)
- {
- arg <- methodMetaData$parameters[[argName]]
-
- if(!arg$required)
- {
- if(!is.null(arg$default))
- return(paste0(argName, " = ", "\"", arg$default, "\""))
- else
- return(paste(argName, "=", "NULL"))
- }
-
- argName
- })
-
- c(requestArgs, args)
-}
-
-getMethodSignature <- function(methodName, args)
-{
- collapsedArgs <- paste0(args, collapse = ", ")
- lineLengthLimit <- 40
-
- if(nchar(collapsedArgs) > lineLengthLimit)
- {
- return(paste0("\t\t",
- formatArgs(paste(methodName, "= function("),
- "\t", args, ")", lineLengthLimit)))
- }
- else
- {
- return(paste0("\t\t", methodName, " = function(", collapsedArgs, ")"))
- }
-}
-
-getMethodBody <- function(methodMetaData)
-{
- url <- getRequestURL(methodMetaData)
- headers <- getRequestHeaders()
- requestQueryList <- getRequestQueryList(methodMetaData)
- requestBody <- getRequestBody(methodMetaData)
- request <- getRequest(methodMetaData)
- response <- getResponse(methodMetaData)
- errorCheck <- getErrorCheckingCode()
- returnStatement <- getReturnObject()
-
- body <- c(url,
- headers,
- requestQueryList, "",
- requestBody, "",
- request, response, "",
- errorCheck, "",
- returnStatement)
-
- paste0("\t\t\t", body)
-}
-
-getRequestURL <- function(methodMetaData)
-{
- endPoint <- methodMetaData$path
- endPoint <- stringr::str_replace_all(endPoint, "\\{", "${")
- url <- c(paste0("endPoint <- stringr::str_interp(\"", endPoint, "\")"),
- paste0("url <- paste0(private$host, endPoint)"))
- url
-}
-
-getRequestHeaders <- function()
-{
- c("headers <- list(Authorization = paste(\"Bearer\", private$token), ",
- " \"Content-Type\" = \"application/json\")")
-}
-
-getRequestQueryList <- function(methodMetaData)
-{
- queryArgs <- names(Filter(function(arg) arg$location == "query",
- methodMetaData$parameters))
-
- if(length(queryArgs) == 0)
- return("queryArgs <- NULL")
-
- queryArgs <- sapply(queryArgs, function(arg) paste0(arg, " = ", arg))
- collapsedArgs <- paste0(queryArgs, collapse = ", ")
-
- lineLengthLimit <- 40
-
- if(nchar(collapsedArgs) > lineLengthLimit)
- return(formatArgs("queryArgs <- list(", "\t\t\t\t ", queryArgs, ")",
- lineLengthLimit))
- else
- return(paste0("queryArgs <- list(", collapsedArgs, ")"))
-}
-
-getRequestBody <- function(methodMetaData)
-{
- request <- methodMetaData$request
-
- if(is.null(request) || !request$required)
- return("body <- NULL")
-
- resourceName <- tolower(request$properties[[1]][[1]])
-
- requestParameterName <- names(request$properties)[1]
-
- c(paste0("if(length(", resourceName, ") > 0)"),
- paste0("\tbody <- jsonlite::toJSON(list(", resourceName, " = ", resourceName, "), "),
- "\t auto_unbox = TRUE)",
- "else",
- "\tbody <- NULL")
-}
-
-getRequest <- function(methodMetaData)
-{
- method <- methodMetaData$httpMethod
- c(paste0("response <- private$REST$http$exec(\"", method, "\", url, headers, body,"),
- " queryArgs, private$numRetries)")
-}
-
-getResponse <- function(methodMetaData)
-{
- "resource <- private$REST$httpParser$parseJSONResponse(response)"
-}
-
-getErrorCheckingCode <- function()
-{
- c("if(!is.null(resource$errors))",
- "\tstop(resource$errors)")
-}
-
-getReturnObject <- function()
-{
- "resource"
-}
-
-#NOTE: Arvados class documentation:
-
-genMethodsDoc <- function(methodResources, resourceNames)
-{
- methodsDoc <- unlist(unname(Map(function(resource, resourceName)
- {
- methodNames <- names(resource$methods)
-
- methodDoc <- Map(function(methodMetaData, methodName)
- {
- #NOTE: Index, show and destroy are aliases for the preferred names
-            # "list", "get" and "delete". Until they are removed from the
-            # discovery document, we will filter them here.
- if(methodName %in% c("index", "show", "destroy"))
- return(NULL)
-
- methodName <- paste0(resourceName, ".", methodName)
- getMethodDoc(methodName, methodMetaData)
-
- }, resource$methods, methodNames)
-
- unlist(unname(methodDoc))
-
- }, methodResources, resourceNames)))
-
- projectDoc <- genProjectMethodsDoc()
-
- c(methodsDoc, projectDoc)
-}
-
-genAPIClassDoc <- function(methodResources, resourceNames)
-{
- c("#' Arvados",
- "#'",
-      "#' The Arvados class gives users the ability to access the Arvados REST API.",
- "#'" ,
- "#' @section Usage:",
- "#' \\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}",
- "#'",
- "#' @section Arguments:",
- "#' \\describe{",
-      "#' \t\\item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}",
-      "#' \t\\item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable will be used.}",
- "#' \t\\item{numRetries}{Number which specifies how many times to retry failed service requests.}",
- "#' }",
- "#'",
- "#' @section Methods:",
- "#' \\describe{",
- getAPIClassMethodList(methodResources, resourceNames),
- "#' }",
- "#'",
- "#' @name Arvados",
- "#' @examples",
- "#' \\dontrun{",
- "#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")",
- "#'",
- "#' collection <- arv$collections.get(\"uuid\")",
- "#'",
- "#' collectionList <- arv$collections.list(list(list(\"name\", \"like\", \"Test%\")))",
- "#' collectionList <- listAll(arv$collections.list, list(list(\"name\", \"like\", \"Test%\")))",
- "#'",
- "#' deletedCollection <- arv$collections.delete(\"uuid\")",
- "#'",
- "#' updatedCollection <- arv$collections.update(list(name = \"New name\", description = \"New description\"),",
- "#' \"uuid\")",
- "#'",
- "#' createdCollection <- arv$collections.create(list(name = \"Example\",",
- "#' description = \"This is a test collection\"))",
- "#' }",
- "NULL",
- "",
- "#' @export")
-}
-
-getAPIClassMethodList <- function(methodResources, resourceNames)
-{
- methodList <- unlist(unname(Map(function(resource, resourceName)
- {
- methodNames <- names(resource$methods)
- paste0(resourceName,
- ".",
- methodNames[!(methodNames %in% c("index", "show", "destroy"))])
-
- }, methodResources, resourceNames)))
-
- hardcodedMethods <- c("projects.create", "projects.get",
- "projects.list", "projects.update", "projects.delete")
- paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}")
-}
-
-getMethodDoc <- function(methodName, methodMetaData)
-{
- name <- paste("#' @name", methodName)
- usage <- getMethodUsage(methodName, methodMetaData)
- description <- paste("#'", methodName, "is a method defined in Arvados class.")
- params <- getMethodDescription(methodMetaData)
- returnValue <- paste("#' @return", methodMetaData$response[["$ref"]], "object.")
-
- c(paste("#'", methodName),
- "#' ",
- description,
- "#' ",
- usage,
- params,
- returnValue,
- name,
- "NULL",
- "")
-}
-
-getMethodUsage <- function(methodName, methodMetaData)
-{
- lineLengthLimit <- 40
- args <- getMethodArguments(methodMetaData)
- c(formatArgs(paste0("#' @usage arv$", methodName,
- "("), "#' \t", args, ")", lineLengthLimit))
-}
-
-getMethodDescription <- function(methodMetaData)
-{
- request <- methodMetaData$request
- requestDoc <- NULL
-
- if(!is.null(request))
- {
- requestDoc <- unname(unlist(sapply(request$properties, function(prop)
- {
- className <- sapply(prop, function(ref) ref)
- objectName <- paste0(tolower(substr(className, 1, 1)),
- substr(className, 2, nchar(className)))
- paste("#' @param", objectName, className, "object.")
- })))
- }
-
- argNames <- names(methodMetaData$parameters)
-
- argsDoc <- unname(unlist(sapply(argNames, function(argName)
- {
- arg <- methodMetaData$parameters[[argName]]
- argDescription <- arg$description
- paste("#' @param", argName, argDescription)
- })))
-
- c(requestDoc, argsDoc)
-}
-
-genProjectMethodsDoc <- function()
-{
- #TODO: Manually update this documentation to reflect changes in discovery document.
-    c("#' projects.get",
-      "#' ",
-      "#' projects.get is equivalent to the groups.get method.",
- "#' ",
- "#' @usage arv$projects.get(uuid)",
- "#' @param uuid The UUID of the Group in question.",
- "#' @return Group object.",
- "#' @name projects.get",
- "NULL",
- "",
-      "#' projects.create",
-      "#' ",
-      "#' projects.create wraps the groups.create method by setting the group_class attribute to \"project\".",
- "#' ",
- "#' @usage arv$projects.create(group, ensure_unique_name = \"false\")",
- "#' @param group Group object.",
- "#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "#' @return Group object.",
- "#' @name projects.create",
- "NULL",
- "",
-      "#' projects.update",
-      "#' ",
-      "#' projects.update wraps the groups.update method by setting the group_class attribute to \"project\".",
- "#' ",
- "#' @usage arv$projects.update(group, uuid)",
- "#' @param group Group object.",
- "#' @param uuid The UUID of the Group in question.",
- "#' @return Group object.",
- "#' @name projects.update",
- "NULL",
- "",
-      "#' projects.delete",
-      "#' ",
-      "#' projects.delete is equivalent to the groups.delete method.",
-      "#' ",
-      "#' @usage arv$projects.delete(uuid)",
- "#' @param uuid The UUID of the Group in question.",
- "#' @return Group object.",
- "#' @name projects.delete",
- "NULL",
- "",
-      "#' projects.list",
-      "#' ",
-      "#' projects.list wraps the groups.list method by setting the group_class attribute to \"project\".",
- "#' ",
- "#' @usage arv$projects.list(filters = NULL,",
- "#' where = NULL, order = NULL, distinct = NULL,",
- "#' limit = \"100\", offset = \"0\", count = \"exact\",",
- "#' include_trash = NULL, uuid = NULL, recursive = NULL)",
- "#' @param filters ",
- "#' @param where ",
- "#' @param order ",
- "#' @param distinct ",
- "#' @param limit ",
- "#' @param offset ",
- "#' @param count ",
- "#' @param include_trash Include items whose is_trashed attribute is true.",
- "#' @param uuid ",
- "#' @param recursive Include contents from child groups recursively.",
- "#' @return Group object.",
- "#' @name projects.list",
- "NULL",
- "")
-}
-
-#NOTE: Utility functions:
-
-# This function is used to split very long lines of code into smaller chunks.
-# This is usually the case when we pass a lot of named arguments to a function.
-formatArgs <- function(prependAtStart, prependToEachSplit,
- args, appendAtEnd, lineLength)
-{
- if(length(args) > 1)
- {
- args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",")
- }
-
- args[1] <- paste0(prependAtStart, args[1])
- args[length(args)] <- paste0(args[length(args)], appendAtEnd)
-
- argsLength <- length(args)
- argLines <- list()
- index <- 1
-
- while(index <= argsLength)
- {
- line <- args[index]
- index <- index + 1
-
- while(nchar(line) < lineLength && index <= argsLength)
- {
- line <- paste(line, args[index])
- index <- index + 1
- }
-
- argLines <- c(argLines, line)
- }
-
- argLines <- unlist(argLines)
- argLinesLen <- length(argLines)
-
- if(argLinesLen > 1)
- argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen])
-
- argLines
-}
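
The generator deleted above fetched a live discovery document from a hard-coded cluster (`jutro.arvadosapi.com`) at build time and wrote `R/Arvados.R` from it. With this patch, a snapshot of the discovery document is checked in as `sdk/R/arvados-v1-discovery.json` (added below) instead. A sketch of reading the checked-in copy, assuming `jsonlite` is available:

```r
# Sketch only: read the checked-in discovery document rather than
# fetching it over the network as the removed getAPIDocument() did.
library(jsonlite)

discoveryDocument <- jsonlite::fromJSON("sdk/R/arvados-v1-discovery.json",
                                        simplifyVector = FALSE)

# As in the removed generateAPI(), $resources maps resource names such as
# "collections" or "containers" to their method descriptions.
names(discoveryDocument$resources)
```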
diff --git a/sdk/R/README.md b/sdk/R/README.md
index fe98e648ca..aea1273925 100644
--- a/sdk/R/README.md
+++ b/sdk/R/README.md
@@ -28,12 +28,12 @@ library('ArvadosR')
> **Note**
> On Linux, you may have to install supporting packages.
>
-> On Centos 7, this is:
+> On Red Hat, AlmaLinux, and Rocky Linux, this is:
> ```
> yum install libxml2-devel openssl-devel curl-devel
> ```
>
-> On Debian, this is:
+> On Debian and Ubuntu, this is:
> ```
> apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
> ```
@@ -346,15 +346,15 @@ subcollection$copy("destination/folder")
```r
?collections_update
-?jobs_get
+?workflows_get
```
## Building the ArvadosR package
-```r
-cd arvados/sdk && R CMD build R
+```
+make package
```
This will create a tarball of the ArvadosR package in the current directory.
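
The new file below is a snapshot of the Arvados v1 discovery document: `auth` lists the OAuth2 scopes, the top-level `parameters` apply to every call, and each entry under `resources` describes one REST method, including its `path`, `httpMethod`, `parameters`, and request/response schemas. As an illustration only, here is roughly how a client could turn the `arvados.collections.get` entry into an HTTP request; the host and UUID are hypothetical placeholders:

```r
# Illustrative only: arvados.collections.get is described below as
#   "path": "collections/{uuid}", "httpMethod": "GET"
# relative to "basePath": "/arvados/v1/".
library(httr)

host  <- "xvm1.arvadosapi.com"          # hypothetical cluster host
token <- Sys.getenv("ARVADOS_API_TOKEN")
uuid  <- "zzzzz-4zz18-zzzzzzzzzzzzzzz"  # placeholder collection UUID

url  <- paste0("https://", host, "/arvados/v1/collections/", uuid)
resp <- httr::GET(url, httr::add_headers(Authorization = paste("Bearer", token)))
collection <- httr::content(resp, as = "parsed", type = "application/json")
```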
diff --git a/sdk/R/arvados-v1-discovery.json b/sdk/R/arvados-v1-discovery.json
new file mode 100644
index 0000000000..b1fd3112be
--- /dev/null
+++ b/sdk/R/arvados-v1-discovery.json
@@ -0,0 +1,5394 @@
+{
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://api.arvados.org/auth/arvados": {
+ "description": "View and manage objects"
+ },
+ "https://api.arvados.org/auth/arvados.readonly": {
+ "description": "View objects"
+ }
+ }
+ }
+ },
+ "basePath": "/arvados/v1/",
+ "batchPath": "batch",
+ "description": "The API to interact with Arvados.",
+ "discoveryVersion": "v1",
+ "documentationLink": "http://doc.arvados.org/api/index.html",
+ "id": "arvados:v1",
+ "kind": "discovery#restDescription",
+ "name": "arvados",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ }
+ },
+ "protocol": "rest",
+ "resources": {
+ "api_client_authorizations": {
+ "methods": {
+ "get": {
+ "id": "arvados.api_client_authorizations.get",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "GET",
+          "description": "Get an ApiClientAuthorization record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.api_client_authorizations.list",
+ "path": "api_client_authorizations",
+ "httpMethod": "GET",
+          "description": "Retrieve an ApiClientAuthorizationList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+              "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorizationList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.api_client_authorizations.create",
+ "path": "api_client_authorizations",
+ "httpMethod": "POST",
+ "description": "Create a new ApiClientAuthorization.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.api_client_authorizations.update",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing ApiClientAuthorization.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.api_client_authorizations.delete",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing ApiClientAuthorization.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "create_system_auth": {
+ "id": "arvados.api_client_authorizations.create_system_auth",
+ "path": "api_client_authorizations/create_system_auth",
+ "httpMethod": "POST",
+ "description": "Create a token for the system (\"root\") user.",
+ "parameters": {
+ "scopes": {
+ "type": "array",
+ "required": false,
+ "default": "[\"all\"]",
+ "description": "An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.api_client_authorizations.current",
+ "path": "api_client_authorizations/current",
+ "httpMethod": "GET",
+ "description": "Return all metadata for the token used to authorize this request.",
+ "parameters": {},
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "authorized_keys": {
+ "methods": {
+ "get": {
+ "id": "arvados.authorized_keys.get",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "GET",
+          "description": "Get an AuthorizedKey record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.authorized_keys.list",
+ "path": "authorized_keys",
+ "httpMethod": "GET",
+          "description": "Retrieve an AuthorizedKeyList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+              "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKeyList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.authorized_keys.create",
+ "path": "authorized_keys",
+ "httpMethod": "POST",
+ "description": "Create a new AuthorizedKey.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
+ }
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.authorized_keys.update",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing AuthorizedKey.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
+ }
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.authorized_keys.delete",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing AuthorizedKey.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "collections": {
+ "methods": {
+ "get": {
+ "id": "arvados.collections.get",
+ "path": "collections/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Collection record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show collection even if its `is_trashed` attribute is true.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.collections.list",
+ "path": "collections",
+ "httpMethod": "GET",
+ "description": "Retrieve a CollectionList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+              "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include collections whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include past collection versions.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "CollectionList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.collections.create",
+ "path": "collections",
+ "httpMethod": "POST",
+ "description": "Create a new Collection.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "replace_segments": {
+ "type": "object",
+ "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.collections.update",
+ "path": "collections/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "replace_segments": {
+ "type": "object",
+ "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.collections.delete",
+ "path": "collections/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "provenance": {
+ "id": "arvados.collections.provenance",
+ "path": "collections/{uuid}/provenance",
+ "httpMethod": "GET",
+ "description": "Detail the provenance of a given collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "used_by": {
+ "id": "arvados.collections.used_by",
+ "path": "collections/{uuid}/used_by",
+ "httpMethod": "GET",
+ "description": "Detail where a given collection has been used.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "trash": {
+ "id": "arvados.collections.trash",
+ "path": "collections/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "Trash a collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "untrash": {
+ "id": "arvados.collections.untrash",
+ "path": "collections/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "Untrash a collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "computed_permissions": {
+ "methods": {
+ "list": {
+ "id": "arvados.computed_permissions.list",
+ "path": "computed_permissions",
+ "httpMethod": "GET",
+ "description": "Retrieve a ComputedPermissionList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+              "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ComputedPermissionList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
+ "containers": {
+ "methods": {
+ "get": {
+ "id": "arvados.containers.get",
+ "path": "containers/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Container record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.containers.list",
+ "path": "containers",
+ "httpMethod": "GET",
+ "description": "Retrieve a ContainerList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+              "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.containers.create",
+ "path": "containers",
+ "httpMethod": "POST",
+ "description": "Create a new Container.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container": {
+ "$ref": "Container"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.containers.update",
+ "path": "containers/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container": {
+ "$ref": "Container"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.containers.delete",
+ "path": "containers/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "auth": {
+ "id": "arvados.containers.auth",
+ "path": "containers/{uuid}/auth",
+ "httpMethod": "GET",
+ "description": "Get the API client authorization token associated with this container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "lock": {
+ "id": "arvados.containers.lock",
+ "path": "containers/{uuid}/lock",
+ "httpMethod": "POST",
+ "description": "Lock a container (for a dispatcher to begin running it).",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "unlock": {
+ "id": "arvados.containers.unlock",
+ "path": "containers/{uuid}/unlock",
+ "httpMethod": "POST",
+ "description": "Unlock a container (for a dispatcher to stop running it).",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update_priority": {
+ "id": "arvados.containers.update_priority",
+ "path": "containers/{uuid}/update_priority",
+ "httpMethod": "POST",
+ "description": "Recalculate and return the priority of a given container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "secret_mounts": {
+ "id": "arvados.containers.secret_mounts",
+ "path": "containers/{uuid}/secret_mounts",
+ "httpMethod": "GET",
+ "description": "Return secret mount information for the container associated with the API token authorizing this request.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.containers.current",
+ "path": "containers/current",
+ "httpMethod": "GET",
+ "description": "Return the container record associated with the API token authorizing this request.",
+ "parameters": {},
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "container_requests": {
+ "methods": {
+ "get": {
+ "id": "arvados.container_requests.get",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a ContainerRequest record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show container request even if its owner project is trashed.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.container_requests.list",
+ "path": "container_requests",
+ "httpMethod": "GET",
+ "description": "Retrieve a ContainerRequestList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+              "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include container requests whose owner project is trashed.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequestList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.container_requests.create",
+ "path": "container_requests",
+ "httpMethod": "POST",
+ "description": "Create a new ContainerRequest.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container_request": {
+ "$ref": "ContainerRequest"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.container_requests.update",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing ContainerRequest.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container_request": {
+ "$ref": "ContainerRequest"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.container_requests.delete",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing ContainerRequest.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "container_status": {
+ "id": "arvados.container_requests.container_status",
+ "path": "container_requests/{uuid}/container_status",
+ "httpMethod": "GET",
+ "description": "Return scheduling details for a container request.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "required": true,
+ "description": "The UUID of the container request to query.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
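Editor's note: to tie the `container_requests` methods together, here is a hedged Python SDK sketch. The request body is wrapped under the `container_request` key as the `create` schema above requires, and `container_status` is then queried for scheduling details; the image name and resource sizes are illustrative placeholders, not defaults.

    import arvados

    api = arvados.api('v1')

    # State "Committed" asks the cluster to schedule the container.
    cr = api.container_requests().create(body={
        'container_request': {
            'name': 'example-hello',
            'command': ['echo', 'hello'],
            'container_image': 'busybox',  # placeholder image reference
            'output_path': '/out',
            'runtime_constraints': {'vcpus': 1, 'ram': 256 << 20},
            'state': 'Committed',
        },
    }).execute()

    # container_status reports scheduling details for the request.
    status = api.container_requests().container_status(uuid=cr['uuid']).execute()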
+ "credentials": {
+ "methods": {
+ "get": {
+ "id": "arvados.credentials.get",
+ "path": "credentials/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Credential record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Credential to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.credentials.list",
+ "path": "credentials",
+ "httpMethod": "GET",
+ "description": "Retrieve a CredentialList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "CredentialList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.credentials.create",
+ "path": "credentials",
+ "httpMethod": "POST",
+ "description": "Create a new Credential.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "credential": {
+ "$ref": "Credential"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.credentials.update",
+ "path": "credentials/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Credential.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Credential to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "credential": {
+ "$ref": "Credential"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.credentials.delete",
+ "path": "credentials/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Credential.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Credential to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "secret": {
+ "id": "arvados.credentials.secret",
+ "path": "credentials/{uuid}/secret",
+ "httpMethod": "GET",
+ "description": "Fetch the secret part of the credential (can only be invoked by running containers).",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Credential to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
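Editor's note: the `secret` method stands apart from the usual CRUD set; per its description above it is only honored for requests authenticated by a running container's token. A sketch with a placeholder UUID:

    import arvados

    api = arvados.api('v1')

    cred_uuid = 'xxxxx-xxxxx-xxxxxxxxxxxxxxx'  # placeholder credential UUID

    # Succeeds only when ARVADOS_API_TOKEN belongs to a running
    # container; other callers get an error.
    secret = api.credentials().secret(uuid=cred_uuid).execute()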
+ "groups": {
+ "methods": {
+ "get": {
+ "id": "arvados.groups.get",
+ "path": "groups/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Group record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Return group/project even if its `is_trashed` attribute is true.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.groups.list",
+ "path": "groups",
+ "httpMethod": "GET",
+ "description": "Retrieve a GroupList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "GroupList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.groups.create",
+ "path": "groups",
+ "httpMethod": "POST",
+ "description": "Create a new Group.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "If true, cluster permission will not be updated immediately, but instead at the next configured update interval."
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "group": {
+ "$ref": "Group"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.groups.update",
+ "path": "groups/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "If true, cluster permission will not be updated immediately, but instead at the next configured update interval."
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "group": {
+ "$ref": "Group"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.groups.delete",
+ "path": "groups/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "contents": {
+ "id": "arvados.groups.contents",
+ "path": "groups/contents",
+ "httpMethod": "GET",
+ "description": "List objects that belong to a group.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
+ "uuid": {
+ "type": "string",
+ "required": false,
+ "default": "",
+ "description": "If given, limit the listing to objects owned by the\nuser or group with this UUID.",
+ "location": "query"
+ },
+ "recursive": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, include contents from child groups recursively.",
+ "location": "query"
+ },
+ "include": {
+ "type": "array",
+ "required": false,
+ "description": "An array of referenced objects to include in the `included` field of the response. Supported values in the array are:\n\n * `\"container_uuid\"`\n * `\"owner_uuid\"`\n * `\"collection_uuid\"`\n\n",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, include past versions of collections in the listing.",
+ "location": "query"
+ },
+ "exclude_home_project": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, exclude contents of the user's home project from the listing.\nCalling this method with this flag set is how clients enumerate objects shared\nwith the current user.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "shared": {
+ "id": "arvados.groups.shared",
+ "path": "groups/shared",
+ "httpMethod": "GET",
+ "description": "List groups that the current user can access via permission links.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
+ "include": {
+ "type": "string",
+ "required": false,
+ "description": "A string naming referenced objects to include in the `included` field of the response. Supported values are:\n\n * `\"owner_uuid\"`\n\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "trash": {
+ "id": "arvados.groups.trash",
+ "path": "groups/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "Trash a group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "untrash": {
+ "id": "arvados.groups.untrash",
+ "path": "groups/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "Untrash a group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
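Editor's note: `groups.contents` is the workhorse listing call; unlike `groups.list` it returns objects of several types owned by one project, as the `include` values above suggest. A Python SDK sketch with a placeholder project UUID:

    import arvados

    api = arvados.api('v1')

    project_uuid = 'xxxxx-j7d0g-xxxxxxxxxxxxxxx'  # placeholder project UUID

    # One paged envelope of collections, container requests, subprojects,
    # etc.; recursive=True descends into child projects.
    page = api.groups().contents(
        uuid=project_uuid,
        recursive=True,
        include=['owner_uuid'],
        limit=50,
    ).execute()
    for item in page['items']:
        print(item['uuid'], item.get('name'))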
+ "keep_services": {
+ "methods": {
+ "get": {
+ "id": "arvados.keep_services.get",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a KeepService record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.keep_services.list",
+ "path": "keep_services",
+ "httpMethod": "GET",
+ "description": "Retrieve a KeepServiceList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepServiceList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.keep_services.create",
+ "path": "keep_services",
+ "httpMethod": "POST",
+ "description": "Create a new KeepService.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_service": {
+ "$ref": "KeepService"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.keep_services.update",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing KeepService.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_service": {
+ "$ref": "KeepService"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.keep_services.delete",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing KeepService.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "accessible": {
+ "id": "arvados.keep_services.accessible",
+ "path": "keep_services/accessible",
+ "httpMethod": "GET",
+ "description": "List Keep services that the current client can access.",
+ "parameters": {},
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
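Editor's note: `keep_services.accessible` takes no parameters because the server decides which Keep services the calling client should use. A sketch; in practice the payload is a standard list envelope with an `items` array, which is how Keep clients consume it:

    import arvados

    api = arvados.api('v1')

    services = api.keep_services().accessible().execute()
    for svc in services['items']:
        print(svc['service_type'], svc['service_host'], svc['service_port'])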
+ "links": {
+ "methods": {
+ "get": {
+ "id": "arvados.links.get",
+ "path": "links/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Link record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.links.list",
+ "path": "links",
+ "httpMethod": "GET",
+ "description": "Retrieve a LinkList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LinkList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.links.create",
+ "path": "links",
+ "httpMethod": "POST",
+ "description": "Create a new Link.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.links.update",
+ "path": "links/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Link.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.links.delete",
+ "path": "links/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Link.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_permissions": {
+ "id": "arvados.links.get_permissions",
+ "path": "permissions/{uuid}",
+ "httpMethod": "GET",
+ "description": "List permissions granted on an Arvados object.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
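Editor's note: permission links illustrate the general `links` schema. In Arvados permission semantics `tail_uuid` names the grantee and `head_uuid` the object being shared, and `get_permissions` reads back what has been granted on an object. A sketch with placeholder UUIDs:

    import arvados

    api = arvados.api('v1')

    user_uuid = 'xxxxx-tpzed-xxxxxxxxxxxxxxx'  # placeholder user UUID
    coll_uuid = 'xxxxx-4zz18-xxxxxxxxxxxxxxx'  # placeholder collection UUID

    # Grant read access: link_class 'permission' plus a can_* name.
    api.links().create(body={
        'link': {
            'link_class': 'permission',
            'name': 'can_read',
            'tail_uuid': user_uuid,
            'head_uuid': coll_uuid,
        },
    }).execute()

    # List the permission links granted on the collection.
    perms = api.links().get_permissions(uuid=coll_uuid).execute()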
+ "logs": {
+ "methods": {
+ "get": {
+ "id": "arvados.logs.get",
+ "path": "logs/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Log record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.logs.list",
+ "path": "logs",
+ "httpMethod": "GET",
+ "description": "Retrieve a LogList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LogList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.logs.create",
+ "path": "logs",
+ "httpMethod": "POST",
+ "description": "Create a new Log.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "log": {
+ "$ref": "Log"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.logs.update",
+ "path": "logs/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Log.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "log": {
+ "$ref": "Log"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.logs.delete",
+ "path": "logs/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Log.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
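Editor's note: logs are usually consumed with a filtered `list`. The `filters` parameter takes `[attribute, operator, operand]` triples as described in the linked filters reference, and `order` uses the `<ATTRIBUTE> <DIRECTION>` format shown above. A sketch with a placeholder object UUID:

    import arvados

    api = arvados.api('v1')

    obj_uuid = 'xxxxx-dz642-xxxxxxxxxxxxxxx'  # placeholder object UUID

    logs = api.logs().list(
        filters=[['object_uuid', '=', obj_uuid]],
        order=['created_at desc'],
        limit=10,
    ).execute()
    for entry in logs['items']:
        print(entry['event_type'], entry['created_at'])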
+ "users": {
+ "methods": {
+ "get": {
+ "id": "arvados.users.get",
+ "path": "users/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a User record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.users.list",
+ "path": "users",
+ "httpMethod": "GET",
+ "description": "Retrieve a UserList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.users.create",
+ "path": "users",
+ "httpMethod": "POST",
+ "description": "Create a new User.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.users.update",
+ "path": "users/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing User.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not try to update the user on any other clusters in the federation,\nonly the cluster that received the request.\nYou must be an administrator to use this flag.",
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.users.delete",
+ "path": "users/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing User.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.users.current",
+ "path": "users/current",
+ "httpMethod": "GET",
+ "description": "Return the user record associated with the API token authorizing this request.",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "system": {
+ "id": "arvados.users.system",
+ "path": "users/system",
+ "httpMethod": "GET",
+ "description": "Return this cluster's system (\"root\") user record.",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "activate": {
+ "id": "arvados.users.activate",
+ "path": "users/{uuid}/activate",
+ "httpMethod": "POST",
+ "description": "Set the `is_active` flag on a user record.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "setup": {
+ "id": "arvados.users.setup",
+ "path": "users/setup",
+ "httpMethod": "POST",
+ "description": "Convenience method to \"fully\" set up a user record with a virtual machine login and notification email.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "required": false,
+ "description": "UUID of an existing user record to set up.",
+ "location": "query"
+ },
+ "user": {
+ "type": "object",
+ "required": false,
+ "description": "Attributes of a new user record to set up.",
+ "location": "query"
+ },
+ "repo_name": {
+ "type": "string",
+ "required": false,
+ "description": "This parameter is obsolete and ignored.",
+ "location": "query"
+ },
+ "vm_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.",
+ "location": "query"
+ },
+ "send_notification_email": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, send an email to the user notifying them they can now access this Arvados cluster.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "unsetup": {
+ "id": "arvados.users.unsetup",
+ "path": "users/{uuid}/unsetup",
+ "httpMethod": "POST",
+ "description": "Unset a user's active flag and delete associated records.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "merge": {
+ "id": "arvados.users.merge",
+ "path": "users/merge",
+ "httpMethod": "POST",
+ "description": "Transfer ownership of one user's data to another.",
+ "parameters": {
+ "new_owner_uuid": {
+ "type": "string",
+ "required": true,
+ "description": "UUID of the user or group that will take ownership of data owned by the old user.",
+ "location": "query"
+ },
+ "new_user_token": {
+ "type": "string",
+ "required": false,
+ "description": "Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.",
+ "location": "query"
+ },
+ "redirect_to_new_user": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, authorization attempts for the old user will be redirected to the new user.",
+ "location": "query"
+ },
+ "old_user_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.",
+ "location": "query"
+ },
+ "new_user_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "UUID of the user receiving ownership. You must be an admin to use this option.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
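The `activate`, `setup`, and `merge` methods above are generated verbatim into the Python SDK client. A minimal sketch of an admin onboarding flow, assuming `ARVADOS_API_HOST` and `ARVADOS_API_TOKEN` are set in the environment and using placeholder UUIDs:

```python
import arvados

api = arvados.api('v1')  # builds a client from this discovery document

# Set the is_active flag on a user record (admin only).
user = api.users().activate(uuid='zzzzz-tpzed-12345abcde67890').execute()

# "Fully" set up the user: link a VM login and send a notification email.
api.users().setup(
    uuid=user['uuid'],
    vm_uuid='zzzzz-2x53u-12345abcde67890',  # placeholder VirtualMachine UUID
    send_notification_email=True,
).execute()
```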
+ "user_agreements": {
+ "methods": {
+ "get": {
+ "id": "arvados.user_agreements.get",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a UserAgreement record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.user_agreements.list",
+ "path": "user_agreements",
+ "httpMethod": "GET",
+ "description": "Retrieve a UserAgreementList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreementList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.user_agreements.create",
+ "path": "user_agreements",
+ "httpMethod": "POST",
+ "description": "Create a new UserAgreement.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user_agreement": {
+ "$ref": "UserAgreement"
+ }
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.user_agreements.update",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing UserAgreement.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user_agreement": {
+ "$ref": "UserAgreement"
+ }
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.user_agreements.delete",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing UserAgreement.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "signatures": {
+ "id": "arvados.user_agreements.signatures",
+ "path": "user_agreements/signatures",
+ "httpMethod": "GET",
+ "description": "List all user agreement signature links from a user.",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "sign": {
+ "id": "arvados.user_agreements.sign",
+ "path": "user_agreements/sign",
+ "httpMethod": "POST",
+ "description": "Create a signature link from the current user for a given user agreement.",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
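The `signatures` and `sign` methods take no documented parameters; the server identifies the current user from the request token. A hedged sketch (whether `sign` needs additional body fields to identify a specific agreement is not shown in this document):

```python
import arvados

api = arvados.api('v1')

# Agreements defined on the cluster, and signature links already
# recorded for the current user.
agreements = api.user_agreements().list().execute()['items']
signed = api.user_agreements().signatures().execute()

# Record the current user's acceptance.
api.user_agreements().sign().execute()
```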
+ "virtual_machines": {
+ "methods": {
+ "get": {
+ "id": "arvados.virtual_machines.get",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a VirtualMachine record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.virtual_machines.list",
+ "path": "virtual_machines",
+ "httpMethod": "GET",
+ "description": "Retrieve a VirtualMachineList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachineList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.virtual_machines.create",
+ "path": "virtual_machines",
+ "httpMethod": "POST",
+ "description": "Create a new VirtualMachine.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.virtual_machines.update",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing VirtualMachine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.virtual_machines.delete",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing VirtualMachine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "logins": {
+ "id": "arvados.virtual_machines.logins",
+ "path": "virtual_machines/{uuid}/logins",
+ "httpMethod": "GET",
+ "description": "List login permission links for a given virtual machine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_all_logins": {
+ "id": "arvados.virtual_machines.get_all_logins",
+ "path": "virtual_machines/get_all_logins",
+ "httpMethod": "GET",
+ "description": "List login permission links for all virtual machines.",
+ "parameters": {},
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
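The two login-listing methods above differ only in scope: `logins` is per-VM, while `get_all_logins` is cluster-wide and admin-only. A sketch with a placeholder VM UUID; the exact shape of the returned items is an assumption here:

```python
import arvados

api = arvados.api('v1')

# Login permission links for one virtual machine.
vm_logins = api.virtual_machines().logins(
    uuid='zzzzz-2x53u-12345abcde67890').execute()

# Login permission links across all virtual machines (admin only).
all_logins = api.virtual_machines().get_all_logins().execute()
for login in all_logins.get('items', []):
    print(login)
```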
+ "workflows": {
+ "methods": {
+ "get": {
+ "id": "arvados.workflows.get",
+ "path": "workflows/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Workflow record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.workflows.list",
+ "path": "workflows",
+ "httpMethod": "GET",
+ "description": "Retrieve a WorkflowList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "WorkflowList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.workflows.create",
+ "path": "workflows",
+ "httpMethod": "POST",
+ "description": "Create a new Workflow.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "workflow": {
+ "$ref": "Workflow"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.workflows.update",
+ "path": "workflows/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Workflow.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "workflow": {
+ "$ref": "Workflow"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.workflows.delete",
+ "path": "workflows/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Workflow.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
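The `list` parameters above compose in the obvious way: `order` strings follow the `ATTRIBUTE DIRECTION` format, and `count='none'` skips the `items_available` tally. A sketch with a placeholder project UUID:

```python
import arvados

api = arvados.api('v1')

# Workflows in one project, newest first, without an items_available count.
page = api.workflows().list(
    filters=[['owner_uuid', '=', 'zzzzz-j7d0g-12345abcde67890']],
    order=['created_at desc'],
    limit=50,
    count='none',
).execute()
for wf in page['items']:
    print(wf['uuid'], wf['name'])
```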
+ "configs": {
+ "methods": {
+ "get": {
+ "id": "arvados.configs.get",
+ "path": "config",
+ "httpMethod": "GET",
+ "description": "Get this cluster's public configuration settings.",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
+ "vocabularies": {
+ "methods": {
+ "get": {
+ "id": "arvados.vocabularies.get",
+ "path": "vocabulary",
+ "httpMethod": "GET",
+ "description": "Get this cluster's configured vocabulary definition.\n\nRefer to [metadata vocabulary documentation][] for details.\n\n[metadata vocabulary documentation]: https://doc.aravdos.org/admin/metadata-vocabulary.html\n\n",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
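Both `configs.get` and `vocabularies.get` are parameterless reads available with the read-only scope. A sketch; the `ClusterID` and `tags` keys are assumptions about the returned documents, which have no schema in this discovery document:

```python
import arvados

api = arvados.api('v1')

# Public (non-secret) cluster configuration.
config = api.configs().get().execute()
print(config.get('ClusterID'))

# Configured metadata vocabulary, if the administrator defined one.
vocab = api.vocabularies().get().execute()
print(sorted(vocab.get('tags', {}).keys()))
```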
+ "sys": {
+ "methods": {
+ "get": {
+ "id": "arvados.sys.trash_sweep",
+ "path": "sys/trash_sweep",
+ "httpMethod": "POST",
+ "description": "Run scheduled data trash and sweep operations across this cluster's Keep services.",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ }
+ },
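Every method `id` under `resources` maps mechanically onto the generated client: `arvados.sys.trash_sweep` is reached through the `sys` resource and its method key, which here is `get` even though the request is issued as a POST. A sketch:

```python
import arvados

api = arvados.api('v1')

# Trigger a trash/sweep pass across the cluster's Keep services
# (admin only). The method key in this document is "get", so the
# generated Python method is named get() despite the POST verb.
api.sys().get().execute()
```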
+ "revision": "20250402",
+ "schemas": {
+ "ApiClientAuthorizationList": {
+ "id": "ApiClientAuthorizationList",
+ "description": "A list of ApiClientAuthorization objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#apiClientAuthorizationList.",
+ "default": "arvados#apiClientAuthorizationList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching ApiClientAuthorization objects.",
+ "items": {
+ "$ref": "ApiClientAuthorization"
+ }
+ }
+ }
+ },
+ "ApiClientAuthorization": {
+ "id": "ApiClientAuthorization",
+ "description": "Arvados API client authorization token\n\nThis resource represents an API token a user may use to authenticate an\nArvados API request.",
+ "type": "object",
+ "uuidPrefix": "gj3su",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "api_token": {
+ "description": "The secret token that can be used to authorize Arvados API requests.",
+ "type": "string"
+ },
+ "created_by_ip_address": {
+ "description": "The IP address of the client that created this token.",
+ "type": "string"
+ },
+ "last_used_by_ip_address": {
+ "description": "The IP address of the client that last used this token.",
+ "type": "string"
+ },
+ "last_used_at": {
+ "description": "The last time this token was used to authorize a request. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "expires_at": {
+ "description": "The time after which this token is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "created_at": {
+ "description": "The time this API client authorization was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "scopes": {
+ "description": "An array of strings identifying HTTP methods and API paths this token is\nauthorized to use. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n\n",
+ "type": "Array"
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This API client authorization's Arvados UUID, like `zzzzz-gj3su-12345abcde67890`."
+ }
+ }
+ },
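A quick way to see these attributes in practice is to inspect the session's own token. The `current` endpoint used below is an assumption; it is not part of this excerpt:

```python
import arvados

api = arvados.api('v1')

# Inspect the token authorizing this session (hypothetical endpoint).
token = api.api_client_authorizations().current().execute()
print(token['uuid'], token['scopes'], token.get('expires_at'))
```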
+ "AuthorizedKeyList": {
+ "id": "AuthorizedKeyList",
+ "description": "A list of AuthorizedKey objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#authorizedKeyList.",
+ "default": "arvados#authorizedKeyList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching AuthorizedKey objects.",
+ "items": {
+ "$ref": "AuthorizedKey"
+ }
+ }
+ }
+ },
+ "AuthorizedKey": {
+ "id": "AuthorizedKey",
+ "description": "Arvados authorized public key\n\nThis resource represents a public key a user may use to authenticate themselves\nto services on the cluster. Its primary use today is to store SSH keys for\nvirtual machines (\"shell nodes\"). It may be extended to store other keys in\nthe future.",
+ "type": "object",
+ "uuidPrefix": "fngyi",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This authorized key's Arvados UUID, like `zzzzz-fngyi-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this authorized key.",
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this authorized key.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this authorized key was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "name": {
+ "description": "The name of this authorized key assigned by a user.",
+ "type": "string"
+ },
+ "key_type": {
+ "description": "A string identifying what type of service uses this key. Supported values are:\n\n * `\"SSH\"`\n\n",
+ "type": "string"
+ },
+ "authorized_user_uuid": {
+ "description": "The UUID of the Arvados user that is authorized by this key.",
+ "type": "string"
+ },
+ "public_key": {
+ "description": "The full public key, in the format referenced by `key_type`.",
+ "type": "text"
+ },
+ "expires_at": {
+ "description": "The time after which this key is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "created_at": {
+ "description": "The time this authorized key was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ }
+ }
+ },
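A sketch of registering an SSH public key for shell-node logins, assuming the usual `authorized_keys.create` method with the request-body wrapper convention shown elsewhere in this document; the key text and user UUID are placeholders:

```python
import arvados

api = arvados.api('v1')

key = api.authorized_keys().create(body={'authorized_key': {
    'name': 'laptop key',
    'key_type': 'SSH',
    'authorized_user_uuid': 'zzzzz-tpzed-12345abcde67890',
    'public_key': 'ssh-ed25519 AAAA... user@laptop',
}}).execute()
print(key['uuid'])
```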
+ "CollectionList": {
+ "id": "CollectionList",
+ "description": "A list of Collection objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#collectionList.",
+ "default": "arvados#collectionList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Collection objects.",
+ "items": {
+ "$ref": "Collection"
+ }
+ }
+ }
+ },
+ "Collection": {
+ "id": "Collection",
+ "description": "Arvados data collection\n\nA collection describes how a set of files is stored in data blocks in Keep,\nalong with associated metadata.",
+ "type": "object",
+ "uuidPrefix": "4zz18",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this collection.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this collection was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this collection.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this collection was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "portable_data_hash": {
+ "description": "The portable data hash of this collection. This string provides a unique\nand stable reference to these contents.",
+ "type": "string"
+ },
+ "replication_desired": {
+ "description": "The number of copies that should be made for data in this collection.",
+ "type": "integer"
+ },
+ "replication_confirmed_at": {
+ "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this collection. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "replication_confirmed": {
+ "description": "The number of copies of data in this collection that the cluster has confirmed\nexist in storage.",
+ "type": "integer"
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This collection's Arvados UUID, like `zzzzz-4zz18-12345abcde67890`."
+ },
+ "manifest_text": {
+ "description": "The manifest text that describes how files are constructed from data blocks\nin this collection. Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n",
+ "type": "text"
+ },
+ "name": {
+ "description": "The name of this collection assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this collection assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "string"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this collection.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "delete_at": {
+ "description": "The time this collection will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "trash_at": {
+ "description": "The time this collection will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "description": "A boolean flag to indicate whether or not this collection is trashed.",
+ "type": "boolean"
+ },
+ "storage_classes_desired": {
+ "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this collection. Storage classes are configured by the cluster administrator.",
+ "type": "Array"
+ },
+ "storage_classes_confirmed": {
+ "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this collection's data.",
+ "type": "Array"
+ },
+ "storage_classes_confirmed_at": {
+ "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "current_version_uuid": {
+ "description": "The UUID of the current version of this collection.",
+ "type": "string"
+ },
+ "version": {
+ "description": "An integer that counts which version of a collection this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n",
+ "type": "integer"
+ },
+ "preserve_version": {
+ "description": "A boolean flag to indicate whether this specific version of this collection\nshould be persisted in cluster storage.",
+ "type": "boolean"
+ },
+ "file_count": {
+ "description": "The number of files represented in this collection's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
+ },
+ "file_size_total": {
+ "description": "The total size in bytes of files represented in this collection's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
+ }
+ }
+ },
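Because `manifest_text` can be very large, a common pattern is to `select` only the lightweight attributes defined above. A sketch with a placeholder UUID:

```python
import arvados

api = arvados.api('v1')

c = api.collections().get(
    uuid='zzzzz-4zz18-12345abcde67890',
    select=['uuid', 'name', 'portable_data_hash',
            'file_count', 'file_size_total'],
).execute()
print(c['portable_data_hash'], c['file_count'], c['file_size_total'])
```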
+ "ComputedPermissionList": {
+ "id": "ComputedPermissionList",
+ "description": "A list of ComputedPermission objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#computedPermissionList.",
+ "default": "arvados#computedPermissionList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching ComputedPermission objects.",
+ "items": {
+ "$ref": "ComputedPermission"
+ }
+ }
+ }
+ },
+ "ComputedPermission": {
+ "id": "ComputedPermission",
+ "description": "Arvados computed permission\n\nComputed permissions do not correspond directly to any Arvados resource, but\nprovide a simple way to query the entire graph of permissions granted to\nusers and groups.",
+ "type": "object",
+ "properties": {
+ "user_uuid": {
+ "description": "The UUID of the Arvados user who has this permission.",
+ "type": "string"
+ },
+ "target_uuid": {
+ "description": "The UUID of the Arvados object the user has access to.",
+ "type": "string"
+ },
+ "perm_level": {
+ "description": "A string representing the user's level of access to the target object.\nPossible values are:\n\n * `\"can_read\"`\n * `\"can_write\"`\n * `\"can_manage\"`\n\n",
+ "type": "string"
+ }
+ }
+ },
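Computed permissions are query-only. A sketch of listing everything one user can reach, assuming a `computed_permissions.list` method that accepts the standard `filters` parameter (not shown in this excerpt):

```python
import arvados

api = arvados.api('v1')

perms = api.computed_permissions().list(
    filters=[['user_uuid', '=', 'zzzzz-tpzed-12345abcde67890']],
).execute()
for p in perms['items']:
    print(p['target_uuid'], p['perm_level'])
```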
+ "ContainerList": {
+ "id": "ContainerList",
+ "description": "A list of Container objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#containerList.",
+ "default": "arvados#containerList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Container objects.",
+ "items": {
+ "$ref": "Container"
+ }
+ }
+ }
+ },
+ "Container": {
+ "id": "Container",
+ "description": "Arvados container record\n\nA container represents compute work that has been or should be dispatched,\nalong with its results. A container can satisfy one or more container requests.",
+ "type": "object",
+ "uuidPrefix": "dz642",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This container's Arvados UUID, like `zzzzz-dz642-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this container.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this container was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this container was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this container.",
+ "type": "string"
+ },
+ "state": {
+ "description": "A string representing the container's current execution status. Possible\nvalues are:\n\n * `\"Queued\"` --- This container has not been dispatched yet.\n * `\"Locked\"` --- A dispatcher has claimed this container in preparation to run it.\n * `\"Running\"` --- A dispatcher is running this container.\n * `\"Cancelled\"` --- Container execution has been cancelled by user request.\n * `\"Complete\"` --- A dispatcher ran this container to completion and recorded the results.\n\n",
+ "type": "string"
+ },
+ "started_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "finished_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "log": {
+ "description": "The portable data hash of the Arvados collection that contains this\ncontainer's logs.",
+ "type": "string"
+ },
+ "environment": {
+ "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.",
+ "type": "Hash"
+ },
+ "cwd": {
+ "description": "A string that the defines the working directory that the dispatcher should\nuse when it executes the command inside this container.",
+ "type": "string"
+ },
+ "command": {
+ "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.",
+ "type": "Array"
+ },
+ "output_path": {
+ "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.",
+ "type": "string"
+ },
+ "mounts": {
+ "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n",
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n",
+ "type": "Hash"
+ },
+ "output": {
+ "description": "The portable data hash of the Arvados collection that contains this\ncontainer's output file(s).",
+ "type": "string"
+ },
+ "container_image": {
+ "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.",
+ "type": "string"
+ },
+ "progress": {
+ "description": "A float between 0.0 and 1.0 (inclusive) that represents the container's\nexecution progress. This attribute is not implemented yet.",
+ "type": "float"
+ },
+ "priority": {
+ "description": "An integer between 0 and 1000 (inclusive) that represents this container's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n",
+ "type": "integer"
+ },
+ "exit_code": {
+ "description": "An integer that records the Unix exit code of the `command` from a\nfinished container.",
+ "type": "integer"
+ },
+ "auth_uuid": {
+ "description": "The UUID of the Arvados API client authorization token that a dispatcher\nshould use to set up this container. This token is automatically created by\nArvados and this attribute automatically assigned unless a container is\ncreated with `runtime_token`.",
+ "type": "string"
+ },
+ "locked_by_uuid": {
+ "description": "The UUID of the Arvados API client authorization token that successfully\nlocked this container in preparation to execute it.",
+ "type": "string"
+ },
+ "scheduling_parameters": {
+ "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n",
+ "type": "Hash"
+ },
+ "runtime_status": {
+ "description": "A hash with status updates from a running container.\nRefer to the [runtime status reference][] for details.\n\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\n\n",
+ "type": "Hash"
+ },
+ "runtime_user_uuid": {
+ "description": "The UUID of the Arvados user associated with the API client authorization\ntoken used to run this container.",
+ "type": "text"
+ },
+ "runtime_auth_scopes": {
+ "description": "The `scopes` from the API client authorization token used to run this container.",
+ "type": "Array"
+ },
+ "lock_count": {
+ "description": "The number of times this container has been locked by a dispatcher. This\nmay be greater than 1 if a dispatcher locks a container but then execution is\ninterrupted for any reason.",
+ "type": "integer"
+ },
+ "gateway_address": {
+ "description": "A string with the address of the Arvados gateway server, in `HOST:PORT`\nformat. This is for internal use only.",
+ "type": "string"
+ },
+ "interactive_session_started": {
+ "description": "This flag is set true if any user starts an interactive shell inside the\nrunning container.",
+ "type": "boolean"
+ },
+ "output_storage_classes": {
+ "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container. Storage classes are configured by\nthe cluster administrator.",
+ "type": "Array"
+ },
+ "output_properties": {
+ "description": "A hash of arbitrary metadata to set on the output collection of this container.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "cost": {
+ "description": "A float with the estimated cost of the cloud instance used to run this\ncontainer. The value is `0` if cost estimation is not available on this cluster.",
+ "type": "float"
+ },
+ "subrequests_cost": {
+ "description": "A float with the estimated cost of all cloud instances used to run this\ncontainer and all its subrequests. The value is `0` if cost estimation is not\navailable on this cluster.",
+ "type": "float"
+ },
+ "output_glob": {
+ "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n",
+ "type": "Array"
+ },
+ "service": {
+ "description": "A boolean flag. If set, it informs the system that this is a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.",
+ "type": "boolean"
+ },
+ "published_ports": {
+ "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
+ "type": "jsonb"
+ }
+ }
+ },
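The `state` values above drive a simple polling loop: a container is finished once it reaches `Cancelled` or `Complete`, at which point `exit_code`, `log`, and `output` are meaningful. A sketch with a placeholder UUID:

```python
import time
import arvados

api = arvados.api('v1')

uuid = 'zzzzz-dz642-12345abcde67890'  # placeholder container UUID
while True:
    c = api.containers().get(
        uuid=uuid, select=['state', 'exit_code', 'log', 'output']).execute()
    if c['state'] in ('Cancelled', 'Complete'):
        break
    time.sleep(10)
print(c['state'], c.get('exit_code'), c.get('output'))
```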
+ "ContainerRequestList": {
+ "id": "ContainerRequestList",
+ "description": "A list of ContainerRequest objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#containerRequestList.",
+ "default": "arvados#containerRequestList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching ContainerRequest objects.",
+ "items": {
+ "$ref": "ContainerRequest"
+ }
+ }
+ }
+ },
+ "ContainerRequest": {
+ "id": "ContainerRequest",
+ "description": "Arvados container request\n\nA container request represents a user's request that Arvados do some compute\nwork, along with full details about what work should be done. Arvados will\nattempt to fulfill the request by mapping it to a matching container record,\nrunning the work on demand if necessary.",
+ "type": "object",
+ "uuidPrefix": "xvhdp",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This container request's Arvados UUID, like `zzzzz-xvhdp-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this container request.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this container request was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this container request was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this container request.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of this container request assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this container request assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "text"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "state": {
+ "description": "A string indicating where this container request is in its lifecycle.\nPossible values are:\n\n * `\"Uncommitted\"` --- The container request has not been finalized and can still be edited.\n * `\"Committed\"` --- The container request is ready to be fulfilled.\n * `\"Final\"` --- The container request has been fulfilled or cancelled.\n\n",
+ "type": "string"
+ },
+ "requesting_container_uuid": {
+ "description": "The UUID of the container that created this container request, if any.",
+ "type": "string"
+ },
+ "container_uuid": {
+ "description": "The UUID of the container that fulfills this container request, if any.",
+ "type": "string"
+ },
+ "container_count_max": {
+ "description": "An integer that defines the maximum number of times Arvados should attempt\nto dispatch a container to fulfill this container request.",
+ "type": "integer"
+ },
+ "mounts": {
+ "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n",
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n",
+ "type": "Hash"
+ },
+ "container_image": {
+ "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.",
+ "type": "string"
+ },
+ "environment": {
+ "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.",
+ "type": "Hash"
+ },
+ "cwd": {
+ "description": "A string that the defines the working directory that the dispatcher should\nuse when it executes the command inside this container.",
+ "type": "string"
+ },
+ "command": {
+ "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.",
+ "type": "Array"
+ },
+ "output_path": {
+ "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.",
+ "type": "string"
+ },
+ "priority": {
+ "description": "An integer between 0 and 1000 (inclusive) that represents this container request's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n",
+ "type": "integer"
+ },
+ "expires_at": {
+ "description": "The time after which this container request will no longer be fulfilled. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "filters": {
+ "description": "Filters that limit which existing containers are eligible to satisfy this\ncontainer request. This attribute is not implemented yet and should be null.",
+ "type": "text"
+ },
+ "container_count": {
+ "description": "An integer that records how many times Arvados has attempted to dispatch\na container to fulfill this container request.",
+ "type": "integer"
+ },
+ "use_existing": {
+ "description": "A boolean flag. If set, Arvados may choose to satisfy this container\nrequest with an eligible container that already exists. Otherwise, Arvados will\nsatisfy this container request with a newer container, which will usually result\nin the container running again.",
+ "type": "boolean"
+ },
+ "scheduling_parameters": {
+ "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n",
+ "type": "Hash"
+ },
+ "output_uuid": {
+ "description": "The UUID of the Arvados collection that contains output for all the\ncontainer(s) that were dispatched to fulfill this container request.",
+ "type": "string"
+ },
+ "log_uuid": {
+ "description": "The UUID of the Arvados collection that contains logs for all the\ncontainer(s) that were dispatched to fulfill this container request.",
+ "type": "string"
+ },
+ "output_name": {
+ "description": "The name to set on the output collection of this container request.",
+ "type": "string"
+ },
+ "output_ttl": {
+ "description": "An integer in seconds. If greater than zero, when an output collection is\ncreated for this container request, its `expires_at` attribute will be set this\nfar in the future.",
+ "type": "integer"
+ },
+ "output_storage_classes": {
+ "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container request. Storage classes are configured by\nthe cluster administrator.",
+ "type": "Array"
+ },
+ "output_properties": {
+ "description": "A hash of arbitrary metadata to set on the output collection of this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "cumulative_cost": {
+ "description": "A float with the estimated cost of all cloud instances used to run\ncontainer(s) to fulfill this container request and their subrequests.\nThe value is `0` if cost estimation is not available on this cluster.",
+ "type": "float"
+ },
+ "output_glob": {
+ "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n",
+ "type": "Array"
+ },
+ "service": {
+ "description": "A boolean flag. If set, it informs the system that this request is for a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.",
+ "type": "boolean"
+ },
+ "published_ports": {
+ "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
+ "type": "Hash"
+ }
+ }
+ },
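Creating a request in the `Committed` state asks Arvados to dispatch a container immediately. A minimal sketch; the image name, mount, and constraint values are placeholders, not recommendations:

```python
import arvados

api = arvados.api('v1')

cr = api.container_requests().create(body={'container_request': {
    'name': 'hello world',
    'state': 'Committed',
    'container_image': 'arvados/jobs',          # placeholder image
    'command': ['echo', 'hello'],
    'output_path': '/out',
    'mounts': {'/out': {'kind': 'tmp', 'capacity': 1 << 20}},
    'runtime_constraints': {'vcpus': 1, 'ram': 256 << 20},
    'priority': 500,
}}).execute()
print(cr['uuid'], cr['state'], cr['container_uuid'])
```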
+ "CredentialList": {
+ "id": "CredentialList",
+ "description": "A list of Credential objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#credentialList.",
+ "default": "arvados#credentialList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Credential objects.",
+ "items": {
+ "$ref": "Credential"
+ }
+ }
+ }
+ },
+ "Credential": {
+ "id": "Credential",
+ "description": "Arvados credential.",
+ "type": "object",
+ "uuidPrefix": "oss07",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This credential's Arvados UUID, like `zzzzz-oss07-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this credential.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this credential was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this credential was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this credential.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of this credential assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this credential assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "text"
+ },
+ "credential_class": {
+ "description": "The type of credential being stored.",
+ "type": "string"
+ },
+ "scopes": {
+ "description": "The resources the credential applies to or should be used with.",
+ "type": "Array"
+ },
+ "external_id": {
+ "description": "The non-secret external identifier associated with a credential, e.g. a username.",
+ "type": "string"
+ },
+ "expires_at": {
+ "description": "Date after which the credential_secret field is no longer valid. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ }
+ }
+ },
+ "GroupList": {
+ "id": "GroupList",
+ "description": "A list of Group objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#groupList.",
+ "default": "arvados#groupList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Group objects.",
+ "items": {
+ "$ref": "Group"
+ }
+ }
+ }
+ },
+ "Group": {
+ "id": "Group",
+ "description": "Arvados group\n\nGroups provide a way to organize users or data together, depending on their\n`group_class`.",
+ "type": "object",
+ "uuidPrefix": "j7d0g",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This group's Arvados UUID, like `zzzzz-j7d0g-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this group.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this group was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this group.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this group was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "name": {
+ "description": "The name of this group assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this group assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "string"
+ },
+ "group_class": {
+ "description": "A string representing which type of group this is. One of:\n\n * `\"filter\"` --- A virtual project whose contents are selected dynamically by filters.\n * `\"project\"` --- An Arvados project that can contain collections,\n container records, workflows, and subprojects.\n * `\"role\"` --- A group of users that can be granted permissions in Arvados.\n\n",
+ "type": "string"
+ },
+ "trash_at": {
+ "description": "The time this group will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "description": "A boolean flag to indicate whether or not this group is trashed.",
+ "type": "boolean"
+ },
+ "delete_at": {
+ "description": "The time this group will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this group.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "frozen_by_uuid": {
+ "description": "The UUID of the user that has frozen this group, if any. Frozen projects\ncannot have their contents or metadata changed, even by admins.",
+ "type": "string"
+ }
+ }
+ },
+ "KeepServiceList": {
+ "id": "KeepServiceList",
+ "description": "A list of KeepService objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#keepServiceList.",
+ "default": "arvados#keepServiceList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching KeepService objects.",
+ "items": {
+ "$ref": "KeepService"
+ }
+ }
+ }
+ },
+ "KeepService": {
+ "id": "KeepService",
+ "description": "Arvados Keep service\n\nThis resource stores information about a single Keep service in this Arvados\ncluster that clients can contact to retrieve and store data.",
+ "type": "object",
+ "uuidPrefix": "bi6l4",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This Keep service's Arvados UUID, like `zzzzz-bi6l4-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this Keep service.",
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this Keep service.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this Keep service was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "service_host": {
+ "description": "The DNS hostname of this Keep service.",
+ "type": "string"
+ },
+ "service_port": {
+ "description": "The TCP port where this Keep service listens.",
+ "type": "integer"
+ },
+ "service_ssl_flag": {
+ "description": "A boolean flag that indicates whether or not this Keep service uses TLS/SSL.",
+ "type": "boolean"
+ },
+ "service_type": {
+ "description": "A string that describes which type of Keep service this is. One of:\n\n * `\"disk\"` --- A service that stores blocks on a local filesystem.\n * `\"blob\"` --- A service that stores blocks in a cloud object store.\n * `\"proxy\"` --- A keepproxy service.\n\n",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this Keep service was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "read_only": {
+ "description": "A boolean flag. If set, this Keep service does not accept requests to write data\nblocks; it only serves blocks it already has.",
+ "type": "boolean"
+ }
+ }
+ },
+ "LinkList": {
+ "id": "LinkList",
+ "description": "A list of Link objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#linkList.",
+ "default": "arvados#linkList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Link objects.",
+ "items": {
+ "$ref": "Link"
+ }
+ }
+ }
+ },
+ "Link": {
+ "id": "Link",
+ "description": "Arvados object link\n\nA link provides a way to define relationships between Arvados objects,\ndepending on their `link_class`.",
+ "type": "object",
+ "uuidPrefix": "o0j2j",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This link's Arvados UUID, like `zzzzz-o0j2j-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this link.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this link was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this link.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this link was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "tail_uuid": {
+ "description": "The UUID of the Arvados object that is the target of this relationship.",
+ "type": "string"
+ },
+ "link_class": {
+ "description": "A string that defines which kind of link this is. One of:\n\n * `\"permission\"` --- This link grants a permission to the user or group\n referenced by `head_uuid` to the object referenced by `tail_uuid`. The\n access level is set by `name`.\n * `\"star\"` --- This link represents a \"favorite.\" The user referenced\n by `head_uuid` wants quick access to the object referenced by `tail_uuid`.\n * `\"tag\"` --- This link represents an unstructured metadata tag. The object\n referenced by `tail_uuid` has the tag defined by `name`.\n\n",
+ "type": "string"
+ },
+ "name": {
+ "description": "The primary value of this link. For `\"permission\"` links, this is one of\n`\"can_read\"`, `\"can_write\"`, or `\"can_manage\"`.",
+ "type": "string"
+ },
+ "head_uuid": {
+ "description": "The UUID of the Arvados object that is the originator or actor in this\nrelationship. May be null.",
+ "type": "string"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this link.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ }
+ }
+ },
+ "LogList": {
+ "id": "LogList",
+ "description": "A list of Log objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#logList.",
+ "default": "arvados#logList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Log objects.",
+ "items": {
+ "$ref": "Log"
+ }
+ }
+ }
+ },
+ "Log": {
+ "id": "Log",
+ "description": "Arvados log record\n\nThis resource represents a single log record about an event in this Arvados\ncluster. Some individual Arvados services create log records. Users can also\ncreate custom logs.",
+ "type": "object",
+ "uuidPrefix": "57u5n",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "id": {
+ "description": "The serial number of this log. You can use this in filters to query logs\nthat were created before/after another.",
+ "type": "integer"
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This log's Arvados UUID, like `zzzzz-57u5n-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this log.",
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this log.",
+ "type": "string"
+ },
+ "object_uuid": {
+ "description": "The UUID of the Arvados object that this log pertains to, such as a user\nor container.",
+ "type": "string"
+ },
+ "event_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "event_type": {
+ "description": "An arbitrary short string that classifies what type of log this is.",
+ "type": "string"
+ },
+ "summary": {
+ "description": "A text string that describes the logged event. This is the primary\nattribute for simple logs.",
+ "type": "text"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this log.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "created_at": {
+ "description": "The time this log was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this log was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "object_owner_uuid": {
+ "description": "The `owner_uuid` of the object referenced by `object_uuid` at the time\nthis log was created.",
+ "type": "string"
+ }
+ }
+ },
+ "UserList": {
+ "id": "UserList",
+ "description": "A list of User objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#userList.",
+ "default": "arvados#userList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching User objects.",
+ "items": {
+ "$ref": "User"
+ }
+ }
+ }
+ },
+ "User": {
+ "id": "User",
+ "description": "Arvados user\n\nA user represents a single individual or role who may be authorized to access\nthis Arvados cluster.",
+ "type": "object",
+ "uuidPrefix": "tpzed",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This user's Arvados UUID, like `zzzzz-tpzed-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this user.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this user was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this user.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this user was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "email": {
+ "description": "This user's email address.",
+ "type": "string"
+ },
+ "first_name": {
+ "description": "This user's first name.",
+ "type": "string"
+ },
+ "last_name": {
+ "description": "This user's last name.",
+ "type": "string"
+ },
+ "identity_url": {
+ "description": "A URL that represents this user with the cluster's identity provider.",
+ "type": "string"
+ },
+ "is_admin": {
+ "description": "A boolean flag. If set, this user is an administrator of the Arvados\ncluster, and automatically passes most permissions checks.",
+ "type": "boolean"
+ },
+ "prefs": {
+ "description": "A hash that stores cluster-wide user preferences.",
+ "type": "Hash"
+ },
+ "is_active": {
+ "description": "A boolean flag. If unset, this user is not permitted to make any Arvados\nAPI requests.",
+ "type": "boolean"
+ },
+ "username": {
+ "description": "This user's Unix username on virtual machines.",
+ "type": "string"
+ }
+ }
+ },
+ "UserAgreementList": {
+ "id": "UserAgreementList",
+ "description": "A list of UserAgreement objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#userAgreementList.",
+ "default": "arvados#userAgreementList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching UserAgreement objects.",
+ "items": {
+ "$ref": "UserAgreement"
+ }
+ }
+ }
+ },
+ "UserAgreement": {
+ "id": "UserAgreement",
+ "description": "Arvados user agreement\n\nA user agreement is a collection with terms that users must agree to before\nthey can use this Arvados cluster.",
+ "type": "object",
+ "uuidPrefix": "gv0sa",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this user agreement.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this user agreement was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this user agreement.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this user agreement was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "portable_data_hash": {
+ "description": "The portable data hash of this user agreement. This string provides a unique\nand stable reference to these contents.",
+ "type": "string"
+ },
+ "replication_desired": {
+ "description": "The number of copies that should be made for data in this user agreement.",
+ "type": "integer"
+ },
+ "replication_confirmed_at": {
+ "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this user agreement. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "replication_confirmed": {
+ "description": "The number of copies of data in this user agreement that the cluster has confirmed\nexist in storage.",
+ "type": "integer"
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This user agreement's Arvados UUID, like `zzzzz-gv0sa-12345abcde67890`."
+ },
+ "manifest_text": {
+ "description": "The manifest text that describes how files are constructed from data blocks\nin this user agreement. Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n",
+ "type": "text"
+ },
+ "name": {
+ "description": "The name of this user agreement assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this user agreement assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "string"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this user agreement.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
+ },
+ "delete_at": {
+ "description": "The time this user agreement will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "trash_at": {
+ "description": "The time this user agreement will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "description": "A boolean flag to indicate whether or not this user agreement is trashed.",
+ "type": "boolean"
+ },
+ "storage_classes_desired": {
+ "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this user agreement. Storage classes are configured by the cluster administrator.",
+ "type": "Array"
+ },
+ "storage_classes_confirmed": {
+ "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this user agreement's data.",
+ "type": "Array"
+ },
+ "storage_classes_confirmed_at": {
+ "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "current_version_uuid": {
+ "description": "The UUID of the current version of this user agreement.",
+ "type": "string"
+ },
+ "version": {
+ "description": "An integer that counts which version of a user agreement this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n",
+ "type": "integer"
+ },
+ "preserve_version": {
+ "description": "A boolean flag to indicate whether this specific version of this user agreement\nshould be persisted in cluster storage.",
+ "type": "boolean"
+ },
+ "file_count": {
+ "description": "The number of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
+ },
+ "file_size_total": {
+ "description": "The total size in bytes of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
+ }
+ }
+ },
+ "VirtualMachineList": {
+ "id": "VirtualMachineList",
+ "description": "A list of VirtualMachine objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#virtualMachineList.",
+ "default": "arvados#virtualMachineList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching VirtualMachine objects.",
+ "items": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ }
+ },
+ "VirtualMachine": {
+ "id": "VirtualMachine",
+ "description": "Arvados virtual machine (\"shell node\")\n\nThis resource stores information about a virtual machine or \"shell node\"\nhosted on this Arvados cluster where users can log in and use preconfigured\nArvados client tools.",
+ "type": "object",
+ "uuidPrefix": "2x53u",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This virtual machine's Arvados UUID, like `zzzzz-2x53u-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this virtual machine.",
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this virtual machine.",
+ "type": "string"
+ },
+ "modified_at": {
+ "description": "The time this virtual machine was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "hostname": {
+ "description": "The DNS hostname where users should access this virtual machine.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this virtual machine was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ }
+ }
+ },
+ "WorkflowList": {
+ "id": "WorkflowList",
+ "description": "A list of Workflow objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#workflowList.",
+ "default": "arvados#workflowList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Workflow objects.",
+ "items": {
+ "$ref": "Workflow"
+ }
+ }
+ }
+ },
+ "Workflow": {
+ "id": "Workflow",
+ "description": "Arvados workflow\n\nA workflow contains workflow definition source code that Arvados can execute\nalong with associated metadata for users.",
+ "type": "object",
+ "uuidPrefix": "7fd4e",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This workflow's Arvados UUID, like `zzzzz-7fd4e-12345abcde67890`."
+ },
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this workflow.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "The time this workflow was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this workflow was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this workflow.",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name of this workflow assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this workflow assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "text"
+ },
+ "definition": {
+ "description": "A string with the CWL source of this workflow.",
+ "type": "text"
+ },
+ "collection_uuid": {
+ "description": "The collection this workflow is linked to, containing the definition of the workflow.",
+ "type": "string"
+ }
+ }
+ }
+ },
+ "servicePath": "arvados/v1/",
+ "title": "Arvados API",
+ "version": "v1"
+}
\ No newline at end of file
diff --git a/sdk/R/createDoc.R b/sdk/R/createDoc.R
deleted file mode 100644
index 5decab9af3..0000000000
--- a/sdk/R/createDoc.R
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-#Run script with $Rscript createDoc.R input.Rmd output.html
-
-require(knitr) # required for knitting from rmd to md
-require(markdown) # required for md to html
-
-args <- commandArgs(TRUE)
-
-if(length(args) != 2)
- stop("Please provide 2 arguments corresponding to input and output file!")
-
-inputFile <- args[[1]] # .Rmd file
-outputFile <- args[[2]] # .html file
-
-# Create and fill temp .md file from existing .Rmd file
-#tempMdFile <- tempfile("tempREADME", fileext = "md")
-knitr::knit(inputFile, outputFile)
-#knitr::knit(inputFile, tempMdFile)
-
-# Generate HTML from temporary .md file
-#markdown::markdownToHTML(tempMdFile, outputFile)
diff --git a/sdk/R/generateApi.R b/sdk/R/generateApi.R
new file mode 100644
index 0000000000..6fe8c4a100
--- /dev/null
+++ b/sdk/R/generateApi.R
@@ -0,0 +1,657 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+library(jsonlite)
+
+getAPIDocument <- function(loc)
+{
+ if (length(grep("^[a-z]+://", loc)) > 0) {
+ library(httr)
+ serverResponse <- httr::RETRY("GET", url = loc)
+ httr::content(serverResponse, as = "parsed", type = "application/json")
+ } else {
+ jsonlite::read_json(loc)
+ }
+}
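+
+# Example usage (the host name below is hypothetical; the path is the
+# standard Arvados discovery endpoint):
+#   doc <- getAPIDocument("https://xxxxx.arvadosapi.com/discovery/v1/apis/arvados/v1/rest")
+#   doc <- getAPIDocument("arvados-v1-discovery.json")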
+
+#' generateAPI
+#'
+#' Autogenerate classes to interact with Arvados from the Arvados discovery document.
+#'
+#' @export
+generateAPI <- function(discoveryDocument)
+{
+ methodResources <- discoveryDocument$resources
+ resourceNames <- names(methodResources)
+
+ classDoc <- genAPIClassDoc(methodResources, resourceNames)
+ arvadosAPIHeader <- genAPIClassHeader()
+ arvadosClassMethods <- genClassContent(methodResources, resourceNames)
+ arvadosProjectMethods <- genProjectMethods(methodResources)
+ arvadosAPIFooter <- genAPIClassFooter()
+
+ arvadosClass <- c(classDoc,
+ arvadosAPIHeader,
+ arvadosClassMethods,
+ arvadosProjectMethods,
+ arvadosAPIFooter)
+
+ fileConn <- file("./R/Arvados.R", "w")
+ writeLines(c(
+ "# Copyright (C) The Arvados Authors. All rights reserved.",
+ "#",
+ "# SPDX-License-Identifier: Apache-2.0",
+ "",
+ "#' Arvados",
+ "#'",
+ "#' This class implements a full REST client to the Arvados API.",
+ "#'"), fileConn)
+ writeLines(unlist(arvadosClass), fileConn)
+ close(fileConn)
+ NULL
+}
+
+genAPIClassHeader <- function()
+{
+ c("#' @export",
+ "Arvados <- R6::R6Class(",
+ "",
+ "\t\"Arvados\",",
+ "",
+ "\tpublic = list(",
+ "",
+ "\t\t#' @description Create a new Arvados API client.",
+ "\t\t#' @param authToken Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.",
+ "\t\t#' @param hostName Host name. If not specified ARVADOS_API_HOST environment variable will be used.",
+ "\t\t#' @param numRetries Number which specifies how many times to retry failed service requests.",
+ "\t\t#' @return A new `Arvados` object.",
+ "\t\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)",
+ "\t\t{",
+ "\t\t\tif(!is.null(hostName))",
+ "\t\t\t\tSys.setenv(ARVADOS_API_HOST = hostName)",
+ "",
+ "\t\t\tif(!is.null(authToken))",
+ "\t\t\t\tSys.setenv(ARVADOS_API_TOKEN = authToken)",
+ "",
+ "\t\t\thostName <- Sys.getenv(\"ARVADOS_API_HOST\")",
+ "\t\t\ttoken <- Sys.getenv(\"ARVADOS_API_TOKEN\")",
+ "",
+ "\t\t\tif(hostName == \"\" | token == \"\")",
+ "\t\t\t\tstop(paste(\"Please provide host name and authentification token\",",
+ "\t\t\t\t\t\t \"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\",",
+ "\t\t\t\t\t\t \"environment variables.\"))",
+ "",
+ "\t\t\tprivate$token <- token",
+ "\t\t\tprivate$host <- paste0(\"https://\", hostName, \"/arvados/v1/\")",
+ "\t\t\tprivate$numRetries <- numRetries",
+ "\t\t\tprivate$REST <- RESTService$new(token, hostName,",
+ "\t\t\t HttpRequest$new(), HttpParser$new(),",
+ "\t\t\t numRetries)",
+ "",
+ "\t\t},\n")
+}
+
+genProjectMethods <- function(methodResources)
+{
+ toCallArg <- function(arg) {
+ callArg <- strsplit(arg, " *=")[[1]][1]
+ paste(callArg, callArg, sep=" = ")
+ }
+ toCallArgs <- function(argList) {
+ paste0(Map(toCallArg, argList), collapse=", ")
+ }
+ groupsMethods <- methodResources[["groups"]][["methods"]]
+ getArgs <- getMethodArguments(groupsMethods[["get"]])
+ createArgs <- getMethodArguments(groupsMethods[["create"]])
+ updateArgs <- getMethodArguments(groupsMethods[["update"]])
+ listArgs <- getMethodArguments(groupsMethods[["list"]])
+ deleteArgs <- getMethodArguments(groupsMethods[["delete"]])
+
+ c("\t\t#' @description An alias for `groups_get`.",
+ getMethodParams(groupsMethods[["get"]]),
+ "\t\t#' @return A Group object.",
+ getMethodSignature("project_get", getArgs),
+ "\t\t{",
+ paste("\t\t\tself$groups_get(", toCallArgs(getArgs), ")", sep=""),
+ "\t\t},",
+ "",
+ "\t\t#' @description A wrapper for `groups_create` that sets `group_class=\"project\"`.",
+ getMethodParams(groupsMethods[["create"]]),
+ "\t\t#' @return A Group object.",
+ getMethodSignature("project_create", createArgs),
+ "\t\t{",
+ "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+ paste("\t\t\tself$groups_create(", toCallArgs(createArgs), ")", sep=""),
+ "\t\t},",
+ "",
+ "\t\t#' @description A wrapper for `groups_update` that sets `group_class=\"project\"`.",
+ getMethodParams(groupsMethods[["update"]]),
+ "\t\t#' @return A Group object.",
+ getMethodSignature("project_update", updateArgs),
+ "\t\t{",
+ "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+ paste("\t\t\tself$groups_update(", toCallArgs(updateArgs), ")", sep=""),
+ "\t\t},",
+ "",
+ "\t\t#' @description A wrapper for `groups_list` that adds a filter for `group_class=\"project\"`.",
+ getMethodParams(groupsMethods[["list"]]),
+ "\t\t#' @return A GroupList object.",
+ getMethodSignature("project_list", listArgs),
+ "\t\t{",
+ "\t\t\tfilters[[length(filters) + 1]] <- list(\"group_class\", \"=\", \"project\")",
+ paste("\t\t\tself$groups_list(", toCallArgs(listArgs), ")", sep=""),
+ "\t\t},",
+ "",
+ "\t\t#' @description An alias for `groups_delete`.",
+ getMethodParams(groupsMethods[["delete"]]),
+ "\t\t#' @return A Group object.",
+ getMethodSignature("project_delete", deleteArgs),
+ "\t\t{",
+ paste("\t\t\tself$groups_delete(", toCallArgs(deleteArgs), ")", sep=""),
+ "\t\t},",
+ "",
+ "\t\t#' @description Test whether or not a project exists.",
+ getMethodParams(groupsMethods[["get"]]),
+ getMethodSignature("project_exist", getArgs),
+ "\t\t{",
+ paste("\t\t\tresult <- try(self$groups_get(", toCallArgs(getArgs), "))", sep=""),
+ "\t\t\tif(inherits(result, \"try-error\"))",
+ "\t\t\t\texists <- FALSE",
+ "\t\t\telse",
+ "\t\t\t\texists <- result['group_class'] == \"project\"",
+ "\t\t\tcat(format(exists))",
+ "\t\t},",
+ "",
+ "\t\t#' @description A convenience wrapper for `project_update` to set project metadata properties.",
+ "\t\t#' @param listProperties List of new properties.",
+ "\t\t#' @param uuid UUID of the project to update.",
+ "\t\t#' @return A Group object.",
+ "\t\tproject_properties_set = function(listProperties, uuid)",
+ "\t\t{",
+ "\t\t\tself$project_update(list(\"properties\" = listProperties), uuid)",
+ "\t\t},",
+ "",
+ "\t\t#' @description Get a project and update it with additional properties.",
+ "\t\t#' @param properties List of new properties.",
+ "\t\t#' @param uuid UUID of the project to update.",
+ "\t\t#' @return A Group object.",
+ "\t\tproject_properties_append = function(properties, uuid)",
+ "\t\t{",
+ "\t\t\tproj <- private$get_project_by_list(uuid, list('uuid', 'properties'))",
+ "\t\t\tnewListOfProperties <- c(proj$properties, properties)",
+ "\t\t\tuniqueProperties <- unique(unlist(newListOfProperties))",
+ "\t\t\tnewProperties <- suppressWarnings(newListOfProperties[which(newListOfProperties == uniqueProperties)])",
+ "\t\t\tself$project_properties_set(newProperties, proj$uuid)",
+ "\t\t},",
+ "",
+ "\t\t#' @description Get properties of a project.",
+ "\t\t#' @param uuid The UUID of the project to query.",
+ "\t\tproject_properties_get = function(uuid)",
+ "\t\t{",
+ "\t\t\tprivate$get_project_by_list(uuid, list('uuid', 'properties'))$properties",
+ "\t\t},",
+ "",
+ "\t\t#' @description Delete one property from a project by name.",
+ "\t\t#' @param oneProp Name of the property to delete.",
+ "\t\t#' @param uuid The UUID of the project to update.",
+ "\t\t#' @return A Group object.",
+ "\t\tproject_properties_delete = function(oneProp, uuid)",
+ "\t\t{",
+ "\t\t\tprojProp <- self$project_properties_get(uuid)",
+ "\t\t\tprojProp[[oneProp]] <- NULL",
+ "\t\t\tself$project_properties_set(projProp, uuid)",
+ "\t\t},",
+ "",
+ "\t\t#' @description Convenience wrapper of `links_list` to create a permission link.",
+ "\t\t#' @param type The type of permission: one of `'can_read'`, `'can_write'`, or `'can_manage'`.",
+ "\t\t#' @param uuid The UUID of the object to grant permission to.",
+ "\t\t#' @param user The UUID of the user or group who receives this permission.",
+ "\t\t#' @return A Link object if one was updated, else NULL.",
+ "\t\tproject_permission_give = function(type, uuid, user)",
+ "\t\t{",
+ "\t\t\tlink <- list(",
+ "\t\t\t\t'link_class' = 'permission',",
+ "\t\t\t\t'name' = type,",
+ "\t\t\t\t'head_uuid' = uuid,",
+ "\t\t\t\t'tail_uuid' = user)",
+ "\t\t\tself$links_create(link)",
+ "\t\t},",
+ "",
+ "\t\t#' @description Find an existing permission link and update its level.",
+ "\t\t#' @param typeOld The type of permission to find: one of `'can_read'`, `'can_write'`, or `'can_manage'`.",
+ "\t\t#' @param typeNew The type of permission to set: one of `'can_read'`, `'can_write'`, or `'can_manage'`.",
+ "\t\t#' @param uuid The UUID of the object to grant permission to.",
+ "\t\t#' @param user The UUID of the user or group who receives this permission.",
+ "\t\t#' @return A Link object if one was updated, else NULL.",
+ "\t\tproject_permission_update = function(typeOld, typeNew, uuid, user)",
+ "\t\t{",
+ "\t\t\tlinks <- self$links_list(filters = list(",
+ "\t\t\t\t\tlist('link_class', '=', 'permission'),",
+ "\t\t\t\t\tlist('name', '=', typeOld),",
+ "\t\t\t\t\tlist('head_uuid', '=', uuid),",
+ "\t\t\t\t\tlist('tail_uuid', '=', user)",
+ "\t\t\t\t), select=list('uuid'), count = 'none')$items",
+ "\t\t\tif (length(links) == 0) {",
+ "\t\t\t\tcat(format('No permission granted'))",
+ "\t\t\t} else {",
+ "\t\t\t\tself$links_update(list('name' = typeNew), links[[1]]$uuid)",
+ "\t\t\t}",
+ "\t\t},",
+ "",
+ "\t\t#' @description Delete an existing permission link.",
+ "\t\t#' @param type The type of permission to delete: one of `'can_read'`, `'can_write'`, or `'can_manage'`.",
+ "\t\t#' @param uuid The UUID of the object to grant permission to.",
+ "\t\t#' @param user The UUID of the user or group who receives this permission.",
+ "\t\t#' @return A Link object if one was deleted, else NULL.",
+ "\t\tproject_permission_delete = function(type, uuid, user)",
+ "\t\t{",
+ "\t\t\tlinks <- self$links_list(filters = list(",
+ "\t\t\t\t\tlist('link_class', '=', 'permission'),",
+ "\t\t\t\t\tlist('name', '=', type),",
+ "\t\t\t\t\tlist('head_uuid', '=', uuid),",
+ "\t\t\t\t\tlist('tail_uuid', '=', user)",
+ "\t\t\t\t), select=list('uuid'), count = 'none')$items",
+ "\t\t\tif (length(links) == 0) {",
+ "\t\t\t\tcat(format('No permission granted'))",
+ "\t\t\t} else {",
+ "\t\t\t\tself$links_delete(links[[1]]$uuid)",
+ "\t\t\t}",
+ "\t\t},",
+ "",
+ "\t\t#' @description Check for an existing permission link.",
+ "\t\t#' @param type The type of permission to check: one of `'can_read'`, `'can_write'`, `'can_manage'`, or `NULL` (the default).",
+ "\t\t#' @param uuid The UUID of the object to check permission on.",
+ "\t\t#' @param user The UUID of the user or group to check permission for.",
+ "\t\t#' @return If `type` is `NULL`, the list of matching permission links.",
+ "\t\t#' Otherwise, prints and invisibly returns the level of the found permission link.",
+ "\t\tproject_permission_check = function(uuid, user, type = NULL)",
+ "\t\t{",
+ "\t\t\tfilters <- list(",
+ "\t\t\t\tlist('link_class', '=', 'permission'),",
+ "\t\t\t\tlist('head_uuid', '=', uuid),",
+ "\t\t\t\tlist('tail_uuid', '=', user))",
+ "\t\t\tif (!is.null(type)) {",
+ "\t\t\t\tfilters <- c(filters, list(list('name', '=', type)))",
+ "\t\t\t}",
+ "\t\t\tlinks <- self$links_list(filters = filters, count='none')$items",
+ "\t\t\tif (is.null(type)) {",
+ "\t\t\t\tlinks",
+ "\t\t\t} else {",
+ "\t\t\t\tprint(links[[1]]$name)",
+ "\t\t\t}",
+ "\t\t},",
+ "")
+}
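+
+# For reference, a sketch of one generated alias produced above (the exact
+# argument list comes from the discovery document, so it may differ):
+#
+#   project_get = function(uuid, select = NULL)
+#   {
+#       self$groups_get(uuid = uuid, select = select)
+#   },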
+
+genClassContent <- function(methodResources, resourceNames)
+{
+ arvadosMethods <- Map(function(resource, resourceName)
+ {
+ methodNames <- names(resource$methods)
+
+ functions <- Map(function(methodMetaData, methodName)
+ {
+ #NOTE: "index", "show" and "destroy" are aliases for the preferred names
+ # "list", "get" and "delete". Until they are removed from the discovery
+ # document, we filter them out here.
+ if(methodName %in% c("index", "show", "destroy"))
+ return(NULL)
+
+ methodName <- paste0(resourceName, "_", methodName)
+ unlist(c(
+ getMethodDoc(methodName, methodMetaData),
+ createMethod(methodName, methodMetaData)
+ ))
+
+ }, resource$methods, methodNames)
+
+ unlist(unname(functions))
+
+ }, methodResources, resourceNames)
+
+ arvadosMethods
+}
+
+genAPIClassFooter <- function()
+{
+ c("\t\t#' @description Return the host name of this client's Arvados API server.",
+ "\t\t#' @return Hostname string.",
+ "\t\tgetHostName = function() private$host,",
+ "",
+ "\t\t#' @description Return the Arvados API token used by this client.",
+ "\t\t#' @return API token string.",
+ "\t\tgetToken = function() private$token,",
+ "",
+ "\t\t#' @description Set the RESTService object used by this client.",
+ "\t\tsetRESTService = function(newREST) private$REST <- newREST,",
+ "",
+ "\t\t#' @description Return the RESTService object used by this client.",
+ "\t\t#' @return RESTService object.",
+ "\t\tgetRESTService = function() private$REST",
+ "\t),",
+ "",
+ "\tprivate = list(",
+ "\t\ttoken = NULL,",
+ "\t\thost = NULL,",
+ "\t\tREST = NULL,",
+ "\t\tnumRetries = NULL,",
+ "\t\tget_project_by_list = function(uuid, select = NULL)",
+ "\t\t{",
+ "\t\t\tself$groups_list(",
+ "\t\t\t\tfilters = list(list('uuid', '=', uuid), list('group_class', '=', 'project')),",
+ "\t\t\t\tselect = select,",
+ "\t\t\t\tcount = 'none'",
+ "\t\t\t)$items[[1]]",
+ "\t\t}",
+ "\t),",
+ "",
+ "\tcloneable = FALSE",
+ ")")
+}
+
+createMethod <- function(name, methodMetaData)
+{
+ args <- getMethodArguments(methodMetaData)
+ signature <- getMethodSignature(name, args)
+ body <- getMethodBody(methodMetaData)
+
+ c(signature,
+ "\t\t{",
+ body,
+ "\t\t},\n")
+}
+
+normalizeParamName <- function(name)
+{
+ # Downcase the first letter
+ name <- sub("^(\\w)", "\\L\\1", name, perl=TRUE)
+ # Convert snake_case to camelCase
+ gsub("_(uuid\\b|id\\b|\\w)", "\\U\\1", name, perl=TRUE)
+}
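+
+# Illustration of the renaming rules above:
+#   normalizeParamName("ensure_unique_name")  # => "ensureUniqueName"
+#   normalizeParamName("owner_uuid")          # => "ownerUUID"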
+
+getMethodArguments <- function(methodMetaData)
+{
+ request <- methodMetaData$request
+ requestArgs <- NULL
+
+ if(!is.null(request))
+ {
+ resourceName <- normalizeParamName(request$properties[[1]][[1]])
+
+ if(request$required)
+ requestArgs <- resourceName
+ else
+ requestArgs <- paste(resourceName, "=", "NULL")
+ }
+
+ argNames <- names(methodMetaData$parameters)
+
+ args <- sapply(argNames, function(argName)
+ {
+ arg <- methodMetaData$parameters[[argName]]
+ argName <- normalizeParamName(argName)
+
+ if(!arg$required)
+ {
+ return(paste(argName, "=", "NULL"))
+ }
+
+ argName
+ })
+
+ c(requestArgs, args)
+}
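+
+# For example, a method with a required "uuid" parameter and an optional
+# "select" parameter yields c("uuid", "select = NULL"); if the method takes
+# a required request body, its resource name (e.g. "group") is prepended.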
+
+getMethodSignature <- function(methodName, args)
+{
+ collapsedArgs <- paste0(args, collapse = ", ")
+ lineLengthLimit <- 40
+
+ if(nchar(collapsedArgs) > lineLengthLimit)
+ {
+ return(paste0("\t\t",
+ formatArgs(paste(methodName, "= function("),
+ "\t", args, ")", lineLengthLimit)))
+ }
+ else
+ {
+ return(paste0("\t\t", methodName, " = function(", collapsedArgs, ")"))
+ }
+}
+
+getMethodBody <- function(methodMetaData)
+{
+ url <- getRequestURL(methodMetaData)
+ headers <- getRequestHeaders()
+ requestQueryList <- getRequestQueryList(methodMetaData)
+ requestBody <- getRequestBody(methodMetaData)
+ request <- getRequest(methodMetaData)
+ response <- getResponse(methodMetaData)
+ errorCheck <- getErrorCheckingCode(methodMetaData)
+ returnStatement <- getReturnObject()
+
+ body <- c(url,
+ headers,
+ requestQueryList, "",
+ requestBody, "",
+ request, response, "",
+ errorCheck, "",
+ returnStatement)
+
+ paste0("\t\t\t", body)
+}
+
+getRequestURL <- function(methodMetaData)
+{
+ endPoint <- methodMetaData$path
+ endPoint <- stringr::str_replace_all(endPoint, "\\{", "${")
+ url <- c(paste0("endPoint <- stringr::str_interp(\"", endPoint, "\")"),
+ paste0("url <- paste0(private$host, endPoint)"))
+ url
+}
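+
+# For example, the discovery path "groups/{uuid}" produces:
+#   endPoint <- stringr::str_interp("groups/${uuid}")
+#   url <- paste0(private$host, endPoint)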
+
+getRequestHeaders <- function()
+{
+ c("headers <- list(Authorization = paste(\"Bearer\", private$token), ",
+ " \"Content-Type\" = \"application/json\")")
+}
+
+getRequestQueryList <- function(methodMetaData)
+{
+ queryArgs <- names(Filter(function(arg) arg$location == "query",
+ methodMetaData$parameters))
+
+ if(length(queryArgs) == 0)
+ return("queryArgs <- NULL")
+
+ queryArgs <- sapply(queryArgs, function(arg) {
+ arg <- normalizeParamName(arg)
+ paste(arg, "=", arg)
+ })
+ collapsedArgs <- paste0(queryArgs, collapse = ", ")
+
+ lineLengthLimit <- 40
+
+ if(nchar(collapsedArgs) > lineLengthLimit)
+ return(formatArgs("queryArgs <- list(", "\t\t\t\t ", queryArgs, ")",
+ lineLengthLimit))
+ else
+ return(paste0("queryArgs <- list(", collapsedArgs, ")"))
+}
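+
+# For example, query parameters "filters" and "limit" produce:
+#   queryArgs <- list(filters = filters, limit = limit)
+# Longer parameter lists are wrapped across lines by formatArgs() below.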
+
+getRequestBody <- function(methodMetaData)
+{
+ request <- methodMetaData$request
+
+ if(is.null(request) || !request$required)
+ return("body <- NULL")
+
+ resourceName <- normalizeParamName(request$properties[[1]][[1]])
+
+ requestParameterName <- names(request$properties)[1]
+
+ c(paste0("if(length(", resourceName, ") > 0)"),
+ paste0("\tbody <- jsonlite::toJSON(list(", resourceName, " = ", resourceName, "), "),
+ "\t auto_unbox = TRUE)",
+ "else",
+ "\tbody <- NULL")
+}
+
+getRequest <- function(methodMetaData)
+{
+ method <- methodMetaData$httpMethod
+ c(paste0("response <- private$REST$http$exec(\"", method, "\", url, headers, body,"),
+ " queryArgs, private$numRetries)")
+}
+
+getResponse <- function(methodMetaData)
+{
+ "resource <- private$REST$httpParser$parseJSONResponse(response)"
+}
+
+getErrorCheckingCode <- function(methodMetaData)
+{
+ if ("ensure_unique_name" %in% names(methodMetaData$parameters)) {
+ body <- c("\tif (identical(sub('Entity:.*', '', resource$errors), '//railsapi.internal/arvados/v1/collections: 422 Unprocessable ')) {",
+ "\t\tresource <- cat(format('An object with the given name already exists with this owner. If you want to update it use the update method instead'))",
+ "\t} else {",
+ "\t\tstop(resource$errors)",
+ "\t}")
+ } else {
+ body <- "\tstop(resource$errors)"
+ }
+ c("if(!is.null(resource$errors)) {", body, "}")
+}
+
+getReturnObject <- function()
+{
+ "resource"
+}
+
+genAPIClassDoc <- function(methodResources, resourceNames)
+{
+ c("#' @examples",
+ "#' \\dontrun{",
+ "#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")",
+ "#'",
+ "#' collection <- arv$collections.get(\"uuid\")",
+ "#'",
+ "#' collectionList <- arv$collections.list(list(list(\"name\", \"like\", \"Test%\")))",
+ "#' collectionList <- listAll(arv$collections.list, list(list(\"name\", \"like\", \"Test%\")))",
+ "#'",
+ "#' deletedCollection <- arv$collections.delete(\"uuid\")",
+ "#'",
+ "#' updatedCollection <- arv$collections.update(list(name = \"New name\", description = \"New description\"),",
+ "#' \"uuid\")",
+ "#'",
+ "#' createdCollection <- arv$collections.create(list(name = \"Example\",",
+ "#' description = \"This is a test collection\"))",
+ "#' }",
+ "")
+}
+
+getAPIClassMethodList <- function(methodResources, resourceNames)
+{
+ methodList <- unlist(unname(Map(function(resource, resourceName)
+ {
+ methodNames <- names(resource$methods)
+ paste0(resourceName,
+ ".",
+ methodNames[!(methodNames %in% c("index", "show", "destroy"))])
+
+ }, methodResources, resourceNames)))
+
+ hardcodedMethods <- c("projects.create", "projects.get",
+ "projects.list", "projects.update", "projects.delete")
+ paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}")
+}
+
+getMethodDoc <- function(methodName, methodMetaData)
+{
+ description <- paste("\t\t#' @description", gsub("\n", "\n\t\t#' ", methodMetaData$description))
+ params <- getMethodParams(methodMetaData)
+ returnValue <- paste("\t\t#' @return", methodMetaData$response[["$ref"]], "object.")
+
+ c(description, params, returnValue)
+}
+
+getMethodParams <- function(methodMetaData)
+{
+ request <- methodMetaData$request
+ requestDoc <- NULL
+
+ if(!is.null(request))
+ {
+ requestDoc <- unname(unlist(sapply(request$properties, function(prop)
+ {
+ className <- sapply(prop, function(ref) ref)
+ objectName <- normalizeParamName(className)
+ paste("\t\t#' @param", objectName, className, "object.")
+ })))
+ }
+
+ argNames <- names(methodMetaData$parameters)
+
+ argsDoc <- unname(unlist(sapply(argNames, function(argName)
+ {
+ arg <- methodMetaData$parameters[[argName]]
+ paste("\t\t#' @param",
+ normalizeParamName(argName),
+ gsub("\n", "\n\t\t#' ", arg$description)
+ )
+ })))
+
+ c(requestDoc, argsDoc)
+}
+
+#NOTE: Utility functions:
+
+# This function is used to split very long lines of code into smaller chunks.
+ # This is usually the case when we pass a lot of named arguments to a function.
+formatArgs <- function(prependAtStart, prependToEachSplit,
+ args, appendAtEnd, lineLength)
+{
+ if(length(args) > 1)
+ {
+ args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",")
+ }
+
+ args[1] <- paste0(prependAtStart, args[1])
+ args[length(args)] <- paste0(args[length(args)], appendAtEnd)
+
+ argsLength <- length(args)
+ argLines <- list()
+ index <- 1
+
+ while(index <= argsLength)
+ {
+ line <- args[index]
+ index <- index + 1
+
+ while(nchar(line) < lineLength && index <= argsLength)
+ {
+ line <- paste(line, args[index])
+ index <- index + 1
+ }
+
+ argLines <- c(argLines, line)
+ }
+
+ argLines <- unlist(argLines)
+ argLinesLen <- length(argLines)
+
+ if(argLinesLen > 1)
+ argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen])
+
+ argLines
+}
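+
+# Sketch of the splitting behavior (hypothetical arguments):
+#   formatArgs("queryArgs <- list(", "\t", c("filters = filters",
+#              "where = where", "order = order"), ")", 40)
+# packs arguments onto each line until it reaches the 40-character limit,
+# then starts a new line prefixed with the given indent string.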
+
+args <- commandArgs(TRUE)
+if (length(args) == 0) {
+ loc <- "arvados-v1-discovery.json"
+} else {
+ loc <- args[[1]]
+}
+discoveryDocument <- getAPIDocument(loc)
+generateAPI(discoveryDocument)
diff --git a/sdk/R/man/Arvados.Rd b/sdk/R/man/Arvados.Rd
deleted file mode 100644
index 924bfeae9b..0000000000
--- a/sdk/R/man/Arvados.Rd
+++ /dev/null
@@ -1,3026 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{Arvados}
-\alias{Arvados}
-\title{R6 Class Representing a Arvados}
-\description{
-Arvados class gives users ability to access Arvados REST API. It also allowes user to manipulate collections (and projects?)
-}
-\examples{
-
-## ------------------------------------------------
-## Method `Arvados$new`
-## ------------------------------------------------
-
-arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
-
-## ------------------------------------------------
-## Method `Arvados$project_exist`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_exist(uuid = "projectUUID")
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_get`
-## ------------------------------------------------
-
-\dontrun{
-project <- arv$project_get(uuid = 'projectUUID')
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_create`
-## ------------------------------------------------
-
-\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-new_project <- arv$project_create(name = "project name", description = "project description", owner_uuid = "project UUID", properties = NULL, ensureUniqueName = "false")
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_properties_set`
-## ------------------------------------------------
-
-\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-arv$project_properties_set(Properties, uuid)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_properties_append`
-## ------------------------------------------------
-
-\dontrun{
-newProperties <- list() # should contain a list of new properties to be added
-arv$project_properties_append(properties = newProperties, uuid)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_properties_get`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_properties_get(projectUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_properties_delete`
-## ------------------------------------------------
-
-\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-arv$project_properties_delete(Properties, projectUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_update`
-## ------------------------------------------------
-
-\dontrun{
-newProperties <- list() # should contain a list of new properties to be added
-arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_list`
-## ------------------------------------------------
-
-\dontrun{
-listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # Sample query which show projects within the project of a given UUID
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_delete`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_delete(uuid = 'projectUUID')
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_get`
-## ------------------------------------------------
-
-\dontrun{
-collection <- arv$collections_get(uuid = collectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_create`
-## ------------------------------------------------
-
-\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_update`
-## ------------------------------------------------
-
-\dontrun{
-collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_delete`
-## ------------------------------------------------
-
-\dontrun{
-arv$collection_delete(collectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_provenance`
-## ------------------------------------------------
-
-\dontrun{
-collection <- arv$collections_provenance(collectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_trash`
-## ------------------------------------------------
-
-\dontrun{
-arv$collections_trash(collectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_untrash`
-## ------------------------------------------------
-
-\dontrun{
-arv$collections_untrash(collectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$collections_list`
-## ------------------------------------------------
-
-\dontrun{
-collectionList <- arv$collections_list(list(list("name", "=", "Example")))
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_permission_give`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_permission_refuse`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_permission_update`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
-}
-
-## ------------------------------------------------
-## Method `Arvados$project_permission_check`
-## ------------------------------------------------
-
-\dontrun{
-arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-Arvados-new}{\code{Arvados$new()}}
-\item \href{#method-Arvados-project_exist}{\code{Arvados$project_exist()}}
-\item \href{#method-Arvados-project_get}{\code{Arvados$project_get()}}
-\item \href{#method-Arvados-project_create}{\code{Arvados$project_create()}}
-\item \href{#method-Arvados-project_properties_set}{\code{Arvados$project_properties_set()}}
-\item \href{#method-Arvados-project_properties_append}{\code{Arvados$project_properties_append()}}
-\item \href{#method-Arvados-project_properties_get}{\code{Arvados$project_properties_get()}}
-\item \href{#method-Arvados-project_properties_delete}{\code{Arvados$project_properties_delete()}}
-\item \href{#method-Arvados-project_update}{\code{Arvados$project_update()}}
-\item \href{#method-Arvados-project_list}{\code{Arvados$project_list()}}
-\item \href{#method-Arvados-project_delete}{\code{Arvados$project_delete()}}
-\item \href{#method-Arvados-api_clients_get}{\code{Arvados$api_clients_get()}}
-\item \href{#method-Arvados-api_clients_create}{\code{Arvados$api_clients_create()}}
-\item \href{#method-Arvados-api_clients_update}{\code{Arvados$api_clients_update()}}
-\item \href{#method-Arvados-api_clients_delete}{\code{Arvados$api_clients_delete()}}
-\item \href{#method-Arvados-api_clients_list}{\code{Arvados$api_clients_list()}}
-\item \href{#method-Arvados-api_client_authorizations_get}{\code{Arvados$api_client_authorizations_get()}}
-\item \href{#method-Arvados-api_client_authorizations_create}{\code{Arvados$api_client_authorizations_create()}}
-\item \href{#method-Arvados-api_client_authorizations_update}{\code{Arvados$api_client_authorizations_update()}}
-\item \href{#method-Arvados-api_client_authorizations_delete}{\code{Arvados$api_client_authorizations_delete()}}
-\item \href{#method-Arvados-api_client_authorizations_create_system_auth}{\code{Arvados$api_client_authorizations_create_system_auth()}}
-\item \href{#method-Arvados-api_client_authorizations_current}{\code{Arvados$api_client_authorizations_current()}}
-\item \href{#method-Arvados-api_client_authorizations_list}{\code{Arvados$api_client_authorizations_list()}}
-\item \href{#method-Arvados-authorized_keys_get}{\code{Arvados$authorized_keys_get()}}
-\item \href{#method-Arvados-authorized_keys_create}{\code{Arvados$authorized_keys_create()}}
-\item \href{#method-Arvados-authorized_keys_update}{\code{Arvados$authorized_keys_update()}}
-\item \href{#method-Arvados-authorized_keys_delete}{\code{Arvados$authorized_keys_delete()}}
-\item \href{#method-Arvados-authorized_keys_list}{\code{Arvados$authorized_keys_list()}}
-\item \href{#method-Arvados-collections_get}{\code{Arvados$collections_get()}}
-\item \href{#method-Arvados-collections_create}{\code{Arvados$collections_create()}}
-\item \href{#method-Arvados-collections_update}{\code{Arvados$collections_update()}}
-\item \href{#method-Arvados-collections_delete}{\code{Arvados$collections_delete()}}
-\item \href{#method-Arvados-collections_provenance}{\code{Arvados$collections_provenance()}}
-\item \href{#method-Arvados-collections_used_by}{\code{Arvados$collections_used_by()}}
-\item \href{#method-Arvados-collections_trash}{\code{Arvados$collections_trash()}}
-\item \href{#method-Arvados-collections_untrash}{\code{Arvados$collections_untrash()}}
-\item \href{#method-Arvados-collections_list}{\code{Arvados$collections_list()}}
-\item \href{#method-Arvados-containers_get}{\code{Arvados$containers_get()}}
-\item \href{#method-Arvados-containers_create}{\code{Arvados$containers_create()}}
-\item \href{#method-Arvados-containers_update}{\code{Arvados$containers_update()}}
-\item \href{#method-Arvados-containers_delete}{\code{Arvados$containers_delete()}}
-\item \href{#method-Arvados-containers_auth}{\code{Arvados$containers_auth()}}
-\item \href{#method-Arvados-containers_lock}{\code{Arvados$containers_lock()}}
-\item \href{#method-Arvados-containers_unlock}{\code{Arvados$containers_unlock()}}
-\item \href{#method-Arvados-containers_secret_mounts}{\code{Arvados$containers_secret_mounts()}}
-\item \href{#method-Arvados-containers_current}{\code{Arvados$containers_current()}}
-\item \href{#method-Arvados-containers_list}{\code{Arvados$containers_list()}}
-\item \href{#method-Arvados-container_requests_get}{\code{Arvados$container_requests_get()}}
-\item \href{#method-Arvados-container_requests_create}{\code{Arvados$container_requests_create()}}
-\item \href{#method-Arvados-container_requests_update}{\code{Arvados$container_requests_update()}}
-\item \href{#method-Arvados-container_requests_delete}{\code{Arvados$container_requests_delete()}}
-\item \href{#method-Arvados-container_requests_list}{\code{Arvados$container_requests_list()}}
-\item \href{#method-Arvados-groups_get}{\code{Arvados$groups_get()}}
-\item \href{#method-Arvados-groups_create}{\code{Arvados$groups_create()}}
-\item \href{#method-Arvados-groups_update}{\code{Arvados$groups_update()}}
-\item \href{#method-Arvados-groups_delete}{\code{Arvados$groups_delete()}}
-\item \href{#method-Arvados-groups_contents}{\code{Arvados$groups_contents()}}
-\item \href{#method-Arvados-groups_shared}{\code{Arvados$groups_shared()}}
-\item \href{#method-Arvados-groups_trash}{\code{Arvados$groups_trash()}}
-\item \href{#method-Arvados-groups_untrash}{\code{Arvados$groups_untrash()}}
-\item \href{#method-Arvados-groups_list}{\code{Arvados$groups_list()}}
-\item \href{#method-Arvados-keep_services_get}{\code{Arvados$keep_services_get()}}
-\item \href{#method-Arvados-keep_services_create}{\code{Arvados$keep_services_create()}}
-\item \href{#method-Arvados-keep_services_update}{\code{Arvados$keep_services_update()}}
-\item \href{#method-Arvados-keep_services_delete}{\code{Arvados$keep_services_delete()}}
-\item \href{#method-Arvados-keep_services_accessible}{\code{Arvados$keep_services_accessible()}}
-\item \href{#method-Arvados-keep_services_list}{\code{Arvados$keep_services_list()}}
-\item \href{#method-Arvados-project_permission_give}{\code{Arvados$project_permission_give()}}
-\item \href{#method-Arvados-project_permission_refuse}{\code{Arvados$project_permission_refuse()}}
-\item \href{#method-Arvados-project_permission_update}{\code{Arvados$project_permission_update()}}
-\item \href{#method-Arvados-project_permission_check}{\code{Arvados$project_permission_check()}}
-\item \href{#method-Arvados-links_get}{\code{Arvados$links_get()}}
-\item \href{#method-Arvados-links_create}{\code{Arvados$links_create()}}
-\item \href{#method-Arvados-links_update}{\code{Arvados$links_update()}}
-\item \href{#method-Arvados-links_delete}{\code{Arvados$links_delete()}}
-\item \href{#method-Arvados-links_list}{\code{Arvados$links_list()}}
-\item \href{#method-Arvados-links_get_permissions}{\code{Arvados$links_get_permissions()}}
-\item \href{#method-Arvados-logs_get}{\code{Arvados$logs_get()}}
-\item \href{#method-Arvados-logs_create}{\code{Arvados$logs_create()}}
-\item \href{#method-Arvados-logs_update}{\code{Arvados$logs_update()}}
-\item \href{#method-Arvados-logs_delete}{\code{Arvados$logs_delete()}}
-\item \href{#method-Arvados-logs_list}{\code{Arvados$logs_list()}}
-\item \href{#method-Arvados-users_get}{\code{Arvados$users_get()}}
-\item \href{#method-Arvados-users_create}{\code{Arvados$users_create()}}
-\item \href{#method-Arvados-users_update}{\code{Arvados$users_update()}}
-\item \href{#method-Arvados-users_delete}{\code{Arvados$users_delete()}}
-\item \href{#method-Arvados-users_current}{\code{Arvados$users_current()}}
-\item \href{#method-Arvados-users_system}{\code{Arvados$users_system()}}
-\item \href{#method-Arvados-users_activate}{\code{Arvados$users_activate()}}
-\item \href{#method-Arvados-users_setup}{\code{Arvados$users_setup()}}
-\item \href{#method-Arvados-users_unsetup}{\code{Arvados$users_unsetup()}}
-\item \href{#method-Arvados-users_merge}{\code{Arvados$users_merge()}}
-\item \href{#method-Arvados-users_list}{\code{Arvados$users_list()}}
-\item \href{#method-Arvados-repositories_get}{\code{Arvados$repositories_get()}}
-\item \href{#method-Arvados-repositories_create}{\code{Arvados$repositories_create()}}
-\item \href{#method-Arvados-repositories_update}{\code{Arvados$repositories_update()}}
-\item \href{#method-Arvados-repositories_delete}{\code{Arvados$repositories_delete()}}
-\item \href{#method-Arvados-repositories_get_all_permissions}{\code{Arvados$repositories_get_all_permissions()}}
-\item \href{#method-Arvados-repositories_list}{\code{Arvados$repositories_list()}}
-\item \href{#method-Arvados-virtual_machines_get}{\code{Arvados$virtual_machines_get()}}
-\item \href{#method-Arvados-virtual_machines_create}{\code{Arvados$virtual_machines_create()}}
-\item \href{#method-Arvados-virtual_machines_update}{\code{Arvados$virtual_machines_update()}}
-\item \href{#method-Arvados-virtual_machines_delete}{\code{Arvados$virtual_machines_delete()}}
-\item \href{#method-Arvados-virtual_machines_logins}{\code{Arvados$virtual_machines_logins()}}
-\item \href{#method-Arvados-virtual_machines_get_all_logins}{\code{Arvados$virtual_machines_get_all_logins()}}
-\item \href{#method-Arvados-virtual_machines_list}{\code{Arvados$virtual_machines_list()}}
-\item \href{#method-Arvados-workflows_get}{\code{Arvados$workflows_get()}}
-\item \href{#method-Arvados-workflows_create}{\code{Arvados$workflows_create()}}
-\item \href{#method-Arvados-workflows_update}{\code{Arvados$workflows_update()}}
-\item \href{#method-Arvados-workflows_delete}{\code{Arvados$workflows_delete()}}
-\item \href{#method-Arvados-workflows_list}{\code{Arvados$workflows_list()}}
-\item \href{#method-Arvados-user_agreements_get}{\code{Arvados$user_agreements_get()}}
-\item \href{#method-Arvados-user_agreements_create}{\code{Arvados$user_agreements_create()}}
-\item \href{#method-Arvados-user_agreements_update}{\code{Arvados$user_agreements_update()}}
-\item \href{#method-Arvados-user_agreements_delete}{\code{Arvados$user_agreements_delete()}}
-\item \href{#method-Arvados-user_agreements_signatures}{\code{Arvados$user_agreements_signatures()}}
-\item \href{#method-Arvados-user_agreements_sign}{\code{Arvados$user_agreements_sign()}}
-\item \href{#method-Arvados-user_agreements_list}{\code{Arvados$user_agreements_list()}}
-\item \href{#method-Arvados-user_agreements_new}{\code{Arvados$user_agreements_new()}}
-\item \href{#method-Arvados-configs_get}{\code{Arvados$configs_get()}}
-\item \href{#method-Arvados-getHostName}{\code{Arvados$getHostName()}}
-\item \href{#method-Arvados-getToken}{\code{Arvados$getToken()}}
-\item \href{#method-Arvados-setRESTService}{\code{Arvados$setRESTService()}}
-\item \href{#method-Arvados-getRESTService}{\code{Arvados$getRESTService()}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-new}{}}}
-\subsection{Method \code{new()}}{
-Initialize a new environment.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{authToken}}{ARVADOS_API_TOKEN from 'Get API Token' on Arvados.}
-
-\item{\code{hostName}}{ARVADOS_API_HOST from 'Get API Token' on Arvados.}
-
-\item{\code{numRetries}}{Specify number of times to retry failed service requests.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Returns}{
-A new \code{Arvados} object.
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_exist}{}}}
-\subsection{Method \code{project_exist()}}{
-project_exist checks whether a project with the given UUID exists.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_exist(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of a project or a file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_exist(uuid = "projectUUID")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_get}{}}}
-\subsection{Method \code{project_get()}}{
-project_get returns the requested project.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-project <- arv$project_get(uuid = 'projectUUID')
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_create}{}}}
-\subsection{Method \code{project_create()}}{
-project_create creates a new project with the given name and description.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_create(
- name,
- description,
- ownerUUID,
- properties = NULL,
- ensureUniqueName = "false"
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the project.}
-
-\item{\code{description}}{Description of the project.}
-
-\item{\code{ownerUUID}}{The UUID of the parent project of the one being created.}
-
-\item{\code{properties}}{List of the properties of the project.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-Properties <- list() # a named list of properties to set, if any
-new_project <- arv$project_create(name = "project name", description = "project description", ownerUUID = "parentProjectUUID", properties = Properties, ensureUniqueName = "false")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_properties_set}{}}}
-\subsection{Method \code{project_properties_set()}}{
-project_properties_set is a method defined in Arvados class that sets project properties, overwriting any that are already set.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_properties_set(listProperties, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{listProperties}}{List of new properties.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-arv$project_properties_set(Properties, uuid)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_properties_append}{}}}
-\subsection{Method \code{project_properties_append()}}{
-project_properties_append is a method defined in Arvados class that appends new properties to a project without removing existing ones.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_properties_append(properties, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{properties}}{List of new properties.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-newProperties <- list() # should contain a list of new properties to be added
-arv$project_properties_append(properties = newProperties, uuid)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_properties_get}{}}}
-\subsection{Method \code{project_properties_get()}}{
-project_properties_get is a method defined in Arvados class that returns a project's properties.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_properties_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of a project or a file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_properties_get(projectUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_properties_delete}{}}}
-\subsection{Method \code{project_properties_delete()}}{
-project_properties_delete is a method defined in Arvados class that deletes the given list of properties.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_properties_delete(oneProp, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{oneProp}}{Property to be deleted.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-Properties <- list() # should contain the properties to be deleted
-arv$project_properties_delete(Properties, projectUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_update}{}}}
-\subsection{Method \code{project_update()}}{
-project_update updates a project. A new name, description, and properties may be given.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_update(..., uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{...}}{Fields to be updated (name, description, properties).}
-
-\item{\code{uuid}}{The UUID of a project in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-newProperties <- list() # should contain a list of new properties to be added
-arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_list}{}}}
-\subsection{Method \code{project_list()}}{
-project_list enables listing projects by name, UUID, properties, or permissions.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- includeTrash = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
-
-\item{\code{uuid}}{The UUID of a project in question.}
-
-\item{\code{recursive}}{Include contents from child groups recursively.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # sample query that lists the projects within the project of the given UUID
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_delete}{}}}
-\subsection{Method \code{project_delete()}}{
-project_delete trashes the project with the given UUID. It can later be restored from the trash or deleted permanently.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_delete(uuid = 'projectUUID')
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_clients_get}{}}}
-\subsection{Method \code{api_clients_get()}}{
-api_clients_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_clients_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the apiClient in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_clients_create}{}}}
-\subsection{Method \code{api_clients_create()}}{
-api_clients_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_clients_create(
- apiClient,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{apiClient}}{apiClient object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_clients_update}{}}}
-\subsection{Method \code{api_clients_update()}}{
-api_clients_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_clients_update(apiClient, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{apiClient}}{apiClient object.}
-
-\item{\code{uuid}}{The UUID of the apiClient in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_clients_delete}{}}}
-\subsection{Method \code{api_clients_delete()}}{
-api_clients_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_clients_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the apiClient in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_clients_list}{}}}
-\subsection{Method \code{api_clients_list()}}{
-api_clients_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_clients_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_get}{}}}
-\subsection{Method \code{api_client_authorizations_get()}}{
-api_client_authorizations_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_create}{}}}
-\subsection{Method \code{api_client_authorizations_create()}}{
-api_client_authorizations_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_create(
- apiClientAuthorization,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{apiClientAuthorization}}{apiClientAuthorization object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error on (ownerUUID, name) collision.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_update}{}}}
-\subsection{Method \code{api_client_authorizations_update()}}{
-api_client_authorizations_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_update(apiClientAuthorization, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{apiClientAuthorization}}{apiClientAuthorization object.}
-
-\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_delete}{}}}
-\subsection{Method \code{api_client_authorizations_delete()}}{
-api_client_authorizations_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_create_system_auth}{}}}
-\subsection{Method \code{api_client_authorizations_create_system_auth()}}{
-api_client_authorizations_create_system_auth is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_create_system_auth(
- apiClientID = NULL,
- scopes = NULL
-)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_current}{}}}
-\subsection{Method \code{api_client_authorizations_current()}}{
-api_client_authorizations_current is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_current()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_list}{}}}
-\subsection{Method \code{api_client_authorizations_list()}}{
-api_client_authorizations_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_get}{}}}
-\subsection{Method \code{authorized_keys_get()}}{
-authorized_keys_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$authorized_keys_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the authorizedKey in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_create}{}}}
-\subsection{Method \code{authorized_keys_create()}}{
-authorized_keys_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$authorized_keys_create(
- authorizedKey,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{authorizedKey}}{authorizedKey object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
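-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object; the \code{authorizedKey} fields shown are illustrative assumptions, not a complete schema:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-key <- list(name = "my-ssh-key", key_type = "SSH", public_key = "ssh-rsa AAAA...")
-newKey <- arv$authorized_keys_create(authorizedKey = key)
-}
-}
-\if{html}{\out{}}
-
-}
-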
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_update}{}}}
-\subsection{Method \code{authorized_keys_update()}}{
-authorized_keys_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$authorized_keys_update(authorizedKey, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{authorizedKey}}{authorizedKey object.}
-
-\item{\code{uuid}}{The UUID of the authorizedKey in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_delete}{}}}
-\subsection{Method \code{authorized_keys_delete()}}{
-authorized_keys_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$authorized_keys_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the authorizedKey in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_list}{}}}
-\subsection{Method \code{authorized_keys_list()}}{
-authorized_keys_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$authorized_keys_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_get}{}}}
-\subsection{Method \code{collections_get()}}{
-collections_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- arv$collections_get(uuid = collectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_create}{}}}
-\subsection{Method \code{collections_create()}}{
-collections_create is a method defined in Arvados class that enables collection creation.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_create(
- name,
- description,
- ownerUUID = NULL,
- properties = NULL,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the collection.}
-
-\item{\code{description}}{Description of the collection.}
-
-\item{\code{ownerUUID}}{The UUID of the parent project of the collection being created.}
-
-\item{\code{properties}}{Properties of the collection.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-Properties <- list() # should contain a list of new properties to be added
-arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_update}{}}}
-\subsection{Method \code{collections_update()}}{
-collections_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_update(
- name,
- description,
- ownerUUID = NULL,
- properties = NULL,
- uuid
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{New name of the collection.}
-
-\item{\code{description}}{New description of the collection.}
-
-\item{\code{ownerUUID}}{The UUID of the parent project of the collection.}
-
-\item{\code{properties}}{New list of properties of the collection.}
-
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_delete}{}}}
-\subsection{Method \code{collections_delete()}}{
-collections_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$collections_delete(collectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_provenance}{}}}
-\subsection{Method \code{collections_provenance()}}{
-collections_provenance is a method defined in Arvados class; it returns the collection by UUID.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_provenance(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- arv$collections_provenance(collectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_used_by}{}}}
-\subsection{Method \code{collections_used_by()}}{
-collections_used_by is a method defined in Arvados class; it returns the collection by portable_data_hash.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_used_by(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
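-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object and \code{portableDataHash} is a placeholder for the portable_data_hash described above:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-usedBy <- arv$collections_used_by(uuid = portableDataHash)
-}
-}
-\if{html}{\out{}}
-
-}
-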
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_trash}{}}}
-\subsection{Method \code{collections_trash()}}{
-collections_trash is a method defined in Arvados class; it moves a collection to the trash.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_trash(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$collections_trash(collectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_untrash}{}}}
-\subsection{Method \code{collections_untrash()}}{
-collections_untrash is a method defined in Arvados class; it moves a collection from the trash back to its project.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_untrash(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Collection in question.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$collections_untrash(collectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-collections_list}{}}}
-\subsection{Method \code{collections_list()}}{
-collections_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$collections_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL,
- includeTrash = NULL,
- includeOldVersions = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-
-\item{\code{includeTrash}}{Include collections whose is_trashed attribute is true.}
-
-\item{\code{includeOldVersions}}{Include past collection versions.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collectionList <- arv$collections_list(list(list("name", "=", "Example")))
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_get}{}}}
-\subsection{Method \code{containers_get()}}{
-containers_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
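-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object and \code{containerUUID} is a placeholder for a valid container UUID:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-container <- arv$containers_get(uuid = containerUUID)
-}
-}
-\if{html}{\out{}}
-
-}
-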
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_create}{}}}
-\subsection{Method \code{containers_create()}}{
-containers_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_create(
- container,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{container}}{Container object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_update}{}}}
-\subsection{Method \code{containers_update()}}{
-containers_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_update(container, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{container}}{Container object.}
-
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_delete}{}}}
-\subsection{Method \code{containers_delete()}}{
-containers_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_auth}{}}}
-\subsection{Method \code{containers_auth()}}{
-containers_auth is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_auth(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_lock}{}}}
-\subsection{Method \code{containers_lock()}}{
-containers_lock is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_lock(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_unlock}{}}}
-\subsection{Method \code{containers_unlock()}}{
-containers_unlock is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_unlock(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_secret_mounts}{}}}
-\subsection{Method \code{containers_secret_mounts()}}{
-containers_secret_mounts is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_secret_mounts(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Container in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_current}{}}}
-\subsection{Method \code{containers_current()}}{
-containers_current is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_current()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-containers_list}{}}}
-\subsection{Method \code{containers_list()}}{
-containers_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$containers_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
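-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object; the filter uses the same list-of-lists syntax as the other list methods in this class:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-runningContainers <- arv$containers_list(filters = list(list("state", "=", "Running")), limit = "10")
-}
-}
-\if{html}{\out{}}
-
-}
-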
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-container_requests_get}{}}}
-\subsection{Method \code{container_requests_get()}}{
-container_requests_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$container_requests_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the containerRequest in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-container_requests_create}{}}}
-\subsection{Method \code{container_requests_create()}}{
-container_requests_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$container_requests_create(
- containerRequest,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{containerRequest}}{containerRequest object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
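-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object; the \code{containerRequest} fields shown are illustrative, not an exhaustive schema:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-cr <- list(name = "example request", state = "Uncommitted", container_image = "arvados/jobs", command = list("echo", "hello"), output_path = "/out")
-newRequest <- arv$container_requests_create(containerRequest = cr)
-}
-}
-\if{html}{\out{}}
-
-}
-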
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-container_requests_update}{}}}
-\subsection{Method \code{container_requests_update()}}{
-container_requests_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$container_requests_update(containerRequest, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{containerRequest}}{containerRequest object.}
-
-\item{\code{uuid}}{The UUID of the containerRequest in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-container_requests_delete}{}}}
-\subsection{Method \code{container_requests_delete()}}{
-container_requests_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$container_requests_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the containerRequest in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-container_requests_list}{}}}
-\subsection{Method \code{container_requests_list()}}{
-container_requests_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$container_requests_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL,
- includeTrash = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-
-\item{\code{includeTrash}}{Include container requests whose owner project is trashed.}
-}
-\if{html}{\out{
}}
-}
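-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-finishedRequests <- arv$container_requests_list(filters = list(list("state", "=", "Final")), limit = "20")
-}
-}
-\if{html}{\out{}}
-
-}
-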
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_get}{}}}
-\subsection{Method \code{groups_get()}}{
-groups_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_create}{}}}
-\subsection{Method \code{groups_create()}}{
-groups_create is a method defined in Arvados class that supports project creation.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_create(
- group,
- ensureUniqueName = "false",
- clusterID = NULL,
- async = "false"
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{group}}{Group object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-
-\item{\code{async}}{Defer permissions update.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_update}{}}}
-\subsection{Method \code{groups_update()}}{
-groups_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_update(group, uuid, async = "false")}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{group}}{Group object.}
-
-\item{\code{uuid}}{The UUID of the Group in question.}
-
-\item{\code{async}}{Defer permissions update.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_delete}{}}}
-\subsection{Method \code{groups_delete()}}{
-groups_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_contents}{}}}
-\subsection{Method \code{groups_contents()}}{
-groups_contents is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_contents(
- filters = NULL,
- where = NULL,
- order = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL,
- includeTrash = NULL,
- uuid = NULL,
- recursive = NULL,
- include = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-
-\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
-
-\item{\code{recursive}}{Include contents from child groups recursively.}
-
-\item{\code{include}}{Include objects referred to by the listed field in the "included" response field (only ownerUUID is supported).}
-}
-\if{html}{\out{
}}
-}
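-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object and \code{projectUUID} is a placeholder for a valid project UUID; the string flag follows the string defaults used elsewhere in this class:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-contents <- arv$groups_contents(uuid = projectUUID, recursive = "true")
-}
-}
-\if{html}{\out{}}
-
-}
-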
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_shared}{}}}
-\subsection{Method \code{groups_shared()}}{
-groups_shared is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_shared(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL,
- includeTrash = NULL,
- include = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-
-\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_trash}{}}}
-\subsection{Method \code{groups_trash()}}{
-groups_trash is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_trash(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_untrash}{}}}
-\subsection{Method \code{groups_untrash()}}{
-groups_untrash is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_untrash(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Group in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-groups_list}{}}}
-\subsection{Method \code{groups_list()}}{
-groups_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$groups_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL,
- includeTrash = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-
-\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
-}
-\if{html}{\out{
}}
-}
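-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object; filtering on group_class = "project" restricts the listing to projects:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-projects <- arv$groups_list(filters = list(list("group_class", "=", "project")))
-}
-}
-\if{html}{\out{}}
-
-}
-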
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_get}{}}}
-\subsection{Method \code{keep_services_get()}}{
-keep_services_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the keepService in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_create}{}}}
-\subsection{Method \code{keep_services_create()}}{
-keep_services_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_create(
- keepService,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{keepService}}{keepService object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_update}{}}}
-\subsection{Method \code{keep_services_update()}}{
-keep_services_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_update(keepService, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{keepService}}{keepService object.}
-
-\item{\code{uuid}}{The UUID of the keepService in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_delete}{}}}
-\subsection{Method \code{keep_services_delete()}}{
-keep_services_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the keepService in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_accessible}{}}}
-\subsection{Method \code{keep_services_accessible()}}{
-keep_services_accessible is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_accessible()}\if{html}{\out{
}}
-}
-
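-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-services <- arv$keep_services_accessible()
-}
-}
-\if{html}{\out{}}
-
-}
-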
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-keep_services_list}{}}}
-\subsection{Method \code{keep_services_list()}}{
-keep_services_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$keep_services_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_permission_give}{}}}
-\subsection{Method \code{project_permission_give()}}{
-project_permission_give is a method defined in Arvados class that enables sharing files with other users.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_permission_give(type, uuid, user)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{type}}{Possible options are can_read, can_write, or can_manage.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-
-\item{\code{user}}{The UUID of the person who gets the permission.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_permission_refuse}{}}}
-\subsection{Method \code{project_permission_refuse()}}{
-project_permission_refuse is a method defined in Arvados class that revokes sharing files with other users.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_permission_refuse(type, uuid, user)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{type}}{Possible options are can_read, can_write, or can_manage.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-
-\item{\code{user}}{The UUID of the person whose permission is being revoked.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_permission_update}{}}}
-\subsection{Method \code{project_permission_update()}}{
-project_permission_update is a method defined in Arvados class that enables updating permissions.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_permission_update(typeOld, typeNew, uuid, user)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{typeOld}}{The old permission type.}
-
-\item{\code{typeNew}}{The new permission type: can_read, can_write, or can_manage.}
-
-\item{\code{uuid}}{The UUID of a project or a file.}
-
-\item{\code{user}}{The UUID of the person whose permission is being updated.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-project_permission_check}{}}}
-\subsection{Method \code{project_permission_check()}}{
-project_permission_check is a method defined in Arvados class that enables checking file permissions.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$project_permission_check(uuid, user, type = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of a project or a file.}
-
-\item{\code{user}}{The UUID of the person whose permission is being checked.}
-
-\item{\code{type}}{Possible options are can_read, can_write, or can_manage.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_get}{}}}
-\subsection{Method \code{links_get()}}{
-links_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Link in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_create}{}}}
-\subsection{Method \code{links_create()}}{
-links_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_create(link, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{link}}{Link object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
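-A minimal usage sketch, assuming \code{arv} is an initialized Arvados object; the \code{link} fields shown follow the standard Arvados permission-link layout and should be treated as illustrative:
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-link <- list(link_class = "permission", name = "can_read", head_uuid = objectUUID, tail_uuid = userUUID)
-newLink <- arv$links_create(link = link)
-}
-}
-\if{html}{\out{}}
-
-}
-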
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_update}{}}}
-\subsection{Method \code{links_update()}}{
-links_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_update(link, uuid, async = "false")}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{link}}{Link object.}
-
-\item{\code{uuid}}{The UUID of the Link in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_delete}{}}}
-\subsection{Method \code{links_delete()}}{
-links_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Link in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_list}{}}}
-\subsection{Method \code{links_list()}}{
-links_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-links_get_permissions}{}}}
-\subsection{Method \code{links_get_permissions()}}{
-links_get_permissions is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$links_get_permissions(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Link in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-logs_get}{}}}
-\subsection{Method \code{logs_get()}}{
-logs_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$logs_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Log in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-logs_create}{}}}
-\subsection{Method \code{logs_create()}}{
-logs_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$logs_create(log, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{log}}{Log object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-logs_update}{}}}
-\subsection{Method \code{logs_update()}}{
-logs_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$logs_update(log, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{log}}{Log object.}
-
-\item{\code{uuid}}{The UUID of the Log in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-logs_delete}{}}}
-\subsection{Method \code{logs_delete()}}{
-logs_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$logs_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Log in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-logs_list}{}}}
-\subsection{Method \code{logs_list()}}{
-logs_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$logs_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_get}{}}}
-\subsection{Method \code{users_get()}}{
-users_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the User in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_create}{}}}
-\subsection{Method \code{users_create()}}{
-users_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_create(user, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{user}}{User object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_update}{}}}
-\subsection{Method \code{users_update()}}{
-users_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_update(user, uuid, bypassFederation = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{user}}{User object.}
-
-\item{\code{uuid}}{The UUID of the User in question.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, updating only the local instance database.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_delete}{}}}
-\subsection{Method \code{users_delete()}}{
-users_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the User in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_current}{}}}
-\subsection{Method \code{users_current()}}{
-users_current is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_current()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_system}{}}}
-\subsection{Method \code{users_system()}}{
-users_system is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_system()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_activate}{}}}
-\subsection{Method \code{users_activate()}}{
-users_activate is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_activate(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the User in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_setup}{}}}
-\subsection{Method \code{users_setup()}}{
-users_setup is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_setup(
- uuid = NULL,
- user = NULL,
- repo_name = NULL,
- vm_uuid = NULL,
- send_notification_email = "false"
-)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_unsetup}{}}}
-\subsection{Method \code{users_unsetup()}}{
-users_unsetup is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_unsetup(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the User in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_merge}{}}}
-\subsection{Method \code{users_merge()}}{
-users_merge is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_merge(
- newOwnerUUID,
- newUserToken = NULL,
- redirectToNewUser = NULL,
- oldUserUUID = NULL,
- newUserUUID = NULL
-)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-users_list}{}}}
-\subsection{Method \code{users_list()}}{
-users_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$users_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_get}{}}}
-\subsection{Method \code{repositories_get()}}{
-repositories_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Repository in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_create}{}}}
-\subsection{Method \code{repositories_create()}}{
-repositories_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_create(
- repository,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{repository}}{Repository object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_update}{}}}
-\subsection{Method \code{repositories_update()}}{
-repositories_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_update(repository, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{repository}}{Repository object.}
-
-\item{\code{uuid}}{The UUID of the Repository in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_delete}{}}}
-\subsection{Method \code{repositories_delete()}}{
-repositories_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Repository in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_get_all_permissions}{}}}
-\subsection{Method \code{repositories_get_all_permissions()}}{
-repositories_get_all_permissions is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_get_all_permissions()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-repositories_list}{}}}
-\subsection{Method \code{repositories_list()}}{
-repositories_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$repositories_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_get}{}}}
-\subsection{Method \code{virtual_machines_get()}}{
-virtual_machines_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the virtualMachine in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_create}{}}}
-\subsection{Method \code{virtual_machines_create()}}{
-virtual_machines_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_create(
- virtualMachine,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{virtualMachine}}{virtualMachine object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_update}{}}}
-\subsection{Method \code{virtual_machines_update()}}{
-virtual_machines_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_update(virtualMachine, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{virtualMachine}}{virtualMachine object.}
-
-\item{\code{uuid}}{The UUID of the virtualMachine in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_delete}{}}}
-\subsection{Method \code{virtual_machines_delete()}}{
-virtual_machines_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the virtualMachine in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_logins}{}}}
-\subsection{Method \code{virtual_machines_logins()}}{
-virtual_machines_logins is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_logins(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the virtualMachine in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_get_all_logins}{}}}
-\subsection{Method \code{virtual_machines_get_all_logins()}}{
-virtual_machines_get_all_logins is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_get_all_logins()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_list}{}}}
-\subsection{Method \code{virtual_machines_list()}}{
-virtual_machines_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$virtual_machines_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-workflows_get}{}}}
-\subsection{Method \code{workflows_get()}}{
-workflows_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$workflows_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Workflow in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-workflows_create}{}}}
-\subsection{Method \code{workflows_create()}}{
-workflows_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$workflows_create(
- workflow,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{workflow}}{Workflow object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-workflows_update}{}}}
-\subsection{Method \code{workflows_update()}}{
-workflows_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$workflows_update(workflow, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{workflow}}{Workflow object.}
-
-\item{\code{uuid}}{The UUID of the Workflow in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-workflows_delete}{}}}
-\subsection{Method \code{workflows_delete()}}{
-workflows_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$workflows_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the Workflow in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-workflows_list}{}}}
-\subsection{Method \code{workflows_list()}}{
-workflows_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$workflows_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_get}{}}}
-\subsection{Method \code{user_agreements_get()}}{
-user_agreements_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_get(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the userAgreement in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_create}{}}}
-\subsection{Method \code{user_agreements_create()}}{
-user_agreements_create is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_create(
- userAgreement,
- ensureUniqueName = "false",
- clusterID = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{userAgreement}}{userAgreement object.}
-
-\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
-
-\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_update}{}}}
-\subsection{Method \code{user_agreements_update()}}{
-user_agreements_update is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_update(userAgreement, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{userAgreement}}{userAgreement object.}
-
-\item{\code{uuid}}{The UUID of the userAgreement in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_delete}{}}}
-\subsection{Method \code{user_agreements_delete()}}{
-user_agreements_delete is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_delete(uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The UUID of the userAgreement in question.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_signatures}{}}}
-\subsection{Method \code{user_agreements_signatures()}}{
-user_agreements_signatures is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_signatures()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_sign}{}}}
-\subsection{Method \code{user_agreements_sign()}}{
-user_agreements_sign is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_sign()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_list}{}}}
-\subsection{Method \code{user_agreements_list()}}{
-user_agreements_list is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_list(
- filters = NULL,
- where = NULL,
- order = NULL,
- select = NULL,
- distinct = NULL,
- limit = "100",
- offset = "0",
- count = "exact",
- clusterID = NULL,
- bypassFederation = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
-
-\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_new}{}}}
-\subsection{Method \code{user_agreements_new()}}{
-user_agreements_new is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$user_agreements_new()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-configs_get}{}}}
-\subsection{Method \code{configs_get()}}{
-configs_get is a method defined in Arvados class.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$configs_get()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-getHostName}{}}}
-\subsection{Method \code{getHostName()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$getHostName()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-getToken}{}}}
-\subsection{Method \code{getToken()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$getToken()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-setRESTService}{}}}
-\subsection{Method \code{setRESTService()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$setRESTService(newREST)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Arvados-getRESTService}{}}}
-\subsection{Method \code{getRESTService()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Arvados$getRESTService()}\if{html}{\out{
}}
-}
-
-}
-}
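
For reference, the Arvados class documented in the file deleted above is the R6 client for the whole REST API: construct it once, then call the resource methods, whose list variants all accept filters, order, select, limit, and offset. The following minimal sketch is not part of the deleted file; the token, host, and paging values are placeholders, and only methods documented above are used.

    library(ArvadosR)
    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
    me <- arv$users_current()        # details of the requesting user
    # Page through log entries, newest first (limit/offset default to "100"/"0")
    page1 <- arv$logs_list(order = "created_at desc", limit = "50", offset = "0")
    page2 <- arv$logs_list(order = "created_at desc", limit = "50", offset = "50")
    arv$getHostName()                # host this client talks to
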
diff --git a/sdk/R/man/ArvadosFile.Rd b/sdk/R/man/ArvadosFile.Rd
deleted file mode 100644
index 81c25af5f1..0000000000
--- a/sdk/R/man/ArvadosFile.Rd
+++ /dev/null
@@ -1,475 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/ArvadosFile.R
-\name{ArvadosFile}
-\alias{ArvadosFile}
-\title{R6 Class Representing an ArvadosFile}
-\description{
-The ArvadosFile class represents a file inside an Arvados collection.
-}
-\examples{
-
-## ------------------------------------------------
-## Method `ArvadosFile$new`
-## ------------------------------------------------
-
-\dontrun{
-myFile <- ArvadosFile$new("myFile")
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$getName`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile$getName()
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$getFileListing`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile$getFileListing()
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$getSizeInBytes`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile$getSizeInBytes()
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$read`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-fileContent <- arvadosFile$read("text")
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$connection`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvConnection <- arvadosFile$connection("w")
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$flush`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvadosFile$write("This is new file content")
-arvadosFile$flush()
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$write`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvadosFile$write("This is new file content")
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$move`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile$move(newPath)
-}
-
-## ------------------------------------------------
-## Method `ArvadosFile$copy`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile$copy("NewName.format")
-}
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-ArvadosFile-new}{\code{ArvadosFile$new()}}
-\item \href{#method-ArvadosFile-getName}{\code{ArvadosFile$getName()}}
-\item \href{#method-ArvadosFile-getFileListing}{\code{ArvadosFile$getFileListing()}}
-\item \href{#method-ArvadosFile-getSizeInBytes}{\code{ArvadosFile$getSizeInBytes()}}
-\item \href{#method-ArvadosFile-get}{\code{ArvadosFile$get()}}
-\item \href{#method-ArvadosFile-getFirst}{\code{ArvadosFile$getFirst()}}
-\item \href{#method-ArvadosFile-getCollection}{\code{ArvadosFile$getCollection()}}
-\item \href{#method-ArvadosFile-setCollection}{\code{ArvadosFile$setCollection()}}
-\item \href{#method-ArvadosFile-getRelativePath}{\code{ArvadosFile$getRelativePath()}}
-\item \href{#method-ArvadosFile-getParent}{\code{ArvadosFile$getParent()}}
-\item \href{#method-ArvadosFile-setParent}{\code{ArvadosFile$setParent()}}
-\item \href{#method-ArvadosFile-read}{\code{ArvadosFile$read()}}
-\item \href{#method-ArvadosFile-connection}{\code{ArvadosFile$connection()}}
-\item \href{#method-ArvadosFile-flush}{\code{ArvadosFile$flush()}}
-\item \href{#method-ArvadosFile-write}{\code{ArvadosFile$write()}}
-\item \href{#method-ArvadosFile-move}{\code{ArvadosFile$move()}}
-\item \href{#method-ArvadosFile-copy}{\code{ArvadosFile$copy()}}
-\item \href{#method-ArvadosFile-duplicate}{\code{ArvadosFile$duplicate()}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-new}{}}}
-\subsection{Method \code{new()}}{
-Initializes a new ArvadosFile object.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$new(name)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the new file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Returns}{
-A new `ArvadosFile` object.
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-myFile <- ArvadosFile$new("myFile")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getName}{}}}
-\subsection{Method \code{getName()}}{
-Returns name of the file.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getName()}\if{html}{\out{
}}
-}
-
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile$getName()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getFileListing}{}}}
-\subsection{Method \code{getFileListing()}}{
-Returns the file listing as a character vector.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getFileListing(fullpath = TRUE)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{fullpath}}{If TRUE, returns full paths instead of bare file names.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile$getFileListing()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getSizeInBytes}{}}}
-\subsection{Method \code{getSizeInBytes()}}{
-Returns the file's content size in bytes.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getSizeInBytes()}\if{html}{\out{
}}
-}
-
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile$getSizeInBytes()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-get}{}}}
-\subsection{Method \code{get()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$get(fileLikeObjectName)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getFirst}{}}}
-\subsection{Method \code{getFirst()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getFirst()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getCollection}{}}}
-\subsection{Method \code{getCollection()}}{
-Returns collection UUID.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getCollection()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-setCollection}{}}}
-\subsection{Method \code{setCollection()}}{
-Sets new collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$setCollection(collection, setRecursively = TRUE)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getRelativePath}{}}}
-\subsection{Method \code{getRelativePath()}}{
-Returns file path relative to the root.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getRelativePath()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-getParent}{}}}
-\subsection{Method \code{getParent()}}{
-Returns the file's parent subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$getParent()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-setParent}{}}}
-\subsection{Method \code{setParent()}}{
-Sets a new parent for the file.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$setParent(newParent)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-read}{}}}
-\subsection{Method \code{read()}}{
-Read file content.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$read(contentType = "raw", offset = 0, length = 0)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{contentType}}{Type of content: either "text" or "raw".}
-
-\item{\code{offset}}{Offset in bytes at which to start reading.}
-
-\item{\code{length}}{Number of bytes to read (0 reads to the end of the file).}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-fileContent <- arvadosFile$read("text")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-connection}{}}}
-\subsection{Method \code{connection()}}{
-Get connection opened in "read" or "write" mode.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$connection(rw)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{rw}}{Connection mode: "r" for read or "w" for write.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvConnection <- arvadosFile$connection("w")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-flush}{}}}
-\subsection{Method \code{flush()}}{
-Writes the connection's content to the file, overriding the file's current content.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$flush()}\if{html}{\out{
}}
-}
-
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvadosFile$write("This is new file content")
-arvadosFile$flush()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-write}{}}}
-\subsection{Method \code{write()}}{
-Writes content to the file, overriding its current content.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$write(content, contentType = "text/html")}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{content}}{Content to write to the file.}
-
-\item{\code{contentType}}{MIME type of the content being written.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-arvadosFile <- collection$get(fileName)
-arvadosFile$write("This is new file content")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-move}{}}}
-\subsection{Method \code{move()}}{
-Moves file to a new location inside collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$move(destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{destination}}{Path to new folder.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile$move(newPath)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-copy}{}}}
-\subsection{Method \code{copy()}}{
-Copies file to a new location inside collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$copy(destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{destination}}{Path to new folder.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile$copy("NewName.format")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-ArvadosFile-duplicate}{}}}
-\subsection{Method \code{duplicate()}}{
-Duplicates the file and gives it a new name.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{ArvadosFile$duplicate(newName = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{newName}}{New name for duplicated file.}
-}
-\if{html}{\out{
}}
-}
-}
-}
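
Taken together, the ArvadosFile methods documented above form a simple write/flush/read cycle. The following minimal sketch combines the per-method examples from the deleted file; it is not part of the file itself, and the collection UUID and file name are placeholders.

    library(ArvadosR)
    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
    collection <- Collection$new(arv, "xxxxx-4zz18-xxxxxxxxxxxxxxx")
    arvadosFile <- collection$get("myFile.txt")
    arvadosFile$write("This is new file content")   # replaces the file's content
    arvadosFile$flush()                             # pushes the written content to the cluster
    fileContent <- arvadosFile$read("text")         # reads it back as text
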
diff --git a/sdk/R/man/ArvadosR.Rd b/sdk/R/man/ArvadosR.Rd
deleted file mode 100644
index 51edb8b138..0000000000
--- a/sdk/R/man/ArvadosR.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/ArvadosR.R
-\name{ArvadosR}
-\alias{ArvadosR}
-\title{ArvadosR}
-\description{
-Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.
-}
-\seealso{
-\itemize{
-\item https://arvados.org
-\item https://doc.arvados.org/sdk/R/index.html
-\item https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R}
-}
-\author{
-\itemize{
-\item Lucas Di Pentima
-\item Ward Vandewege
-\item Fuad Muhic
-\item Peter Amstutz
-\item Aneta Stanczyk
-\item Piotr Nowosielski}
-}
diff --git a/sdk/R/man/Collection.Rd b/sdk/R/man/Collection.Rd
deleted file mode 100644
index 0de9a842e6..0000000000
--- a/sdk/R/man/Collection.Rd
+++ /dev/null
@@ -1,480 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Collection.R
-\name{Collection}
-\alias{Collection}
-\title{R6 Class Representing Arvados Collection}
-\description{
-The Collection class provides an interface for working with Arvados collections;
-for example, actions like creating, updating, moving, or removing files are possible.
-}
-\examples{
-
-## ------------------------------------------------
-## Method `Collection$new`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, CollectionUUID)
-}
-
-## ------------------------------------------------
-## Method `Collection$readArvFile`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
-readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
-readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
-readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
-readFile <- collection$readArvFile(arvadosFile, Ncol = 4, Nrow = 32) # binary, only numbers
-readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text
-}
-
-## ------------------------------------------------
-## Method `Collection$writeFile`
-## ------------------------------------------------
-
-\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv
-writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv
-writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta
-writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table
-writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text
-writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary
-writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx
-}
-
-## ------------------------------------------------
-## Method `Collection$create`
-## ------------------------------------------------
-
-\dontrun{
-collection <- arv$collections_create(name = collectionTitle, description = collectionDescription, owner_uuid = collectionOwner, properties = list("ROX37196928443768648" = "ROX37742976443830153"))
-}
-
-## ------------------------------------------------
-## Method `Collection$remove`
-## ------------------------------------------------
-
-\dontrun{
-collection$remove(fileName.format)
-}
-
-## ------------------------------------------------
-## Method `Collection$move`
-## ------------------------------------------------
-
-\dontrun{
-collection$move("fileName.format", path)
-}
-
-## ------------------------------------------------
-## Method `Collection$copy`
-## ------------------------------------------------
-
-\dontrun{
-copied <- collection$copy("oldName.format", "newName.format")
-}
-
-## ------------------------------------------------
-## Method `Collection$refresh`
-## ------------------------------------------------
-
-\dontrun{
-collection$refresh()
-}
-
-## ------------------------------------------------
-## Method `Collection$getFileListing`
-## ------------------------------------------------
-
-\dontrun{
-list <- collection$getFileListing()
-}
-
-## ------------------------------------------------
-## Method `Collection$get`
-## ------------------------------------------------
-
-\dontrun{
-arvadosFile <- collection$get(fileName)
-}
-}
-\seealso{
-https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R
-}
-\section{Public fields}{
-\if{html}{\out{}}
-\describe{
-\item{\code{uuid}}{The Collection's UUID.}
-}
-\if{html}{\out{
}}
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-Collection-new}{\code{Collection$new()}}
-\item \href{#method-Collection-add}{\code{Collection$add()}}
-\item \href{#method-Collection-readArvFile}{\code{Collection$readArvFile()}}
-\item \href{#method-Collection-writeFile}{\code{Collection$writeFile()}}
-\item \href{#method-Collection-create}{\code{Collection$create()}}
-\item \href{#method-Collection-remove}{\code{Collection$remove()}}
-\item \href{#method-Collection-move}{\code{Collection$move()}}
-\item \href{#method-Collection-copy}{\code{Collection$copy()}}
-\item \href{#method-Collection-refresh}{\code{Collection$refresh()}}
-\item \href{#method-Collection-getFileListing}{\code{Collection$getFileListing()}}
-\item \href{#method-Collection-get}{\code{Collection$get()}}
-\item \href{#method-Collection-getRESTService}{\code{Collection$getRESTService()}}
-\item \href{#method-Collection-setRESTService}{\code{Collection$setRESTService()}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-new}{}}}
-\subsection{Method \code{new()}}{
-Initializes a new Collection object.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$new(api, uuid)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{api}}{Arvados API client object.}
-
-\item{\code{uuid}}{The UUID of the Collection.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Returns}{
-A new `Collection` object.
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, CollectionUUID)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-add}{}}}
-\subsection{Method \code{add()}}{
-Adds the ArvadosFile or Subcollection specified by content to the collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$add(content, relativePath = "")}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{content}}{Content to be added.}
-
-\item{\code{relativePath}}{Path at which to add the content.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-readArvFile}{}}}
-\subsection{Method \code{readArvFile()}}{
-Read file content.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$readArvFile(
- file,
- con,
- sep = ",",
- istable = NULL,
- fileclass = "SeqFastadna",
- Ncol = NULL,
- Nrow = NULL,
- wantedFunction = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{file}}{Name of the file.}
-
-\item{\code{sep}}{Separator used when reading csv or tsv file formats.}
-
-\item{\code{istable}}{Used when reading a txt file, to indicate whether the file is a table or plain text.}
-
-\item{\code{fileclass}}{Used when reading a fasta file, to set the file class.}
-
-\item{\code{Ncol}}{Used when reading a binary file, to set the number of columns of the resulting data.frame.}
-
-\item{\code{Nrow}}{Used when reading a binary file, to set the number of rows of the resulting data.frame.}
-
-\item{\code{con}}{Connection used to read the file.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
-readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
-readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
-readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
-readFile <- collection$readArvFile(arvadosFile, Ncol = 4, Nrow = 32) # binary, only numbers
-readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-writeFile}{}}}
-\subsection{Method \code{writeFile()}}{
-Writes file content.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$writeFile(
- name,
- file,
- collectionUUID,
- fileFormat,
- istable = NULL,
- seqName = NULL
-)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the file.}
-
-\item{\code{file}}{File to be saved.}
-
-\item{\code{collectionUUID}}{UUID of the collection the file is written to.}
-
-\item{\code{fileFormat}}{Format of the file, e.g. "csv", "tsv", "fasta", "txt", "dat", or "xlsx".}
-
-\item{\code{istable}}{Used when writing a txt file, to indicate whether the file is a table or plain text.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- Collection$new(arv, collectionUUID)
-writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv
-writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv
-writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta
-writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table
-writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text
-writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary
-writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-create}{}}}
-\subsection{Method \code{create()}}{
-Creates one or more ArvadosFiles and adds them to the collection at the specified path.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$create(files)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{files}}{Paths of the files to be created.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection <- arv$collections_create(name = collectionTitle, description = collectionDescription, owner_uuid = collectionOwner, properties = list("ROX37196928443768648" = "ROX37742976443830153"))
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-remove}{}}}
-\subsection{Method \code{remove()}}{
-Remove one or more files from the collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$remove(paths)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{paths}}{Paths of the content to be removed.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection$remove(fileName.format)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-move}{}}}
-\subsection{Method \code{move()}}{
-Moves ArvadosFile or Subcollection to another location in the collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$move(content, destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{content}}{Content to be moved.}
-
-\item{\code{destination}}{Path to move content.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection$move("fileName.format", path)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-copy}{}}}
-\subsection{Method \code{copy()}}{
-Copies ArvadosFile or Subcollection to another location in the collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$copy(content, destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{content}}{Content to be copied.}
-
-\item{\code{destination}}{Path to copy the content to.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-copied <- collection$copy("oldName.format", "newName.format")
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-refresh}{}}}
-\subsection{Method \code{refresh()}}{
-Refreshes the environment.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$refresh()}\if{html}{\out{
}}
-}
-
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-collection$refresh()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-getFileListing}{}}}
-\subsection{Method \code{getFileListing()}}{
-Returns the collection's file listing as a character vector.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$getFileListing()}\if{html}{\out{
}}
-}
-
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-list <- collection$getFileListing()
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-get}{}}}
-\subsection{Method \code{get()}}{
-If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$get(relativePath)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{relativePath}}{Path from which the content is taken.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Examples}{
-\if{html}{\out{}}
-\preformatted{\dontrun{
-arvadosFile <- collection$get(fileName)
-}
-}
-\if{html}{\out{
}}
-
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-getRESTService}{}}}
-\subsection{Method \code{getRESTService()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$getRESTService()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Collection-setRESTService}{}}}
-\subsection{Method \code{setRESTService()}}{
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Collection$setRESTService(newRESTService)}\if{html}{\out{
}}
-}
-
-}
-}
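
The Collection methods documented above chain in the same way. The following minimal sketch of listing, moving, and copying content is not part of the deleted file; it uses only calls documented there, and the UUID and paths are placeholders.

    library(ArvadosR)
    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
    collection <- Collection$new(arv, "xxxxx-4zz18-xxxxxxxxxxxxxxx")
    collection$getFileListing()                     # relative paths of everything inside
    collection$move("oldName.format", "archive/")   # relocate a file within the collection
    copied <- collection$copy("archive/oldName.format", "newName.format")
    collection$refresh()                            # re-read the collection from the cluster
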
diff --git a/sdk/R/man/Subcollection.Rd b/sdk/R/man/Subcollection.Rd
deleted file mode 100644
index 9faf0c279e..0000000000
--- a/sdk/R/man/Subcollection.Rd
+++ /dev/null
@@ -1,250 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Subcollection.R
-\name{Subcollection}
-\alias{Subcollection}
-\title{R6 Class Representing a Subcollection}
-\description{
-The Subcollection class represents a folder inside an Arvados collection.
-It is essentially a composite of ArvadosFile objects and other Subcollections.
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-Subcollection-new}{\code{Subcollection$new()}}
-\item \href{#method-Subcollection-getName}{\code{Subcollection$getName()}}
-\item \href{#method-Subcollection-getRelativePath}{\code{Subcollection$getRelativePath()}}
-\item \href{#method-Subcollection-add}{\code{Subcollection$add()}}
-\item \href{#method-Subcollection-remove}{\code{Subcollection$remove()}}
-\item \href{#method-Subcollection-getFileListing}{\code{Subcollection$getFileListing()}}
-\item \href{#method-Subcollection-getSizeInBytes}{\code{Subcollection$getSizeInBytes()}}
-\item \href{#method-Subcollection-move}{\code{Subcollection$move()}}
-\item \href{#method-Subcollection-copy}{\code{Subcollection$copy()}}
-\item \href{#method-Subcollection-duplicate}{\code{Subcollection$duplicate()}}
-\item \href{#method-Subcollection-get}{\code{Subcollection$get()}}
-\item \href{#method-Subcollection-getFirst}{\code{Subcollection$getFirst()}}
-\item \href{#method-Subcollection-setCollection}{\code{Subcollection$setCollection()}}
-\item \href{#method-Subcollection-getCollection}{\code{Subcollection$getCollection()}}
-\item \href{#method-Subcollection-getParent}{\code{Subcollection$getParent()}}
-\item \href{#method-Subcollection-setParent}{\code{Subcollection$setParent()}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-new}{}}}
-\subsection{Method \code{new()}}{
-Initializes a new Subcollection object.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$new(name)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the new subcollection.}
-}
-\if{html}{\out{
}}
-}
-\subsection{Returns}{
-A new `Subcollection` object.
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getName}{}}}
-\subsection{Method \code{getName()}}{
-Returns name of the subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getName()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getRelativePath}{}}}
-\subsection{Method \code{getRelativePath()}}{
-Returns Subcollection's path relative to the root.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getRelativePath()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-add}{}}}
-\subsection{Method \code{add()}}{
-Adds ArvadosFile or Subcollection specified by content to the Subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$add(content)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{content}}{Content to be added.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-remove}{}}}
-\subsection{Method \code{remove()}}{
-Removes ArvadosFile or Subcollection specified by name from the Subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$remove(name)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the item to be removed.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getFileListing}{}}}
-\subsection{Method \code{getFileListing()}}{
-Returns the Subcollection's file listing as a character vector.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getFileListing(fullPath = TRUE)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{fullPath}}{If TRUE, returns full paths instead of bare file names.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getSizeInBytes}{}}}
-\subsection{Method \code{getSizeInBytes()}}{
-Returns the Subcollection's content size in bytes.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getSizeInBytes()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-move}{}}}
-\subsection{Method \code{move()}}{
-Moves Subcollection to a new location inside collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$move(destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{destination}}{Path to move the file.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-copy}{}}}
-\subsection{Method \code{copy()}}{
-Copies Subcollection to a new location inside collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$copy(destination)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{destination}}{Path to copy the file.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-duplicate}{}}}
-\subsection{Method \code{duplicate()}}{
-Duplicates the Subcollection and gives it a new name.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$duplicate(newName = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{newName}}{New name for the duplicated subcollection.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-get}{}}}
-\subsection{Method \code{get()}}{
-If name is valid, returns the ArvadosFile or Subcollection specified by name, else returns NULL.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$get(name)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{}}
-\describe{
-\item{\code{name}}{Name of the ArvadosFile or Subcollection to look up.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getFirst}{}}}
-\subsection{Method \code{getFirst()}}{
-Returns files in Subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getFirst()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-setCollection}{}}}
-\subsection{Method \code{setCollection()}}{
-Sets Collection by its UUID.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$setCollection(collection, setRecursively = TRUE)}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getCollection}{}}}
-\subsection{Method \code{getCollection()}}{
-Returns Collection of Subcollection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getCollection()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-getParent}{}}}
-\subsection{Method \code{getParent()}}{
-Returns Collection UUID.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$getParent()}\if{html}{\out{
}}
-}
-
-}
-\if{html}{\out{
}}
-\if{html}{\out{}}
-\if{latex}{\out{\hypertarget{method-Subcollection-setParent}{}}}
-\subsection{Method \code{setParent()}}{
-Sets new Collection.
-\subsection{Usage}{
-\if{html}{\out{}}\preformatted{Subcollection$setParent(newParent)}\if{html}{\out{
}}
-}
-
-}
-}
diff --git a/sdk/R/man/figures/dax.png b/sdk/R/man/figures/dax.png
deleted file mode 100644
index c511f0ec51..0000000000
Binary files a/sdk/R/man/figures/dax.png and /dev/null differ
diff --git a/sdk/R/man/listAll.Rd b/sdk/R/man/listAll.Rd
deleted file mode 100644
index b9a5c5d174..0000000000
--- a/sdk/R/man/listAll.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/util.R
-\name{listAll}
-\alias{listAll}
-\title{listAll}
-\usage{
-listAll(fn, ...)
-}
-\arguments{
-\item{fn}{Arvados method used to retrieve items from REST service.}
-
-\item{...}{Optional arguments which will be pased to fn .}
-}
-\description{
-List all resources even if the number of items is greater than maximum API limit.
-}
-\examples{
-\dontrun{
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test\%"))
-}
-}
diff --git a/sdk/R/man/print.ArvadosFile.Rd b/sdk/R/man/print.ArvadosFile.Rd
deleted file mode 100644
index 43f734121e..0000000000
--- a/sdk/R/man/print.ArvadosFile.Rd
+++ /dev/null
@@ -1,16 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/ArvadosFile.R
-\name{print.ArvadosFile}
-\alias{print.ArvadosFile}
-\title{print.ArvadosFile}
-\usage{
-\method{print}{ArvadosFile}(x, ...)
-}
-\arguments{
-\item{x}{Instance of ArvadosFile class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for ArvadosFile class
-}
diff --git a/sdk/R/man/print.Collection.Rd b/sdk/R/man/print.Collection.Rd
deleted file mode 100644
index 3de4bd541a..0000000000
--- a/sdk/R/man/print.Collection.Rd
+++ /dev/null
@@ -1,16 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Collection.R
-\name{print.Collection}
-\alias{print.Collection}
-\title{print.Collection}
-\usage{
-\method{print}{Collection}(x, ...)
-}
-\arguments{
-\item{x}{Instance of Collection class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for Collection class
-}
diff --git a/sdk/R/man/print.Subcollection.Rd b/sdk/R/man/print.Subcollection.Rd
deleted file mode 100644
index 3bc62c0908..0000000000
--- a/sdk/R/man/print.Subcollection.Rd
+++ /dev/null
@@ -1,16 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Subcollection.R
-\name{print.Subcollection}
-\alias{print.Subcollection}
-\title{print.Subcollection}
-\usage{
-\method{print}{Subcollection}(x, ...)
-}
-\arguments{
-\item{x}{Instance of Subcollection class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for Subcollection class
-}
diff --git a/sdk/cli/arvados-cli.gemspec b/sdk/cli/arvados-cli.gemspec
index 67f93c19c3..dd5166091a 100644
--- a/sdk/cli/arvados-cli.gemspec
+++ b/sdk/cli/arvados-cli.gemspec
@@ -2,28 +2,36 @@
#
# SPDX-License-Identifier: Apache-2.0
-if not File.exist?('/usr/bin/git') then
- STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
- exit
-end
-
-git_dir = ENV["GIT_DIR"]
-git_work = ENV["GIT_WORK_TREE"]
begin
- ENV["GIT_DIR"] = File.expand_path "#{__dir__}/../../.git"
- ENV["GIT_WORK_TREE"] = File.expand_path "#{__dir__}/../.."
- git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H #{__dir__}`.chomp.split(":")
- if ENV["ARVADOS_BUILDING_VERSION"]
- version = ENV["ARVADOS_BUILDING_VERSION"]
- else
- version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
+ git_root = "#{__dir__}/../.."
+ git_timestamp, git_hash = IO.popen(
+ ["git", "-C", git_root,
+ "log", "-n1", "--first-parent", "--format=%ct:%H",
+ "--", "build/version-at-commit.sh", "sdk/ruby", "sdk/cli"],
+ ) do |git_log|
+ git_log.readline.chomp.split(":")
end
- version = version.sub("~dev", ".dev").sub("~rc", ".rc")
- git_timestamp = Time.at(git_timestamp.to_i).utc
-ensure
- ENV["GIT_DIR"] = git_dir
- ENV["GIT_WORK_TREE"] = git_work
+rescue Errno::ENOENT
+ $stderr.puts("failed to get version information: 'git' not found")
+ exit 69 # EX_UNAVAILABLE
+end
+
+if $? != 0
+ $stderr.puts("failed to get version information: 'git log' exited #{$?}")
+ exit 65 # EX_DATAERR
+end
+git_timestamp = Time.at(git_timestamp.to_i).utc
+version = ENV["ARVADOS_BUILDING_VERSION"] || IO.popen(
+ ["#{git_root}/build/version-at-commit.sh", git_hash],
+ ) do |ver_out|
+ ver_out.readline.chomp.encode("utf-8")
end
+version = version.sub("~dev", ".dev").sub("~rc", ".rc")
+arv_dep_version = if dev_index = (version =~ /\.dev/)
+ "~> #{version[...dev_index]}.a"
+ else
+ "= #{version}"
+ end
Gem::Specification.new do |s|
s.name = 'arvados-cli'
@@ -39,11 +47,14 @@ Gem::Specification.new do |s|
s.executables << "arv"
s.executables << "arv-tag"
s.required_ruby_version = '>= 2.7.0'
- s.add_runtime_dependency 'arvados', '~> 2.8.a'
+ s.add_runtime_dependency 'arvados', arv_dep_version
# arvados fork of google-api-client gem with old API and new
# compatibility fixes, built from ../ruby-google-api-client/
s.add_runtime_dependency('arvados-google-api-client', '>= 0.8.7.5', '< 0.8.9')
- s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 8.0'
+ # Rails 7.1.3.x is the last version to support Ruby 2.7.0 in Ubuntu 20.04.
+ # Later 7.1.x releases require Ruby >= 2.7.3:
+ #
+ s.add_runtime_dependency 'activesupport', '~> 7.1.3.4'
s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
s.add_runtime_dependency 'optimist', '~> 3.0'
s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
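
As an aside on the version logic above: a development build such as 2.8.0~dev20240105 is normalized to 2.8.0.dev20240105 and pins the arvados dependency to the matching prerelease series, while a plain release pins it exactly. A minimal Python sketch of the same mapping (illustrative only; the authoritative logic is the Ruby above):

    import re

    def arv_dep_constraint(version):
        # Mirror of the gemspec logic: "~dev"/"~rc" become ".dev"/".rc";
        # a dev build pins to the next prerelease series ("~> X.Y.Z.a"),
        # a release pins exactly ("= X.Y.Z").
        version = version.replace("~dev", ".dev").replace("~rc", ".rc")
        m = re.search(r"\.dev", version)
        if m:
            return "~> {}.a".format(version[:m.start()])
        return "= {}".format(version)

    assert arv_dep_constraint("2.8.0~dev20240105") == "~> 2.8.0.a"
    assert arv_dep_constraint("2.8.1") == "= 2.8.1"
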
diff --git a/sdk/cli/bin/arv b/sdk/cli/bin/arv
index e5aa4e4f2a..6699aa9f32 100755
--- a/sdk/cli/bin/arv
+++ b/sdk/cli/bin/arv
@@ -274,7 +274,7 @@ def fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts
:parameters => {"uuid" => uuid},
:authenticated => false,
:headers => {
- authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']
})
obj = check_response result
rescue => e
@@ -323,7 +323,7 @@ def arv_edit client, arvados, global_opts, remaining_opts
:body_object => { rsc.singularize => newobj },
:authenticated => false,
:headers => {
- authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']
})
results = check_response result
STDERR.puts "Updated object #{results['uuid']}"
@@ -405,7 +405,7 @@ def arv_create client, arvados, global_opts, remaining_opts
:body_object => {object_type => newobj},
:authenticated => false,
:headers => {
- authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']
})
results = check_response result
puts "Created object #{results['uuid']}"
@@ -677,7 +677,7 @@ when
uri_s = eval(api_method).generate_uri(request_parameters)
Curl::Easy.perform(uri_s) do |curl|
curl.headers['Accept'] = 'text/plain'
- curl.headers['Authorization'] = "OAuth2 #{ENV['ARVADOS_API_TOKEN']}"
+ curl.headers['Authorization'] = "Bearer #{ENV['ARVADOS_API_TOKEN']}"
if ENV['ARVADOS_API_HOST_INSECURE']
curl.ssl_verify_peer = false
curl.ssl_verify_host = false
@@ -694,7 +694,7 @@ else
:body_object => request_body,
:authenticated => false,
:headers => {
- authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']
})
end
diff --git a/sdk/cli/bin/arv-tag b/sdk/cli/bin/arv-tag
index f709020fc7..3e4fbea1b4 100755
--- a/sdk/cli/bin/arv-tag
+++ b/sdk/cli/bin/arv-tag
@@ -25,7 +25,7 @@ def api_call(method, parameters:{}, request_body:{})
:body_object => request_body,
:authenticated => false,
:headers => {
- authorization: "OAuth2 #{ENV['ARVADOS_API_TOKEN']}",
+ authorization: "Bearer #{ENV['ARVADOS_API_TOKEN']}",
})
begin
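
The OAuth2-to-Bearer changes above alter only the Authorization header scheme; the token itself and the endpoints are unchanged. Any client making raw API calls follows the same pattern, as in this standard-library sketch (assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the environment):

    import json
    import os
    import urllib.request

    req = urllib.request.Request(
        "https://{}/arvados/v1/users/current".format(os.environ["ARVADOS_API_HOST"]),
        headers={
            # Previously "OAuth2 <token>"; both schemes carry the same token.
            "Authorization": "Bearer " + os.environ["ARVADOS_API_TOKEN"],
        },
    )
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp)["uuid"])
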
diff --git a/sdk/cwl/README.rst b/sdk/cwl/README.rst
index 45d0be6ddb..21930134d9 100644
--- a/sdk/cwl/README.rst
+++ b/sdk/cwl/README.rst
@@ -2,4 +2,94 @@
..
.. SPDX-License-Identifier: Apache-2.0
-Arvados Common Workflow Language (CWL) runner.
+==================
+Arvados CWL Runner
+==================
+
+Overview
+--------
+
+This package provides the ``arvados-cwl-runner`` tool to register and run Common Workflow Language workflows in Arvados_.
+
+.. _Arvados: https://arvados.org/
+
+Installation
+------------
+
+Installing under your user account
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method lets you install the package without root access. However,
+other users on the same system will need to reconfigure their shell in order
+to be able to use it. Run the following to install the package in an
+environment at ``~/arvclients``::
+
+ python3 -m venv ~/arvclients
+ ~/arvclients/bin/pip install arvados-cwl-runner
+
+Command line tools will be installed under ``~/arvclients/bin``. You can
+test one by running::
+
+ ~/arvclients/bin/arvados-cwl-runner --version
+
+You can run these tools by specifying the full path every time, or you can
+add the directory to your shell's search path by running::
+
+ export PATH="$PATH:$HOME/arvclients/bin"
+
+You can make this search path change permanent by adding this command to
+your shell's configuration, for example ``~/.bashrc`` if you're using bash.
+You can test the change by running::
+
+ arvados-cwl-runner --version
+
+Installing on Debian and Ubuntu systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arvados publishes packages for Debian 11 "bullseye," Debian 12 "bookworm," Ubuntu 20.04 "focal," and Ubuntu 22.04 "jammy." You can install the Python SDK package on any of these distributions by running the following commands::
+
+ sudo install -d /etc/apt/keyrings
+ sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
+  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF
+  Types: deb
+  URIs: https://apt.arvados.org/$(lsb_release -cs)
+  Suites: $(lsb_release -cs)
+  Components: main
+  Signed-by: /etc/apt/keyrings/arvados.asc
+  EOF
+  sudo apt update
+  sudo apt install python3-arvados-cwl-runner
+
+Installing on Red Hat, AlmaLinux, and Rocky Linux systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arvados publishes packages for Red Hat Enterprise Linux and compatible distributions such as AlmaLinux and Rocky Linux. You can install the package by running the following commands::
+
+  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'
+  [arvados]
+  name=Arvados
+  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/
+  gpgcheck=1
+  gpgkey=https://rpm.arvados.org/RHEL/RPM-GPG-KEY-arvados
+  EOF
+  sudo dnf install python3-arvados-cwl-runner
+
+Configuration
+-------------
+
+This client software needs two pieces of information to connect to
+Arvados: the DNS name of the API server, and an API authorization
+token. `The Arvados user documentation
+<https://doc.arvados.org/user/reference/api-tokens.html>`_ describes
+how to find this information in the Arvados Workbench, and install it
+on your system.
+
+Testing and Development
+-----------------------
+
+This package is one part of the Arvados source package, and it has
+integration tests to check interoperability with other Arvados
+components. Our `hacking guide
+<https://dev.arvados.org/projects/arvados/wiki/Hacking>`_
+describes how to set up a development environment and run tests.
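
As a quick way to confirm both settings from the Configuration section are in place, the Python SDK reads them from the environment automatically; a minimal sketch (assumes the arvados package is installed):

    import arvados

    # arvados.api() reads ARVADOS_API_HOST and ARVADOS_API_TOKEN from the
    # environment; users().current() verifies the token against the server.
    api = arvados.api("v1")
    print("Authenticated as", api.users().current().execute()["uuid"])
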
diff --git a/sdk/cwl/arvados_cwl/__init__.py b/sdk/cwl/arvados_cwl/__init__.py
index 7e13488758..c2e6525ff4 100644
--- a/sdk/cwl/arvados_cwl/__init__.py
+++ b/sdk/cwl/arvados_cwl/__init__.py
@@ -6,9 +6,6 @@
# Implement cwl-runner interface for submitting and running work on Arvados, using
# the Crunch containers API.
-from future.utils import viewitems
-from builtins import str
-
import argparse
import importlib.metadata
import importlib.resources
@@ -186,11 +183,18 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
parser.add_argument("--enable-dev", action="store_true",
help="Enable loading and running development versions "
"of the CWL standards.", default=False)
- parser.add_argument('--storage-classes', default="default",
- help="Specify comma separated list of storage classes to be used when saving final workflow output to Keep.")
- parser.add_argument('--intermediate-storage-classes', default="default",
- help="Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.")
-
+ parser.add_argument(
+ '--storage-classes',
+ type=arv_cmd.UniqueSplit(),
+ default=[],
+ help="Specify comma separated list of storage classes to be used when saving final workflow output to Keep.",
+ )
+ parser.add_argument(
+ '--intermediate-storage-classes',
+ type=arv_cmd.UniqueSplit(),
+ default=[],
+ help="Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.",
+ )
parser.add_argument("--intermediate-output-ttl", type=int, metavar="N",
help="If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).",
default=0)
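
arv_cmd.UniqueSplit is defined elsewhere in the Python SDK; as a rough sketch, such an argparse type callable plausibly splits on commas and de-duplicates while preserving order (an assumption, not the actual implementation):

    class UniqueSplit:
        # argparse `type=` callable: "gold,archive,gold" -> ["gold", "archive"]
        def __call__(self, value):
            result = []
            for item in value.split(","):
                item = item.strip()
                if item and item not in result:
                    result.append(item)
            return result

    assert UniqueSplit()("gold,archive,gold") == ["gold", "archive"]
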
@@ -226,7 +230,19 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
default=5*60, dest="http_timeout", help="API request timeout in seconds. Default is 300 seconds (5 minutes).")
parser.add_argument("--defer-downloads", action="store_true", default=False,
- help="When submitting a workflow, defer downloading HTTP URLs to workflow launch instead of downloading to Keep before submit.")
+                        help="When submitting a workflow, defer downloading HTTP or S3 URLs until the launch of the workflow runner container, instead of downloading to Keep before submit.")
+
+ parser.add_argument("--enable-aws-credential-capture", action="store_true", default=False, dest="aws_credential_capture",
+ help="When submitting a workflow that requires AWS credentials, capture them from the local environment for use by the workflow runner container.")
+
+ parser.add_argument("--disable-aws-credential-capture", action="store_false", default=False, dest="aws_credential_capture",
+                        help="Do not capture AWS credentials from the local environment; use credentials registered with Arvados instead.")
+
+ parser.add_argument("--s3-public-bucket", action="store_true",
+ help="Downloads are from a public bucket, so no AWS credentials are required.")
+
+ parser.add_argument("--use-credential", default=None, dest="selected_credential",
+ help="Name or uuid of a credential registered with Arvados that will be used to fetch external resources.")
parser.add_argument("--varying-url-params", type=str, default="",
help="A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.")
@@ -238,6 +254,14 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
exgroup.add_argument("--enable-preemptible", dest="enable_preemptible", default=None, action="store_true", help="Use preemptible instances. Control individual steps with arv:UsePreemptible hint.")
exgroup.add_argument("--disable-preemptible", dest="enable_preemptible", default=None, action="store_false", help="Don't use preemptible instances.")
+ exgroup = parser.add_mutually_exclusive_group()
+ exgroup.add_argument("--enable-resubmit-non-preemptible", dest="enable_resubmit_non_preemptible",
+ default=None, action="store_true",
+ help="If a workflow step fails due to the instance it is running on being preempted, re-submit the container with the `preemptible` flag disabled. Control individual steps with arv:PreemptionBehavior hint.")
+ exgroup.add_argument("--disable-resubmit-non-preemptible", dest="enable_resubmit_non_preemptible",
+ default=None, action="store_false",
+                        help="Don't resubmit when a preemptible instance is reclaimed.")
+
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--copy-deps", dest="copy_deps", default=None, action="store_true", help="Copy dependencies into the destination project.")
exgroup.add_argument("--no-copy-deps", dest="copy_deps", default=None, action="store_false", help="Leave dependencies where they are.")
@@ -272,7 +296,7 @@ def add_arv_hints():
cwltool.command_line_tool.ACCEPTLIST_RE = cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE
supported_versions = ["v1.0", "v1.1", "v1.2"]
for s in supported_versions:
- customschema = importlib.resources.read_text(__name__, f'arv-cwl-schema-{s}.yml', 'utf-8')
+ customschema = importlib.resources.read_text(__name__, f'arv-cwl-schema-{s}.yml', encoding='utf-8')
use_custom_schema(s, "http://arvados.org/cwl", customschema)
cwltool.process.supportedProcessRequirements.extend([
"http://arvados.org/cwl#RunInSingleContainer",
@@ -291,6 +315,8 @@ def add_arv_hints():
"http://arvados.org/cwl#OutputCollectionProperties",
"http://arvados.org/cwl#KeepCacheTypeRequirement",
"http://arvados.org/cwl#OutOfMemoryRetry",
+ "http://arvados.org/cwl#PreemptionBehavior",
+ "http://arvados.org/cwl#ROCmRequirement",
])
def exit_signal_handler(sigcode, frame):
@@ -333,7 +359,7 @@ def main(args=sys.argv[1:],
add_arv_hints()
- for key, val in viewitems(cwltool.argparser.get_default_args()):
+ for key, val in cwltool.argparser.get_default_args().items():
if not hasattr(arvargs, key):
setattr(arvargs, key, val)
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
index aeb41db568..8743b69cca 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
@@ -385,6 +385,53 @@ $graph:
doc: |
Maximum number of GPU devices to request. If not specified,
same as `cudaDeviceCountMin`.
+ cudaVram:
+ type: ['null', long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20)
+
+
+- name: ROCmRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Require support for AMD ROCm (GPU hardware acceleration).
+ fields:
+ class:
+ type: string
+ doc: 'arv:ROCmRequirement'
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ rocmDriverVersion:
+ type: string
+ doc: |
+ Compatible ROCm driver version, in X.Y format, e.g. "6.2".
+ rocmTarget:
+ type:
+ - 'string'
+ - 'string[]'
+ doc: |
+ Compatible GPU architecture/ROCm LLVM targets, e.g. "gfx1100".
+ rocmDeviceCountMin:
+ type: ['null', int, cwl:Expression]
+ default: 1
+ doc: |
+ Minimum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMax`. If neither are specified,
+ default 1.
+ rocmDeviceCountMax:
+ type: ['null', int, cwl:Expression]
+ doc: |
+ Maximum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMin`.
+ rocmVram:
+ type: [long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20).
- name: UsePreemptible
type: record
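
For orientation, a hint written against this schema is translated by arvados-cwl-runner (see the arvcontainer.py changes later in this diff) into the new 'gpu' runtime constraint. A sketch with illustrative values:

    # Parsed arv:ROCmRequirement hint as the runner sees it:
    rocm_req = {
        "rocmDriverVersion": "6.2",
        "rocmTarget": ["gfx1100"],
        "rocmDeviceCountMin": 1,
        "rocmVram": 8192,            # mebibytes, per the schema
    }

    # Resulting container runtime constraint (Arvados 3.1+ API only):
    gpu = {
        "stack": "rocm",
        "device_count": 1,
        "driver_version": "6.2",
        "hardware_target": ["gfx1100"],
        "vram": 8192 * 1024 * 1024,  # converted from MiB to bytes
    }
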
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
index 0e51d50080..67b714f159 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
@@ -328,6 +328,53 @@ $graph:
doc: |
Maximum number of GPU devices to request. If not specified,
same as `cudaDeviceCountMin`.
+ cudaVram:
+ type: ['null', long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20)
+
+
+- name: ROCmRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Require support for AMD ROCm (GPU hardware acceleration).
+ fields:
+ class:
+ type: string
+ doc: 'arv:ROCmRequirement'
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ rocmDriverVersion:
+ type: string
+ doc: |
+ Compatible ROCm driver version, in X.Y format, e.g. "6.2".
+ rocmTarget:
+ type:
+ - 'string'
+ - 'string[]'
+ doc: |
+ Compatible GPU architecture/ROCm LLVM targets, e.g. "gfx1100".
+ rocmDeviceCountMin:
+ type: ['null', int, cwl:Expression]
+ default: 1
+ doc: |
+ Minimum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMax`. If neither are specified,
+ default 1.
+ rocmDeviceCountMax:
+ type: ['null', int, cwl:Expression]
+ doc: |
+ Maximum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMin`.
+ rocmVram:
+ type: [long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20).
- name: UsePreemptible
type: record
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
index a753579c9a..a568327dec 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
@@ -330,6 +330,53 @@ $graph:
doc: |
Maximum number of GPU devices to request. If not specified,
same as `cudaDeviceCountMin`.
+ cudaVram:
+ type: ['null', long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20)
+
+
+- name: ROCmRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Require support for AMD ROCm (GPU hardware acceleration).
+ fields:
+ class:
+ type: string
+ doc: 'arv:ROCmRequirement'
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ rocmDriverVersion:
+ type: string
+ doc: |
+ Compatible ROCm driver version, in X.Y format, e.g. "6.2".
+ rocmTarget:
+ type:
+ - 'string'
+ - 'string[]'
+ doc: |
+ Compatible GPU architecture/ROCm LLVM targets, e.g. "gfx1100".
+ rocmDeviceCountMin:
+ type: ['null', int, cwl:Expression]
+ default: 1
+ doc: |
+ Minimum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMax`. If neither are specified,
+ default 1.
+ rocmDeviceCountMax:
+ type: ['null', int, cwl:Expression]
+ doc: |
+ Maximum number of GPU devices to request. If not specified,
+ same as `rocmDeviceCountMin`.
+ rocmVram:
+ type: [long, cwl:Expression]
+ default: 1024
+ doc: |
+ Amount of VRAM to request, in mebibytes (2**20).
- name: UsePreemptible
type: record
@@ -454,3 +501,23 @@ $graph:
type: ['null', string, cwl:Expression]
doc: |
Custom name to use for the runner process
+
+
+- name: PreemptionBehavior
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ If `resubmitNonPreemptible` is true and a workflow step fails due
+ to the instance it is running on being preempted, re-submit the
+ container with the `preemptible` flag set to false.
+ fields:
+ - name: class
+ type: string
+ doc: "arv:PreemptionBehavior"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+
+ - name: resubmitNonPreemptible
+ type: boolean
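
Once parsed, the hint reaches the runner as a plain mapping. A sketch of the parsed form and the precedence it participates in (see may_resubmit_non_preemptible() later in this diff):

    # Parsed hint, as returned by get_requirement() in the runner:
    hint = {
        "class": "http://arvados.org/cwl#PreemptionBehavior",
        "resubmitNonPreemptible": True,
    }
    # Precedence: an explicit --disable-resubmit-non-preemptible always
    # wins; otherwise the hint decides; with neither present, failed
    # preemptible containers are not resubmitted.
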
diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index c3b914ba99..d01623ef7b 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -2,10 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-
import logging
import json
import os
@@ -24,8 +20,10 @@ from cwltool.errors import WorkflowException
from cwltool.process import UnsupportedRequirement, shortname
from cwltool.utils import aslist, adjustFileObjs, adjustDirObjs, visit_class
from cwltool.job import JobBase
+from cwltool.builder import substitute
import arvados.collection
+import arvados.util
import crunchstat_summary.summarizer
import crunchstat_summary.reader
@@ -44,10 +42,13 @@ metrics = logging.getLogger('arvados.cwl-runner.metrics')
def cleanup_name_for_collection(name):
return name.replace("/", " ")
+class OutputGlobError(RuntimeError):
+ pass
+
class ArvadosContainer(JobBase):
"""Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
- def __init__(self, runner, job_runtime,
+ def __init__(self, runner, job_runtime, globpatterns,
builder, # type: Builder
joborder, # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
make_path_mapper, # type: Callable[..., PathMapper]
@@ -61,6 +62,7 @@ class ArvadosContainer(JobBase):
self.running = False
self.uuid = None
self.attempt_count = 0
+ self.globpatterns = globpatterns
def update_pipeline_component(self, r):
pass
@@ -310,15 +312,42 @@ class ArvadosContainer(JobBase):
if storage_class_req and storage_class_req.get("intermediateStorageClass"):
container_request["output_storage_classes"] = aslist(storage_class_req["intermediateStorageClass"])
else:
- container_request["output_storage_classes"] = runtimeContext.intermediate_storage_classes.strip().split(",")
+ container_request["output_storage_classes"] = (
+ runtimeContext.intermediate_storage_classes
+ or list(arvados.util.iter_storage_classes(self.arvrunner.api.config()))
+ )
cuda_req, _ = self.get_requirement("http://commonwl.org/cwltool#CUDARequirement")
if cuda_req:
- runtime_constraints["cuda"] = {
- "device_count": resources.get("cudaDeviceCount", 1),
- "driver_version": cuda_req["cudaVersionMin"],
- "hardware_capability": aslist(cuda_req["cudaComputeCapability"])[0]
- }
+ if self.arvrunner.api._rootDesc["revision"] >= "20250128":
+ # Arvados 3.1+ API
+ runtime_constraints["gpu"] = {
+ "stack": "cuda",
+ "device_count": resources.get("cudaDeviceCount", 1),
+ "driver_version": cuda_req["cudaVersionMin"],
+ "hardware_target": aslist(cuda_req["cudaComputeCapability"]),
+ "vram": self.builder.do_eval(cuda_req.get("cudaVram", 0))*1024*1024,
+ }
+ else:
+ # Legacy API
+ runtime_constraints["cuda"] = {
+ "device_count": resources.get("cudaDeviceCount", 1),
+ "driver_version": cuda_req["cudaVersionMin"],
+ "hardware_capability": aslist(cuda_req["cudaComputeCapability"])[0]
+ }
+
+ rocm_req, _ = self.get_requirement("http://arvados.org/cwl#ROCmRequirement")
+ if rocm_req:
+ if self.arvrunner.api._rootDesc["revision"] >= "20250128":
+ runtime_constraints["gpu"] = {
+ "stack": "rocm",
+ "device_count": self.builder.do_eval(rocm_req.get("rocmDeviceCountMin", None)) or self.builder.do_eval(rocm_req.get("rocmDeviceCountMax", 1)),
+ "driver_version": rocm_req["rocmDriverVersion"],
+ "hardware_target": aslist(rocm_req["rocmTarget"]),
+ "vram": self.builder.do_eval(rocm_req["rocmVram"])*1024*1024,
+ }
+ else:
+ raise WorkflowException("Arvados API server does not support ROCm (requires Arvados 3.1+)")
if runtimeContext.enable_preemptible is False:
scheduling_parameters["preemptible"] = False
@@ -331,6 +360,11 @@ class ArvadosContainer(JobBase):
elif runtimeContext.enable_preemptible is None:
pass
+ if scheduling_parameters.get("preemptible") and self.may_resubmit_non_preemptible():
+ # Only make one attempt, because if it is preempted we
+ # will resubmit and ask for a non-preemptible instance.
+ container_request["container_count_max"] = 1
+
if self.timelimit is not None and self.timelimit > 0:
scheduling_parameters["max_run_time"] = self.timelimit
@@ -370,6 +404,145 @@ class ArvadosContainer(JobBase):
logger.warning("%s API revision is %s, revision %s is required to support setting properties on output collections.",
self.arvrunner.label(self), self.arvrunner.api._rootDesc["revision"], "20220510")
+ if self.arvrunner.api._rootDesc["revision"] >= "20240502" and self.globpatterns:
+ output_glob = []
+ try:
+ for gp in self.globpatterns:
+ pattern = ""
+ gb = None
+ if isinstance(gp, str):
+ try:
+ gb = self.builder.do_eval(gp)
+ except:
+ raise OutputGlobError("Expression evaluation failed")
+ elif isinstance(gp, dict):
+ # dict of two keys, 'glob' and 'pattern' which
+ # means we should try to predict the names of
+ # secondary files to capture.
+ try:
+ gb = self.builder.do_eval(gp["glob"])
+ except:
+ raise OutputGlobError("Expression evaluation failed")
+ pattern = gp["pattern"]
+
+ if "${" in pattern or "$(" in pattern:
+ # pattern is an expression, need to evaluate
+ # it first.
+ if '*' in gb or "]" in gb:
+ # glob has wildcards, so we can't
+ # predict the secondary file name.
+ # Capture everything.
+ raise OutputGlobError("glob has wildcards, cannot predict secondary file name")
+
+                            # After evaluating 'glob' we have an
+                            # expected name we can provide to the
+ # expression.
+ nr, ne = os.path.splitext(gb)
+ try:
+ pattern = self.builder.do_eval(pattern, context={
+ "path": gb,
+ "basename": os.path.basename(gb),
+ "nameext": ne,
+ "nameroot": nr,
+ })
+ except:
+ raise OutputGlobError("Expression evaluation failed")
+ if isinstance(pattern, str):
+ # If we get a string back, that's the expected
+ # file name for the secondary file.
+ gb = pattern
+ pattern = ""
+ else:
+ # However, it is legal for this to return a
+ # file object or an array. In that case we'll
+ # just capture everything.
+ raise OutputGlobError("secondary file expression did not evaluate to a string")
+ else:
+ # Should never happen, globpatterns is
+ # constructed in arvtool from data that has
+ # already gone through schema validation, but
+ # still good to have a fallback.
+ raise TypeError("Expected glob pattern to be a str or dict, was %s" % gp)
+
+ if not gb:
+ continue
+
+ for gbeval in aslist(gb):
+ if gbeval.startswith(self.outdir+"/"):
+ gbeval = gbeval[len(self.outdir)+1:]
+ while gbeval.startswith("./"):
+ gbeval = gbeval[2:]
+
+ if pattern:
+ # pattern is not an expression or we would
+ # have handled this earlier, so it must be
+ # a simple substitution on the secondary
+ # file name.
+ #
+ # 'pattern' was assigned in the earlier code block
+ #
+ # if there's a wild card in the glob, figure
+ # out if there's enough text after it that the
+ # suffix substitution can be done correctly.
+ cutpos = max(gbeval.find("*"), gbeval.find("]"))
+ if cutpos > -1:
+ tail = gbeval[cutpos+1:]
+ if tail.count(".") < pattern.count("^"):
+ # the known suffix in the glob has
+ # fewer dotted extensions than the
+                                    # substitution pattern wants to remove,
+                                    # so we can't accurately predict the
+                                    # correct glob in advance.
+ gbeval = ""
+ if gbeval:
+ gbeval = substitute(gbeval, pattern)
+
+ if gbeval in (self.outdir, "", "."):
+ output_glob.append("**")
+ elif gbeval.endswith("/"):
+ output_glob.append(gbeval+"**")
+ else:
+ output_glob.append(gbeval)
+ output_glob.append(gbeval + "/**")
+
+ if "**" in output_glob:
+ # if it's going to match all, prefer not to provide it
+ # at all.
+ output_glob.clear()
+ except OutputGlobError as e:
+ logger.debug("Unable to set a more specific output_glob (this is not an error): %s", e.args[0], exc_info=e)
+ output_glob.clear()
+
+ if output_glob:
+ # Tools should either use cwl.output.json or
+ # outputBinding globs. However, one CWL conformance
+ # test has both, so we need to make sure we collect
+ # cwl.output.json in this case. That test uses
+                # cwl.output.json to return a string, but also uses
+ # outputBinding.
+ output_glob.append("cwl.output.json")
+
+ # It could happen that a tool creates cwl.output.json,
+ # references a file, but also uses a outputBinding
+ # glob that doesn't include the file being referenced.
+ #
+ # In this situation, output_glob will only match the
+ # pattern we know about. If cwl.output.json referred
+ # to other files in the output, those would be
+ # missing. We could upload the entire output, but we
+ # currently have no way of knowing at this point
+ # whether cwl.output.json will be used this way.
+ #
+ # Because this is a corner case, I'm inclined to leave
+ # this as a known issue for now. No conformance tests
+ # do this and I'd even be inclined to have it ruled
+ # incompatible in the CWL spec if it did come up.
+ # That said, in retrospect it would have been good to
+ # require CommandLineTool to declare when it expects
+ # cwl.output.json.
+
+ container_request["output_glob"] = output_glob
+
ram_multiplier = [1]
oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
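
The per-glob expansion above reduces to a small rule for the common cases; a sketch (secondary-file substitution and wildcard edge cases are handled by the full code above):

    def expand_glob(gbeval):
        # Mirrors the tail of the loop above, after outdir/"./" stripping.
        if gbeval in ("", "."):
            return ["**"]          # matches everything; the caller then
                                   # clears output_glob entirely
        if gbeval.endswith("/"):
            return [gbeval + "**"]
        return [gbeval, gbeval + "/**"]

    assert expand_glob("out/*.txt") == ["out/*.txt", "out/*.txt/**"]
    assert expand_glob("results/") == ["results/**"]
    assert expand_glob(".") == ["**"]
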
@@ -419,11 +592,26 @@ class ArvadosContainer(JobBase):
runtime_constraints["ram"] = ram * ram_multiplier[self.attempt_count]
container_request["state"] = "Committed"
- response = self.arvrunner.api.container_requests().update(
- uuid=self.uuid,
- body=container_request,
- **extra_submit_params
- ).execute(num_retries=self.arvrunner.num_retries)
+ try:
+ response = self.arvrunner.api.container_requests().update(
+ uuid=self.uuid,
+ body=container_request,
+ **extra_submit_params
+ ).execute(num_retries=self.arvrunner.num_retries)
+ except Exception as e:
+ # If the request was actually processed but we didn't
+ # receive a response, we'll re-try the request, but if
+ # the container went directly from "Committed" to
+ # "Final", the retry attempt will fail with a state
+ # change error. So if there's an error, double check
+ # to see if the container is in the expected state.
+ #
+ # See discussion on #22160
+ response = self.arvrunner.api.container_requests().get(
+ uuid=self.uuid
+ ).execute(num_retries=self.arvrunner.num_retries)
+ if response.get("state") not in ("Committed", "Final"):
+ raise
self.arvrunner.process_submitted(self)
self.attempt_count += 1
@@ -437,6 +625,30 @@ class ArvadosContainer(JobBase):
logger.debug("Container request was %s", container_request)
self.output_callback({}, "permanentFail")
+ def may_resubmit_non_preemptible(self):
+ if self.job_runtime.enable_resubmit_non_preemptible is False:
+ # explicitly disabled
+ return False
+
+ spot_instance_retry_req, _ = self.get_requirement("http://arvados.org/cwl#PreemptionBehavior")
+ if spot_instance_retry_req:
+ if spot_instance_retry_req["resubmitNonPreemptible"] is False:
+ # explicitly disabled by hint
+ return False
+ elif self.job_runtime.enable_resubmit_non_preemptible is None:
+ # default behavior is we don't retry
+ return False
+
+ # At this point, by process of elimination either
+ # resubmitNonPreemptible or enable_resubmit_non_preemptible
+        # must be True. Whether the container was actually preempted
+        # is checked separately in spot_instance_retry().
+
+ return True
+
+ def spot_instance_retry(self, record, container):
+ return self.may_resubmit_non_preemptible() and bool(container["runtime_status"].get("preemptionNotice"))
+
def out_of_memory_retry(self, record, container):
oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
if oom_retry_req is None:
@@ -468,10 +680,13 @@ class ArvadosContainer(JobBase):
outputs = {}
retried = False
rcode = None
+ do_retry = False
+
try:
container = self.arvrunner.api.containers().get(
uuid=record["container_uuid"]
).execute(num_retries=self.arvrunner.num_retries)
+
if container["state"] == "Complete":
rcode = container["exit_code"]
if self.successCodes and rcode in self.successCodes:
@@ -486,20 +701,40 @@ class ArvadosContainer(JobBase):
processStatus = "permanentFail"
if processStatus == "permanentFail" and self.attempt_count == 1 and self.out_of_memory_retry(record, container):
- logger.warning("%s Container failed with out of memory error, retrying with more RAM.",
+ logger.info("%s Container failed with out of memory error. Retrying container with more RAM.",
self.arvrunner.label(self))
- self.job_runtime.submit_request_uuid = None
- self.uuid = None
- self.run(None)
- retried = True
- return
+ self.job_runtime = self.job_runtime.copy()
+ do_retry = True
- if rcode == 137:
+ if rcode == 137 and not do_retry:
logger.warning("%s Container may have been killed for using too much RAM. Try resubmitting with a higher 'ramMin' or use the arv:OutOfMemoryRetry feature.",
self.arvrunner.label(self))
else:
processStatus = "permanentFail"
+ if processStatus == "permanentFail" and self.attempt_count == 1 and self.spot_instance_retry(record, container):
+                logger.info("%s Container failed because the preemptible instance it was running on was reclaimed. Retrying container on a non-preemptible instance.", self.arvrunner.label(self))
+ self.job_runtime = self.job_runtime.copy()
+ self.job_runtime.enable_preemptible = False
+ do_retry = True
+
+ if do_retry:
+ # Add a property indicating that this container was resubmitted.
+ updateproperties = record["properties"].copy()
+ olduuid = self.uuid
+ self.job_runtime.submit_request_uuid = None
+ self.uuid = None
+ self.run(None)
+ # this flag suppresses calling the output callback, we only want to set this
+ # when we're sure that the resubmission has happened without issue.
+ retried = True
+ # Add a property to the old container request indicating it
+ # was retried
+ updateproperties["arv:failed_container_resubmitted"] = self.uuid
+ self.arvrunner.api.container_requests().update(uuid=olduuid,
+ body={"properties": updateproperties}).execute()
+ return
+
logc = None
if record["log_uuid"]:
logc = arvados.collection.Collection(record["log_uuid"],
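
The arv:failed_container_resubmitted property set above makes retries discoverable after the fact. For example, resubmitted requests could be listed with a subproperty filter, as in this sketch (assumes the standard Python SDK and its "exists" filter operator):

    import arvados

    api = arvados.api("v1")
    resubmitted = api.container_requests().list(
        filters=[["properties.arv:failed_container_resubmitted", "exists", True]],
    ).execute()
    for cr in resubmitted["items"]:
        print(cr["uuid"], "->",
              cr["properties"]["arv:failed_container_resubmitted"])
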
@@ -593,6 +828,29 @@ class RunnerContainer(Runner):
}
self.job_order[param] = {"$include": mnt}
+ environment = {}
+
+ if self.arvrunner.botosession is not None and runtimeContext.defer_downloads and runtimeContext.aws_credential_capture:
+ # There are deferred downloads from S3. Save our credentials to secret
+ # storage
+ secret_mounts["/var/lib/cwl/.aws/config"] = {
+ "kind": "text",
+ "content": """[default]
+region = {}
+""".format(self.arvrunner.botosession.region_name)
+ }
+ environment["AWS_CONFIG_FILE"] = "/var/lib/cwl/.aws/config"
+
+ creds = self.arvrunner.botosession.get_credentials()
+ secret_mounts["/var/lib/cwl/.aws/credentials"] = {
+ "kind": "text",
+ "content": """[default]
+aws_access_key_id = {}
+aws_secret_access_key = {}
+""".format(creds.access_key, creds.secret_key)
+ }
+ environment["AWS_SHARED_CREDENTIALS_FILE"] = "/var/lib/cwl/.aws/credentials"
+
container_image = arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext)
workflow_runner_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
@@ -627,7 +885,8 @@ class RunnerContainer(Runner):
"API": True
},
"use_existing": self.reuse_runner,
- "properties": {}
+ "properties": {},
+ "environment": environment
}
if self.embedded_tool.tool.get("id", "").startswith("keep:"):
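
The credential capture above lands in the runner's container request as secret mounts plus matching environment variables, roughly as follows (illustrative values; the real region and keys come from the local boto3 session):

    secret_mounts = {
        "/var/lib/cwl/.aws/config": {
            "kind": "text",
            "content": "[default]\nregion = us-east-1\n",
        },
        "/var/lib/cwl/.aws/credentials": {
            "kind": "text",
            "content": ("[default]\n"
                        "aws_access_key_id = AKIA...EXAMPLE\n"
                        "aws_secret_access_key = (redacted)\n"),
        },
    }
    environment = {
        "AWS_CONFIG_FILE": "/var/lib/cwl/.aws/config",
        "AWS_SHARED_CREDENTIALS_FILE": "/var/lib/cwl/.aws/credentials",
    }
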
@@ -699,11 +958,11 @@ class RunnerContainer(Runner):
if runtimeContext.debug:
command.append("--debug")
- if runtimeContext.storage_classes != "default" and runtimeContext.storage_classes:
- command.append("--storage-classes=" + runtimeContext.storage_classes)
+ if runtimeContext.storage_classes:
+ command.append("--storage-classes=" + ",".join(runtimeContext.storage_classes))
- if runtimeContext.intermediate_storage_classes != "default" and runtimeContext.intermediate_storage_classes:
- command.append("--intermediate-storage-classes=" + runtimeContext.intermediate_storage_classes)
+ if runtimeContext.intermediate_storage_classes:
+ command.append("--intermediate-storage-classes=" + ",".join(runtimeContext.intermediate_storage_classes))
if runtimeContext.on_error:
command.append("--on-error=" + self.on_error)
@@ -727,7 +986,7 @@ class RunnerContainer(Runner):
command.append("--disable-preemptible")
if runtimeContext.varying_url_params:
- command.append("--varying-url-params="+runtimeContext.varying_url_params)
+ command.append("--varying-url-params=" + runtimeContext.varying_url_params)
if runtimeContext.prefer_cached_downloads:
command.append("--prefer-cached-downloads")
@@ -741,6 +1000,12 @@ class RunnerContainer(Runner):
if self.fast_parser:
command.append("--fast-parser")
+ if self.arvrunner.selected_credential is not None:
+ command.append("--use-credential="+self.arvrunner.selected_credential["uuid"])
+
+ if runtimeContext.s3_public_bucket is True:
+ command.append("--s3-public-bucket")
+
command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
container_req["command"] = command
diff --git a/sdk/cwl/arvados_cwl/arvdocker.py b/sdk/cwl/arvados_cwl/arvdocker.py
index f5e67a6649..ae5a434074 100644
--- a/sdk/cwl/arvados_cwl/arvdocker.py
+++ b/sdk/cwl/arvados_cwl/arvdocker.py
@@ -19,11 +19,10 @@ logger = logging.getLogger('arvados.cwl-runner')
def determine_image_id(dockerImageId):
for line in (
- subprocess.check_output( # nosec
- ["docker", "images", "--no-trunc", "--all"]
- )
- .decode("utf-8")
- .splitlines()
+ str(subprocess.check_output( # nosec
+ ["docker", "images", "--no-trunc", "--all"]
+ ), "utf-8")
+ .splitlines()
):
try:
match = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", line)
diff --git a/sdk/cwl/arvados_cwl/arvtool.py b/sdk/cwl/arvados_cwl/arvtool.py
index 86fecc0a1d..165467f414 100644
--- a/sdk/cwl/arvados_cwl/arvtool.py
+++ b/sdk/cwl/arvados_cwl/arvtool.py
@@ -11,6 +11,9 @@ from functools import partial
from schema_salad.sourceline import SourceLine
from cwltool.errors import WorkflowException
from arvados.util import portable_data_hash_pattern
+from cwltool.utils import aslist
+
+from typing import Sequence, Mapping
def validate_cluster_target(arvrunner, runtimeContext):
if (runtimeContext.submit_runner_cluster and
@@ -70,10 +73,35 @@ class ArvadosCommandTool(CommandLineTool):
"dockerPull": loadingContext.default_docker_image})
self.arvrunner = arvrunner
+ self.globpatterns = []
+ self._collect_globs(toolpath_object["outputs"])
+
+ def _collect_globs(self, inputschema):
+ if isinstance(inputschema, str):
+ return
+
+ if isinstance(inputschema, Sequence):
+ for i in inputschema:
+ self._collect_globs(i)
+
+ if isinstance(inputschema, Mapping):
+ if "type" in inputschema:
+ self._collect_globs(inputschema["type"])
+ if inputschema["type"] == "record":
+ for field in inputschema["fields"]:
+ self._collect_globs(field)
+
+ if "outputBinding" in inputschema and "glob" in inputschema["outputBinding"]:
+ for gb in aslist(inputschema["outputBinding"]["glob"]):
+ self.globpatterns.append(gb)
+ if "secondaryFiles" in inputschema:
+ for sf in aslist(inputschema["secondaryFiles"]):
+ for gb in aslist(inputschema["outputBinding"]["glob"]):
+ self.globpatterns.append({"pattern": sf["pattern"], "glob": gb})
def make_job_runner(self, runtimeContext):
if runtimeContext.work_api == "containers":
- return partial(ArvadosContainer, self.arvrunner, runtimeContext)
+ return partial(ArvadosContainer, self.arvrunner, runtimeContext, self.globpatterns)
else:
raise Exception("Unsupported work_api %s", runtimeContext.work_api)
diff --git a/sdk/cwl/arvados_cwl/arvworkflow.py b/sdk/cwl/arvados_cwl/arvworkflow.py
index c592b83dc7..4751e48c04 100644
--- a/sdk/cwl/arvados_cwl/arvworkflow.py
+++ b/sdk/cwl/arvados_cwl/arvworkflow.py
@@ -2,18 +2,15 @@
#
# SPDX-License-Identifier: Apache-2.0
-from past.builtins import basestring
-from future.utils import viewitems
-
import os
import json
import copy
import logging
import urllib
-from io import StringIO
import sys
import re
+from io import StringIO
from typing import (MutableSequence, MutableMapping)
from ruamel.yaml import YAML
@@ -320,7 +317,7 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid,
text = tool.doc_loader.fetch_text(w)
if isinstance(text, bytes):
- textIO = StringIO(text.decode('utf-8'))
+ textIO = StringIO(str(text, 'utf-8'))
else:
textIO = StringIO(text)
@@ -588,7 +585,7 @@ class ArvadosWorkflowStep(WorkflowStep):
runtimeContext = runtimeContext.copy()
runtimeContext.toplevel = True # Preserve behavior for #13365
- builder = make_builder({shortname(k): v for k,v in viewitems(joborder)}, self.hints, self.requirements,
+ builder = make_builder({shortname(k): v for k, v in joborder.items()}, self.hints, self.requirements,
runtimeContext, self.metadata)
runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
return super(ArvadosWorkflowStep, self).job(joborder, output_callback, runtimeContext)
@@ -655,7 +652,7 @@ class ArvadosWorkflow(Workflow):
dyn = False
for k in max_res_pars + sum_res_pars:
if k in req:
- if isinstance(req[k], basestring):
+ if isinstance(req[k], str):
if item["id"] == "#main":
# only the top-level requirements/hints may contain expressions
self.dynamic_resource_req.append(req)
diff --git a/sdk/cwl/arvados_cwl/context.py b/sdk/cwl/arvados_cwl/context.py
index 60ea9bdff5..8ad8c9a2f2 100644
--- a/sdk/cwl/arvados_cwl/context.py
+++ b/sdk/cwl/arvados_cwl/context.py
@@ -29,8 +29,8 @@ class ArvRuntimeContext(RuntimeContext):
self.submit_runner_image = None
self.wait = True
self.cwl_runner_job = None
- self.storage_classes = "default"
- self.intermediate_storage_classes = "default"
+ self.storage_classes = []
+ self.intermediate_storage_classes = []
self.current_container = None
self.http_timeout = 300
self.submit_runner_cluster = None
@@ -39,6 +39,7 @@ class ArvRuntimeContext(RuntimeContext):
self.collection_cache_size = 256
self.match_local_docker = False
self.enable_preemptible = None
+ self.enable_resubmit_non_preemptible = None
self.copy_deps = None
self.defer_downloads = False
self.varying_url_params = ""
@@ -48,6 +49,9 @@ class ArvRuntimeContext(RuntimeContext):
self.git_info = {}
self.enable_usage_report = None
self.usage_report_notes = []
+ self.aws_credential_capture = True
+ self.selected_credential = None
+ self.s3_public_bucket = False
super(ArvRuntimeContext, self).__init__(kwargs)
diff --git a/sdk/cwl/arvados_cwl/done.py b/sdk/cwl/arvados_cwl/done.py
index 5c12419765..98c9f3a5df 100644
--- a/sdk/cwl/arvados_cwl/done.py
+++ b/sdk/cwl/arvados_cwl/done.py
@@ -2,11 +2,10 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future.utils import viewvalues
-
import re
-from cwltool.errors import WorkflowException
+
from collections import deque
+from cwltool.errors import WorkflowException
def done(self, record, tmpdir, outdir, keepdir):
cols = [
diff --git a/sdk/cwl/arvados_cwl/executor.py b/sdk/cwl/arvados_cwl/executor.py
index 432b380aab..ecc7f8c25c 100644
--- a/sdk/cwl/arvados_cwl/executor.py
+++ b/sdk/cwl/arvados_cwl/executor.py
@@ -2,12 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import division
-from builtins import next
-from builtins import object
-from builtins import str
-from future.utils import viewvalues, viewitems
-
import argparse
import logging
import os
@@ -29,12 +23,13 @@ from schema_salad.ref_resolver import file_uri, uri_file_path
import arvados
import arvados.config
+import arvados.util
from arvados.keep import KeepClient
from arvados.errors import ApiError
import arvados_cwl.util
from .arvcontainer import RunnerContainer, cleanup_name_for_collection
-from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder, update_from_merged_map, print_keep_deps
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder, update_from_merged_map, print_keep_deps, ArvSecretStore
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
from .arvworkflow import ArvadosWorkflow, upload_workflow, make_workflow_record
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
@@ -147,6 +142,8 @@ class ArvCwlExecutor(object):
self.fast_submit = False
self.git_info = arvargs.git_info
self.debug = False
+ self.botosession = None
+ self.selected_credential = None
if keep_client is not None:
self.keep_client = keep_client
@@ -212,6 +209,7 @@ The 'jobs' API is no longer supported.
self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))
self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
collection_cache=self.collection_cache)
+ self.toplevel_runtimeContext.secret_store = ArvSecretStore()
self.defer_downloads = arvargs.submit and arvargs.defer_downloads
@@ -241,7 +239,7 @@ The 'jobs' API is no longer supported.
body={"state": state}).execute(num_retries=self.num_retries)
self.final_status = processStatus
self.final_output = out
- self.workflow_eval_lock.notifyAll()
+ self.workflow_eval_lock.notify_all()
def start_run(self, runnable, runtimeContext):
@@ -335,9 +333,17 @@ The 'jobs' API is no longer supported.
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
logger.info("%s %s is Running", self.label(j), uuid)
- elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
+ elif event["properties"]["new_attributes"]["state"] == "Final":
+ # underlying container is completed or cancelled
+ self.process_done(uuid, event["properties"]["new_attributes"])
+ elif (event["properties"]["new_attributes"]["state"] == "Committed" and
+ event["properties"]["new_attributes"]["priority"] == 0):
+ # cancelled before it got a chance to run, remains in
+            # committed state but isn't going to run, so treat it as
+ # cancelled.
self.process_done(uuid, event["properties"]["new_attributes"])
+
def label(self, obj):
return "[%s %s]" % (self.work_api[0:-1], obj.name)
@@ -371,7 +377,7 @@ The 'jobs' API is no longer supported.
try:
proc_states = table.list(filters=[["uuid", "in", page]], select=["uuid", "container_uuid", "state", "log_uuid",
"output_uuid", "modified_at", "properties",
- "runtime_constraints"]).execute(num_retries=self.num_retries)
+ "runtime_constraints", "priority"]).execute(num_retries=self.num_retries)
except Exception as e:
logger.warning("Temporary error checking states on API server: %s", e)
remain_wait = self.poll_interval
@@ -421,7 +427,7 @@ The 'jobs' API is no longer supported.
if obj.get("class") == "InplaceUpdateRequirement":
if obj["inplaceUpdate"] and parentfield == "requirements":
raise SourceLine(obj, "class", UnsupportedRequirement).makeError("InplaceUpdateRequirement not supported for keep collections.")
- for k,v in viewitems(obj):
+ for k,v in obj.items():
self.check_features(v, parentfield=k)
elif isinstance(obj, list):
for i,v in enumerate(obj):
@@ -432,11 +438,16 @@ The 'jobs' API is no longer supported.
outputObj = copy.deepcopy(outputObj)
files = []
- def capture(fileobj):
+ def captureFile(fileobj):
files.append(fileobj)
- adjustDirObjs(outputObj, capture)
- adjustFileObjs(outputObj, capture)
+ def captureDir(dirobj):
+ if dirobj["location"].startswith("keep:") and 'listing' in dirobj:
+ del dirobj['listing']
+ files.append(dirobj)
+
+ adjustDirObjs(outputObj, captureDir)
+ adjustFileObjs(outputObj, captureFile)
generatemapper = NoFollowPathMapper(files, "", "", separateDirs=False)
@@ -545,7 +556,12 @@ The 'jobs' API is no longer supported.
try:
filepath = uri_file_path(tool.tool["id"])
cwd = os.path.dirname(filepath)
- subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, check=True, capture_output=True, text=True)
+ subprocess.run(
+ ["git", "log", "--format=%H", "-n1", "HEAD"],
+ cwd=cwd,
+ check=True,
+ stdout=subprocess.DEVNULL,
+ )
in_a_git_repo = True
except Exception as e:
pass
@@ -553,25 +569,34 @@ The 'jobs' API is no longer supported.
gitproperties = {}
if in_a_git_repo:
- git_commit = subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
- git_date = subprocess.run(["git", "log", "--format=%cD", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
- git_committer = subprocess.run(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
- git_branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
- git_origin = subprocess.run(["git", "remote", "get-url", "origin"], cwd=cwd, capture_output=True, text=True).stdout
- git_status = subprocess.run(["git", "status", "--untracked-files=no", "--porcelain"], cwd=cwd, capture_output=True, text=True).stdout
- git_describe = subprocess.run(["git", "describe", "--always", "--tags"], cwd=cwd, capture_output=True, text=True).stdout
- git_toplevel = subprocess.run(["git", "rev-parse", "--show-toplevel"], cwd=cwd, capture_output=True, text=True).stdout
+ def git_output(cmd):
+ return subprocess.run(
+ cmd,
+ cwd=cwd,
+ stdout=subprocess.PIPE,
+ universal_newlines=True,
+ ).stdout.strip()
+ git_commit = git_output(["git", "log", "--format=%H", "-n1", "HEAD"])
+ git_date = git_output(["git", "log", "--format=%cD", "-n1", "HEAD"])
+ git_committer = git_output(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"])
+ git_branch = git_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
+ git_origin = git_output(["git", "remote", "get-url", "origin"])
+ git_status = git_output(["git", "status", "--untracked-files=no", "--porcelain"])
+ git_describe = git_output(["git", "describe", "--always", "--tags"])
+ git_toplevel = git_output(["git", "rev-parse", "--show-toplevel"])
git_path = filepath[len(git_toplevel):]
+ git_origin = arvados_cwl.util.sanitize_url(git_origin)
+
gitproperties = {
- "http://arvados.org/cwl#gitCommit": git_commit.strip(),
- "http://arvados.org/cwl#gitDate": git_date.strip(),
- "http://arvados.org/cwl#gitCommitter": git_committer.strip(),
- "http://arvados.org/cwl#gitBranch": git_branch.strip(),
- "http://arvados.org/cwl#gitOrigin": git_origin.strip(),
- "http://arvados.org/cwl#gitStatus": git_status.strip(),
- "http://arvados.org/cwl#gitDescribe": git_describe.strip(),
- "http://arvados.org/cwl#gitPath": git_path.strip(),
+ "http://arvados.org/cwl#gitCommit": git_commit,
+ "http://arvados.org/cwl#gitDate": git_date,
+ "http://arvados.org/cwl#gitCommitter": git_committer,
+ "http://arvados.org/cwl#gitBranch": git_branch,
+ "http://arvados.org/cwl#gitOrigin": git_origin,
+ "http://arvados.org/cwl#gitStatus": git_status,
+ "http://arvados.org/cwl#gitDescribe": git_describe,
+ "http://arvados.org/cwl#gitPath": git_path,
}
else:
for g in ("http://arvados.org/cwl#gitCommit",
@@ -593,6 +618,22 @@ The 'jobs' API is no longer supported.
cr["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in properties.items()})
self.api.container_requests().update(uuid=cr["uuid"], body={"container_request": {"properties": cr["properties"]}}).execute(num_retries=self.num_retries)
+ def get_credential(self, runtimeContext):
+ if runtimeContext.selected_credential is None:
+ return
+
+ for key in ("uuid", "name"):
+ result = self.api.credentials().list(filters=[[key, "=", runtimeContext.selected_credential]]).execute()
+ if len(result["items"]) == 1:
+ self.selected_credential = result["items"][0]
+ break
+
+ def get_credential_secret(self):
+ if self.selected_credential is None:
+ return
+ self.selected_credential.update(self.api.credentials().secret(uuid=self.selected_credential["uuid"]).execute())
+
+
def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
@@ -634,12 +675,6 @@ The 'jobs' API is no longer supported.
runtimeContext = runtimeContext.copy()
- default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
- if runtimeContext.storage_classes == "default":
- runtimeContext.storage_classes = default_storage_classes
- if runtimeContext.intermediate_storage_classes == "default":
- runtimeContext.intermediate_storage_classes = default_storage_classes
-
if not runtimeContext.name:
self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
if git_info.get("http://arvados.org/cwl#gitDescribe"):
@@ -668,6 +703,12 @@ The 'jobs' API is no longer supported.
self.runtime_status_update("activity", "data transfer")
+ current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+ self.get_credential(runtimeContext)
+ if current_container:
+ logger.info("Running inside container %s", current_container.get("uuid"))
+ self.get_credential_secret()
+
# Upload local file references in the job order.
with Perf(metrics, "upload_job_order"):
job_order, jobmapper = upload_job_order(self, "%s input" % runtimeContext.name,
@@ -679,10 +720,11 @@ The 'jobs' API is no longer supported.
# are going to wait for the result, and always_submit_runner
# is false, then we don't submit a runner process.
- submitting = (runtimeContext.submit and not
+ submitting = ((runtimeContext.submit and not
(updated_tool.tool["class"] == "CommandLineTool" and
runtimeContext.wait and
- not runtimeContext.always_submit_runner))
+ not runtimeContext.always_submit_runner)) or
+ runtimeContext.defer_downloads)
loadingContext = self.loadingContext.copy()
loadingContext.do_validate = False
@@ -841,9 +883,7 @@ The 'jobs' API is no longer supported.
self.runtime_status_update("activity", "workflow execution")
- current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current_container:
- logger.info("Running inside container %s", current_container.get("uuid"))
self.set_container_request_properties(current_container, git_info)
self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
@@ -897,9 +937,9 @@ The 'jobs' API is no longer supported.
raise
except:
if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
- logger.error("Interrupted, workflow will be cancelled")
+ logger.error("Interrupted, workflow will be cancelled", exc_info=self.debug)
elif isinstance(sys.exc_info()[1], WorkflowException):
- logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
+ logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=self.debug)
else:
logger.exception("Workflow execution failed")
@@ -947,7 +987,10 @@ The 'jobs' API is no longer supported.
if storage_class_req and storage_class_req.get("finalStorageClass"):
storage_classes = aslist(storage_class_req["finalStorageClass"])
else:
- storage_classes = runtimeContext.storage_classes.strip().split(",")
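+            # Use storage classes requested on the command line when given,
+            # otherwise fall back to the cluster's configured storage classes.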
+ storage_classes = (
+ runtimeContext.storage_classes
+ or list(arvados.util.iter_storage_classes(self.api.config()))
+ )
output_properties = {}
output_properties_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
@@ -969,3 +1012,7 @@ The 'jobs' API is no longer supported.
self.trash_intermediate_output()
return (self.final_output, self.final_status)
+
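+# Placeholder: looks up the cwltool Secrets requirement but does not act
+# on it yet.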
+def blank_secrets(job_order_object, process):
+ secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
+ pass
diff --git a/sdk/cwl/arvados_cwl/fsaccess.py b/sdk/cwl/arvados_cwl/fsaccess.py
index a5e9db0cfe..dc8a3ec91f 100644
--- a/sdk/cwl/arvados_cwl/fsaccess.py
+++ b/sdk/cwl/arvados_cwl/fsaccess.py
@@ -2,12 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from builtins import object
-from builtins import str
-from future.utils import viewvalues
-
import fnmatch
import os
import errno
@@ -296,7 +290,7 @@ class CollectionFetcher(DefaultFetcher):
return super(CollectionFetcher, self).urljoin(base_url, url)
- schemes = [u"file", u"http", u"https", u"mailto", u"keep", u"arvwf"]
+ schemes = ["file", "http", "https", "mailto", "keep", "arvwf", "s3"]
def supported_schemes(self): # type: () -> List[Text]
return self.schemes
@@ -314,7 +308,7 @@ def collectionResolver(api_client, document_loader, uri, num_retries=4):
if pipeline_template_uuid_pattern.match(uri):
pt = api_client.pipeline_templates().get(uuid=uri).execute(num_retries=num_retries)
- return u"keep:" + viewvalues(pt["components"])[0]["script_parameters"]["cwl:tool"]
+ return u"keep:" + next(pt["components"].values())["script_parameters"]["cwl:tool"]
p = uri.split("/")
if arvados.util.keep_locator_pattern.match(p[0]):
@@ -326,3 +320,10 @@ def collectionResolver(api_client, document_loader, uri, num_retries=4):
uri[len(p[0]):])
return cwltool.resolver.tool_resolver(document_loader, uri)
+
+# This is published as an entry point and picked up by cwltest so that
+# it uses CollectionFsAccess from Arvados instead of the standard
+# FsAccess that only works for the local file system.
+def get_fsaccess():
+ api_client = arvados.api('v1')
+ return CollectionFsAccess("", CollectionCache(api_client, api_client.keep, 3))
diff --git a/sdk/cwl/arvados_cwl/pathmapper.py b/sdk/cwl/arvados_cwl/pathmapper.py
index 448facf776..8def4881d1 100644
--- a/sdk/cwl/arvados_cwl/pathmapper.py
+++ b/sdk/cwl/arvados_cwl/pathmapper.py
@@ -2,31 +2,24 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from past.builtins import basestring
-from future.utils import viewitems
-
import re
import logging
import uuid
import os
+import datetime
import urllib.request, urllib.parse, urllib.error
import arvados_cwl.util
import arvados.commands.run
import arvados.collection
-from schema_salad.sourceline import SourceLine
-
from arvados.errors import ApiError
+from arvados._internal.http_to_keep import http_to_keep
from cwltool.pathmapper import PathMapper, MapperEnt
from cwltool.utils import adjustFileObjs, adjustDirObjs
from cwltool.stdfsaccess import abspath
from cwltool.workflow import WorkflowException
-
-from arvados.http_to_keep import http_to_keep
+from schema_salad.sourceline import SourceLine
logger = logging.getLogger('arvados.cwl-runner')
@@ -48,6 +41,36 @@ collection_pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+$')
collection_pdh_pattern = re.compile(r'^keep:([0-9a-f]{32}\+\d+)(/.*)?')
collection_uuid_pattern = re.compile(r'^keep:([a-z0-9]{5}-4zz18-[a-z0-9]{15})(/.*)?$')
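+# Find a registered AWS credential for the given S3 URL: prefer a
+# non-expired aws_access_key credential scoped to the bucket, fall back
+# to a single non-expired unscoped one, and raise if the result is
+# ambiguous or empty.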
+def resolve_aws_key(apiclient, s3url):
+ if "credentials" not in apiclient._rootDesc["resources"]:
+ raise WorkflowException("Arvados instance does not support the external credentials API. Use --enable-aws-credential-capture to use locally-defined credentials.")
+
+ parsed = urllib.parse.urlparse(s3url)
+ bucket = "s3://%s" % parsed.netloc
+ expires_at = (datetime.datetime.now(datetime.UTC) + datetime.timedelta(minutes=5)).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ results = apiclient.credentials().list(filters=[["credential_class", "=", "aws_access_key"],
+ ["scopes", "contains", bucket],
+ ["expires_at", ">", expires_at]]).execute()
+ if len(results["items"]) > 1:
+ raise WorkflowException("Multiple credentials found for bucket '%s' in Arvados, use --use-credential to specify which one to use." % bucket)
+
+ if len(results["items"]) == 1:
+ return results["items"][0]
+
+ results = apiclient.credentials().list(filters=[["credential_class", "=", "aws_access_key"],
+ ["scopes", "=", []],
+ ["expires_at", ">", expires_at]]).execute()
+
+ if len(results["items"]) > 1:
+ raise WorkflowException("Multiple AWS credentials found in Arvados, provide --use-credential to specify which one to use")
+
+ if len(results["items"]) == 1:
+ return results["items"][0]
+
+ raise WorkflowException("No AWS credentials found, must register AWS credentials with Arvados or use --enable-aws-credential-capture to use locally-defined credentials.")
+
+
class ArvPathMapper(PathMapper):
"""Convert container-local paths to and from Keep collection ids."""
@@ -72,7 +95,7 @@ class ArvPathMapper(PathMapper):
debug = logger.isEnabledFor(logging.DEBUG)
- if isinstance(src, basestring) and src.startswith("keep:"):
+ if isinstance(src, str) and src.startswith("keep:"):
if collection_pdh_pattern.match(src):
self._pathmap[src] = MapperEnt(src, self.collection_pattern % urllib.parse.unquote(src[5:]), srcobj["class"], True)
@@ -82,6 +105,10 @@ class ArvPathMapper(PathMapper):
with SourceLine(srcobj, "location", WorkflowException, debug):
raise WorkflowException("Invalid keep reference '%s'" % src)
+ # Note: fsaccess->CollectionFetcher and
+ # runner->upload_dependencies->collect_uploads have lists of
+ # supported URL schemes that has to be updated when new
+ # schemes are added.
if src not in self._pathmap:
if src.startswith("file:"):
# Local FS ref, may need to be uploaded or may be on keep
@@ -110,13 +137,61 @@ class ArvPathMapper(PathMapper):
self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
else:
results = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src,
- varying_url_params=self.arvrunner.toplevel_runtimeContext.varying_url_params,
- prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads)
+ varying_url_params=self.arvrunner.toplevel_runtimeContext.varying_url_params,
+ prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads)
keepref = "keep:%s/%s" % (results[0], results[1])
logger.info("%s is %s", src, keepref)
self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
except Exception as e:
logger.warning("Download error: %s", e)
+ elif src.startswith("s3:"):
+ try:
+ # Using inline imports here instead of at the top
+ # of the file to defer importing boto3 until we
+ # actually need it, because if the user isn't
+ # using s3 import there's zero reason to have the
+ # module loaded at all.
+ if self.arvrunner.botosession is None and (self.arvrunner.defer_downloads is False or self.arvrunner.toplevel_runtimeContext.aws_credential_capture):
+ # Create a boto session, which we will either
+ # use to download from S3 now, or to get the
+ # credentials that will be passed to the
+ # workflow runner container later.
+ import boto3.session
+ if self.arvrunner.selected_credential is not None:
+ # Fetch the secret and create the boto session.
+ self.arvrunner.botosession = boto3.session.Session(aws_access_key_id=self.arvrunner.selected_credential["external_id"],
+ aws_secret_access_key=self.arvrunner.selected_credential["secret"])
+ logger.info("Using Arvados credential %s (%s)", self.arvrunner.selected_credential["name"], self.arvrunner.selected_credential["uuid"])
+ else:
+ self.arvrunner.botosession = boto3.session.Session()
+ if not self.arvrunner.botosession.get_credentials() and not self.arvrunner.toplevel_runtimeContext.s3_public_bucket:
+ raise WorkflowException("boto3 did not find any local AWS credentials to use to download from S3. If you want to use credentials registered with Arvados, use --defer-downloads. If the bucket is public, use --s3-public-bucket.")
+ if self.arvrunner.botosession.get_credentials():
+ logger.info("S3 downloads will use AWS access key id %s", self.arvrunner.botosession.get_credentials().access_key)
+ if self.arvrunner.defer_downloads:
+ # passthrough, we'll download it later.
+ self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
+ if (self.arvrunner.selected_credential is None and
+ self.arvrunner.botosession is None and
+ not self.arvrunner.toplevel_runtimeContext.s3_public_bucket):
+ self.arvrunner.selected_credential = resolve_aws_key(self.arvrunner.api, src)
+ logger.info("S3 downloads will use access key id %s which is Arvados credential '%s' (%s)",
+ self.arvrunner.selected_credential['external_id'],
+ self.arvrunner.selected_credential['name'],
+ self.arvrunner.selected_credential['uuid'])
+ else:
+ from arvados._internal.s3_to_keep import s3_to_keep
+ results = s3_to_keep(self.arvrunner.api,
+ self.arvrunner.botosession,
+ self.arvrunner.project_uuid,
+ src,
+ prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads,
+ unsigned_requests=self.arvrunner.toplevel_runtimeContext.s3_public_bucket)
+ keepref = "keep:%s/%s" % (results[0], results[1])
+ logger.info("%s is %s", src, keepref)
+ self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
+ except Exception as e:
+ logger.warning("Download error: %s", e, exc_info=debug)
else:
self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
@@ -163,7 +238,7 @@ class ArvPathMapper(PathMapper):
if loc.startswith("_:"):
return True
- if self.arvrunner.defer_downloads and (loc.startswith("http:") or loc.startswith("https:")):
+ if self.arvrunner.defer_downloads and (loc.startswith("http:") or loc.startswith("https:") or loc.startswith("s3:")):
return False
i = loc.rfind("/")
@@ -346,7 +421,7 @@ class StagingPathMapper(PathMapper):
# Overridden to maintain the use case of mapping by source (identifier) to
    # target regardless of how the map is structured internally.
def getMapperEnt(src):
- for k,v in viewitems(self._pathmap):
+ for k,v in self._pathmap.items():
if (v.type != "CreateFile" and v.resolved == src) or (v.type == "CreateFile" and k == src):
return v
@@ -365,7 +440,7 @@ class VwdPathMapper(StagingPathMapper):
# with any secondary files.
self.visitlisting(referenced_files, self.stagedir, basedir)
- for path, (ab, tgt, type, staged) in viewitems(self._pathmap):
+ for path, (ab, tgt, type, staged) in self._pathmap.items():
if type in ("File", "Directory") and ab.startswith("keep:"):
self._pathmap[path] = MapperEnt("$(task.keep)/%s" % ab[5:], tgt, type, staged)
diff --git a/sdk/cwl/arvados_cwl/perf.py b/sdk/cwl/arvados_cwl/perf.py
index cc3ea969df..39f475fe8d 100644
--- a/sdk/cwl/arvados_cwl/perf.py
+++ b/sdk/cwl/arvados_cwl/perf.py
@@ -2,8 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import object
-
import time
import uuid
diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
index 437aa39eb8..377741f921 100644
--- a/sdk/cwl/arvados_cwl/runner.py
+++ b/sdk/cwl/arvados_cwl/runner.py
@@ -2,11 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from future.utils import viewvalues, viewitems
-from past.builtins import basestring
-
import os
import sys
import re
@@ -36,11 +31,6 @@ from typing import (
Union,
cast,
)
-from cwltool.utils import (
- CWLObjectType,
- CWLOutputAtomType,
- CWLOutputType,
-)
import subprocess
@@ -58,6 +48,7 @@ from cwltool.update import INTERNAL_VERSION
from cwltool.builder import Builder
import schema_salad.validate as validate
import schema_salad.ref_resolver
+from cwltool.secrets import SecretStore
import arvados.collection
import arvados.util
@@ -72,6 +63,7 @@ from . import done
from . context import ArvRuntimeContext
from .perf import Perf
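+# Minimal replacement for the basestring type formerly provided by the
+# 'future' package.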
+basestring = (bytes, str)
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
@@ -103,7 +95,7 @@ def find_defaults(d, op):
if "default" in d:
op(d)
else:
- for i in viewvalues(d):
+ for i in d.values():
find_defaults(i, op)
def make_builder(joborder, hints, requirements, runtimeContext, metadata):
@@ -351,7 +343,7 @@ def upload_dependencies(arvrunner, name, document_loader,
sp = loc.split(":")
if len(sp) < 1:
return
- if sp[0] in ("file", "http", "https"):
+ if sp[0] in ("file", "http", "https", "s3"):
# Record local files than need to be uploaded,
# don't include file literals, keep references, etc.
sc.append(obj)
@@ -567,7 +559,7 @@ def packed_workflow(arvrunner, tool, merged_map, runtimeContext, git_info):
rewrite_out=rewrites,
loader=tool.doc_loader)
- rewrite_to_orig = {v: k for k,v in viewitems(rewrites)}
+ rewrite_to_orig = {v: k for k,v in rewrites.items()}
def visit(v, cur_id):
if isinstance(v, dict):
@@ -832,6 +824,7 @@ class Runner(Process):
super(Runner, self).__init__(tool.tool, loadingContext)
+ # This is called "arvrunner" but it's actually ArvCwlExecutor
self.arvrunner = runner
self.embedded_tool = tool
self.job_order = None
@@ -933,7 +926,7 @@ class Runner(Process):
if "cwl.output.json" in outc:
with outc.open("cwl.output.json", "rb") as f:
if f.size() > 0:
- outputs = json.loads(f.read().decode())
+ outputs = json.loads(str(f.read(), 'utf-8'))
def keepify(fileobj):
path = fileobj["location"]
if not path.startswith("keep:"):
@@ -984,3 +977,9 @@ def print_keep_deps(arvRunner, runtimeContext, merged_map, tool):
json.dump(sorted(references), arvRunner.stdout)
print(file=arvRunner.stdout)
+
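+# SecretStore that ignores None values, so optional inputs that were not
+# provided are never registered as secrets.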
+class ArvSecretStore(SecretStore):
+ def add(self, value):
+ if value is None:
+ return None
+ return super().add(value)
diff --git a/sdk/cwl/arvados_cwl/util.py b/sdk/cwl/arvados_cwl/util.py
index 299f854ec2..21b7909f20 100644
--- a/sdk/cwl/arvados_cwl/util.py
+++ b/sdk/cwl/arvados_cwl/util.py
@@ -3,24 +3,28 @@
# SPDX-License-Identifier: Apache-2.0
import datetime
+import urllib.parse
+
from arvados.errors import ApiError
collectionUUID = "http://arvados.org/cwl#collectionUUID"
+
def get_intermediate_collection_info(workflow_step_name, current_container, intermediate_output_ttl):
- if workflow_step_name:
- name = "Intermediate collection for step %s" % (workflow_step_name)
- else:
- name = "Intermediate collection"
- trash_time = None
- if intermediate_output_ttl > 0:
- trash_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=intermediate_output_ttl)
- container_uuid = None
- props = {"type": "intermediate"}
- if current_container:
- props["container_uuid"] = current_container['uuid']
-
- return {"name" : name, "trash_at" : trash_time, "properties" : props}
+ if workflow_step_name:
+ name = "Intermediate collection for step %s" % (workflow_step_name)
+ else:
+ name = "Intermediate collection"
+ trash_time = None
+ if intermediate_output_ttl > 0:
+ trash_time = datetime.datetime.now(datetime.UTC) + datetime.timedelta(seconds=intermediate_output_ttl)
+ container_uuid = None
+ props = {"type": "intermediate"}
+ if current_container:
+ props["container_uuid"] = current_container['uuid']
+
+ return {"name" : name, "trash_at" : trash_time, "properties" : props}
+
def get_current_container(api, num_retries=0, logger=None):
current_container = None
@@ -49,3 +53,14 @@ def common_prefix(firstfile, all_files):
if not any(common_parts):
break
return '/'.join(common_parts)
+
+
+def sanitize_url(url):
+ """Remove username/password from http URL."""
+
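+    # e.g. "https://user:pw@example.com:8080/x" -> "https://example.com:8080/x"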
+ parts = urllib.parse.urlparse(url)
+ if parts.port is None:
+ netloc = parts.hostname
+ else:
+ netloc = f'{parts.hostname}:{parts.port}'
+ return urllib.parse.urlunparse(parts._replace(netloc=netloc))
diff --git a/sdk/cwl/arvados_version.py b/sdk/cwl/arvados_version.py
index 794b6afe42..cafc7391b4 100644
--- a/sdk/cwl/arvados_version.py
+++ b/sdk/cwl/arvados_version.py
@@ -26,6 +26,7 @@ PACKAGE_DEPENDENCY_MAP = {
'arvados-user-activity': ['arvados-python-client'],
'arvados_fuse': ['arvados-python-client'],
'crunchstat_summary': ['arvados-python-client'],
+ 'arvados_cluster_activity': ['arvados-python-client'],
}
PACKAGE_MODULE_MAP = {
'arvados-cwl-runner': 'arvados_cwl',
@@ -34,6 +35,7 @@ PACKAGE_MODULE_MAP = {
'arvados-user-activity': 'arvados_user_activity',
'arvados_fuse': 'arvados_fuse',
'crunchstat_summary': 'crunchstat_summary',
+ 'arvados_cluster_activity': 'arvados_cluster_activity',
}
PACKAGE_SRCPATH_MAP = {
'arvados-cwl-runner': Path('sdk', 'cwl'),
@@ -42,6 +44,7 @@ PACKAGE_SRCPATH_MAP = {
'arvados-user-activity': Path('tools', 'user-activity'),
'arvados_fuse': Path('services', 'fuse'),
'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+ 'arvados_cluster_activity': Path('tools', 'cluster-activity'),
}
ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -72,14 +75,6 @@ if REPO_PATH is None:
if (SETUP_DIR / mod_name).is_dir()
)
-def short_tests_only(arglist=sys.argv):
- try:
- arglist.remove('--short-tests-only')
- except ValueError:
- return False
- else:
- return True
-
def git_log_output(path, *args):
return subprocess.check_output(
['git', '-C', str(REPO_PATH),
@@ -120,7 +115,7 @@ def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
return read_version(setup_dir, module)
else:
version = git_version_at_commit()
- version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ version = version.replace("~dev", ".dev").replace("~rc", "rc").lstrip("development-")
save_version(setup_dir, module, version)
return version
diff --git a/sdk/cwl/fpm-info.sh b/sdk/cwl/fpm-info.sh
index 5c17a2fd14..56c2b6fa4e 100644
--- a/sdk/cwl/fpm-info.sh
+++ b/sdk/cwl/fpm-info.sh
@@ -6,7 +6,7 @@ fpm_depends+=(nodejs)
case "$TARGET" in
debian* | ubuntu*)
- fpm_depends+=(libcurl3-gnutls python3-distutils)
+ fpm_depends+=(libcurl4)
;;
esac
diff --git a/sdk/cwl/pytest.ini b/sdk/cwl/pytest.ini
new file mode 120000
index 0000000000..05a82dbfef
--- /dev/null
+++ b/sdk/cwl/pytest.ini
@@ -0,0 +1 @@
+../../sdk/python/pytest.ini
\ No newline at end of file
diff --git a/sdk/cwl/setup.py b/sdk/cwl/setup.py
index 551bd964b1..b7da812d2c 100644
--- a/sdk/cwl/setup.py
+++ b/sdk/cwl/setup.py
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import os
import sys
@@ -24,18 +23,14 @@ setup(name='arvados-cwl-runner',
license='Apache 2.0',
packages=find_packages(),
package_data={'arvados_cwl': ['arv-cwl-schema-v1.0.yml', 'arv-cwl-schema-v1.1.yml', 'arv-cwl-schema-v1.2.yml']},
- entry_points={"console_scripts": ["cwl-runner=arvados_cwl:main", "arvados-cwl-runner=arvados_cwl:main"]},
- # Note that arvados/build/run-build-packages.sh looks at this
- # file to determine what version of cwltool and schema-salad to
- # build.
+ entry_points={"console_scripts": ["cwl-runner=arvados_cwl:main",
+ "arvados-cwl-runner=arvados_cwl:main"],
+ "cwltest.fsaccess": ["fsaccess=arvados_cwl.fsaccess:get_fsaccess"]},
install_requires=[
*arvados_version.iter_dependencies(version),
- 'cwltool==3.1.20230601100705',
- 'schema-salad==8.4.20230601112322',
+ 'cwltool==3.1.20240508115724',
+ 'schema-salad==8.5.20240503091721',
'ciso8601 >= 2.0.0',
- 'networkx < 2.6',
- 'msgpack==1.0.3',
- 'importlib-metadata<5',
'setuptools>=40.3.0',
],
data_files=[
@@ -45,9 +40,9 @@ setup(name='arvados-cwl-runner',
classifiers=[
'Programming Language :: Python :: 3',
],
- test_suite='tests',
- tests_require=[
- 'mock>=1.0,<4',
+    tests_require=[
+ 'parameterized'
],
+ test_suite='tests',
zip_safe=True,
)
diff --git a/sdk/cwl/test_with_arvbox.sh b/sdk/cwl/test_with_arvbox.sh
index 51d64b3f84..f61b76542b 100755
--- a/sdk/cwl/test_with_arvbox.sh
+++ b/sdk/cwl/test_with_arvbox.sh
@@ -89,28 +89,32 @@ fi
arvbox start $config $tag
-# Copy the integration test suite from our local arvados clone instead
-# of using the one inside the container, so we can make changes to the
-# integration tests without necessarily having to rebuilding the
-# container image.
-docker cp -L $cwldir/tests $ARVBOX_CONTAINER:/usr/src/arvados/sdk/cwl
+githead=$(git rev-parse HEAD)
arvbox pipe <<EOF
pip install 'cwltest>=2.5.20241122133319,<3'
mkdir -p /tmp/cwltest
cd /tmp/cwltest
diff --git a/sdk/cwl/tests/22466-output-glob-expressions-secondaryfile.cwl b/sdk/cwl/tests/22466-output-glob-expressions-secondaryfile.cwl
new file mode 100644
index 0000000000..80649a3d65
--- /dev/null
+++ b/sdk/cwl/tests/22466-output-glob-expressions-secondaryfile.cwl
@@ -0,0 +1,54 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+label: Output glob test for bug 22466
+
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+
+requirements:
+- class: ShellCommandRequirement
+- class: InitialWorkDirRequirement
+ listing:
+ - $(inputs.input_bam)
+- class: InlineJavascriptRequirement
+
+inputs:
+- id: input_bam
+ label: Input bam
+ type: File
+- id: output_bam_name
+ label: Output BAM file name
+ type: string?
+ default: deduped
+- id: sample_id
+ label: Sample ID
+ type: string
+
+outputs:
+- id: metrics_file
+ label: Metrics file
+ doc: File to which the duplication metrics will be written.
+ type: File?
+ outputBinding:
+ glob: '*.txt'
+- id: deduped_bam
+ label: Deduped BAM
+ doc: The output file to which marked records will be written.
+ type: File?
+ secondaryFiles:
+ - pattern: ^.bai
+ required: false
+ - pattern: .bai
+ required: false
+ outputBinding:
+ glob: |-
+ ${
+ var ext = inputs.input_bam.nameext.slice(1)
+ return ["*", inputs.output_bam_name, ext].join(".")
+ }
+
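+# touch creates placeholder output files for the glob expressions above
+# to match.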
+arguments: [touch, fake.deduped.bam, fake.deduped.bai, metrics.txt]
diff --git a/tools/compute-images/.licenseignore b/sdk/cwl/tests/22466/fake.bam
similarity index 83%
rename from tools/compute-images/.licenseignore
rename to sdk/cwl/tests/22466/fake.bam
index 6288dbbc87..18faca27aa 100644
--- a/tools/compute-images/.licenseignore
+++ b/sdk/cwl/tests/22466/fake.bam
@@ -1,5 +1,3 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-*.json
-1078ECD7.asc
diff --git a/docker/migrate-docker19/build.sh b/sdk/cwl/tests/22466/input.yml
old mode 100755
new mode 100644
similarity index 61%
rename from docker/migrate-docker19/build.sh
rename to sdk/cwl/tests/22466/input.yml
index 5d76ec7faa..1f65e97994
--- a/docker/migrate-docker19/build.sh
+++ b/sdk/cwl/tests/22466/input.yml
@@ -1,6 +1,8 @@
-#!/bin/sh
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-exec docker build -t arvados/migrate-docker19:1.0 .
+input_bam:
+ class: File
+ location: fake.bam
+sample_id: fake
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
index cb4a151f0e..91bac80046 100644
--- a/sdk/cwl/tests/arvados-tests.yml
+++ b/sdk/cwl/tests/arvados-tests.yml
@@ -505,3 +505,89 @@
out: out
tool: wf/runseparate-wf.cwl
doc: "test arv:SeparateRunner"
+
+- job: null
+ output: {
+ "val": {
+ "basename": "testdir",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "a",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "testdir/a",
+ "size": 0
+ },
+ {
+ "basename": "b",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "testdir/b",
+ "size": 0
+ },
+ {
+ "basename": "c",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "d",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "testdir/c/d",
+ "size": 0
+ }
+ ],
+ "location": "testdir/c"
+ }
+ ],
+ "location": "testdir"
+ },
+ "val2": [
+ {
+ "basename": "a",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "a",
+ "size": 0
+ },
+ {
+ "basename": "b",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "b",
+ "size": 0
+ }
+ ]
+ }
+ tool: wf/output_dir_wf.cwl
+ doc: "test same file appearing in output of both Directory and array"
+
+- job: 22466/input.yml
+ output: {
+ "metrics_file": {
+ "location": "metrics.txt",
+ "basename": "metrics.txt",
+ "class": "File",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "size": 0
+ },
+ "deduped_bam": {
+ "location": "fake.deduped.bam",
+ "basename": "fake.deduped.bam",
+ "class": "File",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "size": 0,
+ "secondaryFiles": [
+ {
+ "basename": "fake.deduped.bai",
+ "location": "fake.deduped.bai",
+ "class": "File",
+ "size": 0,
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
+ }
+ ]
+ }
+ }
+ tool: 22466-output-glob-expressions-secondaryfile.cwl
+ doc: "test bug 22466"
diff --git a/sdk/cwl/tests/federation/framework/check_exist.py b/sdk/cwl/tests/federation/framework/check_exist.py
index 1458772a3f..b3338939ed 100644
--- a/sdk/cwl/tests/federation/framework/check_exist.py
+++ b/sdk/cwl/tests/federation/framework/check_exist.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
import arvados
import json
diff --git a/sdk/cwl/tests/federation/framework/prepare.py b/sdk/cwl/tests/federation/framework/prepare.py
index 40bb843b29..6fe90813e7 100644
--- a/sdk/cwl/tests/federation/framework/prepare.py
+++ b/sdk/cwl/tests/federation/framework/prepare.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
import arvados
import json
diff --git a/sdk/cwl/tests/hw.py b/sdk/cwl/tests/hw.py
index e45bd72642..43c20dc03d 100644
--- a/sdk/cwl/tests/hw.py
+++ b/sdk/cwl/tests/hw.py
@@ -2,5 +2,4 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
print("Hello world")
diff --git a/sdk/cwl/tests/matcher.py b/sdk/cwl/tests/matcher.py
index 04e67b7dbd..3c49b87a86 100644
--- a/sdk/cwl/tests/matcher.py
+++ b/sdk/cwl/tests/matcher.py
@@ -2,13 +2,10 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import object
-
import difflib
import json
import re
-
class JsonDiffMatcher(object):
"""Raise AssertionError with a readable JSON diff when not __eq__().
diff --git a/sdk/cwl/tests/submit_test_job_s3.json b/sdk/cwl/tests/submit_test_job_s3.json
new file mode 100644
index 0000000000..0892baacdb
--- /dev/null
+++ b/sdk/cwl/tests/submit_test_job_s3.json
@@ -0,0 +1,24 @@
+{
+ "x": {
+ "class": "File",
+ "location": "s3://examplebucket/blorp.txt"
+ },
+ "y": {
+ "class": "Directory",
+ "location": "keep:99999999999999999999999999999998+99",
+ "listing": [{
+ "class": "File",
+ "location": "keep:99999999999999999999999999999998+99/file1.txt"
+ }]
+ },
+ "z": {
+ "class": "Directory",
+ "basename": "anonymous",
+ "listing": [{
+ "basename": "renamed.txt",
+ "class": "File",
+ "location": "keep:99999999999999999999999999999998+99/file1.txt"
+ }],
+ "location": "_:df80736f-f14d-4b10-b2e3-03aa27f034bb"
+ }
+}
diff --git a/sdk/cwl/tests/test_container.py b/sdk/cwl/tests/test_container.py
index b95b8eb67b..536252777d 100644
--- a/sdk/cwl/tests/test_container.py
+++ b/sdk/cwl/tests/test_container.py
@@ -2,17 +2,12 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import str
-from builtins import object
-
import arvados_cwl
import arvados_cwl.context
import arvados_cwl.util
-#from arvados_cwl.arvdocker import arv_docker_clear_cache
import copy
import arvados.config
import logging
-import mock
import unittest
import os
import functools
@@ -24,6 +19,9 @@ from cwltool.update import INTERNAL_VERSION
from schema_salad.ref_resolver import Loader
from schema_salad.sourceline import cmap
import io
+from parameterized import parameterized
+
+from unittest import mock
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
@@ -62,7 +60,6 @@ class TestContainer(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
- #arv_docker_clear_cache()
def tearDown(self):
root_logger = logging.getLogger('')
@@ -87,7 +84,7 @@ class TestContainer(unittest.TestCase):
"fetcher_constructor": functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=fs_access),
"loader": Loader({}),
"metadata": cmap({"cwlVersion": INTERNAL_VERSION, "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"}),
- "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__
+ "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__,
})
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
@@ -127,72 +124,73 @@ class TestContainer(unittest.TestCase):
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @parameterized.expand([
+ (True,),
+ (False,),
+ ])
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
- def test_run(self, keepdocker):
- for enable_reuse in (True, False):
- #arv_docker_clear_cache()
-
- runner = mock.MagicMock()
- runner.ignore_docker_for_reuse = False
- runner.intermediate_output_ttl = 0
- runner.secret_store = cwltool.secrets.SecretStore()
- runner.api._rootDesc = {"revision": "20210628"}
- runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
-
- keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
- runner.api.collections().get().execute.return_value = {
- "portable_data_hash": "99999999999999999999999999999993+99"}
-
- tool = cmap({
- "inputs": [],
- "outputs": [],
- "baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}],
- "id": "",
- "class": "CommandLineTool",
- "cwlVersion": "v1.2"
- })
+ def test_run(self, enable_reuse, keepdocker):
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
- loadingContext, runtimeContext = self.helper(runner, enable_reuse)
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "class": "CommandLineTool",
+ "cwlVersion": "v1.2"
+ })
- arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
- arvtool.formatgraph = None
+ loadingContext, runtimeContext = self.helper(runner, enable_reuse)
- for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
- j.run(runtimeContext)
- runner.api.container_requests().create.assert_called_with(
- body=JsonDiffMatcher({
- 'environment': {
- 'HOME': '/var/spool/cwl',
- 'TMPDIR': '/tmp'
- },
- 'name': 'test_run_'+str(enable_reuse),
- 'runtime_constraints': {
- 'vcpus': 1,
- 'ram': 268435456
- },
- 'use_existing': enable_reuse,
- 'priority': 500,
- 'mounts': {
- '/tmp': {'kind': 'tmp',
- "capacity": 1073741824
- },
- '/var/spool/cwl': {'kind': 'tmp',
- "capacity": 1073741824 }
- },
- 'state': 'Committed',
- 'output_name': 'Output from step test_run_'+str(enable_reuse),
- 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
- 'output_path': '/var/spool/cwl',
- 'output_ttl': 0,
- 'container_image': '99999999999999999999999999999993+99',
- 'command': ['ls', '/var/spool/cwl'],
- 'cwd': '/var/spool/cwl',
- 'scheduling_parameters': {},
- 'properties': {'cwl_input': {}},
- 'secret_mounts': {},
- 'output_storage_classes': ["default"]
- }))
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher({
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': 'test_run_'+str(enable_reuse),
+ 'runtime_constraints': {
+ 'vcpus': 1,
+ 'ram': 268435456
+ },
+ 'use_existing': enable_reuse,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'output_name': 'Output from step test_run_'+str(enable_reuse),
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': '99999999999999999999999999999993+99',
+ 'command': ['ls', '/var/spool/cwl'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': {},
+ 'properties': {'cwl_input': {}},
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
+ }))
# The test passes some fields in builder.resources
# For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@@ -565,6 +563,7 @@ class TestContainer(unittest.TestCase):
arvjob = arvados_cwl.ArvadosContainer(runner,
runtimeContext,
+ [],
mock.MagicMock(),
{},
None,
@@ -669,6 +668,7 @@ class TestContainer(unittest.TestCase):
arvjob = arvados_cwl.ArvadosContainer(runner,
runtimeContext,
+ [],
mock.MagicMock(),
{},
None,
@@ -974,8 +974,6 @@ class TestContainer(unittest.TestCase):
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_setting_storage_class(self, keepdocker):
- #arv_docker_clear_cache()
-
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
@@ -1050,8 +1048,6 @@ class TestContainer(unittest.TestCase):
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_setting_process_properties(self, keepdocker):
- #arv_docker_clear_cache()
-
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
@@ -1143,106 +1139,185 @@ class TestContainer(unittest.TestCase):
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @parameterized.expand([
+ # Legacy CUDA API
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": "9.0",
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'cuda': {
+ 'device_count': 1,
+ 'driver_version': "11.0",
+ 'hardware_capability': "9.0"
+ }
+ }, "20210628"),
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": "9.0",
+ "cudaDeviceCountMin": 2
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'cuda': {
+ 'device_count': 2,
+ 'driver_version': "11.0",
+ 'hardware_capability': "9.0"
+ }
+ }, "20210628"),
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": ["4.0", "5.0"],
+ "cudaDeviceCountMin": 2
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'cuda': {
+ 'device_count': 2,
+ 'driver_version': "11.0",
+ 'hardware_capability': "4.0"
+ }
+ }, "20210628"),
+
+ # New GPU API
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": "9.0",
+ "cudaVram": 8000,
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'gpu': {
+ 'device_count': 1,
+ 'driver_version': "11.0",
+ 'hardware_target': ["9.0"],
+ 'stack': "cuda",
+ 'vram': 8000*1024*1024,
+ }
+ }, "20250128"),
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": "9.0",
+ "cudaDeviceCountMin": 2,
+ "cudaVram": 8000,
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'gpu': {
+ 'device_count': 2,
+ 'driver_version': "11.0",
+ 'hardware_target': ["9.0"],
+ 'stack': "cuda",
+ 'vram': 8000*1024*1024,
+ }
+ }, "20250128"),
+ ({
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "11.0",
+ "cudaComputeCapability": ["4.0", "5.0"],
+ "cudaDeviceCountMin": 2,
+ "cudaVram": 8000,
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'gpu': {
+ 'device_count': 2,
+ 'driver_version': "11.0",
+ 'hardware_target': ["4.0", "5.0"],
+ 'stack': "cuda",
+ 'vram': 8000*1024*1024,
+ }
+ }, "20250128"),
+
+ # ROCm
+ ({
+ "class": "http://arvados.org/cwl#ROCmRequirement",
+ "rocmDriverVersion": "6.2",
+ "rocmTarget": ["gfx1100", "gfx1103"],
+ "rocmDeviceCountMin": 1,
+ "rocmVram": 8000,
+ }, {
+ 'vcpus': 1,
+ 'ram': 268435456,
+ 'gpu': {
+ 'device_count': 1,
+ 'driver_version': "6.2",
+ 'hardware_target': ["gfx1100", "gfx1103"],
+ 'stack': "rocm",
+ 'vram': 8000*1024*1024,
+ }
+ }, "20250128"),
+
+ ])
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
- def test_cuda_requirement(self, keepdocker):
+ def test_gpu_requirement(self, test_cwl_req, test_arv_req, apiRevision, keepdocker):
arvados_cwl.add_arv_hints()
- #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
- runner.api._rootDesc = {"revision": "20210628"}
+ runner.api._rootDesc = {"revision": apiRevision}
runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
- test_cwl_req = [{
- "class": "http://commonwl.org/cwltool#CUDARequirement",
- "cudaVersionMin": "11.0",
- "cudaComputeCapability": "9.0",
- }, {
- "class": "http://commonwl.org/cwltool#CUDARequirement",
- "cudaVersionMin": "11.0",
- "cudaComputeCapability": "9.0",
- "cudaDeviceCountMin": 2
- }, {
- "class": "http://commonwl.org/cwltool#CUDARequirement",
- "cudaVersionMin": "11.0",
- "cudaComputeCapability": ["4.0", "5.0"],
- "cudaDeviceCountMin": 2
- }]
-
- test_arv_req = [{
- 'device_count': 1,
- 'driver_version': "11.0",
- 'hardware_capability': "9.0"
- }, {
- 'device_count': 2,
- 'driver_version': "11.0",
- 'hardware_capability': "9.0"
- }, {
- 'device_count': 2,
- 'driver_version': "11.0",
- 'hardware_capability': "4.0"
- }]
-
- for test_case in range(0, len(test_cwl_req)):
-
- tool = cmap({
- "inputs": [],
- "outputs": [],
- "baseCommand": "nvidia-smi",
- "arguments": [],
- "id": "",
- "cwlVersion": "v1.2",
- "class": "CommandLineTool",
- "requirements": [test_cwl_req[test_case]]
- })
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "nvidia-smi",
+ "arguments": [],
+ "id": "",
+ "cwlVersion": "v1.2",
+ "class": "CommandLineTool",
+ "requirements": [test_cwl_req]
+ })
- loadingContext, runtimeContext = self.helper(runner, True)
+ loadingContext, runtimeContext = self.helper(runner, True)
- arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
- arvtool.formatgraph = None
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
- for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
- j.run(runtimeContext)
- runner.api.container_requests().create.assert_called_with(
- body=JsonDiffMatcher({
- 'environment': {
- 'HOME': '/var/spool/cwl',
- 'TMPDIR': '/tmp'
- },
- 'name': 'test_run_True' + ("" if test_case == 0 else "_"+str(test_case+1)),
- 'runtime_constraints': {
- 'vcpus': 1,
- 'ram': 268435456,
- 'cuda': test_arv_req[test_case]
- },
- 'use_existing': True,
- 'priority': 500,
- 'mounts': {
- '/tmp': {'kind': 'tmp',
- "capacity": 1073741824
- },
- '/var/spool/cwl': {'kind': 'tmp',
- "capacity": 1073741824 }
- },
- 'state': 'Committed',
- 'output_name': 'Output from step test_run_True' + ("" if test_case == 0 else "_"+str(test_case+1)),
- 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
- 'output_path': '/var/spool/cwl',
- 'output_ttl': 0,
- 'container_image': '99999999999999999999999999999993+99',
- 'command': ['nvidia-smi'],
- 'cwd': '/var/spool/cwl',
- 'scheduling_parameters': {},
- 'properties': {'cwl_input': {}},
- 'secret_mounts': {},
- 'output_storage_classes': ["default"]
- }))
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher({
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': 'test_run_True',
+ 'runtime_constraints': test_arv_req,
+ 'use_existing': True,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'output_name': 'Output from step test_run_True',
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': '99999999999999999999999999999993+99',
+ 'command': ['nvidia-smi'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': {},
+ 'properties': {'cwl_input': {}},
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
+ }))
# The test passes no builder.resources
@@ -1337,159 +1412,460 @@ class TestContainer(unittest.TestCase):
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @parameterized.expand([
+ ("None, None", None, None, None),
+ ("None, True", None, True, True),
+ ("None, False", None, False, False),
+ ("False, None", False, None, False),
+ ("False, True", False, True, False),
+ ("False, False", False, False, False),
+ ("True, None", True, None, True),
+ ("True, True", True, True, True),
+ ("True, False", True, False, False),
+ ])
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
- def test_run_preemptible_hint(self, keepdocker):
+ def test_run_preemptible_hint(self, _, enable_preemptible, preemptible_hint,
+ preemptible_setting, keepdocker):
arvados_cwl.add_arv_hints()
- for enable_preemptible in (None, True, False):
- for preemptible_hint in (None, True, False):
- #arv_docker_clear_cache()
-
- runner = mock.MagicMock()
- runner.ignore_docker_for_reuse = False
- runner.intermediate_output_ttl = 0
- runner.secret_store = cwltool.secrets.SecretStore()
- runner.api._rootDesc = {"revision": "20210628"}
- runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
-
- keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
- runner.api.collections().get().execute.return_value = {
- "portable_data_hash": "99999999999999999999999999999993+99"}
-
- if preemptible_hint is not None:
- hints = [{
- "class": "http://arvados.org/cwl#UsePreemptible",
- "usePreemptible": preemptible_hint
- }]
- else:
- hints = []
-
- tool = cmap({
- "inputs": [],
- "outputs": [],
- "baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}],
- "id": "",
- "class": "CommandLineTool",
- "cwlVersion": "v1.2",
- "hints": hints
- })
-
- loadingContext, runtimeContext = self.helper(runner)
-
- runtimeContext.name = 'test_run_enable_preemptible_'+str(enable_preemptible)+str(preemptible_hint)
- runtimeContext.enable_preemptible = enable_preemptible
-
- arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
- arvtool.formatgraph = None
-
- # Test the interactions between --enable/disable-preemptible
- # and UsePreemptible hint
-
- if enable_preemptible is None:
- if preemptible_hint is None:
- sched = {}
- else:
- sched = {'preemptible': preemptible_hint}
- else:
- if preemptible_hint is None:
- sched = {'preemptible': enable_preemptible}
- else:
- sched = {'preemptible': enable_preemptible and preemptible_hint}
-
- for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
- j.run(runtimeContext)
- runner.api.container_requests().create.assert_called_with(
- body=JsonDiffMatcher({
- 'environment': {
- 'HOME': '/var/spool/cwl',
- 'TMPDIR': '/tmp'
- },
- 'name': runtimeContext.name,
- 'runtime_constraints': {
- 'vcpus': 1,
- 'ram': 268435456
- },
- 'use_existing': True,
- 'priority': 500,
- 'mounts': {
- '/tmp': {'kind': 'tmp',
- "capacity": 1073741824
- },
- '/var/spool/cwl': {'kind': 'tmp',
- "capacity": 1073741824 }
- },
- 'state': 'Committed',
- 'output_name': 'Output from step '+runtimeContext.name,
- 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
- 'output_path': '/var/spool/cwl',
- 'output_ttl': 0,
- 'container_image': '99999999999999999999999999999993+99',
- 'command': ['ls', '/var/spool/cwl'],
- 'cwd': '/var/spool/cwl',
- 'scheduling_parameters': sched,
- 'properties': {'cwl_input': {}},
- 'secret_mounts': {},
- 'output_storage_classes': ["default"]
- }))
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ if preemptible_hint is not None:
+ hints = [{
+ "class": "http://arvados.org/cwl#UsePreemptible",
+ "usePreemptible": preemptible_hint
+ }]
+ else:
+ hints = []
+
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "class": "CommandLineTool",
+ "cwlVersion": "v1.2",
+ "hints": hints
+ })
+
+ loadingContext, runtimeContext = self.helper(runner)
+ runtimeContext.name = 'test_run_enable_preemptible_'+str(enable_preemptible)+str(preemptible_hint)
+ runtimeContext.enable_preemptible = enable_preemptible
+
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ # Test the interactions between --enable/disable-preemptible
+ # and UsePreemptible hint
+
+ sched = {}
+ if preemptible_setting is not None:
+ sched['preemptible'] = preemptible_setting
+
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher({
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': runtimeContext.name,
+ 'runtime_constraints': {
+ 'vcpus': 1,
+ 'ram': 268435456
+ },
+ 'use_existing': True,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'output_name': 'Output from step '+runtimeContext.name,
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': '99999999999999999999999999999993+99',
+ 'command': ['ls', '/var/spool/cwl'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': sched,
+ 'properties': {'cwl_input': {}},
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
+ }))
+
+
+ # The test passes no builder.resources
+ # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @parameterized.expand([
+ ("None, None", None, None, False),
+ ("None, True", None, True, True),
+ ("None, False", None, False, False),
+ ("False, None", False, None, False),
+ ("False, True", False, True, False), # command line overrides hint
+ ("False, False", False, False, False),
+ ("True, None", True, None, True),
+ ("True, True", True, True, True),
+ ("True, False", True, False, False), # hint overrides command line
+ ])
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
- def test_output_properties(self, keepdocker):
+ def test_spot_retry(self, _, enable_resubmit_non_preemptible,
+ preemption_behavior_hint,
+ expect_resubmit_behavior,
+ keepdocker):
arvados_cwl.add_arv_hints()
- for rev in ["20210628", "20220510"]:
- runner = mock.MagicMock()
- runner.ignore_docker_for_reuse = False
- runner.intermediate_output_ttl = 0
- runner.secret_store = cwltool.secrets.SecretStore()
- runner.api._rootDesc = {"revision": rev}
- runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
-
- keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
- runner.api.collections().get().execute.return_value = {
- "portable_data_hash": "99999999999999999999999999999993+99"}
-
- tool = cmap({
- "inputs": [{
- "id": "inp",
- "type": "string"
- }],
- "outputs": [],
- "baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}],
- "id": "",
- "cwlVersion": "v1.2",
- "class": "CommandLineTool",
- "hints": [
- {
- "class": "http://arvados.org/cwl#OutputCollectionProperties",
- "outputProperties": {
- "foo": "bar",
- "baz": "$(inputs.inp)"
- }
- }
- ]
+
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+
+ hints = [{
+ "class": "http://arvados.org/cwl#UsePreemptible",
+ "usePreemptible": True
+ }]
+
+ if preemption_behavior_hint is not None:
+ hints.append({
+ "class": "http://arvados.org/cwl#PreemptionBehavior",
+ "resubmitNonPreemptible": preemption_behavior_hint
})
- loadingContext, runtimeContext = self.helper(runner)
- runtimeContext.name = "test_timelimit"
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "class": "CommandLineTool",
+ "cwlVersion": "v1.2",
+ "hints": hints
+ })
- arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
- arvtool.formatgraph = None
+ loadingContext, runtimeContext = self.helper(runner)
- for j in arvtool.job({"inp": "quux"}, mock.MagicMock(), runtimeContext):
- j.run(runtimeContext)
+ runtimeContext.name = 'test_spot_retry_'+str(enable_resubmit_non_preemptible)+str(preemption_behavior_hint)
+ runtimeContext.enable_resubmit_non_preemptible = enable_resubmit_non_preemptible
- _, kwargs = runner.api.container_requests().create.call_args
- if rev == "20220510":
- self.assertEqual({"foo": "bar", "baz": "quux"}, kwargs['body'].get('output_properties'))
- else:
- self.assertEqual(None, kwargs['body'].get('output_properties'))
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ # Test the interactions between --enable/disable-preemptible
+ # and UsePreemptible hint
+
+ expect_container_request = {
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': runtimeContext.name,
+ 'runtime_constraints': {
+ 'vcpus': 1,
+ 'ram': 268435456
+ },
+ 'use_existing': True,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'output_name': 'Output from step '+runtimeContext.name,
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': '99999999999999999999999999999993+99',
+ 'command': ['ls', '/var/spool/cwl'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': {'preemptible': True},
+ 'properties': {'cwl_input': {}},
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"],
+ }
+
+ expect_resubmit_container_request = expect_container_request.copy()
+ expect_resubmit_container_request['scheduling_parameters'] = {'preemptible': False}
+
+ runner.api.container_requests().create().execute.return_value = {"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+ "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
+
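+        # When resubmission is expected, the first (preemptible) request is
+        # expected to carry container_count_max = 1.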
+ if expect_resubmit_behavior:
+ expect_container_request['container_count_max'] = 1
+
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container_request))
+ runner.api.containers().get().execute.return_value = {
+ "state":"Cancelled",
+ "output": "abc+123",
+ "exit_code": 1,
+ "log": "def+234",
+ "runtime_status": {
+ "preemptionNotice": "bye bye"
+ }
+ }
+ runner.api.container_requests().create().execute.return_value = {"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzz2",
+ "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzz2"}
+
+ j.done({
+ "state": "Final",
+ "log_uuid": "",
+ "output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
+ "uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+ "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
+ "modified_at": "2017-05-26T12:01:22Z",
+ "properties": {},
+ "name": "testjob"
+ })
+ if expect_resubmit_behavior:
+ runner.api.container_requests().update.assert_any_call(
+ uuid="zzzzz-xvhdp-zzzzzzzzzzzzzzz", body={"properties": {"arv:failed_container_resubmitted": "zzzzz-xvhdp-zzzzzzzzzzzzzz2"}})
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_resubmit_container_request))
+
+ @parameterized.expand([
+ ("20210628",),
+ ("20220510",),
+ ])
+ @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+ def test_output_properties(self, rev, keepdocker):
+ arvados_cwl.add_arv_hints()
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": rev}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ tool = cmap({
+ "inputs": [{
+ "id": "inp",
+ "type": "string"
+ }],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "cwlVersion": "v1.2",
+ "class": "CommandLineTool",
+ "hints": [
+ {
+ "class": "http://arvados.org/cwl#OutputCollectionProperties",
+ "outputProperties": {
+ "foo": "bar",
+ "baz": "$(inputs.inp)"
+ }
+ }
+ ]
+ })
+
+ loadingContext, runtimeContext = self.helper(runner)
+ runtimeContext.name = "test_timelimit"
+
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ for j in arvtool.job({"inp": "quux"}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+
+ _, kwargs = runner.api.container_requests().create.call_args
+ if rev == "20220510":
+ self.assertEqual({"foo": "bar", "baz": "quux"}, kwargs['body'].get('output_properties'))
+ else:
+ self.assertEqual(None, kwargs['body'].get('output_properties'))
+
+ @parameterized.expand([
+ ("20231117",),
+ ("20240502",),
+ ])
+ @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+ def test_output_glob(self, rev, keepdocker):
+ arvados_cwl.add_arv_hints()
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": rev}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ tool = cmap({
+ "inputs": [{
+ "id": "inp",
+ "type": "string"
+ }],
+ "outputs": [
+ {
+ "id": "o1",
+ "type": "File",
+ "outputBinding": {
+ "glob": "*.txt"
+ }
+ },
+ {
+ "id": "o2",
+ "type": "File",
+ "outputBinding": {
+ "glob": ["*.dat", "*.bat"]
+ }
+ },
+ {
+ "id": "o3",
+ "type": {
+ "type": "record",
+ "fields": [
+ {
+ "name": "f1",
+ "type": "File",
+ "outputBinding": {
+ "glob": ["*.cat"]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "id": "o4",
+ "type": "File",
+ "outputBinding": {
+ "glob": "$(inputs.inp)"
+ }
+ },
+ {
+ "id": "o5",
+ "type": "File",
+ "outputBinding": {
+ "glob": "*.foo"
+ },
+ "secondaryFiles": [".goo", "^.hoo"]
+ },
+
+ ],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "cwlVersion": "v1.2",
+ "class": "CommandLineTool",
+ "hints": [ ]
+ })
+
+ loadingContext, runtimeContext = self.helper(runner)
+ runtimeContext.name = "test_timelimit"
+
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ for j in arvtool.job({"inp": "quux"}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+
+ _, kwargs = runner.api.container_requests().create.call_args
+ if rev == "20240502":
+ self.assertEqual(['*.txt', '*.txt/**',
+ '*.dat', '*.dat/**',
+ '*.bat', '*.bat/**',
+ '*.cat', '*.cat/**',
+ 'quux', 'quux/**',
+ '*.foo', '*.foo/**',
+ '*.foo.goo', '*.foo.goo/**',
+ '*.hoo', '*.hoo/**',
+ 'cwl.output.json',
+ ], kwargs['body'].get('output_glob'))
+ else:
+ self.assertEqual(None, kwargs['body'].get('output_glob'))
+
+
+ # The test passes no builder.resources
+ # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @parameterized.expand([
+ ("Uncommitted",),
+ ("Committed",),
+ ("Final",),
+
+ ])
+ @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+ def test_recheck_on_error(self, get_state, keepdocker):
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "",
+ "class": "CommandLineTool",
+ "cwlVersion": "v1.2"
+ })
+
+ loadingContext, runtimeContext = self.helper(runner, False)
+
+ arvtool = cwltool.load_tool.load_tool(tool, loadingContext)
+ arvtool.formatgraph = None
+
+ # Test that if update() raises an exception, we re-check the
+ # container request record to see if we can proceed anyway.
+ runner.api.container_requests().update.side_effect = Exception("Invalid state transition")
+
+ runner.api.container_requests().create().execute.return_value = {
+ 'state': 'Uncommitted',
+ 'uuid': "zzzzz-xvhdp-zzzzzzzzzzzzzz1",
+ "container_uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+ }
+ runner.api.container_requests().get().execute.return_value = {
+ 'state': get_state,
+ 'uuid': "zzzzz-xvhdp-zzzzzzzzzzzzzz1",
+ }
+
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().get.assert_called_with(uuid="zzzzz-xvhdp-zzzzzzzzzzzzzz1")
+ assert j.attempt_count == (0 if get_state == "Uncommitted" else 1)
class TestWorkflow(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
- #arv_docker_clear_cache()
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
@@ -1509,7 +1885,8 @@ class TestWorkflow(unittest.TestCase):
"loader": document_loader,
"metadata": {"cwlVersion": INTERNAL_VERSION, "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"},
"construct_tool_object": runner.arv_make_tool,
- "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__})
+ "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__,
+ })
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
"basedir": "",
diff --git a/sdk/cwl/tests/test_copy_deps.py b/sdk/cwl/tests/test_copy_deps.py
index 28a5915b11..8ad735fddc 100644
--- a/sdk/cwl/tests/test_copy_deps.py
+++ b/sdk/cwl/tests/test_copy_deps.py
@@ -73,7 +73,7 @@ def check_contents(group, wf_uuid):
raise Exception("Couldn't find collection containing expected "+expect_file)
-def test_create():
+def check_create():
group = api.groups().create(body={"group": {"name": "test-19070-project-1", "group_class": "project"}}, ensure_unique_name=True).execute()
try:
contents = api.groups().contents(uuid=group["uuid"]).execute()
@@ -90,7 +90,7 @@ def test_create():
api.groups().delete(uuid=group["uuid"]).execute()
-def test_update():
+def check_update():
group = api.groups().create(body={"group": {"name": "test-19070-project-2", "group_class": "project"}}, ensure_unique_name=True).execute()
try:
contents = api.groups().contents(uuid=group["uuid"]).execute()
@@ -132,7 +132,7 @@ def test_update():
api.groups().delete(uuid=group["uuid"]).execute()
-def test_execute():
+def check_execute():
group = api.groups().create(body={"group": {"name": "test-19070-project-3", "group_class": "project"}}, ensure_unique_name=True).execute()
try:
contents = api.groups().contents(uuid=group["uuid"]).execute()
@@ -193,6 +193,6 @@ def test_execute():
api.groups().delete(uuid=group["uuid"]).execute()
if __name__ == '__main__':
- test_create()
- test_update()
- test_execute()
+ check_create()
+ check_update()
+ check_execute()
diff --git a/sdk/cwl/tests/test_fsaccess.py b/sdk/cwl/tests/test_fsaccess.py
index f83612a8b0..c086f0e832 100644
--- a/sdk/cwl/tests/test_fsaccess.py
+++ b/sdk/cwl/tests/test_fsaccess.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_make_output.py b/sdk/cwl/tests/test_make_output.py
index dd1da0b524..eb39d801fe 100644
--- a/sdk/cwl/tests/test_make_output.py
+++ b/sdk/cwl/tests/test_make_output.py
@@ -2,17 +2,15 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-
import functools
import json
import logging
-import mock
import os
import io
import unittest
+from unittest import mock
+
import arvados
import arvados_cwl
import arvados_cwl.executor
diff --git a/sdk/cwl/tests/test_pathmapper.py b/sdk/cwl/tests/test_pathmapper.py
index 194092db7a..1a13fc7079 100644
--- a/sdk/cwl/tests/test_pathmapper.py
+++ b/sdk/cwl/tests/test_pathmapper.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_set_output_prop.py b/sdk/cwl/tests/test_set_output_prop.py
index 3219eac989..0e829eeb92 100644
--- a/sdk/cwl/tests/test_set_output_prop.py
+++ b/sdk/cwl/tests/test_set_output_prop.py
@@ -7,7 +7,7 @@ import subprocess
api = arvados.api()
-def test_execute():
+def check_execute():
group = api.groups().create(body={"group": {"name": "test-17004-project", "group_class": "project"}}, ensure_unique_name=True).execute()
try:
contents = api.groups().contents(uuid=group["uuid"]).execute()
@@ -34,4 +34,4 @@ def test_execute():
api.groups().delete(uuid=group["uuid"]).execute()
if __name__ == '__main__':
- test_execute()
+ check_execute()
diff --git a/sdk/cwl/tests/test_submit.py b/sdk/cwl/tests/test_submit.py
index c8bf127951..1f53729b53 100644
--- a/sdk/cwl/tests/test_submit.py
+++ b/sdk/cwl/tests/test_submit.py
@@ -2,12 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from builtins import object
-from builtins import str
-from future.utils import viewvalues
-
import copy
import io
import itertools
@@ -15,23 +9,14 @@ import functools
import hashlib
import json
import logging
-import mock
import sys
import unittest
-import cwltool.process
import re
import os
+import collections
-from io import BytesIO
-
-# StringIO.StringIO and io.StringIO have different behavior write() is
-# called with both python2 (byte) strings and unicode strings
-# (specifically there's some logging in cwltool that causes trouble).
-# This isn't a problem on python3 because all string are unicode.
-if sys.version_info[0] < 3:
- from StringIO import StringIO
-else:
- from io import StringIO
+from io import BytesIO, StringIO
+from unittest import mock
import arvados
import arvados.collection
@@ -40,6 +25,8 @@ import arvados_cwl.executor
import arvados_cwl.runner
import arvados.keep
+import cwltool.process
+
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
@@ -101,10 +88,7 @@ def stubs(wfdetails=('submit_wf.cwl', None)):
stubs.fake_user_uuid = "zzzzz-tpzed-zzzzzzzzzzzzzzz"
stubs.fake_container_uuid = "zzzzz-dz642-zzzzzzzzzzzzzzz"
- if sys.version_info[0] < 3:
- stubs.capture_stdout = BytesIO()
- else:
- stubs.capture_stdout = StringIO()
+ stubs.capture_stdout = StringIO()
stubs.api = mock.MagicMock()
stubs.api._rootDesc = get_rootDesc()
@@ -142,7 +126,7 @@ def stubs(wfdetails=('submit_wf.cwl', None)):
return CollectionExecute(created_collections[uuid])
def collection_getstub(created_collections, uuid):
- for v in viewvalues(created_collections):
+ for v in created_collections.values():
if uuid in (v["uuid"], v["portable_data_hash"]):
return CollectionExecute(v)
@@ -280,7 +264,8 @@ def stubs(wfdetails=('submit_wf.cwl', None)):
},
'properties': stubs.git_props,
'use_existing': False,
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'environment': {},
}
stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
@@ -409,7 +394,7 @@ class TestSubmit(unittest.TestCase):
"class": "http://arvados.org/cwl#WorkflowRunnerResources"
}
]
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:fa5fbf21deb74f9f239daa3f5bb4b902+292/wf/submit_wf_no_reuse.cwl"
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:0c12c72eb112405548c0369c987aef61+292/wf/submit_wf_no_reuse.cwl"
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -509,6 +494,75 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
+ @stubs()
+ def test_submit_invalid_empty_storage_classes(self, stubs):
+ exited = arvados_cwl.main(
+ ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ "--debug",
+ '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ @stubs()
+ def test_submit_invalid_malformed_storage_classes(self, stubs):
+ exited = arvados_cwl.main(
+ ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=,,,,,",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ "--debug",
+ '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ @stubs()
+ def test_submit_duplicate_storage_classes(self, stubs):
+ exited = arvados_cwl.main(
+ ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=,foo,bar,,foo,",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ "--debug",
+ "--storage-classes=foo,bar", '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
@mock.patch("cwltool.task_queue.TaskQueue")
@mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
@mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
@@ -733,7 +787,8 @@ class TestSubmit(unittest.TestCase):
},
'use_existing': False,
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'environment': {},
}
stubs.api.container_requests().create.assert_called_with(
@@ -833,7 +888,8 @@ class TestSubmit(unittest.TestCase):
'properties': {
"template_uuid": "962eh-7fd4e-gkbzl62qqtfig37"
},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'environment': {},
}
stubs.api.container_requests().create.assert_called_with(
@@ -1029,7 +1085,7 @@ class TestSubmit(unittest.TestCase):
# "arv": "http://arvados.org/cwl#",
#}
expect_container["command"] = ["--collection-cache-size=512" if v == "--collection-cache-size=256" else v for v in expect_container["command"]]
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:80b60e39456505b91d3989a1f5058b98+308/wf/submit_wf_runner_resources.cwl"
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:758635b2486327c80fa90055c8b5b4d2+308/wf/submit_wf_runner_resources.cwl"
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -1180,7 +1236,7 @@ class TestSubmit(unittest.TestCase):
"out": [
{"id": "#main/step/out"}
],
- "run": "keep:991302581d01db470345a131480e623b+247/secret_wf.cwl"
+ "run": "keep:a3b72b40f6df7bc7335df62e066b86ed+247/secret_wf.cwl"
}
]
}
@@ -1215,7 +1271,8 @@ class TestSubmit(unittest.TestCase):
}
},
"state": "Committed",
- "use_existing": False
+ "use_existing": False,
+ "environment": {}
}
stubs.api.container_requests().create.assert_called_with(
@@ -1423,7 +1480,7 @@ class TestSubmit(unittest.TestCase):
# "arv": "http://arvados.org/cwl#"
#}
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:df44f9dd4b9467159f210f967e45417f+312/wf/submit_wf_process_properties.cwl"
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:14b65f1869348873add49327cd63630c+312/wf/submit_wf_process_properties.cwl"
expect_container["properties"].update({
"baz": "blorp.txt",
@@ -1531,6 +1588,85 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
+ @mock.patch("boto3.session.Session")
+ @stubs()
+ def test_submit_defer_s3_download(self, stubs, botosession):
+
+ sessionmock = mock.MagicMock(region_name='us-east-2')
+ botosession.return_value = sessionmock
+
+ CredsTuple = collections.namedtuple('CredsTuple', ['access_key', 'secret_key'])
+
+ sessionmock.get_credentials.return_value = CredsTuple('123key', '789secret')
+
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--defer-download", "--enable-aws-credential-capture",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job_s3.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+
+ expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['location'] = 's3://examplebucket/blorp.txt'
+ del expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['size']
+ expect_container['environment']['AWS_SHARED_CREDENTIALS_FILE'] = '/var/lib/cwl/.aws/credentials'
+ expect_container['environment']['AWS_CONFIG_FILE'] = '/var/lib/cwl/.aws/config'
+ expect_container['secret_mounts'] = {
+ "/var/lib/cwl/.aws/credentials": {
+ "content": "[default]\naws_access_key_id = 123key\naws_secret_access_key = 789secret\n",
+ "kind": "text"
+ },
+ "/var/lib/cwl/.aws/config": {
+ "content": "[default]\nregion = us-east-2\n",
+ "kind": "text"
+ }
+ }
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ @mock.patch("boto3.session.Session")
+ @stubs()
+ def test_submit_defer_s3_download_no_credential_capture(self, stubs, botosession):
+
+ sessionmock = mock.MagicMock(region_name='us-east-2')
+ botosession.return_value = sessionmock
+
+ stubs.api.credentials().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-oss07-8jgyh6siwlfoofw",
+ "name": "AWS_TEST_CRED",
+ "external_id": "AKIASRXXXXXXXXXXYZKG",
+ "scopes": []
+ }]
+ }
+
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--defer-download", "--disable-aws-credential-capture",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job_s3.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+
+ expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['location'] = 's3://examplebucket/blorp.txt'
+ del expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['size']
+
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug', "--on-error=continue", '--use-credential=zzzzz-oss07-8jgyh6siwlfoofw',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ sessionmock.get_credentials.assert_not_called()
class TestCreateWorkflow(unittest.TestCase):
existing_workflow_uuid = "zzzzz-7fd4e-validworkfloyml"
diff --git a/sdk/cwl/tests/test_tq.py b/sdk/cwl/tests/test_tq.py
index 05e5116d72..bf53f8912e 100644
--- a/sdk/cwl/tests/test_tq.py
+++ b/sdk/cwl/tests/test_tq.py
@@ -3,7 +3,6 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
@@ -11,6 +10,8 @@ import logging
import os
import threading
+from unittest import mock
+
from cwltool.task_queue import TaskQueue
def success_task():
diff --git a/sdk/cwl/tests/test_urljoin.py b/sdk/cwl/tests/test_urljoin.py
index 86a053ea48..08bca55e3d 100644
--- a/sdk/cwl/tests/test_urljoin.py
+++ b/sdk/cwl/tests/test_urljoin.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_util.py b/sdk/cwl/tests/test_util.py
index bf3d6fe0ef..ab4dfafe22 100644
--- a/sdk/cwl/tests/test_util.py
+++ b/sdk/cwl/tests/test_util.py
@@ -2,22 +2,25 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import bytes
-
-import unittest
-import mock
import datetime
import httplib2
+import unittest
+
+from unittest import mock
from arvados_cwl.util import *
from arvados.errors import ApiError
-from arvados_cwl.util import common_prefix
+from arvados_cwl.util import common_prefix, sanitize_url
class MockDateTime(datetime.datetime):
@classmethod
def utcnow(cls):
return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)
+ @classmethod
+ def now(cls, tz):
+ return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)
+
datetime.datetime = MockDateTime
class TestUtil(unittest.TestCase):
@@ -70,3 +73,8 @@ class TestUtil(unittest.TestCase):
# just confirm the logic doesn't have a fencepost error
prefix = "file:///"
self.assertEqual("file:///foo/bar"[len(prefix):], "foo/bar")
+
+ def test_sanitize_url(self):
+ self.assertEqual(sanitize_url("https://x-access-token:blahblahblah@github.com/foo/bar.git"), "https://github.com/foo/bar.git")
+ self.assertEqual(sanitize_url("https://github.com/foo/bar.git"), "https://github.com/foo/bar.git")
+ self.assertEqual(sanitize_url("git@github.com:foo/bar.git"), "git@github.com:foo/bar.git")
diff --git a/sdk/cwl/tests/wf/check_mem.py b/sdk/cwl/tests/wf/check_mem.py
index b4322a8093..8cc4d6fb91 100644
--- a/sdk/cwl/tests/wf/check_mem.py
+++ b/sdk/cwl/tests/wf/check_mem.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import division
-
import arvados
import sys
import os
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper.cwl
index 05599b652d..8c93b6c002 100644
--- a/sdk/cwl/tests/wf/expect_upload_wrapper.cwl
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper.cwl
@@ -79,7 +79,7 @@
],
"label": "submit_wf.cwl",
"out": [],
- "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ "run": "keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl"
}
]
}
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl
index 63031110cd..712dfd8115 100644
--- a/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl
@@ -79,7 +79,7 @@
],
"label": "testing 123",
"out": [],
- "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ "run": "keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl"
}
]
}
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl
index 8f98f4718c..d42e532488 100644
--- a/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl
@@ -79,7 +79,7 @@
],
"label": "submit_wf_map.cwl",
"out": [],
- "run": "keep:2b94b65162db72023301a582e085646f+290/wf/submit_wf_map.cwl"
+ "run": "keep:6e94cbbad95593da698f57a28762f5c1+290/wf/submit_wf_map.cwl"
}
]
}
diff --git a/sdk/cwl/tests/wf/output_dir.cwl b/sdk/cwl/tests/wf/output_dir.cwl
new file mode 100644
index 0000000000..bf0cce363d
--- /dev/null
+++ b/sdk/cwl/tests/wf/output_dir.cwl
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: ExpressionTool
+inputs:
+ file1:
+ type: Directory
+ loadListing: deep_listing
+outputs:
+ val: Directory
+ val2: File[]
+requirements:
+ InlineJavascriptRequirement: {}
+expression: |
+ ${
+ var val2 = inputs.file1.listing.filter(function (f) { return f.class == 'File'; } );
+ return {val: inputs.file1, val2: val2}
+ }
diff --git a/sdk/cwl/tests/wf/output_dir_wf.cwl b/sdk/cwl/tests/wf/output_dir_wf.cwl
new file mode 100644
index 0000000000..c416da4473
--- /dev/null
+++ b/sdk/cwl/tests/wf/output_dir_wf.cwl
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: Workflow
+inputs:
+ file1:
+ type: Directory
+ loadListing: deep_listing
+ default:
+ class: Directory
+ location: ../testdir
+
+steps:
+ step1:
+ in:
+ file1: file1
+ run: output_dir.cwl
+ out: [val, val2]
+
+outputs:
+ val:
+ type: Directory
+ outputSource: step1/val
+ val2:
+ type: File[]
+ outputSource: step1/val2
diff --git a/sdk/cwl/tests/wf/submit_wf_wrapper.cwl b/sdk/cwl/tests/wf/submit_wf_wrapper.cwl
index 3e015d65df..b67b772fa9 100644
--- a/sdk/cwl/tests/wf/submit_wf_wrapper.cwl
+++ b/sdk/cwl/tests/wf/submit_wf_wrapper.cwl
@@ -52,7 +52,7 @@
],
"label": "submit_wf.cwl",
"out": [],
- "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ "run": "keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl"
}
]
}
diff --git a/sdk/dev-jobs.dockerfile b/sdk/dev-jobs.dockerfile
index f66f670d81..c7e1018e0c 100644
--- a/sdk/dev-jobs.dockerfile
+++ b/sdk/dev-jobs.dockerfile
@@ -14,7 +14,7 @@ FROM debian:bullseye-slim
MAINTAINER Arvados Package Maintainers
RUN DEBIAN_FRONTEND=noninteractive apt-get update -q && apt-get install -qy --no-install-recommends \
- git python3-dev python3-venv libcurl4-gnutls-dev libgnutls28-dev nodejs build-essential
+ git python3-dev python3-venv libcurl4-gnutls-dev nodejs build-essential
RUN python3 -m venv /opt/arvados-py
ENV PATH=/opt/arvados-py/bin:/usr/local/bin:/usr/bin:/bin
diff --git a/sdk/go/arvados/api.go b/sdk/go/arvados/api.go
index c3d0ea8aef..8945b22376 100644
--- a/sdk/go/arvados/api.go
+++ b/sdk/go/arvados/api.go
@@ -8,6 +8,7 @@ import (
"bufio"
"context"
"encoding/json"
+ "errors"
"io"
"net"
"net/http"
@@ -42,11 +43,7 @@ var (
EndpointCollectionDelete = APIEndpoint{"DELETE", "arvados/v1/collections/{uuid}", ""}
EndpointCollectionTrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/trash", ""}
EndpointCollectionUntrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/untrash", ""}
- EndpointSpecimenCreate = APIEndpoint{"POST", "arvados/v1/specimens", "specimen"}
- EndpointSpecimenUpdate = APIEndpoint{"PATCH", "arvados/v1/specimens/{uuid}", "specimen"}
- EndpointSpecimenGet = APIEndpoint{"GET", "arvados/v1/specimens/{uuid}", ""}
- EndpointSpecimenList = APIEndpoint{"GET", "arvados/v1/specimens", ""}
- EndpointSpecimenDelete = APIEndpoint{"DELETE", "arvados/v1/specimens/{uuid}", ""}
+ EndpointComputedPermissionList = APIEndpoint{"GET", "arvados/v1/computed_permissions", ""}
EndpointContainerCreate = APIEndpoint{"POST", "arvados/v1/containers", "container"}
EndpointContainerUpdate = APIEndpoint{"PATCH", "arvados/v1/containers/{uuid}", "container"}
EndpointContainerPriorityUpdate = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/update_priority", "container"}
@@ -108,8 +105,21 @@ var (
EndpointAPIClientAuthorizationList = APIEndpoint{"GET", "arvados/v1/api_client_authorizations", ""}
EndpointAPIClientAuthorizationDelete = APIEndpoint{"DELETE", "arvados/v1/api_client_authorizations/{uuid}", ""}
EndpointAPIClientAuthorizationGet = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/{uuid}", ""}
+ EndpointCredentialCreate = APIEndpoint{"POST", "arvados/v1/credentials", "credential"}
+ EndpointCredentialUpdate = APIEndpoint{"PATCH", "arvados/v1/credentials/{uuid}", "credential"}
+ EndpointCredentialGet = APIEndpoint{"GET", "arvados/v1/credentials/{uuid}", ""}
+ EndpointCredentialDelete = APIEndpoint{"DELETE", "arvados/v1/credentials/{uuid}", ""}
+ EndpointCredentialSecret = APIEndpoint{"GET", "arvados/v1/credentials/{uuid}/credential_secret", ""}
)
+type ContainerHTTPProxyOptions struct {
+ // "{container uuid}-{port}", ":{dynamic-external-port}", or
+ // the name of a published port
+ Target string `json:"target"`
+ NoForward bool `json:"no_forward"`
+ Request *http.Request `json:"-"`
+}
+
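As a minimal sketch (not part of the patch) of the three accepted Target forms — every UUID, port, and published-port name below is invented for illustration:

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Hypothetical target values; real ones come from a running
	// container and the cluster's published-ports configuration.
	for _, target := range []string{
		"zzzzz-dz642-zzzzzzzzzzzzzzz-8080", // "{container uuid}-{port}"
		":12345",                           // ":{dynamic-external-port}"
		"webapp",                           // the name of a published port
	} {
		opts := arvados.ContainerHTTPProxyOptions{Target: target}
		fmt.Println(opts.Target)
	}
}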
type ContainerSSHOptions struct {
UUID string `json:"uuid"`
DetachKeys string `json:"detach_keys"`
@@ -156,7 +166,7 @@ type ListOptions struct {
IncludeOldVersions bool `json:"include_old_versions"`
BypassFederation bool `json:"bypass_federation"`
ForwardedFor string `json:"forwarded_for,omitempty"`
- Include string `json:"include"`
+ Include []string `json:"include"`
}
type CreateOptions struct {
@@ -164,8 +174,10 @@ type CreateOptions struct {
EnsureUniqueName bool `json:"ensure_unique_name"`
Select []string `json:"select"`
Attrs map[string]interface{} `json:"attrs"`
- // ReplaceFiles only applies when creating a collection.
- ReplaceFiles map[string]string `json:"replace_files"`
+ // ReplaceFiles and ReplaceSegments only apply when creating a
+ // collection.
+ ReplaceFiles map[string]string `json:"replace_files"`
+ ReplaceSegments map[BlockSegment]BlockSegment `json:"replace_segments"`
}
type UpdateOptions struct {
@@ -173,8 +185,10 @@ type UpdateOptions struct {
Attrs map[string]interface{} `json:"attrs"`
Select []string `json:"select"`
BypassFederation bool `json:"bypass_federation"`
- // ReplaceFiles only applies when updating a collection.
- ReplaceFiles map[string]string `json:"replace_files"`
+ // ReplaceFiles and ReplaceSegments only apply when updating a
+ // collection.
+ ReplaceFiles map[string]string `json:"replace_files"`
+ ReplaceSegments map[BlockSegment]BlockSegment `json:"replace_segments"`
}
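A rough sketch of how a client might populate the new field, with invented locators (two small blocks repacked into one 50-byte block); the map keys and values serialize via BlockSegment's text marshalling, introduced later in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Hypothetical collection UUID and locators, for illustration only.
	// Keys are the old segments; values are their new locations.
	opts := arvados.UpdateOptions{
		UUID: "zzzzz-4zz18-zzzzzzzzzzzzzzz",
		ReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{
			{Locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1", Offset: 0, Length: 1}:   {Locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50", Offset: 0, Length: 1},
			{Locator: "cccccccccccccccccccccccccccccccc+49", Offset: 0, Length: 49}: {Locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50", Offset: 1, Length: 49},
		},
	}
	body, _ := json.Marshal(opts)
	fmt.Println(string(body)) // each segment appears as "hash+size offset length"
}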
type GroupContentsOptions struct {
@@ -187,7 +201,7 @@ type GroupContentsOptions struct {
Order []string `json:"order"`
Distinct bool `json:"distinct"`
Count string `json:"count"`
- Include string `json:"include"`
+ Include []string `json:"include"`
Recursive bool `json:"recursive"`
IncludeTrash bool `json:"include_trash"`
IncludeOldVersions bool `json:"include_old_versions"`
@@ -246,8 +260,15 @@ type BlockReadOptions struct {
Locator string
WriteTo io.Writer
LocalLocator func(string)
+ // If true, do not read the block data, just check whether the
+ // block is available in a local filesystem or memory cache.
+ // If not, return ErrNotCached.
+ CheckCacheOnly bool
}
+// See CheckCacheOnly field of BlockReadOptions.
+var ErrNotCached = errors.New("block is not in cache")
+
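For illustration, a caller could probe the cache like this — a sketch that assumes a BlockRead method with an (int, error) signature, stated here via a local interface rather than any particular SDK type:

package example

import (
	"context"
	"errors"
	"io"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

// blockReader is a minimal local interface matching the assumed
// BlockRead signature; any Keep gateway with this method would do.
type blockReader interface {
	BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error)
}

// isCached reports whether the given block is already available in a
// local cache, without transferring the block data.
func isCached(ctx context.Context, kc blockReader, locator string) (bool, error) {
	_, err := kc.BlockRead(ctx, arvados.BlockReadOptions{
		Locator:        locator,
		WriteTo:        io.Discard, // nothing is written in check-only mode
		CheckCacheOnly: true,
	})
	if errors.Is(err, arvados.ErrNotCached) {
		return false, nil
	}
	return err == nil, err
}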
type BlockWriteOptions struct {
Hash string
Data []byte
@@ -277,6 +298,12 @@ type ContainerLogOptions struct {
WebDAVOptions
}
+type RepackOptions struct {
+ CachedOnly bool
+ Full bool
+ DryRun bool
+}
+
type API interface {
ConfigGet(ctx context.Context) (json.RawMessage, error)
VocabularyGet(ctx context.Context) (Vocabulary, error)
@@ -296,6 +323,7 @@ type API interface {
CollectionDelete(ctx context.Context, options DeleteOptions) (Collection, error)
CollectionTrash(ctx context.Context, options DeleteOptions) (Collection, error)
CollectionUntrash(ctx context.Context, options UntrashOptions) (Collection, error)
+ ComputedPermissionList(ctx context.Context, options ListOptions) (ComputedPermissionList, error)
ContainerCreate(ctx context.Context, options CreateOptions) (Container, error)
ContainerUpdate(ctx context.Context, options UpdateOptions) (Container, error)
ContainerPriorityUpdate(ctx context.Context, options UpdateOptions) (Container, error)
@@ -306,6 +334,7 @@ type API interface {
ContainerUnlock(ctx context.Context, options GetOptions) (Container, error)
ContainerSSH(ctx context.Context, options ContainerSSHOptions) (ConnectionResponse, error)
ContainerGatewayTunnel(ctx context.Context, options ContainerGatewayTunnelOptions) (ConnectionResponse, error)
+ ContainerHTTPProxy(ctx context.Context, options ContainerHTTPProxyOptions) (http.Handler, error)
ContainerRequestCreate(ctx context.Context, options CreateOptions) (ContainerRequest, error)
ContainerRequestUpdate(ctx context.Context, options UpdateOptions) (ContainerRequest, error)
ContainerRequestGet(ctx context.Context, options GetOptions) (ContainerRequest, error)
@@ -332,11 +361,6 @@ type API interface {
LogGet(ctx context.Context, options GetOptions) (Log, error)
LogList(ctx context.Context, options ListOptions) (LogList, error)
LogDelete(ctx context.Context, options DeleteOptions) (Log, error)
- SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
- SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
- SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
- SpecimenList(ctx context.Context, options ListOptions) (SpecimenList, error)
- SpecimenDelete(ctx context.Context, options DeleteOptions) (Specimen, error)
SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error)
UserCreate(ctx context.Context, options CreateOptions) (User, error)
UserUpdate(ctx context.Context, options UpdateOptions) (User, error)
diff --git a/sdk/go/arvados/api_client_authorization.go b/sdk/go/arvados/api_client_authorization.go
index c920d2dc34..e357da96b1 100644
--- a/sdk/go/arvados/api_client_authorization.go
+++ b/sdk/go/arvados/api_client_authorization.go
@@ -8,22 +8,18 @@ import "time"
// APIClientAuthorization is an arvados#apiClientAuthorization resource.
type APIClientAuthorization struct {
- UUID string `json:"uuid"`
- APIClientID int `json:"api_client_id"`
- APIToken string `json:"api_token"`
- CreatedAt time.Time `json:"created_at"`
- CreatedByIPAddress string `json:"created_by_ip_address"`
- DefaultOwnerUUID string `json:"default_owner_uuid"`
- Etag string `json:"etag"`
- ExpiresAt time.Time `json:"expires_at"`
- LastUsedAt time.Time `json:"last_used_at"`
- LastUsedByIPAddress string `json:"last_used_by_ip_address"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- OwnerUUID string `json:"owner_uuid"`
- Scopes []string `json:"scopes"`
- UserID int `json:"user_id"`
+ UUID string `json:"uuid"`
+ APIToken string `json:"api_token"`
+ CreatedAt time.Time `json:"created_at"`
+ CreatedByIPAddress string `json:"created_by_ip_address"`
+ Etag string `json:"etag"`
+ ExpiresAt time.Time `json:"expires_at"`
+ LastUsedAt time.Time `json:"last_used_at"`
+ LastUsedByIPAddress string `json:"last_used_by_ip_address"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ OwnerUUID string `json:"owner_uuid"`
+ Scopes []string `json:"scopes"`
}
// APIClientAuthorizationList is an arvados#apiClientAuthorizationList resource.
diff --git a/sdk/go/arvados/authorized_key.go b/sdk/go/arvados/authorized_key.go
index 642fc11261..0d363e1164 100644
--- a/sdk/go/arvados/authorized_key.go
+++ b/sdk/go/arvados/authorized_key.go
@@ -8,18 +8,17 @@ import "time"
// AuthorizedKey is an arvados#authorizedKey resource.
type AuthorizedKey struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- Name string `json:"name"`
- AuthorizedUserUUID string `json:"authorized_user_uuid"`
- PublicKey string `json:"public_key"`
- KeyType string `json:"key_type"`
- ExpiresAt time.Time `json:"expires_at"`
+ UUID string `json:"uuid"`
+ Etag string `json:"etag"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ Name string `json:"name"`
+ AuthorizedUserUUID string `json:"authorized_user_uuid"`
+ PublicKey string `json:"public_key"`
+ KeyType string `json:"key_type"`
+ ExpiresAt time.Time `json:"expires_at"`
}
// AuthorizedKeyList is an arvados#authorizedKeyList resource.
diff --git a/sdk/go/arvados/block_segment.go b/sdk/go/arvados/block_segment.go
new file mode 100644
index 0000000000..38eb4dbeb4
--- /dev/null
+++ b/sdk/go/arvados/block_segment.go
@@ -0,0 +1,45 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// BlockSegment is a portion of a block stored in Keep. It is used in
+// the replace_segments API.
+type BlockSegment struct {
+ Locator string
+ Offset int
+ Length int
+}
+
+func (bs *BlockSegment) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ _, err := fmt.Sscanf(s, "%s %d %d", &bs.Locator, &bs.Offset, &bs.Length)
+ return err
+}
+
+// MarshalText enables encoding/json to encode BlockSegment as a map
+// key.
+func (bs BlockSegment) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%s %d %d", bs.Locator, bs.Offset, bs.Length)), nil
+}
+
+// UnmarshalText enables encoding/json to decode BlockSegment as a map
+// key.
+func (bs *BlockSegment) UnmarshalText(p []byte) error {
+ _, err := fmt.Sscanf(string(p), "%s %d %d", &bs.Locator, &bs.Offset, &bs.Length)
+ return err
+}
+
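+// StripAllHints returns a copy of bs whose locator has had all hints
+// stripped, leaving only the "hash+size" portion.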
+func (bs BlockSegment) StripAllHints() BlockSegment {
+ bs.Locator = stripAllHints(bs.Locator)
+ return bs
+}
diff --git a/sdk/go/arvados/block_segment_test.go b/sdk/go/arvados/block_segment_test.go
new file mode 100644
index 0000000000..651ffc62f2
--- /dev/null
+++ b/sdk/go/arvados/block_segment_test.go
@@ -0,0 +1,49 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "encoding/json"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&blockSegmentSuite{})
+
+type blockSegmentSuite struct{}
+
+func (s *blockSegmentSuite) TestMarshal(c *C) {
+ dst, err := json.Marshal(map[BlockSegment]BlockSegment{
+ BlockSegment{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1", 0, 1}: BlockSegment{"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3", 2, 1},
+ })
+ c.Check(err, IsNil)
+ c.Check(string(dst), Equals, `{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1 0 1":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3 2 1"}`)
+}
+
+func (s *blockSegmentSuite) TestUnmarshal(c *C) {
+ var dst struct {
+ F map[BlockSegment]BlockSegment
+ }
+ err := json.Unmarshal([]byte(`{"f": {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1 0 1": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3 2 1"}}`), &dst)
+ c.Check(err, IsNil)
+ c.Check(dst.F, HasLen, 1)
+ for k, v := range dst.F {
+ c.Check(k, Equals, BlockSegment{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1", 0, 1})
+ c.Check(v, Equals, BlockSegment{"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3", 2, 1})
+ }
+}
+
+func (s *blockSegmentSuite) TestRoundTrip(c *C) {
+ orig := map[BlockSegment]BlockSegment{
+ BlockSegment{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1", 0, 1}: BlockSegment{"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50", 0, 1},
+ BlockSegment{"cccccccccccccccccccccccccccccccc+49", 0, 49}: BlockSegment{"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50", 1, 49},
+ }
+ j, err := json.Marshal(orig)
+ c.Check(err, IsNil)
+ var dst map[BlockSegment]BlockSegment
+ err = json.Unmarshal(j, &dst)
+ c.Check(err, IsNil)
+ c.Check(dst, DeepEquals, orig)
+}
diff --git a/sdk/go/arvados/client.go b/sdk/go/arvados/client.go
index 7bc3d5bc42..39697f0827 100644
--- a/sdk/go/arvados/client.go
+++ b/sdk/go/arvados/client.go
@@ -32,6 +32,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/httpserver"
"github.com/hashicorp/go-retryablehttp"
+ "github.com/sirupsen/logrus"
)
// A Client is an HTTP client with an API endpoint and a set of
@@ -82,6 +83,13 @@ type Client struct {
// filesystem size.
DiskCacheSize ByteSizeOrPercent
+ // Where to write debug logs. May be nil.
+ Logger logrus.FieldLogger
+
+ // The cluster config, if the Client was initialized via
+ // NewClientFromConfig. Otherwise nil.
+ Cluster *Cluster
+
dd *DiscoveryDocument
defaultRequestID string
@@ -154,13 +162,15 @@ func NewClientFromConfig(cluster *Cluster) (*Client, error) {
}
}
return &Client{
- Client: hc,
- Scheme: ctrlURL.Scheme,
- APIHost: ctrlURL.Host,
- Insecure: cluster.TLS.Insecure,
- Timeout: 5 * time.Minute,
- DiskCacheSize: cluster.Collections.WebDAVCache.DiskCacheSize,
- requestLimiter: &requestLimiter{maxlimit: int64(cluster.API.MaxConcurrentRequests / 4)},
+ Client: hc,
+ Scheme: ctrlURL.Scheme,
+ APIHost: ctrlURL.Host,
+ Insecure: cluster.TLS.Insecure,
+ KeepServiceURIs: parseKeepServiceURIs(os.Getenv("ARVADOS_KEEP_SERVICES")),
+ Timeout: 5 * time.Minute,
+ DiskCacheSize: cluster.Collections.WebDAVCache.DiskCacheSize,
+ requestLimiter: &requestLimiter{maxlimit: int64(cluster.API.MaxConcurrentRequests / 4)},
+ Cluster: cluster,
}, nil
}
@@ -217,18 +227,6 @@ func NewClientFromEnv() *Client {
vars[kv[0]] = kv[1]
}
}
- var svcs []string
- for _, s := range strings.Split(vars["ARVADOS_KEEP_SERVICES"], " ") {
- if s == "" {
- continue
- } else if u, err := url.Parse(s); err != nil {
- log.Printf("ARVADOS_KEEP_SERVICES: %q: %s", s, err)
- } else if !u.IsAbs() {
- log.Printf("ARVADOS_KEEP_SERVICES: %q: not an absolute URI", s)
- } else {
- svcs = append(svcs, s)
- }
- }
var insecure bool
if s := strings.ToLower(vars["ARVADOS_API_HOST_INSECURE"]); s == "1" || s == "yes" || s == "true" {
insecure = true
@@ -238,18 +236,32 @@ func NewClientFromEnv() *Client {
APIHost: vars["ARVADOS_API_HOST"],
AuthToken: vars["ARVADOS_API_TOKEN"],
Insecure: insecure,
- KeepServiceURIs: svcs,
+ KeepServiceURIs: parseKeepServiceURIs(vars["ARVADOS_KEEP_SERVICES"]),
Timeout: 5 * time.Minute,
loadedFromEnv: true,
}
}
+func parseKeepServiceURIs(svclist string) []string {
+ var svcs []string
+ for _, s := range strings.Split(svclist, " ") {
+ if s == "" {
+ continue
+ } else if u, err := url.Parse(s); err != nil {
+ log.Printf("ARVADOS_KEEP_SERVICES: %q: %s", s, err)
+ } else if !u.IsAbs() {
+ log.Printf("ARVADOS_KEEP_SERVICES: %q: not an absolute URI", s)
+ } else {
+ svcs = append(svcs, s)
+ }
+ }
+ return svcs
+}
+
var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
var nopCancelFunc context.CancelFunc = func() {}
-var reqErrorRe = regexp.MustCompile(`net/http: invalid header `)
-
// Do augments (*http.Client)Do(): adds Authorization and X-Request-Id
// headers, delays in order to comply with rate-limiting restrictions,
// and retries failed requests when appropriate.
@@ -258,7 +270,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
if auth, _ := ctx.Value(contextKeyAuthorization{}).(string); auth != "" {
req.Header.Add("Authorization", auth)
} else if c.AuthToken != "" {
- req.Header.Add("Authorization", "OAuth2 "+c.AuthToken)
+ req.Header.Add("Authorization", "Bearer "+c.AuthToken)
}
if req.Header.Get("X-Request-Id") == "" {
@@ -307,14 +319,6 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
if c.Timeout == 0 {
return false, nil
}
- // This check can be removed when
- // https://github.com/hashicorp/go-retryablehttp/pull/210
- // (or equivalent) is merged and we update go.mod.
- // Until then, it is needed to pass
- // TestNonRetryableStdlibError.
- if respErr != nil && reqErrorRe.MatchString(respErr.Error()) {
- return false, nil
- }
retrying, err := retryablehttp.DefaultRetryPolicy(ctx, resp, respErr)
if retrying {
lastResp, lastRespBody, lastErr = resp, nil, respErr
diff --git a/sdk/go/arvados/client_test.go b/sdk/go/arvados/client_test.go
index 55e2f998c4..90e19b7a68 100644
--- a/sdk/go/arvados/client_test.go
+++ b/sdk/go/arvados/client_test.go
@@ -98,7 +98,7 @@ func (*clientSuite) TestCurrentUser(c *check.C) {
c.Check(u.UUID, check.Equals, "zzzzz-abcde-012340123401234")
c.Check(stub.Requests, check.Not(check.HasLen), 0)
hdr := stub.Requests[len(stub.Requests)-1].Header
- c.Check(hdr.Get("Authorization"), check.Equals, "OAuth2 xyzzy")
+ c.Check(hdr.Get("Authorization"), check.Equals, "Bearer xyzzy")
client.Client.Transport = &errorTransport{}
u, err = client.CurrentUser()
@@ -255,10 +255,29 @@ func (*clientSuite) TestLoadConfig(c *check.C) {
// Environment variables override settings.conf
os.Setenv("ARVADOS_API_HOST", "[::]:3")
os.Setenv("ARVADOS_API_HOST_INSECURE", "0")
+ os.Setenv("ARVADOS_KEEP_SERVICES", "http://[::]:12345")
client = NewClientFromEnv()
c.Check(client.AuthToken, check.Equals, "token_from_settings_file2")
c.Check(client.APIHost, check.Equals, "[::]:3")
c.Check(client.Insecure, check.Equals, false)
+ c.Check(client.KeepServiceURIs, check.DeepEquals, []string{"http://[::]:12345"})
+
+ // ARVADOS_KEEP_SERVICES environment variable overrides
+ // cluster config, but ARVADOS_API_HOST/TOKEN do not.
+ os.Setenv("ARVADOS_KEEP_SERVICES", "http://[::]:12345")
+ os.Setenv("ARVADOS_API_HOST", "wronghost.example")
+ os.Setenv("ARVADOS_API_TOKEN", "wrongtoken")
+ cfg := Cluster{}
+ cfg.Services.Controller.ExternalURL = URL{Scheme: "https", Host: "ctrl.example:55555", Path: "/"}
+ cfg.Services.Keepstore.InternalURLs = map[URL]ServiceInstance{
+ URL{Scheme: "https", Host: "keep0.example:55555", Path: "/"}: ServiceInstance{},
+ }
+ client, err := NewClientFromConfig(&cfg)
+ c.Check(err, check.IsNil)
+ c.Check(client.AuthToken, check.Equals, "")
+ c.Check(client.APIHost, check.Equals, "ctrl.example:55555")
+ c.Check(client.Insecure, check.Equals, false)
+ c.Check(client.KeepServiceURIs, check.DeepEquals, []string{"http://[::]:12345"})
}
var _ = check.Suite(&clientRetrySuite{})
@@ -402,7 +421,7 @@ func (s *clientRetrySuite) TestExponentialBackoff(c *check.C) {
for e := float64(1); e < 5; e += 1 {
ok := false
- for i := 0; i < 20; i++ {
+ for i := 0; i < 30; i++ {
t = exponentialBackoff(min, max, int(e), nil)
// Every returned value must be between min and min(2^e, max)
c.Check(t >= min, check.Equals, true)
diff --git a/sdk/go/arvados/collection.go b/sdk/go/arvados/collection.go
index 389fe4e484..f9702bb5a8 100644
--- a/sdk/go/arvados/collection.go
+++ b/sdk/go/arvados/collection.go
@@ -31,7 +31,6 @@ type Collection struct {
Name string `json:"name"`
CreatedAt time.Time `json:"created_at"`
ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
ModifiedByUserUUID string `json:"modified_by_user_uuid"`
PortableDataHash string `json:"portable_data_hash"`
ReplicationConfirmed *int `json:"replication_confirmed"`
@@ -104,28 +103,57 @@ type CollectionList struct {
Limit int `json:"limit"`
}
-var (
- blkRe = regexp.MustCompile(`^ [0-9a-f]{32}\+\d+`)
- tokRe = regexp.MustCompile(` ?[^ ]*`)
-)
-
// PortableDataHash computes the portable data hash of the given
// manifest.
func PortableDataHash(mt string) string {
+ // To calculate the PDH, we write the manifest to an md5 hash
+ // func, except we skip the "extra" part of block tokens that
+ // look like "abcdef0123456789abcdef0123456789+12345+extra".
+ //
+ // This code is simplified by the facts that (A) all block
+ // tokens -- even the first and last in a stream -- are
+ // preceded and followed by a space character; and (B) all
+ // non-block tokens either start with '.' or contain ':'.
+ //
+ // A regexp-based approach (like the one this replaced) would
+ // be more readable, but very slow.
h := md5.New()
size := 0
- _ = tokRe.ReplaceAllFunc([]byte(mt), func(tok []byte) []byte {
- if m := blkRe.Find(tok); m != nil {
- // write hash+size, ignore remaining block hints
- tok = m
+ todo := []byte(mt)
+ for len(todo) > 0 {
+ // sp is the end of the current token (note that if
+ // the current token is the last file token in a
+ // stream, we'll also include the \n and the dirname
+ // token on the next line, which is perfectly fine for
+ // our purposes).
+ sp := bytes.IndexByte(todo, ' ')
+ if sp < 0 {
+ // Last token of the manifest, which is never
+ // a block token.
+ n, _ := h.Write(todo)
+ size += n
+ break
}
- n, err := h.Write(tok)
- if err != nil {
- panic(err)
+ if sp >= 34 && todo[32] == '+' && bytes.IndexByte(todo[:32], ':') == -1 && todo[0] != '.' {
+ // todo[:sp] is a block token.
+ sizeend := bytes.IndexByte(todo[33:sp], '+')
+ if sizeend < 0 {
+ // "hash+size"
+ sizeend = sp
+ } else {
+ // "hash+size+extra"
+ sizeend += 33
+ }
+ n, _ := h.Write(todo[:sizeend])
+ h.Write([]byte{' '})
+ size += n + 1
+ } else {
+ // todo[:sp] is not a block token.
+ n, _ := h.Write(todo[:sp+1])
+ size += n
}
- size += n
- return nil
- })
+ todo = todo[sp+1:]
+ }
return fmt.Sprintf("%x+%d", h.Sum(nil), size)
}
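A quick sketch of the skipping behavior described in the comments above: the two manifests below differ only in a "+A..." hint on the block token, so they should yield the same PDH (the locator is the well-known md5 of the empty string; the hint value is made up):

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Identical manifests except for a signature-style hint on the
	// block token; PortableDataHash skips the hint, so the PDHs match.
	plain := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt\n"
	hinted := ". d41d8cd98f00b204e9800998ecf8427e+0+Axyzzy@ffffffff 0:0:empty.txt\n"
	fmt.Println(arvados.PortableDataHash(plain) == arvados.PortableDataHash(hinted)) // true
}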
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 116051b09e..3db4531750 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -104,7 +104,6 @@ type Cluster struct {
MaxQueuedRequests int
MaxGatewayTunnels int
MaxQueueTimeForLockRequests Duration
- LogCreateRequestFraction float64
MaxKeepBlobBuffers int
MaxRequestAmplification int
MaxRequestSize int
@@ -156,15 +155,11 @@ type Cluster struct {
WebDAVCache WebDAVCacheConfig
- KeepproxyPermission UploadDownloadRolePermissions
- WebDAVPermission UploadDownloadRolePermissions
- WebDAVLogEvents bool
- WebDAVOutputBuffer ByteSize
- }
- Git struct {
- GitCommand string
- GitoliteHome string
- Repositories string
+ KeepproxyPermission UploadDownloadRolePermissions
+ WebDAVPermission UploadDownloadRolePermissions
+ WebDAVLogEvents bool
+ WebDAVLogDownloadInterval Duration
+ WebDAVOutputBuffer ByteSize
}
Login struct {
LDAP struct {
@@ -218,15 +213,6 @@ type Cluster struct {
TrustPrivateNetworks bool
IssueTrustedTokens bool
}
- Mail struct {
- MailchimpAPIKey string
- MailchimpListID string
- SendUserSetupNotificationEmail bool
- IssueReporterEmailFrom string
- IssueReporterEmailTo string
- SupportEmailAddress string
- EmailFrom string
- }
SystemLogs struct {
LogLevel string
Format string
@@ -248,13 +234,14 @@ type Cluster struct {
AutoAdminFirstUser bool
AutoAdminUserWithEmail string
AutoSetupNewUsers bool
- AutoSetupNewUsersWithRepository bool
AutoSetupNewUsersWithVmUUID string
AutoSetupUsernameBlacklist StringSet
EmailSubjectPrefix string
NewInactiveUserNotificationRecipients StringSet
NewUserNotificationRecipients StringSet
NewUsersAreActive bool
+ SendUserSetupNotificationEmail bool
+ SupportEmailAddress string
UserNotifierEmailFrom string
UserNotifierEmailBcc StringSet
UserProfileNotificationAddress string
@@ -315,7 +302,6 @@ type Volume struct {
}
type S3VolumeDriverParameters struct {
- IAMRole string
AccessKeyID string
SecretAccessKey string
Endpoint string
@@ -329,6 +315,7 @@ type S3VolumeDriverParameters struct {
RaceWindow Duration
UnsafeDelete bool
PrefixLength int
+ UsePathStyle bool
}
type AzureVolumeDriverParameters struct {
@@ -351,24 +338,23 @@ type VolumeAccess struct {
}
type Services struct {
- Composer Service
- Controller Service
- DispatchCloud Service
- DispatchLSF Service
- DispatchSLURM Service
- GitHTTP Service
- GitSSH Service
- Health Service
- Keepbalance Service
- Keepproxy Service
- Keepstore Service
- RailsAPI Service
- WebDAVDownload Service
- WebDAV Service
- WebShell Service
- Websocket Service
- Workbench1 Service
- Workbench2 Service
+ Composer Service
+ ContainerWebServices ServiceWithPortRange
+ Controller Service
+ DispatchCloud Service
+ DispatchLSF Service
+ DispatchSLURM Service
+ Health Service
+ Keepbalance Service
+ Keepproxy Service
+ Keepstore Service
+ RailsAPI Service
+ WebDAVDownload Service
+ WebDAV Service
+ WebShell Service
+ Websocket Service
+ Workbench1 Service
+ Workbench2 Service
}
type Service struct {
@@ -376,6 +362,12 @@ type Service struct {
ExternalURL URL
}
+type ServiceWithPortRange struct {
+ Service
+ ExternalPortMin int
+ ExternalPortMax int
+}
+
type TestUser struct {
Email string
Password string
@@ -471,10 +463,13 @@ type RemoteCluster struct {
ActivateUsers bool
}
-type CUDAFeatures struct {
- DriverVersion string
- HardwareCapability string
- DeviceCount int
+type GPUFeatures struct {
+	// Stack is the GPU support stack; as of this writing, "cuda" or "rocm"
+ Stack string
+ DriverVersion string
+ HardwareTarget string
+ DeviceCount int
+ VRAM ByteSize
}
type InstanceType struct {
@@ -487,7 +482,7 @@ type InstanceType struct {
AddedScratch ByteSize
Price float64
Preemptible bool
- CUDA CUDAFeatures
+ GPU GPUFeatures
}
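To make the shape of the renamed config concrete, here is a hypothetical instance type expressed as a Go literal — every name and number is illustrative, not taken from any real cluster configuration:

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Invented GPU instance type, for illustration only.
	it := arvados.InstanceType{
		Name:  "g1.xlarge",
		VCPUs: 4,
		RAM:   16 << 30,
		Price: 0.50,
		GPU: arvados.GPUFeatures{
			Stack:          "cuda",
			DriverVersion:  "12.4",
			HardwareTarget: "8.6", // illustrative CUDA hardware target
			DeviceCount:    1,
			VRAM:           16 << 30,
		},
	}
	fmt.Printf("%s: %d %s GPU(s), %v bytes VRAM\n",
		it.Name, it.GPU.DeviceCount, it.GPU.Stack, it.GPU.VRAM)
}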
type ContainersConfig struct {
@@ -510,22 +505,9 @@ type ContainersConfig struct {
LocalKeepBlobBuffersPerVCPU int
LocalKeepLogsToContainerLog string
- JobsAPI struct {
- Enable string
- GitInternalDir string
- }
Logging struct {
- MaxAge Duration
- SweepInterval Duration
- LogBytesPerEvent int
- LogSecondsBetweenEvents Duration
- LogThrottlePeriod Duration
- LogThrottleBytes int
- LogThrottleLines int
- LimitLogBytesPerJob int
- LogPartialLineThrottlePeriod Duration
- LogUpdatePeriod Duration
- LogUpdateSize ByteSize
+ LogUpdatePeriod Duration
+ LogUpdateSize ByteSize
}
ShellAccess struct {
Admin bool
@@ -535,20 +517,11 @@ type ContainersConfig struct {
PrioritySpread int64
SbatchArgumentsList []string
SbatchEnvironmentVariables map[string]string
- Managed struct {
- DNSServerConfDir string
- DNSServerConfTemplate string
- DNSServerReloadCommand string
- DNSServerUpdateCommand string
- ComputeNodeDomain string
- ComputeNodeNameservers StringSet
- AssignNodeHostname string
- }
}
LSF struct {
BsubSudoUser string
BsubArgumentsList []string
- BsubCUDAArguments []string
+ BsubGPUArguments []string
MaxRunTimeOverhead Duration
MaxRunTimeDefault Duration
}
@@ -560,6 +533,7 @@ type CloudVMsConfig struct {
BootProbeCommand string
InstanceInitCommand string
DeployRunnerBinary string
+ DeployRunnerDirectory string
DeployPublicKey bool
ImageID string
MaxCloudOpsPerSecond int
@@ -670,7 +644,6 @@ const (
ServiceNameDispatchCloud ServiceName = "arvados-dispatch-cloud"
ServiceNameDispatchLSF ServiceName = "arvados-dispatch-lsf"
ServiceNameDispatchSLURM ServiceName = "crunch-dispatch-slurm"
- ServiceNameGitHTTP ServiceName = "arvados-git-httpd"
ServiceNameHealth ServiceName = "arvados-health"
ServiceNameKeepbalance ServiceName = "keep-balance"
ServiceNameKeepproxy ServiceName = "keepproxy"
@@ -690,7 +663,6 @@ func (svcs Services) Map() map[ServiceName]Service {
ServiceNameDispatchCloud: svcs.DispatchCloud,
ServiceNameDispatchLSF: svcs.DispatchLSF,
ServiceNameDispatchSLURM: svcs.DispatchSLURM,
- ServiceNameGitHTTP: svcs.GitHTTP,
ServiceNameHealth: svcs.Health,
ServiceNameKeepbalance: svcs.Keepbalance,
ServiceNameKeepproxy: svcs.Keepproxy,
diff --git a/sdk/go/arvados/container.go b/sdk/go/arvados/container.go
index 91c8fbfe29..95db394d85 100644
--- a/sdk/go/arvados/container.go
+++ b/sdk/go/arvados/container.go
@@ -8,79 +8,82 @@ import "time"
// Container is an arvados#container resource.
type Container struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- Command []string `json:"command"`
- ContainerImage string `json:"container_image"`
- Cwd string `json:"cwd"`
- Environment map[string]string `json:"environment"`
- LockedByUUID string `json:"locked_by_uuid"`
- LockCount int `json:"lock_count"`
- Mounts map[string]Mount `json:"mounts"`
- Output string `json:"output"`
- OutputPath string `json:"output_path"`
- Priority int64 `json:"priority"`
- RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
- State ContainerState `json:"state"`
- SchedulingParameters SchedulingParameters `json:"scheduling_parameters"`
- ExitCode int `json:"exit_code"`
- RuntimeStatus map[string]interface{} `json:"runtime_status"`
- StartedAt *time.Time `json:"started_at"` // nil if not yet started
- FinishedAt *time.Time `json:"finished_at"` // nil if not yet finished
- GatewayAddress string `json:"gateway_address"`
- InteractiveSessionStarted bool `json:"interactive_session_started"`
- OutputStorageClasses []string `json:"output_storage_classes"`
- RuntimeUserUUID string `json:"runtime_user_uuid"`
- RuntimeAuthScopes []string `json:"runtime_auth_scopes"`
- RuntimeToken string `json:"runtime_token"`
- AuthUUID string `json:"auth_uuid"`
- Log string `json:"log"`
- Cost float64 `json:"cost"`
- SubrequestsCost float64 `json:"subrequests_cost"`
+ UUID string `json:"uuid"`
+ Etag string `json:"etag"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ ModifiedAt time.Time `json:"modified_at"`
+ Command []string `json:"command"`
+ ContainerImage string `json:"container_image"`
+ Cwd string `json:"cwd"`
+ Environment map[string]string `json:"environment"`
+ LockedByUUID string `json:"locked_by_uuid"`
+ LockCount int `json:"lock_count"`
+ Mounts map[string]Mount `json:"mounts"`
+ Output string `json:"output"`
+ OutputPath string `json:"output_path"`
+ OutputGlob []string `json:"output_glob"`
+ Priority int64 `json:"priority"`
+ RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
+ State ContainerState `json:"state"`
+ SchedulingParameters SchedulingParameters `json:"scheduling_parameters"`
+ ExitCode int `json:"exit_code"`
+ RuntimeStatus map[string]interface{} `json:"runtime_status"`
+ StartedAt *time.Time `json:"started_at"` // nil if not yet started
+ FinishedAt *time.Time `json:"finished_at"` // nil if not yet finished
+ GatewayAddress string `json:"gateway_address"`
+ InteractiveSessionStarted bool `json:"interactive_session_started"`
+ OutputStorageClasses []string `json:"output_storage_classes"`
+ RuntimeUserUUID string `json:"runtime_user_uuid"`
+ RuntimeAuthScopes []string `json:"runtime_auth_scopes"`
+ RuntimeToken string `json:"runtime_token"`
+ AuthUUID string `json:"auth_uuid"`
+ Log string `json:"log"`
+ Cost float64 `json:"cost"`
+ SubrequestsCost float64 `json:"subrequests_cost"`
+ Service bool `json:"service"`
+ PublishedPorts map[string]PublishedPort `json:"published_ports"`
}
// ContainerRequest is an arvados#container_request resource.
type ContainerRequest struct {
- UUID string `json:"uuid"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- Href string `json:"href"`
- Etag string `json:"etag"`
- Name string `json:"name"`
- Description string `json:"description"`
- Properties map[string]interface{} `json:"properties"`
- State ContainerRequestState `json:"state"`
- RequestingContainerUUID string `json:"requesting_container_uuid"`
- ContainerUUID string `json:"container_uuid"`
- ContainerCountMax int `json:"container_count_max"`
- Mounts map[string]Mount `json:"mounts"`
- RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
- SchedulingParameters SchedulingParameters `json:"scheduling_parameters"`
- ContainerImage string `json:"container_image"`
- Environment map[string]string `json:"environment"`
- Cwd string `json:"cwd"`
- Command []string `json:"command"`
- OutputPath string `json:"output_path"`
- OutputName string `json:"output_name"`
- OutputTTL int `json:"output_ttl"`
- Priority int `json:"priority"`
- UseExisting bool `json:"use_existing"`
- LogUUID string `json:"log_uuid"`
- OutputUUID string `json:"output_uuid"`
- RuntimeToken string `json:"runtime_token"`
- ExpiresAt time.Time `json:"expires_at"`
- Filters []Filter `json:"filters"`
- ContainerCount int `json:"container_count"`
- OutputStorageClasses []string `json:"output_storage_classes"`
- OutputProperties map[string]interface{} `json:"output_properties"`
- CumulativeCost float64 `json:"cumulative_cost"`
+ UUID string `json:"uuid"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ ModifiedAt time.Time `json:"modified_at"`
+ Etag string `json:"etag"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Properties map[string]interface{} `json:"properties"`
+ State ContainerRequestState `json:"state"`
+ RequestingContainerUUID string `json:"requesting_container_uuid"`
+ ContainerUUID string `json:"container_uuid"`
+ ContainerCountMax int `json:"container_count_max"`
+ Mounts map[string]Mount `json:"mounts"`
+ RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
+ SchedulingParameters SchedulingParameters `json:"scheduling_parameters"`
+ ContainerImage string `json:"container_image"`
+ Environment map[string]string `json:"environment"`
+ Cwd string `json:"cwd"`
+ Command []string `json:"command"`
+ OutputPath string `json:"output_path"`
+ OutputGlob []string `json:"output_glob"`
+ OutputName string `json:"output_name"`
+ OutputTTL int `json:"output_ttl"`
+ Priority int `json:"priority"`
+ UseExisting bool `json:"use_existing"`
+ LogUUID string `json:"log_uuid"`
+ OutputUUID string `json:"output_uuid"`
+ RuntimeToken string `json:"runtime_token"`
+ ExpiresAt time.Time `json:"expires_at"`
+ Filters []Filter `json:"filters"`
+ ContainerCount int `json:"container_count"`
+ OutputStorageClasses []string `json:"output_storage_classes"`
+ OutputProperties map[string]interface{} `json:"output_properties"`
+ CumulativeCost float64 `json:"cumulative_cost"`
+ Service bool `json:"service"`
+ PublishedPorts map[string]RequestPublishedPort `json:"published_ports"`
}
// Mount is special behavior to attach to a filesystem path or device.
@@ -94,26 +97,25 @@ type Mount struct {
Content interface{} `json:"content"`
ExcludeFromOutput bool `json:"exclude_from_output"`
Capacity int64 `json:"capacity"`
- Commit string `json:"commit"` // only if kind=="git_tree"
- RepositoryName string `json:"repository_name"` // only if kind=="git_tree"
- GitURL string `json:"git_url"` // only if kind=="git_tree"
}
-type CUDARuntimeConstraints struct {
- DriverVersion string `json:"driver_version"`
- HardwareCapability string `json:"hardware_capability"`
- DeviceCount int `json:"device_count"`
+type GPURuntimeConstraints struct {
+ Stack string `json:"stack"`
+ DriverVersion string `json:"driver_version"`
+ HardwareTarget []string `json:"hardware_target"`
+ DeviceCount int `json:"device_count"`
+ VRAM int64 `json:"vram"`
}
// RuntimeConstraints specify a container's compute resources (RAM,
// CPU) and network connectivity.
type RuntimeConstraints struct {
- API bool `json:"API"`
- RAM int64 `json:"ram"`
- VCPUs int `json:"vcpus"`
- KeepCacheRAM int64 `json:"keep_cache_ram"`
- KeepCacheDisk int64 `json:"keep_cache_disk"`
- CUDA CUDARuntimeConstraints `json:"cuda"`
+ API bool `json:"API"`
+ RAM int64 `json:"ram"`
+ VCPUs int `json:"vcpus"`
+ KeepCacheRAM int64 `json:"keep_cache_ram"`
+ KeepCacheDisk int64 `json:"keep_cache_disk"`
+ GPU GPURuntimeConstraints `json:"gpu"`
}
// SchedulingParameters specify a container's scheduling parameters
@@ -166,3 +168,22 @@ type ContainerStatus struct {
State ContainerState `json:"container_state"`
SchedulingStatus string `json:"scheduling_status"`
}
+
+type PublishedPort struct {
+ RequestPublishedPort
+ BaseURL string `json:"base_url"`
+ InitialURL string `json:"initial_url"`
+}
+
+type RequestPublishedPort struct {
+ Access PublishedPortAccess `json:"access"`
+ Label string `json:"label"`
+ InitialPath string `json:"initial_path"`
+}
+
+type PublishedPortAccess string
+
+const (
+ PublishedPortAccessPrivate = PublishedPortAccess("private")
+ PublishedPortAccessPublic = PublishedPortAccess("public")
+)
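(Illustrative sketch, not part of this patch: how a client might exercise the new output_glob, gpu, service, and published_ports fields when creating a container request. NewClientFromEnv and RequestAndDecode are existing SDK helpers; the image name, glob, and GPU values here are invented for the example.)

package main

import (
	"fmt"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	client := arvados.NewClientFromEnv()
	var cr arvados.ContainerRequest
	err := client.RequestAndDecode(&cr, "POST", "arvados/v1/container_requests", nil,
		map[string]interface{}{
			"container_request": map[string]interface{}{
				"command":         []string{"nvidia-smi"},
				"container_image": "arvados/jobs", // invented
				"output_path":     "/out",
				"output_glob":     []string{"**/*.log"}, // new field
				"runtime_constraints": map[string]interface{}{
					"vcpus": 2,
					"ram":   2 << 30,
					"gpu": map[string]interface{}{ // new constraint, replaces "cuda"
						"stack":           "cuda",
						"device_count":    1,
						"driver_version":  "12.0",
						"hardware_target": []string{"8.6"},
						"vram":            16 << 30,
					},
				},
				"service": true, // new field
				"published_ports": map[string]interface{}{ // new field
					"8888": map[string]interface{}{
						"access":       arvados.PublishedPortAccessPrivate,
						"label":        "notebook",
						"initial_path": "/",
					},
				},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cr.UUID, cr.RuntimeConstraints.GPU.Stack)
}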
diff --git a/sdk/go/arvados/container_gateway.go b/sdk/go/arvados/container_gateway.go
index 897ae434e1..51f3d3b8a4 100644
--- a/sdk/go/arvados/container_gateway.go
+++ b/sdk/go/arvados/container_gateway.go
@@ -11,27 +11,26 @@ import (
"sync"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
"github.com/sirupsen/logrus"
)
func (cresp ConnectionResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
defer cresp.Conn.Close()
- hj, ok := w.(http.Hijacker)
- if !ok {
- http.Error(w, "ResponseWriter does not support connection upgrade", http.StatusInternalServerError)
+ conn, bufrw, err := http.NewResponseController(w).Hijack()
+ if err != nil {
+ http.Error(w, "connection upgrade failed: "+err.Error(), http.StatusInternalServerError)
return
}
+ defer conn.Close()
+ conn.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n"))
w.Header().Set("Connection", "upgrade")
for k, v := range cresp.Header {
w.Header()[k] = v
}
- w.WriteHeader(http.StatusSwitchingProtocols)
- conn, bufrw, err := hj.Hijack()
- if err != nil {
- ctxlog.FromContext(req.Context()).WithError(err).Error("error hijacking ResponseWriter")
- return
- }
- defer conn.Close()
+ w.Header().Write(conn)
+ conn.Write([]byte("\r\n"))
+ httpserver.ExemptFromDeadline(req)
var bytesIn, bytesOut int64
ctx, cancel := context.WithCancel(req.Context())
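(Sketch, not part of this patch: the connection-upgrade pattern this hunk adopts. http.NewResponseController, available since Go 1.20, replaces the manual http.Hijacker type assertion, and the 101 status line and headers are written directly to the hijacked connection -- simplified here to a literal header block.)

package sketch

import (
	"io"
	"net/http"
)

// upgrade hijacks the client connection, emits the 101 response by hand
// (the ResponseWriter is bypassed after Hijack), then shuttles bytes in
// both directions until either side closes.
func upgrade(w http.ResponseWriter, req *http.Request, backend io.ReadWriteCloser) {
	defer backend.Close()
	conn, bufrw, err := http.NewResponseController(w).Hijack()
	if err != nil {
		http.Error(w, "connection upgrade failed: "+err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()
	conn.Write([]byte("HTTP/1.1 101 Switching Protocols\r\nConnection: upgrade\r\n\r\n"))
	go io.Copy(backend, bufrw) // client -> backend (bufrw may hold already-buffered bytes)
	io.Copy(conn, backend)     // backend -> client
}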
diff --git a/sdk/go/arvados/credential.go b/sdk/go/arvados/credential.go
new file mode 100644
index 0000000000..f1c7fea13c
--- /dev/null
+++ b/sdk/go/arvados/credential.go
@@ -0,0 +1,32 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// Credential is an arvados#credential record
+type Credential struct {
+ UUID string `json:"uuid,omitempty"`
+ Etag string `json:"etag"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ CredentialClass string `json:"credential_class"`
+ Scopes []string `json:"scopes"`
+ ExternalId string `json:"external_id"`
+ Secret string `json:"secret,omitempty"`
+ ExpiresAt time.Time `json:"expires_at"`
+}
+
+// CredentialList is an arvados#credentialList resource.
+type CredentialList struct {
+ Items []Credential `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Offset int `json:"offset"`
+ Limit int `json:"limit"`
+}
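(Sketch, not part of this patch: listing the new credential records from client code. The "arvados/v1/credentials" path is an assumption by analogy with the other arvados#... resources; Secret is tagged omitempty and is not expected to appear in list responses.)

package main

import (
	"fmt"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	client := arvados.NewClientFromEnv()
	var list arvados.CredentialList
	err := client.RequestAndDecode(&list, "GET", "arvados/v1/credentials", nil,
		map[string]interface{}{"limit": 10})
	if err != nil {
		log.Fatal(err)
	}
	for _, cred := range list.Items {
		fmt.Println(cred.UUID, cred.Name, cred.CredentialClass, cred.ExpiresAt)
	}
}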
diff --git a/sdk/go/arvados/fs_backend.go b/sdk/go/arvados/fs_backend.go
index cc4c32ffe9..e58037a38f 100644
--- a/sdk/go/arvados/fs_backend.go
+++ b/sdk/go/arvados/fs_backend.go
@@ -24,6 +24,7 @@ type keepBackend struct {
type keepClient interface {
ReadAt(locator string, p []byte, off int) (int, error)
+ BlockRead(context.Context, BlockReadOptions) (int, error)
BlockWrite(context.Context, BlockWriteOptions) (BlockWriteResponse, error)
LocalLocator(locator string) (string, error)
}
@@ -36,8 +37,9 @@ var errStubClient = errors.New("stub client")
type StubClient struct{}
-func (*StubClient) ReadAt(string, []byte, int) (int, error) { return 0, errStubClient }
-func (*StubClient) LocalLocator(loc string) (string, error) { return loc, nil }
+func (*StubClient) ReadAt(string, []byte, int) (int, error) { return 0, errStubClient }
+func (*StubClient) LocalLocator(loc string) (string, error) { return loc, nil }
+func (*StubClient) BlockRead(context.Context, BlockReadOptions) (int, error) { return 0, errStubClient }
func (*StubClient) BlockWrite(context.Context, BlockWriteOptions) (BlockWriteResponse, error) {
return BlockWriteResponse{}, errStubClient
}
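(Sketch, not part of this patch: the new BlockRead method makes it possible to probe the local cache without fetching, which the repacking code below relies on via CheckCacheOnly. Treating a nil error as "cached" follows the stub in this patch's tests.)

package sketch

import (
	"context"
	"io"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

type blockReader interface {
	BlockRead(context.Context, arvados.BlockReadOptions) (int, error)
}

// cachedLocally reports whether the given block is already cached,
// without transferring its data (nothing is written to WriteTo when
// CheckCacheOnly is set).
func cachedLocally(ctx context.Context, kc blockReader, locator string) bool {
	_, err := kc.BlockRead(ctx, arvados.BlockReadOptions{
		Locator:        locator,
		WriteTo:        io.Discard,
		CheckCacheOnly: true,
	})
	return err == nil
}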
diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go
index 430a0d4c9b..6ba601b0c7 100644
--- a/sdk/go/arvados/fs_base.go
+++ b/sdk/go/arvados/fs_base.go
@@ -22,7 +22,6 @@ import (
var (
ErrReadOnlyFile = errors.New("read-only file")
ErrNegativeOffset = errors.New("cannot seek to negative offset")
- ErrFileExists = errors.New("file exists")
ErrInvalidOperation = errors.New("invalid operation")
ErrInvalidArgument = errors.New("invalid argument")
ErrDirectoryNotEmpty = errors.New("directory not empty")
@@ -536,7 +535,7 @@ func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*fileha
return nil, ErrInvalidArgument
}
} else if flag&os.O_EXCL != 0 {
- return nil, ErrFileExists
+ return nil, os.ErrExist
} else if flag&os.O_TRUNC != 0 {
if !writable {
return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode")
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 052cc1aa37..a2694a7049 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -8,11 +8,13 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
"path"
"regexp"
+ "slices"
"sort"
"strconv"
"strings"
@@ -37,6 +39,25 @@ type CollectionFileSystem interface {
// prepended to all paths in the returned manifest.
MarshalManifest(prefix string) (string, error)
+ // Given map {x->y}, replace each occurrence of x with y.
+ // Except: If segment x is not referenced anywhere in the
+ // collection, do not make any replacements that reference the
+ // same locator as y. The first return value is true if any
+ // substitutions were made.
+ ReplaceSegments(map[BlockSegment]BlockSegment) (bool, error)
+
+ // If needed, combine small stored blocks into larger blocks
+ // and update the in-memory representation to reference the
+ // larger blocks. Returns the number of (small) blocks that
+ // were replaced.
+ //
+ // After repacking, Sync() will persist the repacking results
+ // and load the server's latest version of the collection,
+ // reverting any other local changes. To repack without
+ // abandoning local changes, call Sync, then Repack, then Sync
+ // again.
+ Repack(context.Context, RepackOptions) (int, error)
+
// Total data bytes in all files.
Size() int64
}
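(Sketch, not part of this patch: the Sync-Repack-Sync recipe prescribed by the Repack comment above, written out as client code. RepackOptions fields are as used elsewhere in this patch.)

package sketch

import (
	"context"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

// repackPreservingChanges flushes local changes before repacking,
// because the Sync that persists a Repack reloads the server's copy and
// would otherwise discard any unsaved local edits.
func repackPreservingChanges(ctx context.Context, fs arvados.CollectionFileSystem) (int, error) {
	if err := fs.Sync(); err != nil {
		return 0, err
	}
	n, err := fs.Repack(ctx, arvados.RepackOptions{CachedOnly: true})
	if err != nil || n == 0 {
		return n, err
	}
	// Persist the replace_segments update and reload.
	return n, fs.Sync()
}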
@@ -49,6 +70,10 @@ type collectionFileSystem struct {
// PDH returned by the server as of last sync/load.
loadedPDH atomic.Value
+ // Time when the most recently synced version was retrieved
+ // from the server, if any. See checkChangesOnServer.
+ reloadTime time.Time
+ reloadTimeMtx sync.Mutex
// PDH of the locally generated manifest as of last
// sync/load. This can differ from loadedPDH after loading a
// version that was generated with different code and sorts
@@ -66,6 +91,11 @@ type collectionFileSystem struct {
guessSignatureTTL time.Duration
holdCheckChanges time.Time
lockCheckChanges sync.Mutex
+
+ // Pending updates to send via replace_segments. See
+ // repackTree().
+ repacked map[BlockSegment]BlockSegment
+ repackedMtx sync.Mutex
}
// FileSystem returns a CollectionFileSystem for the collection.
@@ -93,7 +123,13 @@ func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFile
name: ".",
mode: os.ModeDir | 0755,
modTime: modTime,
- sys: func() interface{} { return c },
+ sys: func() interface{} {
+ return &Collection{
+ UUID: fs.uuid,
+ PortableDataHash: fs.loadedPDH.Load().(string),
+ Properties: c.Properties,
+ }
+ },
},
inodes: make(map[string]inode),
},
@@ -334,6 +370,7 @@ func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
loadedPDH, _ := fs.loadedPDH.Load().(string)
getparams := map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}}
if fs.uuid != "" {
+ reloadTime := time.Now()
var coll Collection
err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+fs.uuid, nil, getparams)
if err != nil {
@@ -343,6 +380,21 @@ func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
// collection has changed upstream since we
// last loaded or saved. Refresh local data,
// losing any unsaved local changes.
+ fs.reloadTimeMtx.Lock()
+ defer fs.reloadTimeMtx.Unlock()
+ if fs.reloadTime.After(reloadTime) {
+ // Another goroutine called
+ // checkChangesOnServer after we
+ // started, and already updated the
+ // collection. This means their GET
+ // request started after our caller
+ // called Sync, so their response is
+ // new enough to be consistent with
+ // our semantics. The converse is not
+ // true, so the only safe option is to
+ // leave their update in place.
+ return true, nil
+ }
newfs, err := coll.FileSystem(fs.fileSystem.fsBackend, fs.fileSystem.fsBackend)
if err != nil {
return false, err
@@ -357,6 +409,7 @@ func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
}
fs.loadedPDH.Store(coll.PortableDataHash)
fs.savedPDH.Store(newfs.(*collectionFileSystem).savedPDH.Load())
+ fs.reloadTime = reloadTime
return true, nil
}
fs.updateSignatures(coll.ManifestText)
@@ -412,6 +465,20 @@ func (fs *collectionFileSystem) refreshSignature(locator string) string {
}
func (fs *collectionFileSystem) Sync() error {
+ fs.repackedMtx.Lock()
+ if len(fs.repacked) > 0 {
+ err := fs.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+fs.uuid, nil, map[string]interface{}{
+ "select": []string{"portable_data_hash"},
+ "replace_segments": fs.repacked,
+ })
+ if err != nil {
+ fs.repackedMtx.Unlock()
+ return fmt.Errorf("sync failed: replace_segments %s: %w", fs.uuid, err)
+ }
+ fs.repacked = nil
+ }
+ fs.repackedMtx.Unlock()
+
refreshed, err := fs.checkChangesOnServer(true)
if err != nil {
return err
@@ -510,6 +577,296 @@ func (fs *collectionFileSystem) Splice(r inode) error {
return fs.fileSystem.root.Splice(r)
}
+func (fs *collectionFileSystem) Repack(ctx context.Context, opts RepackOptions) (int, error) {
+ return fs.repackTree(ctx, opts, fs.root.(*dirnode))
+}
+
+func (fs *collectionFileSystem) repackTree(ctx context.Context, opts RepackOptions, root *dirnode) (int, error) {
+ fs.fileSystem.root.Lock()
+ plan, err := fs.planRepack(ctx, opts, fs.root.(*dirnode))
+ fs.fileSystem.root.Unlock()
+ if err != nil {
+ return 0, err
+ }
+ if opts.DryRun {
+ return len(plan), nil
+ }
+ repacked, err := fs.repackData(ctx, plan)
+ if err != nil {
+ return 0, err
+ }
+ replaced, err := fs.replaceSegments(repacked)
+ if err != nil {
+ return 0, err
+ }
+ nReplaced := len(replaced)
+
+ fs.repackedMtx.Lock()
+ if len(repacked) == 0 {
+ // nothing to save
+ } else if len(fs.repacked) == 0 {
+ fs.repacked = repacked
+ } else {
+ // Merge new repacking results with existing unsaved
+ // fs.repacked map.
+ for orig, repl := range fs.repacked {
+ // If a previous repack saved
+ // fs.repacked[A]==B, and now we have
+ // repacked[B]==C, then next time we sync to
+ // the server, we should replace A with C
+ // instead of B. So we set repacked[A]=C.
+ if newrepl, ok := repacked[repl.StripAllHints()]; ok {
+ repacked[orig] = newrepl
+ } else {
+ repacked[orig] = repl
+ }
+ }
+ fs.repacked = repacked
+ }
+ fs.repackedMtx.Unlock()
+
+ return nReplaced, nil
+}
+
+func (fs *collectionFileSystem) ReplaceSegments(m map[BlockSegment]BlockSegment) (bool, error) {
+ changed, err := fs.replaceSegments(m)
+ return len(changed) > 0, err
+}
+
+func (fs *collectionFileSystem) replaceSegments(m map[BlockSegment]BlockSegment) (map[BlockSegment]BlockSegment, error) {
+ fs.fileSystem.root.Lock()
+ defer fs.fileSystem.root.Unlock()
+ missing := make(map[BlockSegment]bool, len(m))
+ for orig := range m {
+ orig.Locator = stripAllHints(orig.Locator)
+ missing[orig] = true
+ }
+ fs.fileSystem.root.(*dirnode).walkSegments(func(seg segment) segment {
+ if seg, ok := seg.(storedSegment); ok {
+ delete(missing, seg.blockSegment().StripAllHints())
+ }
+ return seg
+ })
+ skip := make(map[string]bool)
+ for orig, repl := range m {
+ orig.Locator = stripAllHints(orig.Locator)
+ if missing[orig] {
+ skip[repl.Locator] = true
+ }
+ }
+ todo := make(map[BlockSegment]storedSegment, len(m))
+ toks := make([][]byte, 3)
+ for orig, repl := range m {
+ if !skip[repl.Locator] {
+ orig.Locator = stripAllHints(orig.Locator)
+ if orig.Length != repl.Length {
+ return nil, fmt.Errorf("mismatched length: replacing segment length %d with segment length %d", orig.Length, repl.Length)
+ }
+ if splitToToks([]byte(repl.Locator), '+', toks) < 2 {
+ return nil, errors.New("invalid replacement locator")
+ }
+ blksize, err := strconv.ParseInt(string(toks[1]), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid size hint in replacement locator: %w", err)
+ }
+ if repl.Offset+repl.Length > int(blksize) {
+ return nil, fmt.Errorf("invalid replacement: offset %d + length %d > block size %d", repl.Offset, repl.Length, blksize)
+ }
+ todo[orig] = storedSegment{
+ locator: repl.Locator,
+ offset: repl.Offset,
+ size: int(blksize),
+ }
+ }
+ }
+ changed := make(map[BlockSegment]BlockSegment, len(todo))
+ fs.fileSystem.root.(*dirnode).walkSegments(func(s segment) segment {
+ seg, ok := s.(storedSegment)
+ if !ok {
+ return s
+ }
+ orig := seg.blockSegment().StripAllHints()
+ repl, ok := todo[orig]
+ if !ok {
+ return s
+ }
+ seg.locator = repl.locator
+ seg.offset = repl.offset
+ seg.size = repl.size
+ // (leave seg.kc and seg.length unchanged)
+ changed[orig] = seg.blockSegment()
+ return seg
+ })
+ return changed, nil
+}
+
+// See (*collectionFileSystem)planRepack.
+type repackBucketThreshold struct {
+ maxIn int
+ minOut int
+}
+
+var fullRepackBucketThresholds = []repackBucketThreshold{
+ {maxIn: 1 << 25, minOut: 1 << 25},
+}
+
+var repackBucketThresholds = []repackBucketThreshold{
+ {maxIn: 1 << 23, minOut: 1 << 25},
+ {maxIn: 1 << 21, minOut: 1 << 24},
+ {maxIn: 1 << 19, minOut: 1 << 22},
+ {maxIn: 1 << 17, minOut: 1 << 20},
+ {maxIn: 1 << 15, minOut: 1 << 18},
+ {maxIn: 1 << 13, minOut: 1 << 16},
+ {maxIn: 1 << 11, minOut: 1 << 14},
+ {maxIn: 1 << 9, minOut: 1 << 12},
+ {maxIn: 1 << 7, minOut: 1 << 10},
+ {maxIn: 1 << 5, minOut: 1 << 8},
+ {maxIn: 1 << 3, minOut: 1 << 6},
+}
+
+// Produce a list of segment merges that would result in a more
+// efficient packing. Each element in the returned plan is a slice of
+// 2+ segments with a combined length no greater than maxBlockSize.
+//
+// Caller must have lock on given root node.
+func (fs *collectionFileSystem) planRepack(ctx context.Context, opts RepackOptions, root *dirnode) (plan [][]storedSegment, err error) {
+ var thresholds []repackBucketThreshold
+ if opts.Full {
+ thresholds = fullRepackBucketThresholds
+ } else {
+ thresholds = repackBucketThresholds
+ }
+ // TODO: depending on opts, plan as if large but underutilized
+ // blocks are short blocks.
+ blockSize := make(map[string]int)
+ bucketBlocks := make([][]string, len(thresholds))
+ root.walkSegments(func(seg segment) segment {
+ if ss, ok := seg.(storedSegment); ok {
+ if opts.CachedOnly {
+ if _, err := ss.kc.BlockRead(ctx, BlockReadOptions{
+ Locator: ss.locator,
+ CheckCacheOnly: true,
+ WriteTo: io.Discard,
+ }); err != nil {
+ return seg
+ }
+ }
+ hash := stripAllHints(ss.locator)
+ if blockSize[hash] == 0 {
+ blockSize[hash] = ss.size
+ for bucket, threshold := range thresholds {
+ if ss.size >= threshold.maxIn {
+ break
+ }
+ bucketBlocks[bucket] = append(bucketBlocks[bucket], hash)
+ }
+ }
+ }
+ return seg
+ })
+ // blockPlan[oldhash] == idx means plan[idx] will merge all
+ // segments that reference block oldhash into a new block.
+ blockPlan := make(map[string]int)
+ pending := []string{}
+ for bucket := range bucketBlocks {
+ pending = pending[:0]
+ pendingSize := 0
+ for _, hash := range bucketBlocks[bucket] {
+ if _, planned := blockPlan[hash]; planned || slices.Contains(pending, hash) {
+ // already planned to merge this block
+ continue
+ }
+ size := blockSize[hash]
+ if pendingSize+size > maxBlockSize {
+ for _, hash := range pending {
+ blockPlan[hash] = len(plan)
+ }
+ plan = append(plan, nil)
+ pending = pending[:0]
+ pendingSize = 0
+ }
+ pendingSize += size
+ pending = append(pending, hash)
+ }
+ if pendingSize >= thresholds[bucket].minOut {
+ for _, hash := range pending {
+ blockPlan[hash] = len(plan)
+ }
+ plan = append(plan, nil)
+ }
+ }
+ // We have decided which blocks to merge. Now we collect all
+ // of the segments that reference those blocks, and return
+ // that as the final plan.
+ done := make(map[storedSegment]bool)
+ root.walkSegments(func(seg segment) segment {
+ ss, ok := seg.(storedSegment)
+ if !ok {
+ return seg
+ }
+ hash := stripAllHints(ss.locator)
+ if idx, planning := blockPlan[hash]; planning && !done[ss] {
+ plan[idx] = append(plan[idx], ss)
+ done[ss] = true
+ }
+ return seg
+ })
+ return plan, nil
+}
+
+// Given a plan returned by planRepack, write new blocks with the
+// merged segment data, and return a replacement mapping suitable for
+// ReplaceSegments.
+func (fs *collectionFileSystem) repackData(ctx context.Context, plan [][]storedSegment) (repl map[BlockSegment]BlockSegment, err error) {
+ if len(plan) == 0 {
+ return
+ }
+ repl = make(map[BlockSegment]BlockSegment)
+ for _, insegments := range plan {
+ // TODO: concurrency > 1
+ outsize := 0
+ for _, insegment := range insegments {
+ outsize += insegment.length
+ }
+ if outsize > maxBlockSize {
+ return nil, fmt.Errorf("combined length %d would exceed maximum block size %d", outsize, maxBlockSize)
+ }
+ piper, pipew := io.Pipe()
+ go func() {
+ for _, insegment := range insegments {
+ n, err := io.Copy(pipew, io.NewSectionReader(insegment, 0, int64(insegment.length)))
+ if err != nil {
+ pipew.CloseWithError(err)
+ return
+ }
+ if n != int64(insegment.length) {
+ pipew.CloseWithError(fmt.Errorf("internal error: copied %d bytes, expected %d", n, insegment.length))
+ return
+ }
+ if ctx.Err() != nil {
+ pipew.CloseWithError(ctx.Err())
+ return
+ }
+ }
+ pipew.Close()
+ }()
+ wrote, err := fs.BlockWrite(ctx, BlockWriteOptions{Reader: piper, DataSize: outsize})
+ if err != nil {
+ return nil, err
+ }
+ offset := 0
+ for _, insegment := range insegments {
+ repl[insegment.blockSegment().StripAllHints()] = BlockSegment{
+ Locator: wrote.Locator,
+ Offset: offset,
+ Length: insegment.length,
+ }
+ offset += insegment.length
+ }
+ }
+ return
+}
+
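(Self-contained illustration, not part of this patch: the bucketing rule in planRepack above. A block is a merge candidate in every bucket whose maxIn exceeds the block's size, and a bucket's candidates are only merged once at least minOut bytes are pending; threshold values are copied from repackBucketThresholds, abbreviated.)

package main

import "fmt"

type threshold struct{ maxIn, minOut int }

var thresholds = []threshold{
	{maxIn: 1 << 23, minOut: 1 << 25}, // blocks < 8 MiB merge into >= 32 MiB
	{maxIn: 1 << 21, minOut: 1 << 24}, // blocks < 2 MiB merge into >= 16 MiB
	// ...continues down to {1 << 3, 1 << 6} as in the patch
}

func main() {
	size := 1 << 20 // a 1 MiB block
	for bucket, t := range thresholds {
		if size >= t.maxIn {
			break // too big for this bucket and all smaller ones
		}
		fmt.Printf("bucket %d: candidate (merged once >= %d bytes pending)\n", bucket, t.minOut)
	}
}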
// filenodePtr is an offset into a file that is (usually) efficient to
// seek to. Specifically, if filenode.repacked==filenodePtr.repacked
// then
@@ -946,10 +1303,15 @@ func (fn *filenode) Snapshot() (inode, error) {
for _, seg := range fn.segments {
segments = append(segments, seg.Slice(0, seg.Len()))
}
- return &filenode{
+ newfn := &filenode{
fileinfo: fn.fileinfo,
segments: segments,
- }, nil
+ }
+ // Clear references to the original filesystem, otherwise the
+ // snapshot will prevent the old filesystem from being garbage
+ // collected.
+ newfn.setFS(nil)
+ return newfn, nil
}
func (fn *filenode) Splice(repl inode) error {
@@ -973,13 +1335,24 @@ func (fn *filenode) Splice(repl inode) error {
case *filenode:
repl.parent = fn.parent
repl.fileinfo.name = fn.fileinfo.name
- repl.fs = fn.fs
+ repl.setFS(fn.fs)
default:
return fmt.Errorf("cannot splice snapshot containing %T: %w", repl, ErrInvalidArgument)
}
return nil
}
+// Caller must have lock.
+func (fn *filenode) setFS(fs *collectionFileSystem) {
+ fn.fs = fs
+ for i, seg := range fn.segments {
+ if ss, ok := seg.(storedSegment); ok {
+ ss.kc = fs
+ fn.segments[i] = ss
+ }
+ }
+}
+
type dirnode struct {
fs *collectionFileSystem
treenode
@@ -1351,6 +1724,25 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, flush boo
return rootdir + strings.Join(subdirs, ""), err
}
+// splitToToks is similar to bytes.SplitN(token, []byte{c}, 3), but
+// splits into the toks slice rather than allocating a new one, and
+// returns the number of toks (1, 2, or 3).
+func splitToToks(src []byte, c rune, toks [][]byte) int {
+ c1 := bytes.IndexRune(src, c)
+ if c1 < 0 {
+ toks[0] = src
+ return 1
+ }
+ toks[0], src = src[:c1], src[c1+1:]
+ c2 := bytes.IndexRune(src, c)
+ if c2 < 0 {
+ toks[1] = src
+ return 2
+ }
+ toks[1], toks[2] = src[:c2], src[c2+1:]
+ return 3
+}
+
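(Sketch, within this package, not part of this patch: what splitToToks yields for a typical signed locator; the hash and signature are invented.)

toks := make([][]byte, 3)
n := splitToToks([]byte("d41d8cd98f00b204e9800998ecf8427e+0+Asignature"), '+', toks)
// n == 3; toks[0] is the hash, toks[1] is "0" (the size hint), and
// toks[2] is "Asignature" -- everything after the second '+' is left
// unsplit, which is all the callers here need.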
func (dn *dirnode) loadManifest(txt string) error {
streams := bytes.Split([]byte(txt), []byte{'\n'})
if len(streams[len(streams)-1]) != 0 {
@@ -1358,6 +1750,10 @@ func (dn *dirnode) loadManifest(txt string) error {
}
streams = streams[:len(streams)-1]
segments := []storedSegment{}
+ // streamoffset[n] is the position in the stream of the nth
+ // block, i.e., ∑ segments[j].size ∀ 0≤j<n
+ streamoffset := []int64{0}
- if pos > offset {
- // Can't continue where we left off.
- // TODO: binary search instead of
- // rewinding all the way (but this
- // situation might be rare anyway)
- segIdx, pos = 0, 0
+ if segIdx < len(segments) && streamoffset[segIdx] <= offset && streamoffset[segIdx+1] > offset {
+ // common case with an easy
+ // optimization: start where the
+ // previous segment ended
+ } else if guess := int(offset >> 26); guess >= 0 && guess < len(segments) && streamoffset[guess] <= offset && streamoffset[guess+1] > offset {
+ // another common case with an easy
+ // optimization: all blocks are 64 MiB
+ // (or close enough)
+ segIdx = guess
+ } else {
+ // general case
+ segIdx = sort.Search(len(segments), func(i int) bool {
+ return streamoffset[i+1] > offset
+ })
}
for ; segIdx < len(segments); segIdx++ {
- seg := segments[segIdx]
- next := pos + int64(seg.Len())
- if next <= offset || seg.Len() == 0 {
- pos = next
- continue
- }
- if pos >= offset+length {
+ blkStart := streamoffset[segIdx]
+ if blkStart >= offset+length {
break
}
+ seg := &segments[segIdx]
+ if seg.size == 0 {
+ continue
+ }
var blkOff int
- if pos < offset {
- blkOff = int(offset - pos)
+ if blkStart < offset {
+ blkOff = int(offset - blkStart)
}
- blkLen := seg.Len() - blkOff
- if pos+int64(blkOff+blkLen) > offset+length {
- blkLen = int(offset + length - pos - int64(blkOff))
+ blkLen := seg.size - blkOff
+ if blkStart+int64(seg.size) > offset+length {
+ blkLen = int(offset + length - blkStart - int64(blkOff))
}
fnode.appendSegment(storedSegment{
kc: dn.fs,
@@ -1482,14 +1877,9 @@ func (dn *dirnode) loadManifest(txt string) error {
offset: blkOff,
length: blkLen,
})
- if next > offset+length {
- break
- } else {
- pos = next
- }
}
- if segIdx == len(segments) && pos < offset+length {
- return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
+ if segIdx == len(segments) && streamoffset[segIdx] < offset+length {
+ return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, streamoffset[segIdx], token)
}
}
if !anyFileTokens {
@@ -1537,7 +1927,7 @@ func (dn *dirnode) createFileAndParents(names []string) (fn *filenode, err error
child.SetParent(node, name)
return child, nil
} else if !child.IsDir() {
- return child, ErrFileExists
+ return child, os.ErrExist
} else {
return child, nil
}
@@ -1627,6 +2017,9 @@ func (dn *dirnode) Splice(repl inode) error {
dn.Lock()
defer dn.Unlock()
dn.inodes = repl.inodes
+ for name, child := range dn.inodes {
+ child.SetParent(dn, name)
+ }
dn.setTreeFS(dn.fs)
case *filenode:
dn.parent.Lock()
@@ -1654,7 +2047,7 @@ func (dn *dirnode) Splice(repl inode) error {
if err != nil {
return fmt.Errorf("error replacing filenode: dn.parent.Child(): %w", err)
}
- repl.fs = dn.fs
+ repl.setFS(dn.fs)
}
return nil
}
@@ -1666,11 +2059,39 @@ func (dn *dirnode) setTreeFS(fs *collectionFileSystem) {
case *dirnode:
child.setTreeFS(fs)
case *filenode:
- child.fs = fs
+ child.setFS(fs)
}
}
}
+// walkSegments visits all file data in the tree beneath dn, calling
+// fn on each segment and replacing it with fn's return value.
+//
+// caller must have lock.
+func (dn *dirnode) walkSegments(fn func(segment) segment) {
+ // Visit all segments in files, then traverse subdirectories.
+ // This way planRepack will tend to repack siblings together.
+ names := dn.sortedNames()
+ for _, name := range names {
+ child := dn.inodes[name]
+ child.Lock()
+ if child, ok := child.(*filenode); ok {
+ for i, seg := range child.segments {
+ child.segments[i] = fn(seg)
+ }
+ }
+ child.Unlock()
+ }
+ for _, name := range names {
+ child := dn.inodes[name]
+ child.Lock()
+ if child, ok := child.(*dirnode); ok {
+ child.walkSegments(fn)
+ }
+ child.Unlock()
+ }
+}
+
type segment interface {
io.ReaderAt
Len() int
@@ -1801,6 +2222,14 @@ func (se storedSegment) memorySize() int64 {
return 64 + int64(len(se.locator))
}
+func (se storedSegment) blockSegment() BlockSegment {
+ return BlockSegment{
+ Locator: se.locator,
+ Offset: se.offset,
+ Length: se.length,
+ }
+}
+
func canonicalName(name string) string {
name = path.Clean("/" + name)
if name == "/" || name == "./" {
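(Package-internal sketch, not part of this patch: a typical walkSegments traversal -- counting the distinct storage blocks referenced anywhere in the tree; compare blocksInManifest in the tests below. Per walkSegments' contract, the caller must hold the lock.)

func countBlocks(cfs *collectionFileSystem) int {
	blocks := make(map[string]bool)
	cfs.fileSystem.root.(*dirnode).walkSegments(func(s segment) segment {
		if ss, ok := s.(storedSegment); ok {
			blocks[stripAllHints(ss.locator)] = true
		}
		return s
	})
	return len(blocks)
}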
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index a29371b76c..35fbcd3269 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -11,10 +11,13 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"io/ioutil"
"math/rand"
"net/http"
"os"
+ "os/exec"
+ "path/filepath"
"regexp"
"runtime"
"runtime/pprof"
@@ -32,11 +35,12 @@ var _ = check.Suite(&CollectionFSSuite{})
type keepClientStub struct {
blocks map[string][]byte
refreshable map[string]bool
- reads []string // locators from ReadAt() calls
- onWrite func(bufcopy []byte) // called from WriteBlock, before acquiring lock
- authToken string // client's auth token (used for signing locators)
- sigkey string // blob signing key
- sigttl time.Duration // blob signing ttl
+ cached map[string]bool
+ reads []string // locators from ReadAt() calls
+ onWrite func(bufcopy []byte) error // called from WriteBlock, before acquiring lock
+ authToken string // client's auth token (used for signing locators)
+ sigkey string // blob signing key
+ sigttl time.Duration // blob signing ttl
sync.RWMutex
}
@@ -58,15 +62,47 @@ func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error
return copy(p, buf[off:]), nil
}
+func (kcs *keepClientStub) BlockRead(_ context.Context, opts BlockReadOptions) (int, error) {
+ kcs.Lock()
+ kcs.reads = append(kcs.reads, opts.Locator)
+ kcs.Unlock()
+ kcs.RLock()
+ defer kcs.RUnlock()
+ if opts.CheckCacheOnly {
+ if kcs.cached[opts.Locator[:32]] {
+ return 0, nil
+ } else {
+ return 0, ErrNotCached
+ }
+ }
+ if err := VerifySignature(opts.Locator, kcs.authToken, kcs.sigttl, []byte(kcs.sigkey)); err != nil {
+ return 0, err
+ }
+ buf := kcs.blocks[opts.Locator[:32]]
+ if buf == nil {
+ return 0, errStub404
+ }
+ n, err := io.Copy(opts.WriteTo, bytes.NewReader(buf))
+ return int(n), err
+}
+
func (kcs *keepClientStub) BlockWrite(_ context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {
+ var buf []byte
if opts.Data == nil {
- panic("oops, stub is not made for this")
+ buf = make([]byte, opts.DataSize)
+ _, err := io.ReadFull(opts.Reader, buf)
+ if err != nil {
+ return BlockWriteResponse{}, err
+ }
+ } else {
+ buf = append([]byte(nil), opts.Data...)
}
- locator := SignLocator(fmt.Sprintf("%x+%d", md5.Sum(opts.Data), len(opts.Data)), kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))
- buf := make([]byte, len(opts.Data))
- copy(buf, opts.Data)
+ locator := SignLocator(fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf)), kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))
if kcs.onWrite != nil {
- kcs.onWrite(buf)
+ err := kcs.onWrite(buf)
+ if err != nil {
+ return BlockWriteResponse{}, err
+ }
}
for _, sc := range opts.StorageClasses {
if sc != "default" {
@@ -319,7 +355,7 @@ func (s *CollectionFSSuite) TestCreateFile(c *check.C) {
func (s *CollectionFSSuite) TestReadWriteFile(c *check.C) {
maxBlockSize = 8
- defer func() { maxBlockSize = 2 << 26 }()
+ defer func() { maxBlockSize = 1 << 26 }()
f, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
c.Assert(err, check.IsNil)
@@ -532,7 +568,7 @@ func (s *CollectionFSSuite) TestMarshalCopiesRemoteBlocks(c *check.C) {
func (s *CollectionFSSuite) TestMarshalSmallBlocks(c *check.C) {
maxBlockSize = 8
- defer func() { maxBlockSize = 2 << 26 }()
+ defer func() { maxBlockSize = 1 << 26 }()
var err error
s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
@@ -653,7 +689,7 @@ func (s *CollectionFSSuite) TestConcurrentWriters(c *check.C) {
func (s *CollectionFSSuite) TestRandomWrites(c *check.C) {
maxBlockSize = 40
- defer func() { maxBlockSize = 2 << 26 }()
+ defer func() { maxBlockSize = 1 << 26 }()
var err error
s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
@@ -883,7 +919,7 @@ func (s *CollectionFSSuite) TestRename(c *check.C) {
func (s *CollectionFSSuite) TestPersist(c *check.C) {
maxBlockSize = 1024
- defer func() { maxBlockSize = 2 << 26 }()
+ defer func() { maxBlockSize = 1 << 26 }()
var err error
s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
@@ -1126,7 +1162,7 @@ func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
proceed := make(chan struct{})
var started, concurrent int32
blk2done := false
- s.kc.onWrite = func([]byte) {
+ s.kc.onWrite = func([]byte) error {
atomic.AddInt32(&concurrent, 1)
switch atomic.AddInt32(&started, 1) {
case 1:
@@ -1146,6 +1182,7 @@ func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
time.Sleep(time.Millisecond)
}
c.Check(atomic.AddInt32(&concurrent, -1) < int32(concurrentWriters), check.Equals, true)
+ return nil
}
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
@@ -1192,13 +1229,14 @@ func (s *CollectionFSSuite) TestFlushAll(c *check.C) {
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
- s.kc.onWrite = func([]byte) {
+ s.kc.onWrite = func([]byte) error {
// discard flushed data -- otherwise the stub will use
// unlimited memory
time.Sleep(time.Millisecond)
s.kc.Lock()
defer s.kc.Unlock()
s.kc.blocks = map[string][]byte{}
+ return nil
}
for i := 0; i < 256; i++ {
buf := bytes.NewBuffer(make([]byte, 524288))
@@ -1236,8 +1274,9 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
c.Assert(err, check.IsNil)
var flushed int64
- s.kc.onWrite = func(p []byte) {
+ s.kc.onWrite = func(p []byte) error {
atomic.AddInt64(&flushed, int64(len(p)))
+ return nil
}
nDirs := int64(8)
@@ -1308,7 +1347,7 @@ func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
time.AfterFunc(10*time.Second, func() { close(timeout) })
var putCount, concurrency int64
var unflushed int64
- s.kc.onWrite = func(p []byte) {
+ s.kc.onWrite = func(p []byte) error {
defer atomic.AddInt64(&unflushed, -int64(len(p)))
cur := atomic.AddInt64(&concurrency, 1)
defer atomic.AddInt64(&concurrency, -1)
@@ -1328,6 +1367,7 @@ func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
}
c.Assert(cur <= int64(concurrentWriters), check.Equals, true)
c.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)
+ return nil
}
var owg sync.WaitGroup
@@ -1371,13 +1411,14 @@ func (s *CollectionFSSuite) TestFlushStress(c *check.C) {
})
wrote := 0
- s.kc.onWrite = func(p []byte) {
+ s.kc.onWrite = func(p []byte) error {
s.kc.Lock()
s.kc.blocks = map[string][]byte{}
wrote++
defer c.Logf("wrote block %d, %d bytes", wrote, len(p))
s.kc.Unlock()
time.Sleep(20 * time.Millisecond)
+ return nil
}
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
@@ -1402,10 +1443,11 @@ func (s *CollectionFSSuite) TestFlushStress(c *check.C) {
}
func (s *CollectionFSSuite) TestFlushShort(c *check.C) {
- s.kc.onWrite = func([]byte) {
+ s.kc.onWrite = func([]byte) error {
s.kc.Lock()
s.kc.blocks = map[string][]byte{}
s.kc.Unlock()
+ return nil
}
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
@@ -1477,6 +1519,643 @@ func (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) {
}
}
+var fakeLocator = func() []string {
+ locs := make([]string, 10)
+ for i := range locs {
+ locs[i] = fmt.Sprintf("%x+%d", md5.Sum(make([]byte, i)), i)
+ if i%2 == 1 {
+ locs[i] += "+Awhatever+Zotherhints"
+ }
+ }
+ return locs
+}()
+
+func (s *CollectionFSSuite) TestReplaceSegments_HappyPath(c *check.C) {
+ fs, err := (&Collection{
+ ManifestText: ". " + fakeLocator[1] + " " + fakeLocator[2] + " 0:3:file3\n",
+ }).FileSystem(nil, &keepClientStub{})
+ c.Assert(err, check.IsNil)
+ changed, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{
+ BlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},
+ BlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 1, 2},
+ })
+ c.Check(changed, check.Equals, true)
+ c.Check(err, check.IsNil)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ c.Check(mtxt, check.Equals, ". "+fakeLocator[3]+" 0:3:file3\n")
+}
+
+func (s *CollectionFSSuite) TestReplaceSegments_InvalidOffset(c *check.C) {
+ origtxt := ". " + fakeLocator[1] + " " + fakeLocator[2] + " 0:3:file3\n"
+ fs, err := (&Collection{
+ ManifestText: origtxt,
+ }).FileSystem(nil, &keepClientStub{})
+ c.Assert(err, check.IsNil)
+ changed, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{
+ BlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},
+ BlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 2, 2},
+ })
+ c.Check(changed, check.Equals, false)
+ c.Check(err, check.ErrorMatches, `invalid replacement: offset 2 \+ length 2 > block size 3`)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ c.Check(mtxt, check.Equals, origtxt)
+}
+
+func (s *CollectionFSSuite) TestReplaceSegments_LengthMismatch(c *check.C) {
+ origtxt := ". " + fakeLocator[1] + " " + fakeLocator[2] + " 0:3:file3\n"
+ fs, err := (&Collection{
+ ManifestText: origtxt,
+ }).FileSystem(nil, &keepClientStub{})
+ c.Assert(err, check.IsNil)
+ changed, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{
+ BlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},
+ BlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 0, 3},
+ })
+ c.Check(changed, check.Equals, false)
+ c.Check(err, check.ErrorMatches, `mismatched length: replacing segment length 2 with segment length 3`)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ c.Check(mtxt, check.Equals, origtxt)
+}
+
+func (s *CollectionFSSuite) TestReplaceSegments_SkipUnreferenced(c *check.C) {
+ fs, err := (&Collection{
+ ManifestText: ". " + fakeLocator[1] + " " + fakeLocator[2] + " " + fakeLocator[3] + " 0:6:file6\n",
+ }).FileSystem(nil, &keepClientStub{})
+ c.Assert(err, check.IsNil)
+ changed, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{
+ BlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[4], 0, 1}, // skipped because [5] unref
+ BlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[4], 1, 2}, // skipped because [5] unref
+ BlockSegment{fakeLocator[5], 0, 2}: BlockSegment{fakeLocator[4], 1, 2}, // [5] unreferenced in orig manifest
+ BlockSegment{fakeLocator[3], 0, 3}: BlockSegment{fakeLocator[6], 3, 3}, // applied
+ })
+ c.Check(changed, check.Equals, true)
+ c.Check(err, check.IsNil)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ c.Check(mtxt, check.Equals, ". "+fakeLocator[1]+" "+fakeLocator[2]+" "+fakeLocator[6]+" 0:3:file6 6:3:file6\n")
+}
+
+func (s *CollectionFSSuite) TestReplaceSegments_SkipIncompleteSegment(c *check.C) {
+ origtxt := ". " + fakeLocator[2] + " " + fakeLocator[3] + " 0:5:file5\n"
+ fs, err := (&Collection{
+ ManifestText: origtxt,
+ }).FileSystem(nil, &keepClientStub{})
+ c.Assert(err, check.IsNil)
+ changed, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{
+ BlockSegment{fakeLocator[2], 0, 1}: BlockSegment{fakeLocator[4], 0, 1}, // length=1 does not match the length=2 segment
+ })
+ c.Check(changed, check.Equals, false)
+ c.Check(err, check.IsNil)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ c.Check(mtxt, check.Equals, origtxt)
+}
+
+func (s *CollectionFSSuite) testPlanRepack(c *check.C, opts RepackOptions, manifest string, expectPlan [][]storedSegment) {
+ fs, err := (&Collection{ManifestText: manifest}).FileSystem(nil, s.kc)
+ c.Assert(err, check.IsNil)
+ cfs := fs.(*collectionFileSystem)
+ repl, err := cfs.planRepack(context.Background(), opts, cfs.root.(*dirnode))
+ c.Assert(err, check.IsNil)
+
+ // we always expect kc==cfs, so we fill this in instead of
+ // requiring each test case to repeat it
+ for _, pp := range expectPlan {
+ for i := range pp {
+ pp[i].kc = cfs
+ }
+ }
+ c.Check(repl, check.DeepEquals, expectPlan)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x32M(c *check.C) {
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000", size: 32000000, length: 32000000, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000", size: 32000000, length: 32000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x32M_Cached(c *check.C) {
+ s.kc.cached = map[string]bool{
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": true,
+ "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": true,
+ }
+ s.testPlanRepack(c,
+ RepackOptions{Full: true, CachedOnly: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000", size: 32000000, length: 32000000, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000", size: 32000000, length: 32000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x32M_OneCached(c *check.C) {
+ s.kc.cached = map[string]bool{
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": true,
+ }
+ s.testPlanRepack(c,
+ RepackOptions{Full: true, CachedOnly: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\n",
+ nil)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_3x32M_TwoCached(c *check.C) {
+ s.kc.cached = map[string]bool{
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": true,
+ "cccccccccccccccccccccccccccccccc": true,
+ }
+ s.testPlanRepack(c,
+ RepackOptions{Full: true, CachedOnly: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 cccccccccccccccccccccccccccccccc+32000000 0:96000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000", size: 32000000, length: 32000000, offset: 0},
+ {locator: "cccccccccccccccccccccccccccccccc+32000000", size: 32000000, length: 32000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x32Mi(c *check.C) {
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554432 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554432 0:67108864:file\n",
+ nil)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x32MiMinus1(c *check.C) {
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554431 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554431 0:67108862:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554431", size: 33554431, length: 33554431, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554431", size: 33554431, length: 33554431, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_3x32M(c *check.C) {
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 cccccccccccccccccccccccccccccccc+32000000 0:96000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000", size: 32000000, length: 32000000, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000", size: 32000000, length: 32000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_3x42M(c *check.C) {
+ // Each block is more than half full, so do nothing.
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+42000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+42000000 cccccccccccccccccccccccccccccccc+42000000 0:126000000:file\n",
+ nil)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_Premature(c *check.C) {
+ // Repacking would reduce to one block, but it would still be
+ // too short to be worthwhile, so do nothing.
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+123 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+123 cccccccccccccccccccccccccccccccc+123 0:369:file\n",
+ nil)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_4x22M_NonAdjacent(c *check.C) {
+ // Repack the first three 22M blocks into one 66M block.
+ // Don't touch the 44M blocks or the final 22M block.
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+44000000 cccccccccccccccccccccccccccccccc+22000000 dddddddddddddddddddddddddddddddd+44000000 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee+22000000 ffffffffffffffffffffffffffffffff+44000000 00000000000000000000000000000000+22000000 0:220000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000", size: 22000000, length: 22000000, offset: 0},
+ {locator: "cccccccccccccccccccccccccccccccc+22000000", size: 22000000, length: 22000000, offset: 0},
+ {locator: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee+22000000", size: 22000000, length: 22000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x22M_DuplicateBlock(c *check.C) {
+ // Repack a+b+c, not a+b+a.
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+22000000 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 0:66000000:file\n"+
+ "./dir cccccccccccccccccccccccccccccccc+22000000 0:22000000:file\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000", size: 22000000, length: 22000000, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+22000000", size: 22000000, length: 22000000, offset: 0},
+ {locator: "cccccccccccccccccccccccccccccccc+22000000", size: 22000000, length: 22000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_2x22M_DuplicateBlock_TooShort(c *check.C) {
+ // Repacking a+b would not meet the 32MiB threshold.
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 0:44000001:file\n",
+ nil)
+}
+
+func (s *CollectionFSSuite) TestPlanRepack_SiblingsTogether(c *check.C) {
+ // Pack sibling files' ("a" and "c") segments together before
+ // other subdirs ("b/b"), even though subdir "b" sorts between
+ // "a" and "c".
+ s.testPlanRepack(c,
+ RepackOptions{Full: true},
+ ". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+15000000 cccccccccccccccccccccccccccccccc+15000000 0:15000000:a 15000000:15000000:c\n"+
+ "./b bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15000000 0:15000000:b\n",
+ [][]storedSegment{
+ {
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+15000000", size: 15000000, length: 15000000, offset: 0},
+ {locator: "cccccccccccccccccccccccccccccccc+15000000", size: 15000000, length: 15000000, offset: 0},
+ {locator: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15000000", size: 15000000, length: 15000000, offset: 0},
+ },
+ })
+}
+
+func (s *CollectionFSSuite) TestRepackData(c *check.C) {
+ fs, err := (&Collection{}).FileSystem(nil, s.kc)
+ c.Assert(err, check.IsNil)
+ cfs := fs.(*collectionFileSystem)
+
+ testBlockWritten := make(map[int]string)
+ // testSegment(N) returns an N-byte segment of a block
+ // containing repeated byte N%256. The segment's offset
+ // within the block is N/1000000 (*). The block also has
+ // N/1000000 null bytes following the segment (*).
+ //
+ // If N=404, the block is not readable.
+ //
+ // (*) ...unless that would result in an oversize block.
+ testSegment := func(testSegmentNum int) storedSegment {
+ length := testSegmentNum
+ offset := testSegmentNum / 1000000
+ if offset+length > maxBlockSize {
+ offset = 0
+ }
+ size := testSegmentNum + offset
+ if size+offset <= maxBlockSize {
+ size += offset
+ }
+ if _, stored := testBlockWritten[testSegmentNum]; !stored {
+ data := make([]byte, size)
+ for b := range data[offset : offset+length] {
+ data[b] = byte(testSegmentNum & 0xff)
+ }
+ resp, err := s.kc.BlockWrite(context.Background(), BlockWriteOptions{Data: data})
+ c.Assert(err, check.IsNil)
+ testBlockWritten[testSegmentNum] = resp.Locator
+ if testSegmentNum == 404 {
+ delete(s.kc.blocks, resp.Locator[:32])
+ }
+ }
+ return storedSegment{
+ kc: cfs,
+ locator: testBlockWritten[testSegmentNum],
+ size: size,
+ length: length,
+ offset: offset,
+ }
+ }
+ for trialIndex, trial := range []struct {
+ label string
+ // "input" here has the same shape as repackData's
+ // [][]storedSegment argument, but uses int N as
+ // shorthand for testSegment(N).
+ input [][]int
+ onWrite func([]byte) error
+ expectRepackedLen int
+ expectErrorMatches string
+ }{
+ {
+ label: "one {3 blocks to 1} merge",
+ input: [][]int{{1, 2, 3}},
+ expectRepackedLen: 3,
+ },
+ {
+ label: "two {3 blocks to 1} merges",
+ input: [][]int{{1, 2, 3}, {4, 5, 6}},
+ expectRepackedLen: 6,
+ },
+ {
+ label: "merge two {3 blocks to 1} merges",
+ input: [][]int{{1, 2, 3}, {4, 5, 6}},
+ expectRepackedLen: 6,
+ },
+ {
+ label: "no-op",
+ input: nil,
+ expectRepackedLen: 0,
+ },
+ {
+ label: "merge 3 blocks plus a zero-length segment -- not expected to be used, but should work",
+ input: [][]int{{1, 2, 0, 3}},
+ expectRepackedLen: 4,
+ },
+ {
+ label: "merge a single segment -- not expected to be used, but should work",
+ input: [][]int{{12345}},
+ expectRepackedLen: 1,
+ },
+ {
+ label: "merge a single empty segment -- not expected to be used, but should work",
+ input: [][]int{{0}},
+ expectRepackedLen: 1,
+ },
+ {
+ label: "merge zero segments -- not expected to be used, but should work",
+ input: [][]int{{}},
+ expectRepackedLen: 0,
+ },
+ {
+ label: "merge same orig segment into two different replacements -- not expected to be used, but should work",
+ input: [][]int{{1, 22, 3}, {4, 22, 6}},
+ expectRepackedLen: 5,
+ },
+ {
+ label: "identical merges -- not expected to be used, but should work",
+ input: [][]int{{11, 22, 33}, {11, 22, 33}},
+ expectRepackedLen: 3,
+ },
+ {
+ label: "read error on first segment",
+ input: [][]int{{404, 2, 3}},
+ expectRepackedLen: 0,
+ expectErrorMatches: "404 block not found",
+ },
+ {
+ label: "read error on second segment",
+ input: [][]int{{1, 404, 3}},
+ expectErrorMatches: "404 block not found",
+ },
+ {
+ label: "read error on last segment",
+ input: [][]int{{1, 2, 404}},
+ expectErrorMatches: "404 block not found",
+ },
+ {
+ label: "merge does not fit in one block",
+ input: [][]int{{50000000, 20000000}},
+ expectErrorMatches: "combined length 70000000 would exceed maximum block size 67108864",
+ },
+ {
+ label: "write error",
+ input: [][]int{{1, 2, 3}},
+ onWrite: func(p []byte) error { return errors.New("stub write error") },
+ expectErrorMatches: "stub write error",
+ },
+ } {
+ c.Logf("trial %d: %s", trialIndex, trial.label)
+ var input [][]storedSegment
+ for _, seglist := range trial.input {
+ var segments []storedSegment
+ for _, segnum := range seglist {
+ segments = append(segments, testSegment(segnum))
+ }
+ input = append(input, segments)
+ }
+ s.kc.onWrite = trial.onWrite
+ repacked, err := cfs.repackData(context.Background(), input)
+ if trial.expectErrorMatches != "" {
+ c.Check(err, check.ErrorMatches, trial.expectErrorMatches)
+ continue
+ }
+ c.Assert(err, check.IsNil)
+ c.Check(repacked, check.HasLen, trial.expectRepackedLen)
+ for _, origSegments := range input {
+ replLocator := ""
+ for _, origSegment := range origSegments {
+ origBlock := BlockSegment{
+ Locator: stripAllHints(origSegment.locator),
+ Length: origSegment.length,
+ Offset: origSegment.offset,
+ }
+ buf := make([]byte, origSegment.size)
+ n, err := cfs.ReadAt(repacked[origBlock].Locator, buf, repacked[origBlock].Offset)
+ c.Assert(err, check.IsNil)
+ c.Check(n, check.Equals, len(buf))
+ expectContent := byte(origSegment.length & 0xff)
+ for segoffset, b := range buf {
+ if b != expectContent {
+ c.Errorf("content mismatch: origSegment.locator %s -> replLocator %s offset %d: byte %d is %d, expected %d", origSegment.locator, replLocator, repacked[origBlock].Offset, segoffset, b, expectContent)
+ break
+ }
+ }
+ }
+ }
+ }
+}
+
+type dataToWrite struct {
+ path string
+ data func() []byte
+}
+
+func dataToWrite_SourceTree(c *check.C, maxfiles int) (writes []dataToWrite) {
+ gitdir, err := filepath.Abs("../../..")
+ c.Assert(err, check.IsNil)
+ infs := os.DirFS(gitdir)
+ buf, err := exec.Command("git", "-C", gitdir, "ls-files").CombinedOutput()
+ c.Assert(err, check.IsNil, check.Commentf("%s", buf))
+ for _, path := range bytes.Split(buf, []byte("\n")) {
+ path := string(path)
+ if path == "" ||
+ strings.HasPrefix(path, "tools/arvbox/lib/arvbox/docker/service") &&
+ strings.HasSuffix(path, "/run") {
+ // dangling symlink
+ continue
+ }
+ fi, err := fs.Stat(infs, path)
+ c.Assert(err, check.IsNil)
+ if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 {
+ continue
+ }
+ writes = append(writes, dataToWrite{
+ path: path,
+ data: func() []byte {
+ data, err := fs.ReadFile(infs, path)
+ c.Assert(err, check.IsNil)
+ return data
+ },
+ })
+ if len(writes) >= maxfiles {
+ break
+ }
+ }
+ return
+}
+
+func dataToWrite_ConstantSizeFilesInDirs(c *check.C, ndirs, nfiles, filesize, chunksize int) (writes []dataToWrite) {
+ for chunk := 0; chunk == 0 || (chunksize > 0 && chunk < (filesize+chunksize-1)/chunksize); chunk++ {
+ for i := 0; i < nfiles; i++ {
+ datasize := filesize
+ if chunksize > 0 {
+ datasize = chunksize
+ if remain := filesize - chunk*chunksize; remain < chunksize {
+ datasize = remain
+ }
+ }
+ data := make([]byte, datasize)
+ copy(data, []byte(fmt.Sprintf("%d chunk %d", i, chunk)))
+ writes = append(writes, dataToWrite{
+ path: fmt.Sprintf("dir%d/file%d", i*ndirs/nfiles, i),
+ data: func() []byte { return data },
+ })
+ }
+ }
+ return
+}
+
+var enableRepackCharts = os.Getenv("ARVADOS_TEST_REPACK_CHARTS") != ""
+
+func (s *CollectionFSSuite) skipMostRepackCostTests(c *check.C) {
+ if !enableRepackCharts {
+ c.Skip("Set ARVADOS_TEST_REPACK_CHARTS to run more cost tests and generate data for charts like https://dev.arvados.org/issues/22320#note-14")
+ }
+}
+
+// If we upload 500 files (or 99999) and get a manifest with 60 or
+// fewer blocks (the third parameter of testRepackCost) then repacking
+// is working. The number of blocks is going to be proportional to the
+// amount of data in the source tree, so these numbers may need to be
+// updated periodically, but what we're really testing for is that we
+// didn't get back a manifest with 500 or 5000 blocks.
+
+func (s *CollectionFSSuite) TestRepackCost_SourceTree_Part(c *check.C) {
+ s.testRepackCost(c, dataToWrite_SourceTree(c, 500), 60)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_SourceTree(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_SourceTree(c, 99999), 60)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_1000x_1M_Files(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 1000, 1000000, 0), 80)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_100x_8M_Files(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 8000000, 0), 20)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_100x_8M_Files_1M_Chunks(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 8000000, 1000000), 50)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_100x_10M_Files_1M_Chunks(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 10000000, 1000000), 80)
+}
+
+func (s *CollectionFSSuite) TestRepackCost_100x_10M_Files(c *check.C) {
+ s.skipMostRepackCostTests(c)
+ s.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 10000000, 0), 100)
+}
+
+func (s *CollectionFSSuite) testRepackCost(c *check.C, writes []dataToWrite, maxBlocks int) {
+ s.kc.blocks = make(map[string][]byte)
+ testfs, err := (&Collection{}).FileSystem(nil, s.kc)
+ c.Assert(err, check.IsNil)
+ cfs := testfs.(*collectionFileSystem)
+ dirsCreated := make(map[string]bool)
+ bytesContent := 0
+ bytesWritten := func() (n int) {
+ s.kc.Lock()
+ defer s.kc.Unlock()
+ for _, data := range s.kc.blocks {
+ n += len(data)
+ }
+ return
+ }
+ blocksInManifest := func() int {
+ blocks := make(map[string]bool)
+	cfs.fileSystem.root.(*dirnode).walkSegments(func(seg segment) segment {
+		blocks[seg.(storedSegment).blockSegment().StripAllHints().Locator] = true
+		return seg
+	})
+ return len(blocks)
+ }
+ tRepackNoop := time.Duration(0)
+ nRepackNoop := 0
+ tRepackTotal := time.Duration(0)
+ nRepackTotal := 0
+ filesWritten := make(map[string]bool)
+ stats := bytes.NewBuffer(nil)
+ fmt.Fprint(stats, "writes\tfiles\tbytes_in_files\tblocks\tbytes_written_backend\tn_repacked\tn_repack_noop\tseconds_repacking\n")
+ for writeIndex, write := range writes {
+		for i, r := range write.path {
+			if r == '/' && !dirsCreated[write.path[:i]] {
+ testfs.Mkdir(write.path[:i], 0700)
+ dirsCreated[write.path[:i]] = true
+ }
+ }
+ f, err := testfs.OpenFile(write.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)
+ c.Assert(err, check.IsNil)
+ filesWritten[write.path] = true
+ data := write.data()
+ _, err = f.Write(data)
+ c.Assert(err, check.IsNil)
+ err = f.Close()
+ c.Assert(err, check.IsNil)
+ bytesContent += len(data)
+
+ _, err = cfs.MarshalManifest("")
+ c.Assert(err, check.IsNil)
+ t0 := time.Now()
+ n, err := cfs.Repack(context.Background(), RepackOptions{})
+ c.Assert(err, check.IsNil)
+ tRepack := time.Since(t0)
+ tRepackTotal += tRepack
+ nRepackTotal++
+
+ if n == 0 {
+ tRepackNoop += tRepack
+ nRepackNoop++
+ } else if bytesWritten()/4 > bytesContent {
+ // Rewriting data >4x on average means
+ // something is terribly wrong -- give up now
+ // instead of going OOM.
+ c.Logf("something is terribly wrong -- bytesWritten %d >> bytesContent %d", bytesWritten(), bytesContent)
+ c.FailNow()
+ }
+ fmt.Fprintf(stats, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%.06f\n", writeIndex+1, len(filesWritten), bytesContent, blocksInManifest(), bytesWritten(), nRepackTotal-nRepackNoop, nRepackNoop, tRepackTotal.Seconds())
+ }
+ c.Check(err, check.IsNil)
+ c.Check(blocksInManifest() <= maxBlocks, check.Equals, true, check.Commentf("expect %d <= %d", blocksInManifest(), maxBlocks))
+
+ c.Logf("writes %d files %d bytesContent %d bytesWritten %d bytesRewritten %d blocksInManifest %d", len(writes), len(filesWritten), bytesContent, bytesWritten(), bytesWritten()-bytesContent, blocksInManifest())
+ c.Logf("spent %v on %d Repack calls, average %v per call", tRepackTotal, nRepackTotal, tRepackTotal/time.Duration(nRepackTotal))
+ c.Logf("spent %v on %d Repack calls that had no effect, average %v per call", tRepackNoop, nRepackNoop, tRepackNoop/time.Duration(nRepackNoop))
+
+ if enableRepackCharts {
+ // write stats to tmp/{testname}_stats.tsv
+ err = os.Mkdir("tmp", 0777)
+ if !os.IsExist(err) {
+ c.Check(err, check.IsNil)
+ }
+ err = os.WriteFile("tmp/"+c.TestName()+"_stats.tsv", stats.Bytes(), 0666)
+ c.Check(err, check.IsNil)
+ }
+}
+
func (s *CollectionFSSuite) TestSnapshotSplice(c *check.C) {
filedata1 := "hello snapshot+splice world\n"
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
@@ -1639,29 +2318,71 @@ type CollectionFSUnitSuite struct{}
var _ = check.Suite(&CollectionFSUnitSuite{})
// expect ~2 seconds to load a manifest with 256K files
-func (s *CollectionFSUnitSuite) TestLargeManifest(c *check.C) {
+func (s *CollectionFSUnitSuite) TestLargeManifest_ManyFiles(c *check.C) {
if testing.Short() {
c.Skip("slow")
}
+ s.testLargeManifest(c, 512, 512, 1, 0)
+}
- const (
- dirCount = 512
- fileCount = 512
- )
+func (s *CollectionFSUnitSuite) TestLargeManifest_LargeFiles(c *check.C) {
+ if testing.Short() {
+ c.Skip("slow")
+ }
+ s.testLargeManifest(c, 1, 800, 1000, 0)
+}
+func (s *CollectionFSUnitSuite) TestLargeManifest_InterleavedFiles(c *check.C) {
+ if testing.Short() {
+ c.Skip("slow")
+ }
+	// Timing figures here are from a dev host, measured at stages (0)->(1)->(2)->(3):
+ // (0) no optimizations (main branch commit ea697fb1e8)
+ // (1) resolve streampos->blkidx with binary search
+ // (2) ...and rewrite PortableDataHash() without regexp
+ // (3) ...and use fnodeCache in loadManifest
+ s.testLargeManifest(c, 1, 800, 100, 4<<20) // 127s -> 12s -> 2.5s -> 1.5s
+ s.testLargeManifest(c, 1, 50, 1000, 4<<20) // 44s -> 10s -> 1.5s -> 0.8s
+ s.testLargeManifest(c, 1, 200, 100, 4<<20) // 13s -> 4s -> 0.6s -> 0.3s
+ s.testLargeManifest(c, 1, 200, 150, 4<<20) // 26s -> 4s -> 1s -> 0.5s
+ s.testLargeManifest(c, 1, 200, 200, 4<<20) // 38s -> 6s -> 1.3s -> 0.7s
+ s.testLargeManifest(c, 1, 200, 225, 4<<20) // 46s -> 7s -> 1.5s -> 1s
+ s.testLargeManifest(c, 1, 400, 400, 4<<20) // 477s -> 24s -> 5s -> 3s
+ // s.testLargeManifest(c, 1, 800, 1000, 4<<20) // timeout -> 186s -> 28s -> 17s
+}
+
+func (s *CollectionFSUnitSuite) testLargeManifest(c *check.C, dirCount, filesPerDir, blocksPerFile, interleaveChunk int) {
+ t0 := time.Now()
+ const blksize = 1 << 26
+	c.Logf("%s building manifest with dirCount=%d filesPerDir=%d blocksPerFile=%d interleaveChunk=%d", time.Now(), dirCount, filesPerDir, blocksPerFile, interleaveChunk)
mb := bytes.NewBuffer(make([]byte, 0, 40000000))
+ blkid := 0
for i := 0; i < dirCount; i++ {
fmt.Fprintf(mb, "./dir%d", i)
- for j := 0; j <= fileCount; j++ {
- fmt.Fprintf(mb, " %032x+42+A%040x@%08x", j, j, j)
+ for j := 0; j < filesPerDir; j++ {
+ for k := 0; k < blocksPerFile; k++ {
+ blkid++
+ fmt.Fprintf(mb, " %032x+%d+A%040x@%08x", blkid, blksize, blkid, blkid)
+ }
}
- for j := 0; j < fileCount; j++ {
- fmt.Fprintf(mb, " %d:%d:dir%d/file%d", j*42+21, 42, j, j)
+ for j := 0; j < filesPerDir; j++ {
+ if interleaveChunk == 0 {
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", (filesPerDir-j-1)*blocksPerFile*blksize, blocksPerFile*blksize, j, j)
+ continue
+ }
+ for todo := int64(blocksPerFile) * int64(blksize); todo > 0; todo -= int64(interleaveChunk) {
+ size := int64(interleaveChunk)
+ if size > todo {
+ size = todo
+ }
+ offset := rand.Int63n(int64(blocksPerFile)*int64(blksize)*int64(filesPerDir) - size)
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", offset, size, j, j)
+ }
}
mb.Write([]byte{'\n'})
}
coll := Collection{ManifestText: mb.String()}
- c.Logf("%s built", time.Now())
+ c.Logf("%s built manifest size=%d", time.Now(), mb.Len())
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
@@ -1670,17 +2391,28 @@ func (s *CollectionFSUnitSuite) TestLargeManifest(c *check.C) {
f, err := coll.FileSystem(NewClientFromEnv(), &keepClientStub{})
c.Check(err, check.IsNil)
c.Logf("%s loaded", time.Now())
- c.Check(f.Size(), check.Equals, int64(42*dirCount*fileCount))
+ c.Check(f.Size(), check.Equals, int64(dirCount*filesPerDir*blocksPerFile*blksize))
+ // Stat() and OpenFile() each file. This mimics the behavior
+ // of webdav propfind, which opens each file even when just
+ // listing directory entries.
for i := 0; i < dirCount; i++ {
- for j := 0; j < fileCount; j++ {
- f.Stat(fmt.Sprintf("./dir%d/dir%d/file%d", i, j, j))
+ for j := 0; j < filesPerDir; j++ {
+ fnm := fmt.Sprintf("./dir%d/dir%d/file%d", i, j, j)
+ fi, err := f.Stat(fnm)
+ c.Assert(err, check.IsNil)
+ c.Check(fi.IsDir(), check.Equals, false)
+ f, err := f.OpenFile(fnm, os.O_RDONLY, 0)
+ c.Assert(err, check.IsNil)
+ f.Close()
}
}
- c.Logf("%s Stat() x %d", time.Now(), dirCount*fileCount)
+ c.Logf("%s OpenFile() x %d", time.Now(), dirCount*filesPerDir)
runtime.ReadMemStats(&memstats)
c.Logf("%s Alloc=%d Sys=%d", time.Now(), memstats.Alloc, memstats.Sys)
+ c.Logf("%s MemorySize=%d", time.Now(), f.MemorySize())
+	c.Logf("%s ... test duration %s", time.Now(), time.Since(t0))
}
// Gocheck boilerplate
diff --git a/sdk/go/arvados/fs_filehandle.go b/sdk/go/arvados/fs_filehandle.go
index f50dd4612b..446080eda9 100644
--- a/sdk/go/arvados/fs_filehandle.go
+++ b/sdk/go/arvados/fs_filehandle.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "context"
"io"
"io/fs"
"os"
@@ -74,6 +75,16 @@ func (f *filehandle) Write(p []byte) (n int, err error) {
return
}
+func (f *filehandle) Repack(ctx context.Context, opts RepackOptions) (int, error) {
+ dn, ok := f.inode.(*dirnode)
+ if !ok {
+ return 0, ErrNotADirectory
+ }
+ dn.Lock()
+ defer dn.Unlock()
+ return dn.fs.repackTree(ctx, opts, dn)
+}
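
A minimal usage sketch, written as an external caller (hence the arvados. qualifiers) and assuming fs was obtained from Collection.FileSystem with files already written to it; the anonymous interface assertion is illustrative, mirroring what the Repack method above provides:

```go
// Sketch only: ask the filesystem to consolidate small blocks.
// n == 0 means the call was a no-op, as in testRepackCost above.
n, err := fs.(interface {
	Repack(context.Context, arvados.RepackOptions) (int, error)
}).Repack(context.Background(), arvados.RepackOptions{})
if err != nil {
	log.Fatal(err)
}
log.Printf("Repack reported %d", n)
```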
+
// dirEntry implements fs.DirEntry, see (*filehandle)ReadDir().
type dirEntry struct {
os.FileInfo
diff --git a/sdk/go/arvados/group.go b/sdk/go/arvados/group.go
index 0782bd43d1..31726e6152 100644
--- a/sdk/go/arvados/group.go
+++ b/sdk/go/arvados/group.go
@@ -10,25 +10,23 @@ import (
// Group is an arvados#group record
type Group struct {
- UUID string `json:"uuid"`
- Name string `json:"name"`
- OwnerUUID string `json:"owner_uuid"`
- GroupClass string `json:"group_class"`
- Etag string `json:"etag"`
- Href string `json:"href"`
- TrashAt *time.Time `json:"trash_at"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- DeleteAt *time.Time `json:"delete_at"`
- IsTrashed bool `json:"is_trashed"`
- Properties map[string]interface{} `json:"properties"`
- WritableBy []string `json:"writable_by,omitempty"`
- Description string `json:"description"`
- FrozenByUUID string `json:"frozen_by_uuid"`
- CanWrite bool `json:"can_write"`
- CanManage bool `json:"can_manage"`
+ UUID string `json:"uuid"`
+ Name string `json:"name"`
+ OwnerUUID string `json:"owner_uuid"`
+ GroupClass string `json:"group_class"`
+ Etag string `json:"etag"`
+ TrashAt *time.Time `json:"trash_at"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ DeleteAt *time.Time `json:"delete_at"`
+ IsTrashed bool `json:"is_trashed"`
+ Properties map[string]interface{} `json:"properties"`
+ WritableBy []string `json:"writable_by,omitempty"`
+ Description string `json:"description"`
+ FrozenByUUID string `json:"frozen_by_uuid"`
+ CanWrite bool `json:"can_write"`
+ CanManage bool `json:"can_manage"`
}
// GroupList is an arvados#groupList resource.
diff --git a/sdk/go/arvados/job.go b/sdk/go/arvados/job.go
deleted file mode 100644
index ccf752ce7c..0000000000
--- a/sdk/go/arvados/job.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package arvados
-
-import "time"
-
-// Job is an arvados#job record
-type Job struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- OwnerUUID string `json:"owner_uuid"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- SubmitID string `json:"submit_id"`
- Script string `json:"script"`
- CancelledByClientUUID string `json:"cancelled_by_client_uuid"`
- CancelledByUserUUID string `json:"cancelled_by_user_uuid"`
- CancelledAt time.Time `json:"cancelled_at"`
- StartedAt time.Time `json:"started_at"`
- FinishedAt time.Time `json:"finished_at"`
- Running bool `json:"running"`
- Success bool `json:"success"`
- Output string `json:"output"`
- CreatedAt time.Time `json:"created_at"`
- UpdatedAt time.Time `json:"updated_at"`
- IsLockedByUUID string `json:"is_locked_by_uuid"`
- Log string `json:"log"`
- TasksSummary map[string]interface{} `json:"tasks_summary"`
- RuntimeConstraints map[string]interface{} `json:"runtime_constraints"`
- Nondeterministic bool `json:"nondeterministic"`
- Repository string `json:"repository"`
- SuppliedScriptVersion string `json:"supplied_script_version"`
- DockerImageLocator string `json:"docker_image_locator"`
- Priority int `json:"priority"`
- Description string `json:"description"`
- State string `json:"state"`
- ArvadosSDKVersion string `json:"arvados_sdk_version"`
- Components map[string]interface{} `json:"components"`
- ScriptParametersDigest string `json:"script_parameters_digest"`
- WritableBy []string `json:"writable_by,omitempty"`
-}
-
-func (g Job) resourceName() string {
- return "job"
-}
diff --git a/sdk/go/arvados/keep_cache.go b/sdk/go/arvados/keep_cache.go
index 108081d5ac..4d9c53e5e4 100644
--- a/sdk/go/arvados/keep_cache.go
+++ b/sdk/go/arvados/keep_cache.go
@@ -113,7 +113,10 @@ func (cache *DiskCache) setup() {
defer sharedCachesLock.Unlock()
dir := cache.Dir
if sharedCaches[dir] == nil {
+ cache.debugf("initializing sharedCache using %s with max size %d", dir, cache.MaxSize)
sharedCaches[dir] = &sharedCache{dir: dir, maxSize: cache.MaxSize}
+ } else {
+ cache.debugf("using existing sharedCache using %s with max size %d (would have initialized with %d)", dir, sharedCaches[dir].maxSize, cache.MaxSize)
}
cache.sharedCache = sharedCaches[dir]
}
@@ -276,6 +279,10 @@ func (fw funcwriter) Write(p []byte) (int, error) {
// cache. The remainder of the block may continue to be copied into
// the cache in the background.
func (cache *DiskCache) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ return cache.readAt(locator, dst, offset, false)
+}
+
+func (cache *DiskCache) readAt(locator string, dst []byte, offset int, checkCacheOnly bool) (int, error) {
cache.setupOnce.Do(cache.setup)
cachefilename := cache.cacheFile(locator)
if n, err := cache.quickReadAt(cachefilename, dst, offset); err == nil {
@@ -285,6 +292,10 @@ func (cache *DiskCache) ReadAt(locator string, dst []byte, offset int) (int, err
cache.writingLock.Lock()
progress := cache.writing[cachefilename]
if progress == nil {
+ if checkCacheOnly {
+ cache.writingLock.Unlock()
+ return 0, ErrNotCached
+ }
// Nobody else is fetching from backend, so we'll add
// a new entry to cache.writing, fetch in a separate
// goroutine.
@@ -556,6 +567,9 @@ func (cache *DiskCache) BlockRead(ctx context.Context, opts BlockReadOptions) (i
if err != nil || blocksize < 0 {
return 0, errors.New("invalid block locator: invalid size hint")
}
+ if opts.CheckCacheOnly {
+ return cache.readAt(opts.Locator, nil, 0, true)
+ }
offset := 0
buf := make([]byte, 131072)
@@ -623,8 +637,9 @@ func (cache *DiskCache) tidy() {
}
var stat unix.Statfs_t
if nil == unix.Statfs(cache.dir, &stat) {
- maxsize = int64(stat.Bavail) * stat.Bsize * pct / 100
+ maxsize = int64(stat.Blocks) * stat.Bsize * pct / 100
atomic.StoreInt64(&cache.defaultMaxSize, maxsize)
+ cache.debugf("setting cache size %d = blocks %d * bsize %d * pct %d / 100", maxsize, stat.Blocks, stat.Bsize, pct)
} else {
// In this case we will set
// defaultMaxSize below after
diff --git a/sdk/go/arvados/keep_cache_test.go b/sdk/go/arvados/keep_cache_test.go
index 776d9bb652..cae06cd13b 100644
--- a/sdk/go/arvados/keep_cache_test.go
+++ b/sdk/go/arvados/keep_cache_test.go
@@ -74,6 +74,9 @@ func (k *keepGatewayMemoryBacked) ReadAt(locator string, dst []byte, offset int)
return n, nil
}
func (k *keepGatewayMemoryBacked) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {
+ if opts.CheckCacheOnly {
+ return 0, ErrNotCached
+ }
k.mtx.RLock()
data := k.data[opts.Locator]
k.mtx.RUnlock()
@@ -278,6 +281,68 @@ func (s *keepCacheSuite) testConcurrentReaders(c *check.C, cannotRefresh, mangle
wg.Wait()
}
+func (s *keepCacheSuite) TestBlockRead_CheckCacheOnly(c *check.C) {
+ blkCached := make([]byte, 12_000_000)
+ blkUncached := make([]byte, 13_000_000)
+ backend := &keepGatewayMemoryBacked{}
+ cache := DiskCache{
+ KeepGateway: backend,
+ MaxSize: ByteSizeOrPercent(len(blkUncached) + len(blkCached)),
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resp, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: blkUncached,
+ })
+ c.Check(err, check.IsNil)
+ locUncached := resp.Locator
+
+ resp, err = cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: blkCached,
+ })
+ c.Check(err, check.IsNil)
+ locCached := resp.Locator
+
+ os.RemoveAll(filepath.Join(cache.Dir, locUncached[:3]))
+ cache.deleteHeldopen(cache.cacheFile(locUncached), nil)
+ backend.data = make(map[string][]byte)
+
+ // Do multiple concurrent reads so we have a chance of catching
+ // race/locking bugs.
+ var wg sync.WaitGroup
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var buf bytes.Buffer
+ n, err := cache.BlockRead(ctx, BlockReadOptions{
+ Locator: locUncached,
+ WriteTo: &buf,
+ CheckCacheOnly: true})
+ c.Check(n, check.Equals, 0)
+ c.Check(err, check.Equals, ErrNotCached)
+ c.Check(buf.Len(), check.Equals, 0)
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var buf bytes.Buffer
+ n, err := cache.BlockRead(ctx, BlockReadOptions{
+ Locator: locCached,
+ WriteTo: &buf,
+ CheckCacheOnly: true})
+ c.Check(n, check.Equals, 0)
+ c.Check(err, check.IsNil)
+ c.Check(buf.Len(), check.Equals, 0)
+ }()
+ }
+ wg.Wait()
+}
+
func (s *keepCacheSuite) TestStreaming(c *check.C) {
blksize := 64000000
backend := &keepGatewayMemoryBacked{
diff --git a/sdk/go/arvados/link.go b/sdk/go/arvados/link.go
index 7df6b84d60..b0bc7419a0 100644
--- a/sdk/go/arvados/link.go
+++ b/sdk/go/arvados/link.go
@@ -8,21 +8,19 @@ import "time"
// Link is an arvados#link record
type Link struct {
- UUID string `json:"uuid,omitempty"`
- Etag string `json:"etag"`
- Href string `json:"href"`
- OwnerUUID string `json:"owner_uuid"`
- Name string `json:"name"`
- LinkClass string `json:"link_class"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- HeadUUID string `json:"head_uuid"`
- HeadKind string `json:"head_kind"`
- TailUUID string `json:"tail_uuid"`
- TailKind string `json:"tail_kind"`
- Properties map[string]interface{} `json:"properties"`
+ UUID string `json:"uuid,omitempty"`
+ Etag string `json:"etag"`
+ OwnerUUID string `json:"owner_uuid"`
+ Name string `json:"name"`
+ LinkClass string `json:"link_class"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ HeadUUID string `json:"head_uuid"`
+ HeadKind string `json:"head_kind"`
+ TailUUID string `json:"tail_uuid"`
+ TailKind string `json:"tail_kind"`
+ Properties map[string]interface{} `json:"properties"`
}
// LinkList is an arvados#linkList resource.
@@ -32,3 +30,15 @@ type LinkList struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
}
+
+type ComputedPermission struct {
+ UserUUID string `json:"user_uuid"`
+ TargetUUID string `json:"target_uuid"`
+ PermLevel string `json:"perm_level"`
+}
+
+type ComputedPermissionList struct {
+ Items []ComputedPermission `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Limit int `json:"limit"`
+}
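
For reference, a sketch of decoding a list response into these types; field names follow the struct tags above, and the payload values are hypothetical:

```go
var list arvados.ComputedPermissionList
err := json.Unmarshal([]byte(`{
	"items": [
		{"user_uuid": "zzzzz-tpzed-xxxxxxxxxxxxxxx",
		 "target_uuid": "zzzzz-4zz18-xxxxxxxxxxxxxxx",
		 "perm_level": "can_read"}
	],
	"items_available": 1,
	"limit": 100
}`), &list)
```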
diff --git a/sdk/go/arvados/pipeline_instance.go b/sdk/go/arvados/pipeline_instance.go
deleted file mode 100644
index ace1826850..0000000000
--- a/sdk/go/arvados/pipeline_instance.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package arvados
-
-import "time"
-
-// PipelineInstance is an arvados#pipelineInstance record
-type PipelineInstance struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- PipelineTemplateUUID string `json:"pipeline_template_uuid"`
- Name string `json:"name"`
- Components map[string]interface{} `json:"components"`
- UpdatedAt time.Time `json:"updated_at"`
- Properties map[string]interface{} `json:"properties"`
- State string `json:"state"`
- ComponentsSummary map[string]interface{} `json:"components_summary"`
- StartedAt time.Time `json:"started_at"`
- FinishedAt time.Time `json:"finished_at"`
- Description string `json:"description"`
- WritableBy []string `json:"writable_by,omitempty"`
-}
-
-func (g PipelineInstance) resourceName() string {
- return "pipelineInstance"
-}
diff --git a/sdk/go/arvados/pipeline_template.go b/sdk/go/arvados/pipeline_template.go
deleted file mode 100644
index 31d9e8b2fe..0000000000
--- a/sdk/go/arvados/pipeline_template.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package arvados
-
-import "time"
-
-// PipelineTemplate is an arvados#pipelineTemplate record
-type PipelineTemplate struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- Name string `json:"name"`
- Components map[string]interface{} `json:"components"`
- UpdatedAt time.Time `json:"updated_at"`
- Description string `json:"description"`
- WritableBy []string `json:"writable_by,omitempty"`
-}
-
-func (g PipelineTemplate) resourceName() string {
- return "pipelineTemplate"
-}
diff --git a/sdk/go/arvados/specimen.go b/sdk/go/arvados/specimen.go
deleted file mode 100644
index b561fb20ae..0000000000
--- a/sdk/go/arvados/specimen.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package arvados
-
-import "time"
-
-type Specimen struct {
- UUID string `json:"uuid"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- Properties map[string]interface{} `json:"properties"`
-}
-
-type SpecimenList struct {
- Items []Specimen `json:"items"`
- ItemsAvailable int `json:"items_available"`
- Offset int `json:"offset"`
- Limit int `json:"limit"`
-}
diff --git a/sdk/go/arvados/trait.go b/sdk/go/arvados/trait.go
deleted file mode 100644
index fb0e799b6b..0000000000
--- a/sdk/go/arvados/trait.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package arvados
-
-import "time"
-
-// Trait is an arvados#trait record
-type Trait struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedAt time.Time `json:"modified_at"`
- Name string `json:"name"`
- Properties map[string]interface{} `json:"properties"`
- UpdatedAt time.Time `json:"updated_at"`
- WritableBy []string `json:"writable_by,omitempty"`
-}
-
-func (g Trait) resourceName() string {
- return "trait"
-}
diff --git a/sdk/go/arvados/user.go b/sdk/go/arvados/user.go
index 2fb061e7fb..157a3d8863 100644
--- a/sdk/go/arvados/user.go
+++ b/sdk/go/arvados/user.go
@@ -8,26 +8,25 @@ import "time"
// User is an arvados#user record
type User struct {
- UUID string `json:"uuid"`
- Etag string `json:"etag"`
- IsActive bool `json:"is_active"`
- IsAdmin bool `json:"is_admin"`
- Username string `json:"username"`
- Email string `json:"email"`
- FullName string `json:"full_name"`
- FirstName string `json:"first_name"`
- LastName string `json:"last_name"`
- IdentityURL string `json:"identity_url"`
- IsInvited bool `json:"is_invited"`
- OwnerUUID string `json:"owner_uuid"`
- CreatedAt time.Time `json:"created_at"`
- ModifiedAt time.Time `json:"modified_at"`
- ModifiedByUserUUID string `json:"modified_by_user_uuid"`
- ModifiedByClientUUID string `json:"modified_by_client_uuid"`
- Prefs map[string]interface{} `json:"prefs"`
- WritableBy []string `json:"writable_by,omitempty"`
- CanWrite bool `json:"can_write"`
- CanManage bool `json:"can_manage"`
+ UUID string `json:"uuid"`
+ Etag string `json:"etag"`
+ IsActive bool `json:"is_active"`
+ IsAdmin bool `json:"is_admin"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ FullName string `json:"full_name"`
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ IdentityURL string `json:"identity_url"`
+ IsInvited bool `json:"is_invited"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ Prefs map[string]interface{} `json:"prefs"`
+ WritableBy []string `json:"writable_by,omitempty"`
+ CanWrite bool `json:"can_write"`
+ CanManage bool `json:"can_manage"`
}
// UserList is an arvados#userList resource.
diff --git a/sdk/go/arvados/vocabulary.go b/sdk/go/arvados/vocabulary.go
index 1df43b5fb8..adde25cd92 100644
--- a/sdk/go/arvados/vocabulary.go
+++ b/sdk/go/arvados/vocabulary.go
@@ -35,15 +35,33 @@ func (v *Vocabulary) systemTagKeys() map[string]bool {
// Collection keys - set by arvados-cwl-runner
"container_request": true,
"container_uuid": true,
- "type": true,
+
+ // legacy Collection key, set by arvados-cwl-runner,
+ // was changed to container_uuid in Arvados 2.6.0 but
+ // still gets set if an older version of a-c-r is
+ // used.
+ "container": true,
+
+ // Set by several components to indicate the intended
+ // role of a collection
+ "type": true,
+
// Collection keys - set by arv-keepdocker (on the way out)
"docker-image-repo-tag": true,
+
// Container request keys - set by arvados-cwl-runner
- "cwl_input": true,
- "cwl_output": true,
+ "cwl_input": true,
+ "cwl_output": true,
+
+	// Container request keys set by Workbench 2, alongside
+	// those above, to link to the Workflow definition used
+	// to launch the workflow
"template_uuid": true,
+ "workflowName": true,
+
// Group keys
"filters": true,
+
// Link keys
"groups": true,
"image_timestamp": true,
diff --git a/sdk/go/arvados/vocabulary_test.go b/sdk/go/arvados/vocabulary_test.go
index af62833a31..84dc4226dd 100644
--- a/sdk/go/arvados/vocabulary_test.go
+++ b/sdk/go/arvados/vocabulary_test.go
@@ -232,6 +232,7 @@ func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
reservedTagKeys: map[string]bool{
"container_request": true,
"container_uuid": true,
+ "container": true,
"cwl_input": true,
"cwl_output": true,
"docker-image-repo-tag": true,
@@ -241,6 +242,7 @@ func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
"template_uuid": true,
"type": true,
"username": true,
+ "workflowName": true,
},
StrictTags: false,
Tags: map[string]VocabularyTag{
diff --git a/sdk/go/arvadosclient/arvadosclient.go b/sdk/go/arvadosclient/arvadosclient.go
index d0ebdc1b01..c87cc97fe4 100644
--- a/sdk/go/arvadosclient/arvadosclient.go
+++ b/sdk/go/arvadosclient/arvadosclient.go
@@ -20,6 +20,7 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
)
type StringMatcher func(string) bool
@@ -110,6 +111,9 @@ type ArvadosClient struct {
// filesystem size.
DiskCacheSize arvados.ByteSizeOrPercent
+ // Where to write debug logs. May be nil.
+ Logger logrus.FieldLogger
+
// Discovery document
DiscoveryDoc Dict
@@ -120,6 +124,13 @@ type ArvadosClient struct {
// X-Request-Id for outgoing requests
RequestID string
+
+ // Cluster config from the arvados.Client passed to New(), if
+ // any. If non-nil, its keep services configuration is used
+	// instead of requesting a server list from controller. Note
+	// that this is disabled by default in test suites via the
+	// ARVADOS_FORCE_KEEP_SERVICES_TABLE environment variable.
+ Cluster *arvados.Cluster
}
// MakeTLSConfig sets up TLS configuration for communicating with
@@ -150,7 +161,9 @@ func New(c *arvados.Client) (*ArvadosClient, error) {
Retries: 2,
KeepServiceURIs: c.KeepServiceURIs,
DiskCacheSize: c.DiskCacheSize,
+ Logger: c.Logger,
lastClosedIdlesAt: time.Now(),
+ Cluster: c.Cluster,
}
return ac, nil
diff --git a/sdk/go/arvadosclient/arvadosclient_test.go b/sdk/go/arvadosclient/arvadosclient_test.go
index b074e21e81..d529c632a9 100644
--- a/sdk/go/arvadosclient/arvadosclient_test.go
+++ b/sdk/go/arvadosclient/arvadosclient_test.go
@@ -43,6 +43,7 @@ func (s *ServerRequiredSuite) SetUpTest(c *C) {
}
func (s *ServerRequiredSuite) TestMakeArvadosClientSecure(c *C) {
+ defer os.Setenv("ARVADOS_API_HOST_INSECURE", os.Getenv("ARVADOS_API_HOST_INSECURE"))
os.Setenv("ARVADOS_API_HOST_INSECURE", "")
ac, err := MakeArvadosClient()
c.Assert(err, Equals, nil)
@@ -52,6 +53,7 @@ func (s *ServerRequiredSuite) TestMakeArvadosClientSecure(c *C) {
}
func (s *ServerRequiredSuite) TestMakeArvadosClientInsecure(c *C) {
+ defer os.Setenv("ARVADOS_API_HOST_INSECURE", os.Getenv("ARVADOS_API_HOST_INSECURE"))
os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
ac, err := MakeArvadosClient()
c.Assert(err, Equals, nil)
@@ -282,7 +284,7 @@ func (h *APIStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
if status := h.respStatus[h.retryAttempts]; status < 0 {
// Fail the client's Do() by hanging up without
// sending an HTTP response header.
- conn, _, err := resp.(http.Hijacker).Hijack()
+ conn, _, err := http.NewResponseController(resp).Hijack()
if err != nil {
panic(err)
}
diff --git a/sdk/go/arvadosclient/pool.go b/sdk/go/arvadosclient/pool.go
index bb7867aef7..4272f0f759 100644
--- a/sdk/go/arvadosclient/pool.go
+++ b/sdk/go/arvadosclient/pool.go
@@ -13,8 +13,8 @@ import (
// A ClientPool is a pool of ArvadosClients. This is useful for
// applications that make API calls using a dynamic set of tokens,
// like web services that pass through their own clients'
-// credentials. See arvados-git-httpd for an example, and sync.Pool
-// for more information about garbage collection.
+// credentials. See sync.Pool for more information about garbage
+// collection.
type ClientPool struct {
// Initialize new clients by copying this one.
Prototype *ArvadosClient
diff --git a/sdk/go/arvadostest/api.go b/sdk/go/arvadostest/api.go
index e1827b5d1f..97b710b48b 100644
--- a/sdk/go/arvadostest/api.go
+++ b/sdk/go/arvadostest/api.go
@@ -108,6 +108,10 @@ func (as *APIStub) CollectionUntrash(ctx context.Context, options arvados.Untras
as.appendCall(ctx, as.CollectionUntrash, options)
return arvados.Collection{}, as.Error
}
+func (as *APIStub) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {
+ as.appendCall(ctx, as.ComputedPermissionList, options)
+ return arvados.ComputedPermissionList{}, as.Error
+}
func (as *APIStub) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
as.appendCall(ctx, as.ContainerCreate, options)
return arvados.Container{}, as.Error
@@ -148,6 +152,10 @@ func (as *APIStub) ContainerGatewayTunnel(ctx context.Context, options arvados.C
as.appendCall(ctx, as.ContainerGatewayTunnel, options)
return arvados.ConnectionResponse{}, as.Error
}
+func (as *APIStub) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {
+ as.appendCall(ctx, as.ContainerHTTPProxy, options)
+ return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {}), as.Error
+}
func (as *APIStub) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {
as.appendCall(ctx, as.ContainerRequestCreate, options)
return arvados.ContainerRequest{}, as.Error
@@ -264,26 +272,6 @@ func (as *APIStub) LogDelete(ctx context.Context, options arvados.DeleteOptions)
as.appendCall(ctx, as.LogDelete, options)
return arvados.Log{}, as.Error
}
-func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
- as.appendCall(ctx, as.SpecimenCreate, options)
- return arvados.Specimen{}, as.Error
-}
-func (as *APIStub) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
- as.appendCall(ctx, as.SpecimenUpdate, options)
- return arvados.Specimen{}, as.Error
-}
-func (as *APIStub) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
- as.appendCall(ctx, as.SpecimenGet, options)
- return arvados.Specimen{}, as.Error
-}
-func (as *APIStub) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
- as.appendCall(ctx, as.SpecimenList, options)
- return arvados.SpecimenList{}, as.Error
-}
-func (as *APIStub) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
- as.appendCall(ctx, as.SpecimenDelete, options)
- return arvados.Specimen{}, as.Error
-}
func (as *APIStub) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
as.appendCall(ctx, as.SysTrashSweep, options)
return struct{}{}, as.Error
diff --git a/sdk/go/arvadostest/fixtures.go b/sdk/go/arvadostest/fixtures.go
index 3b8a618fea..708f52795e 100644
--- a/sdk/go/arvadostest/fixtures.go
+++ b/sdk/go/arvadostest/fixtures.go
@@ -44,9 +44,6 @@ const (
FooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
FooAndBarFilesInDirPDH = "870369fc72738603c2fad16664e50e2d+58"
- Dispatch1Token = "kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw"
- Dispatch1AuthUUID = "zzzzz-gj3su-k9dvestay1plssr"
-
QueuedContainerRequestUUID = "zzzzz-xvhdp-cr4queuedcontnr"
QueuedContainerUUID = "zzzzz-dz642-queuedcontainer"
@@ -101,7 +98,7 @@ const (
AdminAuthorizedKeysUUID = "zzzzz-fngyi-12nc9ov4osp8nae"
- CrunchstatForRunningJobLogUUID = "zzzzz-57u5n-tmymyrojrbtnxh1"
+ CrunchstatForRunningContainerLogUUID = "zzzzz-57u5n-containerlog006"
IdleNodeUUID = "zzzzz-7ekkf-2z3mc76g2q73aio"
@@ -116,6 +113,24 @@ const (
DockerImage112Filename = "sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar"
)
+var TestCollectionUUIDToPDH = map[string]string{
+ FooCollection: FooCollectionPDH,
+ MultilevelCollection1: MultilevelCollection1PDH,
+ FooAndBarFilesInDirUUID: FooAndBarFilesInDirPDH,
+ BarFileCollectionUUID: BarFileCollectionPDH,
+}
+
+var TestCollectionPDHToManifest = map[string]string{
+ FooCollectionPDH: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n",
+ MultilevelCollection1PDH: `. d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3
+./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3
+./dir1/subdir d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3
+./dir2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3
+`,
+ FooAndBarFilesInDirPDH: "./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\n",
+ BarFileCollectionPDH: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n",
+}
+
// PathologicalManifest : A valid manifest designed to test
// various edge cases and parsing requirements
const PathologicalManifest = ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero@0 0:1:f 1:0:zero@1 1:4:ooba 4:0:zero@4 5:1:r 5:4:rbaz 9:0:zero@9\n" +
diff --git a/sdk/go/arvadostest/manifest.go b/sdk/go/arvadostest/manifest.go
new file mode 100644
index 0000000000..f5939cd152
--- /dev/null
+++ b/sdk/go/arvadostest/manifest.go
@@ -0,0 +1,42 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+)
+
+func FakeManifest(dirCount, filesPerDir, blocksPerFile, interleaveChunk int) string {
+ const blksize = 1 << 26
+ mb := bytes.NewBuffer(make([]byte, 0, 40000000))
+ blkid := 0
+ for i := 0; i < dirCount; i++ {
+ fmt.Fprintf(mb, "./dir%d", i)
+ for j := 0; j < filesPerDir; j++ {
+ for k := 0; k < blocksPerFile; k++ {
+ blkid++
+ fmt.Fprintf(mb, " %032x+%d+A%040x@%08x", blkid, blksize, blkid, blkid)
+ }
+ }
+ for j := 0; j < filesPerDir; j++ {
+ if interleaveChunk == 0 {
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", (filesPerDir-j-1)*blocksPerFile*blksize, blocksPerFile*blksize, j, j)
+ continue
+ }
+ for todo := int64(blocksPerFile) * int64(blksize); todo > 0; todo -= int64(interleaveChunk) {
+ size := int64(interleaveChunk)
+ if size > todo {
+ size = todo
+ }
+ offset := rand.Int63n(int64(blocksPerFile)*int64(blksize)*int64(filesPerDir) - size)
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", offset, size, j, j)
+ }
+ }
+ mb.Write([]byte{'\n'})
+ }
+ return mb.String()
+}
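
A quick usage sketch (illustrative; the block hashes FakeManifest emits are synthetic, so the result is only useful with a stub keep client):

```go
// Two directories, three files each, one 64 MiB block per file,
// with no interleaving of file segments across blocks.
mtxt := arvadostest.FakeManifest(2, 3, 1, 0)
coll := arvados.Collection{ManifestText: mtxt}
// coll.FileSystem(...) can now exercise manifest parsing and
// directory traversal without any real keep data.
```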
diff --git a/sdk/go/arvadostest/metrics.go b/sdk/go/arvadostest/metrics.go
index 5fe1d607bf..94bf3d131f 100644
--- a/sdk/go/arvadostest/metrics.go
+++ b/sdk/go/arvadostest/metrics.go
@@ -13,7 +13,7 @@ import (
func GatherMetricsAsString(reg *prometheus.Registry) string {
buf := bytes.NewBuffer(nil)
- enc := expfmt.NewEncoder(buf, expfmt.FmtText)
+ enc := expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeTextPlain))
got, _ := reg.Gather()
for _, mf := range got {
enc.Encode(mf)
diff --git a/sdk/go/arvadostest/oidc_provider.go b/sdk/go/arvadostest/oidc_provider.go
index 31a2667122..2289bbef30 100644
--- a/sdk/go/arvadostest/oidc_provider.go
+++ b/sdk/go/arvadostest/oidc_provider.go
@@ -17,8 +17,8 @@ import (
"time"
"gopkg.in/check.v1"
- "gopkg.in/square/go-jose.v2"
- "gopkg.in/square/go-jose.v2/jwt"
+ "gopkg.in/go-jose/go-jose.v2"
+ "gopkg.in/go-jose/go-jose.v2/jwt"
)
type OIDCProvider struct {
diff --git a/sdk/go/arvadostest/run_servers.go b/sdk/go/arvadostest/run_servers.go
index 8f70c5ee26..619f004ebe 100644
--- a/sdk/go/arvadostest/run_servers.go
+++ b/sdk/go/arvadostest/run_servers.go
@@ -13,6 +13,7 @@ import (
"os"
"os/exec"
"path"
+ "path/filepath"
"strconv"
"strings"
@@ -41,30 +42,20 @@ func ResetEnv() {
}
}
-var pythonTestDir string
-
-func chdirToPythonTests() {
- if pythonTestDir != "" {
- if err := os.Chdir(pythonTestDir); err != nil {
- log.Fatalf("chdir %s: %s", pythonTestDir, err)
- }
- return
- }
- for {
- if err := os.Chdir("sdk/python/tests"); err == nil {
- pythonTestDir, err = os.Getwd()
+func pythonTestDir() string {
+ reldir := "sdk/python/tests/"
+ for i := 0; i < 10; i++ {
+ if _, err := os.Stat(reldir); err == nil {
+ dir, err := filepath.Abs(reldir)
if err != nil {
log.Fatal(err)
}
- return
- }
- if parent, err := os.Getwd(); err != nil || parent == "/" {
- log.Fatalf("sdk/python/tests/ not found in any ancestor")
- }
- if err := os.Chdir(".."); err != nil {
- log.Fatal(err)
+ return dir
}
+ reldir = "../" + reldir
}
+ log.Fatalf("sdk/python/tests/ not found in any ancestor")
+ return ""
}
func ResetDB(c *check.C) {
@@ -84,27 +75,21 @@ func ResetDB(c *check.C) {
// optionally with --keep-blob-signing enabled.
// Use numKeepServers = 2 and blobSigning = false under all normal circumstances.
func StartKeep(numKeepServers int, blobSigning bool) {
- cwd, _ := os.Getwd()
- defer os.Chdir(cwd)
- chdirToPythonTests()
-
cmdArgs := []string{"run_test_server.py", "start_keep", "--num-keep-servers", strconv.Itoa(numKeepServers)}
if blobSigning {
cmdArgs = append(cmdArgs, "--keep-blob-signing")
}
-
- bgRun(exec.Command("python", cmdArgs...))
+ cmd := exec.Command("python", cmdArgs...)
+ cmd.Dir = pythonTestDir()
+ bgRun(cmd)
}
// StopKeep stops keep servers that were started with StartKeep.
// numkeepServers should be the same value that was passed to StartKeep,
// which is 2 under all normal circumstances.
func StopKeep(numKeepServers int) {
- cwd, _ := os.Getwd()
- defer os.Chdir(cwd)
- chdirToPythonTests()
-
cmd := exec.Command("python", "run_test_server.py", "stop_keep", "--num-keep-servers", strconv.Itoa(numKeepServers))
+ cmd.Dir = pythonTestDir()
bgRun(cmd)
// Without Wait, "go test" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050
cmd.Wait()
@@ -121,8 +106,10 @@ func bgRun(cmd *exec.Cmd) {
if err := cmd.Start(); err != nil {
log.Fatalf("%+v: %s", cmd.Args, err)
}
- if _, err := cmd.Process.Wait(); err != nil {
+ if pstate, err := cmd.Process.Wait(); err != nil {
log.Fatalf("%+v: %s", cmd.Args, err)
+ } else if pstate.ExitCode() != 0 {
+ log.Fatalf("%+v: exited %d", cmd.Args, pstate.ExitCode())
}
}
diff --git a/sdk/go/auth/auth.go b/sdk/go/auth/auth.go
index da9b4ea5b8..d74f46863e 100644
--- a/sdk/go/auth/auth.go
+++ b/sdk/go/auth/auth.go
@@ -51,8 +51,10 @@ var DecodeTokenCookie func(string) ([]byte, error) = base64.URLEncoding.DecodeSt
// LoadTokensFromHTTPRequest loads all tokens it can find in the
// headers and query string of an http query.
func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
- // Load plain token from "Authorization: OAuth2 ..." header
- // (typically used by smart API clients)
+ // Load plain token from "Authorization: Bearer ..." header
+	// (typically used by smart API clients). Note that many
+	// pre-3.0 clients send "OAuth2 ..." instead of "Bearer ...",
+	// which is still accepted.
if toks := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(toks) == 2 && (toks[0] == "OAuth2" || toks[0] == "Bearer") {
a.Tokens = append(a.Tokens, strings.TrimSpace(toks[1]))
}
@@ -117,3 +119,24 @@ func (a *Credentials) LoadTokensFromHTTPRequestBody(r *http.Request) error {
}
return nil
}
+
+// TokenUUIDs returns a list of token UUIDs (or a placeholder for v1
+// tokens) suitable for logging.
+func (creds *Credentials) TokenUUIDs() []string {
+ var tokenUUIDs []string
+ for _, t := range creds.Tokens {
+ if strings.HasPrefix(t, "v2/") {
+ tokenParts := strings.Split(t, "/")
+ if len(tokenParts) >= 3 {
+ tokenUUIDs = append(tokenUUIDs, tokenParts[1])
+ }
+ } else {
+ end := t
+ if len(t) > 5 {
+ end = t[len(t)-5:]
+ }
+ tokenUUIDs = append(tokenUUIDs, "v1 token ending in "+end)
+ }
+ }
+ return tokenUUIDs
+}
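
To illustrate the two branches (the tokens below are made up):

```go
creds := &auth.Credentials{Tokens: []string{
	"v2/zzzzz-gj3su-000000000000000/itsasecret",
	"3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi",
}}
fmt.Println(creds.TokenUUIDs())
// Prints: [zzzzz-gj3su-000000000000000 v1 token ending in jqdmi]
```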
diff --git a/sdk/go/auth/salt.go b/sdk/go/auth/salt.go
index 2140215986..e20c9ff8bc 100644
--- a/sdk/go/auth/salt.go
+++ b/sdk/go/auth/salt.go
@@ -31,17 +31,19 @@ func SaltToken(token, remote string) (string, error) {
}
uuid := parts[1]
secret := parts[2]
- if len(secret) != 40 {
+ if strings.HasPrefix(uuid, remote) {
+ // target cluster issued this token -- send the real
+ // token
+ return token, nil
+ } else if len(secret) != 40 {
// not already salted
hmac := hmac.New(sha1.New, []byte(secret))
io.WriteString(hmac, remote)
secret = fmt.Sprintf("%x", hmac.Sum(nil))
return "v2/" + uuid + "/" + secret, nil
- } else if strings.HasPrefix(uuid, remote) {
- // already salted for the desired remote
- return token, nil
} else {
- // salted for a different remote, can't be used
+ // already salted, and not issued by target cluster --
+ // can't be used
return "", ErrSalted
}
}
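
A worked example of the three branches after this change (cluster IDs and secrets are hypothetical):

```go
// Issued by the target cluster itself: returned unchanged, even if
// the secret happens to be 40 characters.
SaltToken("v2/zzzz2-gj3su-000000000000000/anysecret", "zzzz2")

// Unsalted (secret is not 40 characters): the secret is replaced by
// hex(HMAC-SHA1(key=secret, message="zzzz2")).
SaltToken("v2/zzzz1-gj3su-000000000000000/shortsecret", "zzzz2")

// Already salted (40-character secret) and issued elsewhere: ErrSalted.
SaltToken("v2/zzzz1-gj3su-000000000000000/"+strings.Repeat("a", 40), "zzzz2")
```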
diff --git a/sdk/go/blockdigest/blockdigest.go b/sdk/go/blockdigest/blockdigest.go
index ecb09964ec..57593bea9c 100644
--- a/sdk/go/blockdigest/blockdigest.go
+++ b/sdk/go/blockdigest/blockdigest.go
@@ -65,29 +65,24 @@ func IsBlockLocator(s string) bool {
return LocatorPattern.MatchString(s)
}
-func ParseBlockLocator(s string) (b BlockLocator, err error) {
+func ParseBlockLocator(s string) (BlockLocator, error) {
if !LocatorPattern.MatchString(s) {
- err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
- "\"%s\".",
- s,
- LocatorPattern.String())
- } else {
- tokens := strings.Split(s, "+")
- var blockSize int64
- var blockDigest BlockDigest
- // We expect both of the following to succeed since LocatorPattern
- // restricts the strings appropriately.
- blockDigest, err = FromString(tokens[0])
- if err != nil {
- return
- }
- blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
- if err != nil {
- return
- }
- b.Digest = blockDigest
- b.Size = int(blockSize)
- b.Hints = tokens[2:]
+		return BlockLocator{}, fmt.Errorf("string %q does not match block locator pattern %q", s, LocatorPattern.String())
}
- return
+ tokens := strings.Split(s, "+")
+ // We expect both of the following to succeed since
+ // LocatorPattern restricts the strings appropriately.
+ blockDigest, err := FromString(tokens[0])
+ if err != nil {
+ return BlockLocator{}, err
+ }
+ blockSize, err := strconv.ParseInt(tokens[1], 10, 0)
+ if err != nil {
+ return BlockLocator{}, err
+ }
+ return BlockLocator{
+ Digest: blockDigest,
+ Size: int(blockSize),
+ Hints: tokens[2:],
+ }, nil
}
diff --git a/sdk/go/dispatch/dispatch_test.go b/sdk/go/dispatch/dispatch_test.go
index 2a9d84639e..ff3b357692 100644
--- a/sdk/go/dispatch/dispatch_test.go
+++ b/sdk/go/dispatch/dispatch_test.go
@@ -21,7 +21,7 @@ type suite struct{}
func (s *suite) TestTrackContainer(c *C) {
arv, err := arvadosclient.MakeArvadosClient()
c.Assert(err, Equals, nil)
- arv.ApiToken = arvadostest.Dispatch1Token
+ arv.ApiToken = arvadostest.SystemRootToken
done := make(chan bool, 1)
time.AfterFunc(10*time.Second, func() { done <- false })
diff --git a/sdk/go/health/aggregator.go b/sdk/go/health/aggregator.go
index 3bf37b1294..19bafb54c5 100644
--- a/sdk/go/health/aggregator.go
+++ b/sdk/go/health/aggregator.go
@@ -501,7 +501,10 @@ func (ccmd checkCommand) run(ctx context.Context, prog string, args []string, st
return nil
}
-var reGoVersion = regexp.MustCompile(` \(go\d+([\d.])*\)$`)
+var (
+ reGoVersion = regexp.MustCompile(` \(go\d+([\d.])*\)$`)
+ reDevVersion = regexp.MustCompile(`~dev\d+$`)
+)
// Return true if either a==b or the only difference is that one has a
// " (go1.2.3)" suffix and the other does not.
@@ -509,17 +512,10 @@ var reGoVersion = regexp.MustCompile(` \(go\d+([\d.])*\)$`)
// This allows us to recognize a non-Go (rails) service as the same
// version as a Go service.
func sameVersion(a, b string) bool {
- if a == b {
- return true
- }
- anogo := reGoVersion.ReplaceAllLiteralString(a, "")
- bnogo := reGoVersion.ReplaceAllLiteralString(b, "")
- if (anogo == a) != (bnogo == b) {
- // only one of a/b has a (go1.2.3) suffix, so compare
- // without that part
- return anogo == bnogo
- }
- // both or neither has a (go1.2.3) suffix, and we already know
- // a!=b
- return false
+ // Strip " (go1.2.3)" suffix
+ a = reGoVersion.ReplaceAllLiteralString(a, "")
+ b = reGoVersion.ReplaceAllLiteralString(b, "")
+ anodev := reDevVersion.ReplaceAllLiteralString(a, "")
+ bnodev := reDevVersion.ReplaceAllLiteralString(b, "")
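+	// Same version iff the dev-stripped remainders match and both
+	// or neither carried a "~devNNN..." suffix.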
+ return anodev == bnodev && (a == anodev) == (b == bnodev)
}
diff --git a/sdk/go/health/aggregator_test.go b/sdk/go/health/aggregator_test.go
index f76f7b8ea8..4401a9752e 100644
--- a/sdk/go/health/aggregator_test.go
+++ b/sdk/go/health/aggregator_test.go
@@ -57,6 +57,19 @@ func (s *AggregatorSuite) SetUpTest(c *check.C) {
s.resp = httptest.NewRecorder()
}
+func (s *AggregatorSuite) TestSameVersion(c *check.C) {
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.1~dev20240610194320"), check.Equals, false)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.1~dev20240610194320 (go1.21.10)"), check.Equals, false)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.1~dev20240610194320 (go1.21.9)"), check.Equals, false)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.0~dev20240610194320 (go1.21.9)"), check.Equals, true)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.0~dev20240611211146 (go1.21.10)"), check.Equals, true)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.0~dev20240611211146"), check.Equals, true)
+ c.Check(sameVersion("2.8.0~dev20240610194320 (go1.21.10)", "2.8.0"), check.Equals, false)
+ c.Check(sameVersion("2.8.0~dev20240610194320", "2.8.0"), check.Equals, false)
+ c.Check(sameVersion("2.8.0", "2.8.0"), check.Equals, true)
+ c.Check(sameVersion("2.8.0", "2.8.1"), check.Equals, false)
+}
+
func (s *AggregatorSuite) TestNoAuth(c *check.C) {
s.req.Header.Del("Authorization")
s.handler.ServeHTTP(s.resp, s.req)
@@ -372,7 +385,6 @@ func (s *AggregatorSuite) setAllServiceURLs(listen string) {
&svcs.DispatchCloud,
&svcs.DispatchLSF,
&svcs.DispatchSLURM,
- &svcs.GitHTTP,
&svcs.Keepbalance,
&svcs.Keepproxy,
&svcs.Keepstore,
diff --git a/sdk/go/httpserver/httpserver.go b/sdk/go/httpserver/httpserver.go
index 627e04f0be..df6e943f7e 100644
--- a/sdk/go/httpserver/httpserver.go
+++ b/sdk/go/httpserver/httpserver.go
@@ -5,8 +5,11 @@
package httpserver
import (
+ "log"
"net"
"net/http"
+ "os"
+ "strings"
"sync"
"time"
)
@@ -33,7 +36,7 @@ func (srv *Server) Start() error {
if err != nil {
return err
}
- srv.listener, err = net.ListenTCP("tcp", addr)
+ srv.listener, err = listenTCP("tcp", addr)
if err != nil {
return err
}
@@ -94,3 +97,26 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
+
+// net.ListenTCP, but retry after "address already in use" for up to 5
+// minutes if running inside the arvados test suite.
+func listenTCP(network string, addr *net.TCPAddr) (*net.TCPListener, error) {
+ if os.Getenv("ARVADOS_TEST_API_HOST") == "" {
+ return net.ListenTCP("tcp", addr)
+ }
+ timeout := 5 * time.Minute
+ deadline := time.Now().Add(timeout)
+ logged := false
+ for {
+ ln, err := net.ListenTCP("tcp", addr)
+ if err != nil && strings.Contains(err.Error(), "address already in use") && time.Now().Before(deadline) {
+ if !logged {
+ log.Printf("listenTCP: retrying up to %v after error: %s", timeout, err)
+ logged = true
+ }
+ time.Sleep(time.Second)
+ continue
+ }
+ return ln, err
+ }
+}
diff --git a/sdk/go/httpserver/logger.go b/sdk/go/httpserver/logger.go
index b71adf7118..917183daee 100644
--- a/sdk/go/httpserver/logger.go
+++ b/sdk/go/httpserver/logger.go
@@ -5,9 +5,7 @@
package httpserver
import (
- "bufio"
"context"
- "net"
"net/http"
"sync"
"time"
@@ -25,25 +23,9 @@ var (
requestTimeContextKey = contextKey{"requestTime"}
responseLogFieldsContextKey = contextKey{"responseLogFields"}
mutexContextKey = contextKey{"mutex"}
+ stopDeadlineTimerContextKey = contextKey{"stopDeadlineTimer"}
)
-type hijacker interface {
- http.ResponseWriter
- http.Hijacker
-}
-
-// hijackNotifier wraps a ResponseWriter, calling the provided
-// Notify() func if/when the wrapped Hijacker is hijacked.
-type hijackNotifier struct {
- hijacker
- hijacked chan<- bool
-}
-
-func (hn hijackNotifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- close(hn.hijacked)
- return hn.hijacker.Hijack()
-}
-
// HandlerWithDeadline cancels the request context if the request
// takes longer than the specified timeout without having its
// connection hijacked.
@@ -57,22 +39,23 @@ func HandlerWithDeadline(timeout time.Duration, next http.Handler) http.Handler
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
- nodeadline := make(chan bool)
- go func() {
- select {
- case <-nodeadline:
- case <-ctx.Done():
- case <-time.After(timeout):
- cancel()
- }
- }()
- if hj, ok := w.(hijacker); ok {
- w = hijackNotifier{hj, nodeadline}
- }
+ timer := time.AfterFunc(timeout, cancel)
+ ctx = context.WithValue(ctx, stopDeadlineTimerContextKey, timer.Stop)
next.ServeHTTP(w, r.WithContext(ctx))
+ timer.Stop()
})
}
+// ExemptFromDeadline exempts the given request from the timeout set
+// by HandlerWithDeadline.
+//
+// It is a no-op if the deadline has already passed, or none was set.
+func ExemptFromDeadline(r *http.Request) {
+ if stop, ok := r.Context().Value(stopDeadlineTimerContextKey).(func() bool); ok {
+ stop()
+ }
+}
+
func SetResponseLogFields(ctx context.Context, fields logrus.Fields) {
m, _ := ctx.Value(&mutexContextKey).(*sync.Mutex)
c, _ := ctx.Value(&responseLogFieldsContextKey).(logrus.Fields)
@@ -110,21 +93,10 @@ func LogRequests(h http.Handler) http.Handler {
logRequest(w, req, lgr)
defer logResponse(w, req, lgr)
- h.ServeHTTP(rewrapResponseWriter(w, wrapped), req)
+ h.ServeHTTP(w, req)
})
}
-// Rewrap w to restore additional interfaces provided by wrapped.
-func rewrapResponseWriter(w http.ResponseWriter, wrapped http.ResponseWriter) http.ResponseWriter {
- if hijacker, ok := wrapped.(http.Hijacker); ok {
- return struct {
- http.ResponseWriter
- http.Hijacker
- }{w, hijacker}
- }
- return w
-}
-
func Logger(req *http.Request) logrus.FieldLogger {
return ctxlog.FromContext(req.Context())
}
@@ -172,11 +144,8 @@ type responseTimer struct {
writeTime time.Time
}
-func (rt *responseTimer) CloseNotify() <-chan bool {
- if cn, ok := rt.ResponseWriter.(http.CloseNotifier); ok {
- return cn.CloseNotify()
- }
- return nil
+func (rt *responseTimer) Unwrap() http.ResponseWriter {
+ return rt.ResponseWriter
}
func (rt *responseTimer) WriteHeader(code int) {
diff --git a/sdk/go/httpserver/logger_test.go b/sdk/go/httpserver/logger_test.go
index 60768b3fc9..206148d4f6 100644
--- a/sdk/go/httpserver/logger_test.go
+++ b/sdk/go/httpserver/logger_test.go
@@ -72,29 +72,39 @@ func (s *Suite) TestWithDeadline(c *check.C) {
c.Check(resp.Body.String(), check.Equals, "ok")
}
-func (s *Suite) TestNoDeadlineAfterHijacked(c *check.C) {
+func (s *Suite) TestExemptFromDeadline(c *check.C) {
srv := Server{
Addr: ":",
Server: http.Server{
Handler: HandlerWithDeadline(time.Millisecond, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- conn, _, err := w.(http.Hijacker).Hijack()
- c.Assert(err, check.IsNil)
- defer conn.Close()
- select {
- case <-req.Context().Done():
- c.Error("request context done too soon")
- case <-time.After(time.Second / 10):
- conn.Write([]byte("HTTP/1.1 200 OK\r\n\r\nok"))
+ if req.URL.Path == "/exempt" {
+ ExemptFromDeadline(req)
}
+ time.Sleep(time.Second / 10)
+ if req.Context().Err() != nil {
+ w.WriteHeader(499)
+ return
+ }
+ fmt.Fprint(w, "ok")
})),
BaseContext: func(net.Listener) context.Context { return s.ctx },
},
}
srv.Start()
defer srv.Close()
- resp, err := http.Get("http://" + srv.Addr)
+
+ resp, err := http.Get("http://" + srv.Addr + "/normal")
c.Assert(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, 499)
body, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ c.Check(string(body), check.Equals, "")
+
+ resp, err = http.Get("http://" + srv.Addr + "/exempt")
+ c.Assert(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, 200)
+ body, err = ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
c.Check(string(body), check.Equals, "ok")
}
diff --git a/sdk/go/httpserver/responsewriter.go b/sdk/go/httpserver/responsewriter.go
index 049a3f1aae..9bbc793a6d 100644
--- a/sdk/go/httpserver/responsewriter.go
+++ b/sdk/go/httpserver/responsewriter.go
@@ -5,7 +5,10 @@
package httpserver
import (
+ "bufio"
+ "net"
"net/http"
+ "time"
)
const sniffBytes = 1024
@@ -32,13 +35,6 @@ func WrapResponseWriter(orig http.ResponseWriter) ResponseWriter {
return &responseWriter{ResponseWriter: orig}
}
-func (w *responseWriter) CloseNotify() <-chan bool {
- if cn, ok := w.ResponseWriter.(http.CloseNotifier); ok {
- return cn.CloseNotify()
- }
- return nil
-}
-
func (w *responseWriter) WriteHeader(s int) {
if w.wroteStatus == 0 {
w.wroteStatus = s
@@ -86,3 +82,36 @@ func (w *responseWriter) sniff(data []byte) {
func (w *responseWriter) Sniffed() []byte {
return w.sniffed
}
+
+func (w *responseWriter) Unwrap() http.ResponseWriter {
+ return w.ResponseWriter
+}
+
+// ResponseControllerShim uses a ResponseController to re-add the
+// optional interface methods to a ResponseWriter that has lost them
+// via wrapping by middleware.
+//
+// This allows us to combine old code (like x/net/websocket) with
+// middleware that doesn't explicitly support the optional interfaces
+// (like responseTimer and responseWriter here).
+type ResponseControllerShim struct{ http.ResponseWriter }
+
+func (s ResponseControllerShim) EnableFullDuplex() error {
+ return http.NewResponseController(s.ResponseWriter).EnableFullDuplex()
+}
+
+func (s ResponseControllerShim) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return http.NewResponseController(s.ResponseWriter).Hijack()
+}
+
+func (s ResponseControllerShim) SetReadDeadline(d time.Time) error {
+ return http.NewResponseController(s.ResponseWriter).SetReadDeadline(d)
+}
+
+func (s ResponseControllerShim) SetWriteDeadline(d time.Time) error {
+ return http.NewResponseController(s.ResponseWriter).SetWriteDeadline(d)
+}
+
+func (s ResponseControllerShim) Flush() error {
+ return http.NewResponseController(s.ResponseWriter).Flush()
+}
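
A usage sketch (hypothetical handler; the point is that the shim re-exposes Hijack to x/net/websocket even when middleware wrapping hides it):

```go
handler := httpserver.LogRequests(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	ws := websocket.Handler(func(conn *websocket.Conn) {
		io.Copy(conn, conn) // trivial echo, for illustration
	})
	ws.ServeHTTP(httpserver.ResponseControllerShim{ResponseWriter: w}, r)
}))
```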
diff --git a/sdk/go/keepclient/collectionreader.go b/sdk/go/keepclient/collectionreader.go
index 8e4bb93bfa..580e51461b 100644
--- a/sdk/go/keepclient/collectionreader.go
+++ b/sdk/go/keepclient/collectionreader.go
@@ -9,7 +9,6 @@ import (
"os"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/manifest"
)
// ErrNoManifest indicates the given collection has no manifest
@@ -31,11 +30,3 @@ func (kc *KeepClient) CollectionFileReader(collection map[string]interface{}, fi
}
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
-
-func (kc *KeepClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
- fs, err := (&arvados.Collection{ManifestText: m.Text}).FileSystem(nil, kc)
- if err != nil {
- return nil, err
- }
- return fs.OpenFile(filename, os.O_RDONLY, 0)
-}
diff --git a/sdk/go/keepclient/discover.go b/sdk/go/keepclient/discover.go
index 5eafbbe339..b67aaf6c63 100644
--- a/sdk/go/keepclient/discover.go
+++ b/sdk/go/keepclient/discover.go
@@ -5,6 +5,7 @@
package keepclient
import (
+ "crypto/md5"
"encoding/json"
"errors"
"fmt"
@@ -102,7 +103,11 @@ func (ent *cachedSvcList) poll() {
var next svcList
err := ent.arv.Call("GET", "keep_services", "", "accessible", nil, &next)
if err != nil {
- log.Printf("WARNING: Error retrieving services list: %v (retrying in %v)", err, errDelay)
+ if ent.arv.Logger != nil {
+ ent.arv.Logger.WithError(err).Warnf("error retrieving services list (retrying in %v)", errDelay)
+ } else {
+ log.Printf("WARNING: Error retrieving services list: %s (retrying in %v)", err, errDelay)
+ }
timer.Reset(errDelay)
continue
}
@@ -132,12 +137,44 @@ func (kc *KeepClient) discoverServices() error {
kc.replicasPerService = 0
roots := make(map[string]string)
for i, uri := range kc.Arvados.KeepServiceURIs {
- roots[fmt.Sprintf("00000-bi6l4-%015d", i)] = uri
+ roots[fmt.Sprintf("00000-bi6l4-%015d", i)] = strings.TrimSuffix(uri, "/")
}
kc.setServiceRoots(roots, roots, roots)
return nil
}
+ if kc.Arvados.Cluster != nil && os.Getenv("ARVADOS_USE_KEEP_ACCESSIBLE_API") == "" {
+ kc.disableDiscovery = true
+ roots := make(map[string]string)
+ for url, info := range kc.Arvados.Cluster.Services.Keepstore.InternalURLs {
+ rvz := info.Rendezvous
+ if rvz == "" {
+ rvz = url.String()
+ }
+ // If info.Rendezvous is 15 ASCII alphanumerics,

+ // we use it verbatim as the last 15 chars of
+ // the UUID. Otherwise, we hash
+ // info.Rendezvous (or, if empty, the URL) and
+ // use the first 15 chars of the hash as the
+ // last 15 chars of the UUID. This matches the
+ // behavior of
+ // services/api/app/models/keep_service.rb.
+ rvzhash := len(rvz) != 15
+ for i := 0; i < len(rvz) && !rvzhash; i++ {
+ rvzhash = !(rvz[i] >= '0' && rvz[i] <= '9' ||
+ rvz[i] >= 'a' && rvz[i] <= 'z' ||
+ rvz[i] >= 'A' && rvz[i] <= 'Z')
+ }
+ if rvzhash {
+ rvz = fmt.Sprintf("%x", md5.Sum([]byte(rvz)))[:15]
+ }
+ uuid := kc.Arvados.Cluster.ClusterID + "-bi6l4-" + rvz
+ roots[uuid] = strings.TrimSuffix(url.String(), "/")
+ }
+ kc.setServiceRoots(roots, roots, nil)
+ return nil
+ }
+
if kc.Arvados.ApiServer == "" {
return fmt.Errorf("Arvados client is not configured (target API host is not set). Maybe env var ARVADOS_API_HOST should be set first?")
}
diff --git a/sdk/go/keepclient/discover_test.go b/sdk/go/keepclient/discover_test.go
index 92d66a949c..770a79dd23 100644
--- a/sdk/go/keepclient/discover_test.go
+++ b/sdk/go/keepclient/discover_test.go
@@ -5,19 +5,22 @@
package keepclient
import (
+ "bytes"
"crypto/md5"
"fmt"
"net/http"
"os"
- "gopkg.in/check.v1"
-
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "gopkg.in/check.v1"
)
func (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {
- defer os.Setenv("ARVADOS_KEEP_SERVICES", "")
+ defer os.Unsetenv("ARVADOS_KEEP_SERVICES")
data := []byte("TestOverrideDiscovery")
hash := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
@@ -55,3 +58,93 @@ func (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {
_, _, _, err = kc2.Get(hash)
c.Check(err, check.IsNil)
}
+
+func (s *ServerRequiredSuite) TestDoubleSlash(c *check.C) {
+ defer os.Unsetenv("ARVADOS_KEEP_SERVICES")
+
+ data := []byte("TestDoubleSlash")
+ hash := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
+
+ os.Setenv("ARVADOS_KEEP_SERVICES", "")
+ arv1, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, check.IsNil)
+ arv1.ApiToken = arvadostest.ActiveToken
+ kc1, err := MakeKeepClient(arv1)
+ c.Assert(err, check.IsNil)
+
+ // Use kc1's config to set up a new client kc2, but add an
+ // extra trailing slash to each URL.
+ var svcs string
+ for _, url := range kc1.LocalRoots() {
+ svcs += url + "/ "
+ }
+ c.Assert(svcs, check.Not(check.HasLen), 0)
+ os.Setenv("ARVADOS_KEEP_SERVICES", svcs)
+
+ arv2, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, check.IsNil)
+ arv2.ApiToken = arvadostest.ActiveToken
+ kc2, err := MakeKeepClient(arv2)
+ c.Assert(err, check.IsNil)
+
+ // Check that trailing slashes were trimmed.
+ for _, url := range kc2.LocalRoots() {
+ c.Assert(url, check.Not(check.Matches), `.*/$`)
+ }
+
+ _, _, err = kc2.PutB(data)
+ c.Assert(err, check.IsNil)
+ _, _, _, err = kc2.Get(hash)
+ c.Check(err, check.IsNil)
+}
+
+func (s *StandaloneSuite) TestKeepServicesFromClusterConfig(c *check.C) {
+ // This behavior is disabled via env var in the test
+ // environment. Clear the env var to test the default
+ // production behavior.
+ v := "ARVADOS_USE_KEEP_ACCESSIBLE_API"
+ defer os.Setenv(v, os.Getenv(v))
+ os.Unsetenv(v)
+
+ rdr := bytes.NewReader([]byte(`
+Clusters:
+ zzzzz:
+ Services:
+ Keepstore:
+ InternalURLs:
+ "https://[::1]:12345/":
+ Rendezvous: abcdefghijklmno
+ "https://[::1]:54321/":
+ Rendezvous: xyz
+ "http://0.0.0.0:54321/":
+ {}
+ Keepproxy:
+ InternalURLs:
+ "https://[::1]:55555/":
+ {}
+`))
+ ldr := config.NewLoader(rdr, ctxlog.TestLogger(c))
+ ldr.Path = "-"
+ cfg, err := ldr.Load()
+ c.Assert(err, check.IsNil)
+ cluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ c.Assert(cluster.ClusterID, check.Equals, "zzzzz")
+ ac, err := arvados.NewClientFromConfig(cluster)
+ c.Assert(err, check.IsNil)
+ arv1, err := arvadosclient.New(ac)
+ c.Assert(err, check.IsNil)
+ c.Check(arv1.Cluster, check.NotNil)
+ kc, err := MakeKeepClient(arv1)
+ c.Assert(err, check.IsNil)
+ // Note the default rendezvous string is generated based on
+ // the MD5 of the keepstore URL and that URL *must* have a
+ // trailing slash in order to match the RailsAPI behavior --
+ // meanwhile, the keepstore URL given in the localRoots map
+ // *must not* have a trailing slash.
+ c.Check(kc.localRoots, check.DeepEquals, map[string]string{
+ "zzzzz-bi6l4-abcdefghijklmno": "https://[::1]:12345",
+ fmt.Sprintf("zzzzz-bi6l4-%x", md5.Sum([]byte("xyz")))[:27]: "https://[::1]:54321",
+ fmt.Sprintf("zzzzz-bi6l4-%x", md5.Sum([]byte("http://0.0.0.0:54321/")))[:27]: "http://0.0.0.0:54321",
+ })
+}
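The [:27] slices in the expected map keep exactly 15 hex digest characters after the 12-character "zzzzz-bi6l4-" prefix (12 + 15 = 27); an equivalent spelling (not in the patch) would be:

uuid := "zzzzz-bi6l4-" + fmt.Sprintf("%x", md5.Sum([]byte("xyz")))[:15]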
diff --git a/sdk/go/keepclient/gateway_shim.go b/sdk/go/keepclient/gateway_shim.go
index 260824453d..05e6502aa0 100644
--- a/sdk/go/keepclient/gateway_shim.go
+++ b/sdk/go/keepclient/gateway_shim.go
@@ -42,6 +42,9 @@ func (kvh *keepViaHTTP) ReadAt(locator string, dst []byte, offset int) (int, err
}
func (kvh *keepViaHTTP) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
+ if opts.CheckCacheOnly {
+ return 0, arvados.ErrNotCached
+ }
rdr, _, _, _, err := kvh.getOrHead("GET", opts.Locator, nil)
if err != nil {
return 0, err
diff --git a/sdk/go/keepclient/keepclient.go b/sdk/go/keepclient/keepclient.go
index d97a2d1fcd..2852e6d2d6 100644
--- a/sdk/go/keepclient/keepclient.go
+++ b/sdk/go/keepclient/keepclient.go
@@ -100,6 +100,8 @@ const (
XKeepReplicasStored = "X-Keep-Replicas-Stored"
XKeepStorageClasses = "X-Keep-Storage-Classes"
XKeepStorageClassesConfirmed = "X-Keep-Storage-Classes-Confirmed"
+ XKeepSignature = "X-Keep-Signature"
+ XKeepLocator = "X-Keep-Locator"
)
type HTTPClient interface {
@@ -184,9 +186,9 @@ func (kc *KeepClient) loadDefaultClasses() error {
return nil
}
-// MakeKeepClient creates a new KeepClient, loads default storage classes, calls
-// DiscoverKeepServices(), and returns when the client is ready to
-// use.
+// MakeKeepClient creates a new KeepClient, loads default storage
+// classes, calls discoverServices(), and returns when the client is
+// ready to use.
func MakeKeepClient(arv *arvadosclient.ArvadosClient) (*KeepClient, error) {
kc := New(arv)
return kc, kc.discoverServices()
@@ -209,8 +211,8 @@ func New(arv *arvadosclient.ArvadosClient) *KeepClient {
Retries: 2,
}
err = kc.loadDefaultClasses()
- if err != nil {
- DebugPrintf("DEBUG: Unable to load the default storage classes cluster config")
+ if err != nil && arv.Logger != nil {
+ arv.Logger.WithError(err).Debug("unable to load the default storage classes cluster config")
}
return kc
}
@@ -312,7 +314,7 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
req.Header[k] = append([]string(nil), v...)
}
if req.Header.Get("Authorization") == "" {
- req.Header.Set("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+ req.Header.Set("Authorization", "Bearer "+kc.Arvados.ApiToken)
}
if req.Header.Get("X-Request-Id") == "" {
req.Header.Set("X-Request-Id", reqid)
@@ -370,7 +372,9 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
time.Sleep(delay.Next())
}
}
- DebugPrintf("DEBUG: %s %s failed: %v", method, locator, errs)
+ if kc.Arvados.Logger != nil {
+ kc.Arvados.Logger.Debugf("DEBUG: %s %s failed: %v", method, locator, errs)
+ }
var err error
if count404 == numServers {
@@ -418,6 +422,7 @@ func (kc *KeepClient) upstreamGateway() arvados.KeepGateway {
Dir: cachedir,
MaxSize: kc.DiskCacheSize,
KeepGateway: backend,
+ Logger: kc.Arvados.Logger,
}
}
return kc.gatewayStack
@@ -538,7 +543,7 @@ func (kc *KeepClient) GetIndex(keepServiceUUID, prefix string) (io.Reader, error
return nil, err
}
- req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+ req.Header.Add("Authorization", "Bearer "+kc.Arvados.ApiToken)
req.Header.Set("X-Request-Id", kc.getRequestID())
resp, err := kc.httpClient().Do(req)
if err != nil {
@@ -725,6 +730,13 @@ func (kc *KeepClient) getRequestID() string {
return reqIDGen.Next()
}
+func (kc *KeepClient) debugf(format string, args ...interface{}) {
+ if kc.Arvados.Logger == nil {
+ return
+ }
+ kc.Arvados.Logger.Debugf(format, args...)
+}
+
type Locator struct {
Hash string
Size int // -1 if data size is not known
diff --git a/sdk/go/keepclient/keepclient_test.go b/sdk/go/keepclient/keepclient_test.go
index 531db31b25..72d46ce073 100644
--- a/sdk/go/keepclient/keepclient_test.go
+++ b/sdk/go/keepclient/keepclient_test.go
@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net"
"net/http"
"os"
@@ -143,7 +142,7 @@ func (sph *StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request
sph.requests = append(sph.requests, req)
sph.mtx.Unlock()
sph.c.Check(req.URL.Path, Equals, "/"+sph.expectPath)
- sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sph.expectAPIToken))
+ sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("Bearer %s", sph.expectAPIToken))
if sph.expectStorageClass != "*" {
sph.c.Check(req.Header.Get("X-Keep-Storage-Classes"), Equals, sph.expectStorageClass)
}
@@ -189,8 +188,6 @@ func UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,
}
func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
- log.Printf("TestUploadToStubKeepServer")
-
st := &StubPutHandler{
c: c,
expectPath: "acbd18db4cc2f85cedef654fccc4a4d8",
@@ -702,7 +699,7 @@ type StubGetHandler struct {
func (sgh StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
sgh.c.Check(req.URL.Path, Equals, "/"+sgh.expectPath)
- sgh.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sgh.expectAPIToken))
+ sgh.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("Bearer %s", sgh.expectAPIToken))
resp.WriteHeader(sgh.httpStatus)
resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(sgh.body)))
resp.Write(sgh.body)
@@ -1380,7 +1377,7 @@ type StubGetIndexHandler struct {
func (h StubGetIndexHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
h.c.Check(req.URL.Path, Equals, h.expectPath)
- h.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", h.expectAPIToken))
+ h.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("Bearer %s", h.expectAPIToken))
resp.WriteHeader(h.httpStatus)
resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(h.body)))
resp.Write(h.body)
diff --git a/sdk/go/keepclient/support.go b/sdk/go/keepclient/support.go
index d3d799dc5d..57d537ddd5 100644
--- a/sdk/go/keepclient/support.go
+++ b/sdk/go/keepclient/support.go
@@ -12,30 +12,16 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"math/rand"
"net/http"
- "os"
"strconv"
"strings"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/asyncbuf"
)
-// DebugPrintf emits debug messages. The easiest way to enable
-// keepclient debug messages in your application is to assign
-// log.Printf to DebugPrintf.
-var DebugPrintf = func(string, ...interface{}) {}
-
-func init() {
- if arvadosclient.StringBool(os.Getenv("ARVADOS_DEBUG")) {
- DebugPrintf = log.Printf
- }
-}
-
type keepService struct {
Uuid string `json:"uuid"`
Hostname string `json:"service_host"`
@@ -70,7 +56,7 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
- DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
+ kc.debugf("[%s] Error creating request: PUT %s error: %s", reqid, url, err)
uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, ""}
return
}
@@ -85,7 +71,7 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
}
req.Header.Add("X-Request-Id", reqid)
- req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+ req.Header.Add("Authorization", "Bearer "+kc.Arvados.ApiToken)
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(kc.Want_replicas))
if len(classesTodo) > 0 {
@@ -94,7 +80,7 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
var resp *http.Response
if resp, err = kc.httpClient().Do(req); err != nil {
- DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
+ kc.debugf("[%s] Upload failed: %s error: %s", reqid, url, err)
uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, err.Error()}
return
}
@@ -106,7 +92,7 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
scc := resp.Header.Get(XKeepStorageClassesConfirmed)
classesStored, err := parseStorageClassesConfirmedHeader(scc)
if err != nil {
- DebugPrintf("DEBUG: [%s] Ignoring invalid %s header %q: %s", reqid, XKeepStorageClassesConfirmed, scc, err)
+ kc.debugf("[%s] Ignoring invalid %s header %q: %s", reqid, XKeepStorageClassesConfirmed, scc, err)
}
defer resp.Body.Close()
@@ -115,16 +101,16 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
- DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
+ kc.debugf("[%s] Upload %s error: %s response: %s", reqid, url, err2, response)
uploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, classesStored, response}
} else if resp.StatusCode == http.StatusOK {
- DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
+ kc.debugf("[%s] Upload %s success", reqid, url)
uploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, classesStored, response}
} else {
if resp.StatusCode >= 300 && response == "" {
response = resp.Status
}
- DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
+ kc.debugf("[%s] Upload %s status: %d %s", reqid, url, resp.StatusCode, response)
uploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, classesStored, response}
}
}
@@ -255,7 +241,7 @@ func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWrite
for active*replicasPerThread < maxConcurrency {
// Start some upload requests
if nextServer < len(sv) {
- DebugPrintf("DEBUG: [%s] Begin upload %s to %s", req.RequestID, req.Hash, sv[nextServer])
+ kc.debugf("[%s] Begin upload %s to %s", req.RequestID, req.Hash, sv[nextServer])
go kc.uploadToKeepServer(sv[nextServer], req.Hash, classesTodo, getReader(), uploadStatusChan, req.DataSize, req.RequestID)
nextServer++
active++
@@ -272,7 +258,7 @@ func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWrite
}
}
- DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v", req.RequestID, replicasTodo, active)
+ kc.debugf("[%s] Replicas remaining to write: %d active uploads: %d", req.RequestID, replicasTodo, active)
if active < 1 {
break
}
diff --git a/sdk/go/manifest/manifest.go b/sdk/go/manifest/manifest.go
deleted file mode 100644
index a597003859..0000000000
--- a/sdk/go/manifest/manifest.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-/* Deals with parsing Manifest Text. */
-
-// Inspired by the Manifest class in arvados/sdk/ruby/lib/arvados/keep.rb
-
-package manifest
-
-import (
- "errors"
- "fmt"
- "path"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "git.arvados.org/arvados.git/sdk/go/blockdigest"
-)
-
-var ErrInvalidToken = errors.New("Invalid token")
-
-type Manifest struct {
- Text string
- Err error
-}
-
-type BlockLocator struct {
- Digest blockdigest.BlockDigest
- Size int
- Hints []string
-}
-
-// FileSegment is a portion of a file that is contained within a
-// single block.
-type FileSegment struct {
- Locator string
- // Offset (within this block) of this data segment
- Offset int
- Len int
-}
-
-// FileStreamSegment is a portion of a file described as a segment of a stream.
-type FileStreamSegment struct {
- SegPos uint64
- SegLen uint64
- Name string
-}
-
-// ManifestStream represents a single line from a manifest.
-type ManifestStream struct {
- StreamName string
- Blocks []string
- blockOffsets []uint64
- FileStreamSegments []FileStreamSegment
- Err error
-}
-
-// Array of segments referencing file content
-type segmentedFile []FileSegment
-
-// Map of files to list of file segments referencing file content
-type segmentedStream map[string]segmentedFile
-
-// Map of streams
-type segmentedManifest map[string]segmentedStream
-
-var escapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
-
-func unescapeSeq(seq string) string {
- if seq == `\\` {
- return `\`
- }
- i, err := strconv.ParseUint(seq[1:], 8, 8)
- if err != nil {
- // Invalid escape sequence: can't unescape.
- return seq
- }
- return string([]byte{byte(i)})
-}
-
-func EscapeName(s string) string {
- raw := []byte(s)
- escaped := make([]byte, 0, len(s))
- for _, c := range raw {
- if c <= 32 {
- oct := fmt.Sprintf("\\%03o", c)
- escaped = append(escaped, []byte(oct)...)
- } else {
- escaped = append(escaped, c)
- }
- }
- return string(escaped)
-}
-
-func UnescapeName(s string) string {
- return escapeSeq.ReplaceAllStringFunc(s, unescapeSeq)
-}
-
-func ParseBlockLocator(s string) (b BlockLocator, err error) {
- if !blockdigest.LocatorPattern.MatchString(s) {
- err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
- "\"%s\".",
- s,
- blockdigest.LocatorPattern.String())
- } else {
- tokens := strings.Split(s, "+")
- var blockSize int64
- var blockDigest blockdigest.BlockDigest
- // We expect both of the following to succeed since LocatorPattern
- // restricts the strings appropriately.
- blockDigest, err = blockdigest.FromString(tokens[0])
- if err != nil {
- return
- }
- blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
- if err != nil {
- return
- }
- b.Digest = blockDigest
- b.Size = int(blockSize)
- b.Hints = tokens[2:]
- }
- return
-}
-
-func parseFileStreamSegment(tok string) (ft FileStreamSegment, err error) {
- parts := strings.SplitN(tok, ":", 3)
- if len(parts) != 3 {
- err = ErrInvalidToken
- return
- }
- ft.SegPos, err = strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return
- }
- ft.SegLen, err = strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return
- }
- ft.Name = UnescapeName(parts[2])
- return
-}
-
-func (s *ManifestStream) FileSegmentIterByName(filepath string) <-chan *FileSegment {
- ch := make(chan *FileSegment, 64)
- go func() {
- s.sendFileSegmentIterByName(filepath, ch)
- close(ch)
- }()
- return ch
-}
-
-func firstBlock(offsets []uint64, rangeStart uint64) int {
- // rangeStart/blockStart is the inclusive lower bound
- // rangeEnd/blockEnd is the exclusive upper bound
-
- hi := len(offsets) - 1
- var lo int
- i := ((hi + lo) / 2)
- blockStart := offsets[i]
- blockEnd := offsets[i+1]
-
- // perform a binary search for the first block
- // assumes that all of the blocks are contiguous, so rangeStart is guaranteed
- // to either fall into the range of a block or be outside the block range entirely
- for !(rangeStart >= blockStart && rangeStart < blockEnd) {
- if lo == i {
- // must be out of range, fail
- return -1
- }
- if rangeStart > blockStart {
- lo = i
- } else {
- hi = i
- }
- i = ((hi + lo) / 2)
- blockStart = offsets[i]
- blockEnd = offsets[i+1]
- }
- return i
-}
-
-func (s *ManifestStream) sendFileSegmentIterByName(filepath string, ch chan<- *FileSegment) {
- // This is what streamName+"/"+fileName will look like:
- target := fixStreamName(filepath)
- for _, fTok := range s.FileStreamSegments {
- wantPos := fTok.SegPos
- wantLen := fTok.SegLen
- name := fTok.Name
-
- if s.StreamName+"/"+name != target {
- continue
- }
- if wantLen == 0 {
- ch <- &FileSegment{Locator: "d41d8cd98f00b204e9800998ecf8427e+0", Offset: 0, Len: 0}
- continue
- }
-
- // Binary search to determine first block in the stream
- i := firstBlock(s.blockOffsets, wantPos)
- if i == -1 {
- // Shouldn't happen, file segments are checked in parseManifestStream
- panic(fmt.Sprintf("File segment %v extends past end of stream", fTok))
- }
- for ; i < len(s.Blocks); i++ {
- blockPos := s.blockOffsets[i]
- blockEnd := s.blockOffsets[i+1]
- if blockEnd <= wantPos {
- // Shouldn't happen, firstBlock() should start
- // us on the right block, so if this triggers
- // that means there is a bug.
- panic(fmt.Sprintf("Block end %v comes before start of file segment %v", blockEnd, wantPos))
- }
- if blockPos >= wantPos+wantLen {
- // current block comes after current file span
- break
- }
-
- fseg := FileSegment{
- Locator: s.Blocks[i],
- Offset: 0,
- Len: int(blockEnd - blockPos),
- }
- if blockPos < wantPos {
- fseg.Offset = int(wantPos - blockPos)
- fseg.Len -= fseg.Offset
- }
- if blockEnd > wantPos+wantLen {
- fseg.Len = int(wantPos+wantLen-blockPos) - fseg.Offset
- }
- ch <- &fseg
- }
- }
-}
-
-func parseManifestStream(s string) (m ManifestStream) {
- tokens := strings.Split(s, " ")
-
- m.StreamName = UnescapeName(tokens[0])
- if m.StreamName != "." && !strings.HasPrefix(m.StreamName, "./") {
- m.Err = fmt.Errorf("Invalid stream name: %s", m.StreamName)
- return
- }
-
- tokens = tokens[1:]
- var i int
- for i = 0; i < len(tokens); i++ {
- if !blockdigest.IsBlockLocator(tokens[i]) {
- break
- }
- }
- m.Blocks = tokens[:i]
- fileTokens := tokens[i:]
-
- if len(m.Blocks) == 0 {
- m.Err = fmt.Errorf("No block locators found")
- return
- }
-
- m.blockOffsets = make([]uint64, len(m.Blocks)+1)
- var streamoffset uint64
- for i, b := range m.Blocks {
- bl, err := ParseBlockLocator(b)
- if err != nil {
- m.Err = err
- return
- }
- m.blockOffsets[i] = streamoffset
- streamoffset += uint64(bl.Size)
- }
- m.blockOffsets[len(m.Blocks)] = streamoffset
-
- if len(fileTokens) == 0 {
- m.Err = fmt.Errorf("No file tokens found")
- return
- }
-
- for _, ft := range fileTokens {
- pft, err := parseFileStreamSegment(ft)
- if err != nil {
- m.Err = fmt.Errorf("Invalid file token: %s", ft)
- break
- }
- if pft.SegPos+pft.SegLen > streamoffset {
- m.Err = fmt.Errorf("File segment %s extends past end of stream %d", ft, streamoffset)
- break
- }
- m.FileStreamSegments = append(m.FileStreamSegments, pft)
- }
-
- return
-}
-
-func fixStreamName(sn string) string {
- sn = path.Clean(sn)
- if strings.HasPrefix(sn, "/") {
- sn = "." + sn
- } else if sn != "." {
- sn = "./" + sn
- }
- return sn
-}
-
-func splitPath(srcpath string) (streamname, filename string) {
- pathIdx := strings.LastIndex(srcpath, "/")
- if pathIdx >= 0 {
- streamname = srcpath[0:pathIdx]
- filename = srcpath[pathIdx+1:]
- } else {
- streamname = srcpath
- filename = ""
- }
- return
-}
-
-func (m *Manifest) segment() (*segmentedManifest, error) {
- files := make(segmentedManifest)
-
- for stream := range m.StreamIter() {
- if stream.Err != nil {
- // Stream has an error
- return nil, stream.Err
- }
- currentStreamfiles := make(map[string]bool)
- for _, f := range stream.FileStreamSegments {
- sn := stream.StreamName
- if strings.HasSuffix(sn, "/") {
- sn = sn[0 : len(sn)-1]
- }
- path := sn + "/" + f.Name
- streamname, filename := splitPath(path)
- if files[streamname] == nil {
- files[streamname] = make(segmentedStream)
- }
- if !currentStreamfiles[path] {
- segs := files[streamname][filename]
- for seg := range stream.FileSegmentIterByName(path) {
- if seg.Len > 0 {
- segs = append(segs, *seg)
- }
- }
- files[streamname][filename] = segs
- currentStreamfiles[path] = true
- }
- }
- }
-
- return &files, nil
-}
-
-func (stream segmentedStream) normalizedText(name string) string {
- var sortedfiles []string
- for k := range stream {
- sortedfiles = append(sortedfiles, k)
- }
- sort.Strings(sortedfiles)
-
- streamTokens := []string{EscapeName(name)}
-
- blocks := make(map[blockdigest.BlockDigest]int64)
- var streamoffset int64
-
- // Go through each file and add each referenced block exactly once.
- for _, streamfile := range sortedfiles {
- for _, segment := range stream[streamfile] {
- b, _ := ParseBlockLocator(segment.Locator)
- if _, ok := blocks[b.Digest]; !ok {
- streamTokens = append(streamTokens, segment.Locator)
- blocks[b.Digest] = streamoffset
- streamoffset += int64(b.Size)
- }
- }
- }
-
- if len(streamTokens) == 1 {
- streamTokens = append(streamTokens, "d41d8cd98f00b204e9800998ecf8427e+0")
- }
-
- for _, streamfile := range sortedfiles {
- // Add in file segments
- spanStart := int64(-1)
- spanEnd := int64(0)
- fout := EscapeName(streamfile)
- for _, segment := range stream[streamfile] {
- // Collapse adjacent segments
- b, _ := ParseBlockLocator(segment.Locator)
- streamoffset = blocks[b.Digest] + int64(segment.Offset)
- if spanStart == -1 {
- spanStart = streamoffset
- spanEnd = streamoffset + int64(segment.Len)
- } else {
- if streamoffset == spanEnd {
- spanEnd += int64(segment.Len)
- } else {
- streamTokens = append(streamTokens, fmt.Sprintf("%d:%d:%s", spanStart, spanEnd-spanStart, fout))
- spanStart = streamoffset
- spanEnd = streamoffset + int64(segment.Len)
- }
- }
- }
-
- if spanStart != -1 {
- streamTokens = append(streamTokens, fmt.Sprintf("%d:%d:%s", spanStart, spanEnd-spanStart, fout))
- }
-
- if len(stream[streamfile]) == 0 {
- streamTokens = append(streamTokens, fmt.Sprintf("0:0:%s", fout))
- }
- }
-
- return strings.Join(streamTokens, " ") + "\n"
-}
-
-func (m segmentedManifest) manifestTextForPath(srcpath, relocate string) string {
- srcpath = fixStreamName(srcpath)
-
- var suffix string
- if strings.HasSuffix(relocate, "/") {
- suffix = "/"
- }
- relocate = fixStreamName(relocate) + suffix
-
- streamname, filename := splitPath(srcpath)
-
- if stream, ok := m[streamname]; ok {
- // check if it refers to a single file in a stream
- filesegs, okfile := stream[filename]
- if okfile {
- newstream := make(segmentedStream)
- relocateStream, relocateFilename := splitPath(relocate)
- if relocateFilename == "" {
- relocateFilename = filename
- }
- newstream[relocateFilename] = filesegs
- return newstream.normalizedText(relocateStream)
- }
- }
-
- // Going to extract multiple streams
- prefix := srcpath + "/"
-
- if strings.HasSuffix(relocate, "/") {
- relocate = relocate[0 : len(relocate)-1]
- }
-
- var sortedstreams []string
- for k := range m {
- sortedstreams = append(sortedstreams, k)
- }
- sort.Strings(sortedstreams)
-
- manifest := ""
- for _, k := range sortedstreams {
- if strings.HasPrefix(k, prefix) || k == srcpath {
- manifest += m[k].normalizedText(relocate + k[len(srcpath):])
- }
- }
- return manifest
-}
-
-// Extract extracts some or all of the manifest and returns the extracted
-// portion as a normalized manifest. This is a Swiss Army knife function that
-// can be used several ways:
-//
-// If 'srcpath' and 'relocate' are '.' it simply returns an equivalent manifest
-// in normalized form.
-//
-// Extract(".", ".") // return entire normalized manfest text
-//
-// If 'srcpath' points to a single file, it will return manifest text for just that file.
-// The value of "relocate" is can be used to rename the file or set the file stream.
-//
-// Extract("./foo", ".") // extract file "foo" and put it in stream "."
-// Extract("./foo", "./bar") // extract file "foo", rename it to "bar" in stream "."
-// Extract("./foo", "./bar/") // extract file "foo", rename it to "./bar/foo"
-// Extract("./foo", "./bar/baz") // extract file "foo", rename it to "./bar/baz")
-//
-// Otherwise it will return the manifest text for all streams with the prefix in "srcpath" and place
-// them under the path in "relocate".
-//
-// Extract("./stream", ".") // extract "./stream" to "." and "./stream/subdir" to "./subdir")
-// Extract("./stream", "./bar") // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir")
-func (m Manifest) Extract(srcpath, relocate string) (ret Manifest) {
- segmented, err := m.segment()
- if err != nil {
- ret.Err = err
- return
- }
- ret.Text = segmented.manifestTextForPath(srcpath, relocate)
- return
-}
-
-func (m *Manifest) StreamIter() <-chan ManifestStream {
- ch := make(chan ManifestStream)
- go func(input string) {
- // This slice holds the current line and the remainder of the
- // manifest. We parse one line at a time, to save effort if we
- // only need the first few lines.
- lines := []string{"", input}
- for {
- lines = strings.SplitN(lines[1], "\n", 2)
- if len(lines[0]) > 0 {
- // Only parse non-blank lines
- ch <- parseManifestStream(lines[0])
- }
- if len(lines) == 1 {
- break
- }
- }
- close(ch)
- }(m.Text)
- return ch
-}
-
-func (m *Manifest) FileSegmentIterByName(filepath string) <-chan *FileSegment {
- ch := make(chan *FileSegment, 64)
- filepath = fixStreamName(filepath)
- go func() {
- for stream := range m.StreamIter() {
- if !strings.HasPrefix(filepath, stream.StreamName+"/") {
- continue
- }
- stream.sendFileSegmentIterByName(filepath, ch)
- }
- close(ch)
- }()
- return ch
-}
-
-// BlockIterWithDuplicates iterates over the block locators of a manifest.
-//
-// Blocks may appear multiple times within the same manifest if they
-// are used by multiple files. In that case this Iterator will output
-// the same block multiple times.
-//
-// In order to detect parse errors, caller must check m.Err after the returned channel closes.
-func (m *Manifest) BlockIterWithDuplicates() <-chan blockdigest.BlockLocator {
- blockChannel := make(chan blockdigest.BlockLocator)
- go func(streamChannel <-chan ManifestStream) {
- for ms := range streamChannel {
- if ms.Err != nil {
- m.Err = ms.Err
- continue
- }
- for _, block := range ms.Blocks {
- if b, err := blockdigest.ParseBlockLocator(block); err == nil {
- blockChannel <- b
- } else {
- m.Err = err
- }
- }
- }
- close(blockChannel)
- }(m.StreamIter())
- return blockChannel
-}
diff --git a/sdk/go/manifest/manifest_test.go b/sdk/go/manifest/manifest_test.go
deleted file mode 100644
index 090ead94bd..0000000000
--- a/sdk/go/manifest/manifest_test.go
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package manifest
-
-import (
- "fmt"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/blockdigest"
- "io/ioutil"
- "reflect"
- "regexp"
- "runtime"
- "testing"
-)
-
-func getStackTrace() string {
- buf := make([]byte, 1000)
- bytesWritten := runtime.Stack(buf, false)
- return "Stack Trace:\n" + string(buf[:bytesWritten])
-}
-
-func expectFromChannel(t *testing.T, c <-chan string, expected string) {
- actual, ok := <-c
- if !ok {
- t.Fatalf("Expected to receive %s but channel was closed. %s",
- expected,
- getStackTrace())
- }
- if actual != expected {
- t.Fatalf("Expected %s but got %s instead. %s",
- expected,
- actual,
- getStackTrace())
- }
-}
-
-func expectChannelClosed(t *testing.T, c <-chan interface{}) {
- received, ok := <-c
- if ok {
- t.Fatalf("Expected channel to be closed, but received %v instead. %s",
- received,
- getStackTrace())
- }
-}
-
-func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
- if actual != expected {
- t.Fatalf("Expected %v but received %v instead. %s",
- expected,
- actual,
- getStackTrace())
- }
-}
-
-func expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {
- if len(actual) != len(expected) {
- t.Fatalf("Expected %v (length %d), but received %v (length %d) instead. %s", expected, len(expected), actual, len(actual), getStackTrace())
- }
- for i := range actual {
- if actual[i] != expected[i] {
- t.Fatalf("Expected %v but received %v instead (first disagreement at position %d). %s", expected, actual, i, getStackTrace())
- }
- }
-}
-
-func expectFileStreamSegmentsEqual(t *testing.T, actual []FileStreamSegment, expected []FileStreamSegment) {
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Expected %v but received %v instead. %s", expected, actual, getStackTrace())
- }
-}
-
-func expectManifestStream(t *testing.T, actual ManifestStream, expected ManifestStream) {
- expectEqual(t, actual.StreamName, expected.StreamName)
- expectStringSlicesEqual(t, actual.Blocks, expected.Blocks)
- expectFileStreamSegmentsEqual(t, actual.FileStreamSegments, expected.FileStreamSegments)
-}
-
-func expectBlockLocator(t *testing.T, actual blockdigest.BlockLocator, expected blockdigest.BlockLocator) {
- expectEqual(t, actual.Digest, expected.Digest)
- expectEqual(t, actual.Size, expected.Size)
- expectStringSlicesEqual(t, actual.Hints, expected.Hints)
-}
-
-func TestParseManifestStreamSimple(t *testing.T) {
- m := parseManifestStream(". 365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf 0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt")
- expectManifestStream(t, m, ManifestStream{StreamName: ".",
- Blocks: []string{"365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"},
- FileStreamSegments: []FileStreamSegment{{0, 2310, "qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt"}}})
-}
-
-func TestParseBlockLocatorSimple(t *testing.T) {
- b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
- if err != nil {
- t.Fatalf("Unexpected error parsing block locator: %v", err)
- }
- d, err := blockdigest.FromString("365f83f5f808896ec834c8b595288735")
- if err != nil {
- t.Fatalf("Unexpected error during FromString for block locator: %v", err)
- }
- expectBlockLocator(t, blockdigest.BlockLocator{b.Digest, b.Size, b.Hints},
- blockdigest.BlockLocator{Digest: d,
- Size: 2310,
- Hints: []string{"K@qr1hi",
- "Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
-}
-
-func TestStreamIterShortManifestWithBlankStreams(t *testing.T) {
- content, err := ioutil.ReadFile("testdata/short_manifest")
- if err != nil {
- t.Fatalf("Unexpected error reading manifest from file: %v", err)
- }
- manifest := Manifest{Text: string(content)}
- streamIter := manifest.StreamIter()
-
- firstStream := <-streamIter
- expectManifestStream(t,
- firstStream,
- ManifestStream{StreamName: ".",
- Blocks: []string{"b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c"},
- FileStreamSegments: []FileStreamSegment{{0, 15693477, "chr10_band0_s0_e3000000.fj"}}})
-
- received, ok := <-streamIter
- if ok {
- t.Fatalf("Expected streamIter to be closed, but received %v instead.",
- received)
- }
-}
-
-func TestBlockIterLongManifest(t *testing.T) {
- content, err := ioutil.ReadFile("testdata/long_manifest")
- if err != nil {
- t.Fatalf("Unexpected error reading manifest from file: %v", err)
- }
- manifest := Manifest{Text: string(content)}
- blockChannel := manifest.BlockIterWithDuplicates()
-
- firstBlock := <-blockChannel
- d, err := blockdigest.FromString("b746e3d2104645f2f64cd3cc69dd895d")
- if err != nil {
- t.Fatalf("Unexpected error during FromString for block: %v", err)
- }
- expectBlockLocator(t,
- firstBlock,
- blockdigest.BlockLocator{Digest: d,
- Size: 15693477,
- Hints: []string{"E2866e643690156651c03d876e638e674dcd79475@5441920c"}})
- blocksRead := 1
- var lastBlock blockdigest.BlockLocator
- for lastBlock = range blockChannel {
- blocksRead++
- }
- expectEqual(t, blocksRead, 853)
-
- d, err = blockdigest.FromString("f9ce82f59e5908d2d70e18df9679b469")
- if err != nil {
- t.Fatalf("Unexpected error during FromString for block: %v", err)
- }
- expectBlockLocator(t,
- lastBlock,
- blockdigest.BlockLocator{Digest: d,
- Size: 31367794,
- Hints: []string{"E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c"}})
-}
-
-func TestUnescape(t *testing.T) {
- for _, testCase := range [][]string{
- {`\040`, ` `},
- {`\009`, `\009`},
- {`\\\040\\`, `\ \`},
- {`\\040\`, `\040\`},
- } {
- in := testCase[0]
- expect := testCase[1]
- got := UnescapeName(in)
- if expect != got {
- t.Errorf("For '%s' got '%s' instead of '%s'", in, got, expect)
- }
- }
-}
-
-type fsegtest struct {
- mt string // manifest text
- f string // filename
- want []FileSegment // segments should be received on channel
-}
-
-func TestFileSegmentIterByName(t *testing.T) {
- mt := arvadostest.PathologicalManifest
- for _, testCase := range []fsegtest{
- {mt: mt, f: "zzzz", want: nil},
- // This case is too sensitive: it would be acceptable
- // (even preferable) to return only one empty segment.
- {mt: mt, f: "foo/zero", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}, {"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
- {mt: mt, f: "zero@0", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
- {mt: mt, f: "zero@1", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
- {mt: mt, f: "zero@4", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
- {mt: mt, f: "zero@9", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
- {mt: mt, f: "f", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}}},
- {mt: mt, f: "ooba", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}, {"37b51d194a7513e45b56f6524f2d51f2+3", 0, 2}}},
- {mt: mt, f: "overlapReverse/o", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 2, 1}}},
- {mt: mt, f: "overlapReverse/oo", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}}},
- {mt: mt, f: "overlapReverse/ofoo", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 2, 1}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 3}}},
- {mt: mt, f: "foo bar/baz", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 3}}},
- // This case is too sensitive: it would be better to
- // omit the empty segment.
- {mt: mt, f: "segmented/frob", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}, {"37b51d194a7513e45b56f6524f2d51f2+3", 2, 1}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 1}, {"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}, {"37b51d194a7513e45b56f6524f2d51f2+3", 0, 1}}},
- {mt: mt, f: "segmented/oof", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}}},
- } {
- m := Manifest{Text: testCase.mt}
- var got []FileSegment
- for fs := range m.FileSegmentIterByName(testCase.f) {
- got = append(got, *fs)
- }
- if !reflect.DeepEqual(got, testCase.want) {
- t.Errorf("For %#v:\n got %#v\n want %#v", testCase.f, got, testCase.want)
- }
- }
-}
-
-func TestBlockIterWithBadManifest(t *testing.T) {
- testCases := [][]string{
- {"badstream acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt", "Invalid stream name: badstream"},
- {"/badstream acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt", "Invalid stream name: /badstream"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 file1.txt", "Invalid file token: file1.txt"},
- {". acbd18db4cc2f85cedef654fccc4a4+3 0:1:file1.txt", "No block locators found"},
- {". acbd18db4cc2f85cedef654fccc4a4d8 0:1:file1.txt", "No block locators found"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt file2.txt 1:2:file3.txt", "Invalid file token: file2.txt"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt. bcde18db4cc2f85cedef654fccc4a4d8+3 1:2:file3.txt", "Invalid file token: bcde18db4cc2f85cedef654fccc4a4d8.*"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt\n. acbd18db4cc2f85cedef654fccc4a4d8+3 ::file2.txt\n", "Invalid file token: ::file2.txt"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 bcde18db4cc2f85cedef654fccc4a4d8+3\n", "No file tokens found"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3 ", "Invalid file token"},
- {". acbd18db4cc2f85cedef654fccc4a4d8+3", "No file tokens found"},
- {". 0:1:file1.txt\n", "No block locators found"},
- {".\n", "No block locators found"},
- }
-
- for _, testCase := range testCases {
- manifest := Manifest{Text: string(testCase[0])}
- blockChannel := manifest.BlockIterWithDuplicates()
-
- for block := range blockChannel {
- _ = block
- }
-
- // completed reading from blockChannel; now check for errors
- if manifest.Err == nil {
- t.Fatalf("Expected error")
- }
-
- matched, _ := regexp.MatchString(testCase[1], manifest.Err.Error())
- if !matched {
- t.Fatalf("Expected error not found. Expected: %v; Found: %v", testCase[1], manifest.Err.Error())
- }
- }
-}
-
-func TestNormalizeManifest(t *testing.T) {
- m1 := Manifest{Text: `. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
-. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
-. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
-`}
- expectEqual(t, m1.Extract(".", ".").Text,
- `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt
-`)
-
- m2 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
-`}
- expectEqual(t, m2.Extract(".", ".").Text, m2.Text)
-
- m3 := Manifest{Text: `. 5348b82a029fd9e971a811ce1f71360b+43 3:40:md5sum.txt
-. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
-. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
-`}
- expectEqual(t, m3.Extract(".", ".").Text, `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:md5sum.txt
-`)
- expectEqual(t, m3.Extract("/md5sum.txt", "/wiggle.txt").Text, `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:wiggle.txt
-`)
-
- m4 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-./foo 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`}
-
- expectEqual(t, m4.Extract(".", ".").Text,
- `./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-`)
-
- expectEqual(t, m4.Extract("./foo", ".").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
- expectEqual(t, m4.Extract("./foo", "./baz").Text, "./baz 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
- expectEqual(t, m4.Extract("./foo/bar", ".").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
- expectEqual(t, m4.Extract("./foo/bar", "./baz").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:baz 67108864:3:baz\n")
- expectEqual(t, m4.Extract("./foo/bar", "./quux/").Text, "./quux 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
- expectEqual(t, m4.Extract("./foo/bar", "./quux/baz").Text, "./quux 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:baz 67108864:3:baz\n")
- expectEqual(t, m4.Extract(".", ".").Text, `./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-`)
- expectEqual(t, m4.Extract(".", "./zip").Text, `./zip/foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
-./zip/zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-`)
-
- expectEqual(t, m4.Extract("foo/.//bar/../../zzz/", "/waz/").Text, `./waz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-`)
-
- m5 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-./foo 204e43b8a1185621ca55a94839582e6f+67108864 3:3:bar
-`}
- expectEqual(t, m5.Extract(".", ".").Text,
- `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:6:bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-`)
-
- m8 := Manifest{Text: `./a\040b\040c 59ca0efa9f5633cb0371bbc0355478d8+13 0:13:hello\040world.txt
-`}
- expectEqual(t, m8.Extract(".", ".").Text, m8.Text)
-
- m9 := Manifest{Text: ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:10:one 20:10:two 10:10:one 30:10:two\n"}
- expectEqual(t, m9.Extract("", "").Text, ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:20:one 20:20:two\n")
-
- m10 := Manifest{Text: ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:10:one 20:10:two 10:10:one 30:10:two\n"}
- expectEqual(t, m10.Extract("./two", "./three").Text, ". acbd18db4cc2f85cedef654fccc4a4d8+40 20:20:three\n")
-
- m11 := Manifest{Text: arvadostest.PathologicalManifest}
- expectEqual(t, m11.Extract(".", ".").Text, `. acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:1:f 1:4:ooba 5:1:r 5:4:rbaz 0:0:zero@0 0:0:zero@1 0:0:zero@4 0:0:zero@9
-./foo acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo 0:3:foo 0:0:zero
-./foo\040bar acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:baz 0:3:baz\040waz
-./overlapReverse acbd18db4cc2f85cedef654fccc4a4d8+3 2:1:o 2:1:ofoo 0:3:ofoo 1:2:oo
-./segmented acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:1:frob 5:1:frob 1:1:frob 3:1:frob 1:2:oof 0:1:oof
-`)
-
- m12 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
-./foo/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`}
-
- expectEqual(t, m12.Extract("./foo", ".").Text, `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-./baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`)
- expectEqual(t, m12.Extract("./foo", "./blub").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`)
- expectEqual(t, m12.Extract("./foo", "./blub/").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`)
- expectEqual(t, m12.Extract("./foo/", "./blub/").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
-`)
-
- m13 := Manifest{Text: `foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
-`}
-
- expectEqual(t, m13.Extract(".", ".").Text, ``)
- expectEqual(t, m13.Extract(".", ".").Err.Error(), "Invalid stream name: foo")
-
- m14 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 67108863:3:bar
-`}
-
- expectEqual(t, m14.Extract(".", ".").Text, ``)
- expectEqual(t, m14.Extract(".", ".").Err.Error(), "File segment 67108863:3:bar extends past end of stream 67108864")
-
- m15 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3bar
-`}
-
- expectEqual(t, m15.Extract(".", ".").Text, ``)
- expectEqual(t, m15.Extract(".", ".").Err.Error(), "Invalid file token: 0:3bar")
-}
-
-func TestFirstBlock(t *testing.T) {
- fmt.Println("ZZZ")
- expectEqual(t, firstBlock([]uint64{1, 2, 3, 4}, 3), 2)
- expectEqual(t, firstBlock([]uint64{1, 2, 3, 4, 5, 6}, 4), 3)
-}
diff --git a/sdk/go/manifest/testdata/long_manifest b/sdk/go/manifest/testdata/long_manifest
deleted file mode 100644
index a7949e6724..0000000000
--- a/sdk/go/manifest/testdata/long_manifest
+++ /dev/null
@@ -1 +0,0 @@
-. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 109cd35b4d3f83266b63fb46c6943454+6770629+Ed0c0561b669237162996223b813b811d248ff9b0@5441920c 1455890e7b56831edff40738856e4194+15962669+Ec298b770d14205b5185d0e2b016ddd940c745446@5441920c 8c87f1c69c6f302c8c05e7d0e740d233+16342794+Ec432f4c24e63b840c1f12976b9edf396d70b8f67@5441920c 451cfce8c67bf92b67b5c6190d45d4f5+5067634+E406821d6ceb1d16ec638e66b7603c69f3482d895@5441920c f963d174978dc966910be6240e8602c7+4264756+E00241238e18635fdb583dd0c6d6561b672996467@5441920c 33be2d8cdd100eec6e842f644556d031+16665404+E6c773004b8296523014b9d23ed066ec72387485e@5441920c 6db13c2df6342b52d72df469c065b675+13536792+E6011e6057857f68d9b1b486571f239614b0707be@5441920c fb7ccc93e86187c519f6716c26474cb3+13714429+Ec4677bfcbe8689621d1b2d4f1bdce5b52f379f98@5441920c 972f24d216684646dfb9e266b7166f63+44743112+E1706fe89133bcd3625cc88de1035681c2d179770@5441920c 16f8df1595811cf9823c30254e6d58e6+17555223+E0febd567bf630b656dcfef01e90d3878c66eed36@5441920c d25b29289e6632728bf485eff6dde9c5+4366647+E7071644d29dd00be350e2e6fb7496346555fb4e9@5441920c 11dffe40608763462b5d89d5ccf33779+32161952+E7f110261b4b0d628396ff782f86966c17569c249@5441920c 0d36936536e85c28c233c6dfb856863b+22400265+Eee3966f1088f96d4fde6e4ec6b9b85cd65ff0c56@5441920c 03f293686e7c22b852b1f94b3490d781+14026139+Ef27fdfb40d6f9bd7bf8f639bcb2608365e002761@5441920c 185863e4c8fb666bc67b5b6666067094+22042495+Ee1164ffe4bffb0c2f29e1767688fbc468b326007@5441920c 4c7368ed41d2266df698176d0483e0be+31053569+E527d607c348f45ede4d8d6340f6079dd044c554d@5441920c ef75be5688e570564269596833866420+7357223+Eb27e68b0dc1674c515646c79280269779f2fb9ed@5441920c cc178064be26076266896d7b9bd91363+17709624+Ed64b0f5e023578cc2d23de434de9ec95debf6c4c@5441920c 5721f0964f9fb339066c176ce6d819c4+6146416+E5df3e33404b589fd4f2f827b86200fe3507c669b@5441920c 53df2cf91db94f57e7d67e4bc568d102+14669912+E64ddcf065630c72e281d0760fe11475b11614382@5441920c 3b045d987f9e1d03d9f3764223394f7f+11964610+E667868e60686bb6fc49609f2d61cb5b4e990dc4c@5441920c 1b83050279df8c6bfd2d7f675ecc6cc0+14904735+E91b1576015021d4debb5465dc449037bed0efc60@5441920c 16c366b5e44bd6d3f01600776e65076b+13400037+E6ded42f36469b5996e60c3415094d93b98d58d17@5441920c 6e7c59c345714f8d20176086c53d128f+5665774+Ef4c5716bb8c535d1335886f4ed8792e28829f531@5441920c 47c20b212e917be6923d6040053b6199+9646905+E875b5786fe08f40d5655ec0731368085d2059fe7@5441920c 6d56fc2964ee717fb3d168e0433652e5+4640161+E59be5ce3d0188761859f8f723bdfbf6f6cfc58b6@5441920c b62899c71fbf5ee6b3c59777480393b1+32455363+E2bfbdc56d6b66b7709f99466e733c1389cd8c952@5441920c 5c0390fc6f76631ec906792975d36d09+15940309+E0671c8fd6b2d8e05827cf400b6e6f7be76955dbf@5441920c 19be066d6bb9de09cb171c92efb62613+22466671+E2230614c0ccc69fd2669ce65738de68dbff3c867@5441920c 4c8396101d3fc596400d63121db853d0+13741614+Ecf2839221feb3d070b074fb1500544572dc5256b@5441920c cd29406297ffb7f637c058efbf305236+7619567+Ec063b1c180b6dfef7462c65dc2c7fc34b5756598@5441920c f68b644c6c02d36658e6f006f07b8ff0+23222064+E67594b67317452786c664f26808697d343d3316c@5441920c 42f58fb009502ec82e1d5cc076e79e4c+29666907+E2e27c6bef691333b19269570bc175be262e7b2ec@5441920c 384e1e7642d928660bc90950570071b7+16511641+E44951c3c7b111f06d566b686fc78dc430744549e@5441920c e200de735365bd89d42e70b469023076+26095352+Ef9566086c4526e88e4694b55cbeb2ed3d229198d@5441920c e809638508b9c667f7fbd2fde654c4b7+26536426+Eedb7bd609b7d22df73bc5b6031663824ff106f5f@5441920c c6e13cc51e2354c0346d4564c1b22138+5595242+Ef4eb609230d6644f1d8626e186f95f9b784186e3@5441920c 
fc6e075d862372e6dd4d438f0c339647+524636+E28e5d58c5feed7ef5e11869e16b00666424f3963@5441920c 654066ef6cd1b9ec3010d864800dd1c8+20166756+E655b286e729e5cb164646314031f45628c914761@5441920c dfe8df7f1f6d8f37667f275fb0f16fe4+10195576+Ec7b5272532230b29ce176629dbe6c9098f482062@5441920c 0b3e18ed791e551bbde5653487cd9e0c+26057104+E95309d4ec6c56d6490946103224e8e6d35622e12@5441920c 9f453ed53b8be18d3538b9564c9d6e2f+14129943+Ede61011c6d265c59417889db12301c712ef6e375@5441920c fd919cb4313d5c4d3e6d36ddecb39d9f+27262406+Ee7dcc78b62b26b179f6cd05bb6c56b6d932f01f8@5441920c 2371986d9b195513d56d7d8b6888fd13+11366564+E487076c1c0dbbfe05439e9b7506b3d79dff8e3d7@5441920c 19cc39fb80e4cf65dd9c36888261bf6c+4264756+E5d56331cc97d68d9cd7d1f942b04be3fd808c640@5441920c 622c38578f1913e0d1ce5db993821c89+6746610+E95f98718306714835df471b43393f45e27ddd9b9@5441920c 3836977b216b56d36b456fc07bd53664+21620366+Ed358c40e313e1cc97d3692eec180e45684dc21e5@5441920c 738636b97bc221e7d028bdb06347dc16+9166469+E76e010db792235b2fe1f56f26037638570191f5d@5441920c 56605f61b621650d3df04831649d2588+6326193+E1d9d0567e8fcb93990f7c4365f92742983e6f69c@5441920c 2125e15df79813c69497ef6c0f0f3c6c+12757371+E30cbe534f649db7301496eb203711dd9eb3e9ee9@5441920c c61de805f19928e6561c96f511fedbb4+12157116+E756df376e5bcc65319d062bd10685df117957004@5441920c e32dc879179c2d507bb75ebd015d4d26+10261919+E2250d07188228888c8052e774d68e2918f6c4c2e@5441920c 6d2d0e3b6984940858e36864d571eb96+40669605+E2bd8434ddf794691166b1556e47ef8f7b636c920@5441920c 65603431e7ded48b401b866d4c8d1d93+24190274+Ed2c84b40dde45d8b4df9c696651c4d8cbe02e019@5441920c 1228e02f7cbf807d8ed8b1823fe779b3+10020619+Eef06c59626f88b5dc9b741f777841845549d956d@5441920c 7367b338b16c64312146e65701605876+44636330+Ee6d463f6d719b0f684b7c8911f9cdcf6c272fec5@5441920c cd8d61ee8e4e2ce0717396093b6f39eb+13920977+Eb6c4f61e78b10c045b0dfd82d9635e45b6b01b5f@5441920c 28079dc5488123e5f9f3dcd323b7b560+22369141+E077f18b49d62e4d88ccc78dcc0008e4021d7342b@5441920c 56bf3c8e6c6064f6cb91600d29155b2b+22616366+E920d258e698cd2e7e66d9f78de12c87f62d472d1@5441920c 49f686994d4cb5967d19641e284733c6+26439412+E9dcd733412c06841ded126efdb30542c4f932587@5441920c 1ef6646ce8917186e1752eb65d26856c+4173314+Ed60dc1dc4b9ed74166619d66109f6eb546c86342@5441920c b24076cf2d292b60e6f8634e92b95db9+39664156+Edf615c5203845de38c846c2620560664ee6cb083@5441920c 576e06066d91f6ecb6f9b135926e271c+11123032+E9d147b4b89c947956f0c99b36c98f7026c2d6b05@5441920c 7642676de1dccb14cc2617522f27eb4e+10756630+E55cb4ed690976381c9f60e2666641c16f7cf5dc2@5441920c 77580fe91cd86342165fb0b3115ecc66+10560316+E99463b8815868992449668e59e41644b33c00244@5441920c 1c506d050783c30b8cd6b3e80668e468+35565426+E67c9d75c946c5c6e603867c66ccfcdb45266fc34@5441920c b0d8e3bf2d6fc9c9d067467749639c31+14197061+Ecdbb94e40090d099c847952d2f21de89803f3169@5441920c 01605bdb27b06992636d635b584c5c2f+20756432+E36de4fe4eb01fdd1b9226810d21c8f62f1d65643@5441920c 0c27885b49cf5589619bd6ff07d02fb2+15792191+E23bd16d3bd20d3bed3660d6fd035086d6d5146d7@5441920c b0149371ff6e4b097561cb6de4b5018d+22249239+E4f207f62d04d6d847c27e2463f69b847676344ed@5441920c d6fb819c6039468f36141e1344675379+16449706+Ecfb1156101edfeb2e7f62d074f52686d215def86@5441920c 09d34633511ddbcc6646d275d6f8446d+29052525+E6bd7fe2d67cec4ed4e303e5f75343e4b45656699@5441920c ed798723d587058615b6940434924f17+23966312+E97c78dcf692c99b1432839029c311b9e66ec51e9@5441920c 29f64c166e005e21d9ff612d6345886d+5944461+E004b7cdd000e8b6b82cde77f618d416953ef5f76@5441920c 8610cd2d6fb638467035fdf43f6c056d+20155513+E76b2453644c8624f5352098d3976bd41ccd81152@5441920c 
64fbf1f692c85396dffd0497048ff655+26292374+E3d479e00158992e9d770632ed7fe613b801c536d@5441920c e7db466023228e000877117bf40898d5+37776620+E8268e86cf6d614e31b3f89dfcb73cfd1f7b4472d@5441920c 26f844c3000746d76150e474e838876c+16720695+Ecd248063ec976663774bb5102068672f6db25dc8@5441920c d631188d8c5318efbb5966d96567162b+13059459+Ee8e8b625c936d9ed4e5bfdd5031e99d60ec606e6@5441920c 75e196c3ff8c902f0357406573c27969+7673046+E3fde8dc65682eccb43637129dbb2efb2122f6677@5441920c 90d0f062f153d749dc548f5f924e16c7+5625767+Eecd6284d567555146616cf6dc6cc596e76e30e62@5441920c cc3f072f71cc6b1366f8406c613361f6+42976743+E55561d73068c4816945df0039e80863880128997@5441920c e74b79c0cbd84059178c60e8016d113d+13609906+E74850d9197693f46e640df4c7bf631f5cd6fe7db@5441920c 186706b6c31f83b07e7c60eb358e93bf+11966262+Ee4e0e578278e9288bcfc546355e16dd07c71854b@5441920c f85c6bc762c46d2b6245637bfe3f3144+17595626+E780515682f0279edf3bc7638e69dde8d5c87eb5f@5441920c 80fb6eed15dbf3f3d88fb45f2d1e70bb+6567336+E61709663412711e6bcccd1e82e02c207d65083e6@5441920c 55d586d9b4e661654d46201c77047949+7406969+Ef65e6ef6de723634d7ebc04b8e8c787760940948@5441920c 6fc45eb907446762169d58fb66dfc806+26345033+Ebf58596e6096dd76c9ec7579e5803e82ec7ccf66@5441920c e398725534cbe4b9875f184d383fc73e+11140026+E54668ebd22937e69e288657134242770c1fdc699@5441920c 69b586521b967c388b1f6ecf3727f274+9977002+E6eb4b63de4d17b50866bc5d38b0ec26df48be564@5441920c 2e293570b864703f5f1320426762c24e+13651023+Ef6640563ec496df42bcfc696986b6e4f6edccc68@5441920c 462b1eb00f462e161f4e5ce2bbf23515+19646309+E47ec8fb615747c6104f7463ffe65d1f6738c2e67@5441920c 7f8eb265855e458e6bfc13789dd696b7+22406679+Ef3cf31dbb3fefef455f62d6b5c2486500f327398@5441920c 36659b0e79c69296927b418919561e89+24370117+E66e94cf0be13046deb186302cd666d5300908029@5441920c bf6dd822cfbc6b90f986e5f43d500c6c+34354522+Edff8be044ebd69391cf282451659660d5dc6dc12@5441920c 2267fb579f99df6b23290bd2e939bcd6+12153797+Ed3de8875c91d6f346fe320b20670c410f46e7ede@5441920c dd66288e4f7ef394f6ed7e9b73ff5178+19120741+E3860d5c83e021eb3646e5884018ec3dd59d806b7@5441920c 7f86957074e677328be7538ccbcc747f+16676462+Ef6492f2cb4dbf9d73c1e58e2d0d85b0dd2f18402@5441920c d7363e073e178f502b92e053369f40fb+26125462+Ecf329f93efd1ec34f17edb991de264b9590c88f6@5441920c 6d64dde62f62d6febdf6f2c66c0220d8+23263164+Ecc22f32322cd039cce602e155bb530ebedce7b49@5441920c 7b70bebe42067024d360b7216c55d7e6+11436933+E7b70998697b46b0840836219c8e37e6d74906656@5441920c 3e6201706ff76745189f1636d5827578+27434607+E5204e6cf46e581b019661ed794674b877f7d3c26@5441920c 1b1968d7d8bb0d850e15bf8c122b1185+13431932+E28e98b072607648f73c5f09616c0be88d68111dc@5441920c f8ddc22888e3fff1491fdfc81327d8cf+2633555+E1b55c1417c2c0bb2fff5e77dbd6ce09e7f5d68bd@5441920c 9f200cd59000566dd3c5b606c8bd4899+10166739+E88797b1c2d44d6c6b6c16b6e2dfe76812494df2c@5441920c 65f26cbde744d142d8561b715f5dffc7+13335963+E13e86ebb6b426b1f4b6546320f95b63d558678f9@5441920c c89cbf812dd061873fdbeefcbb7bf344+6763176+E13b1765c5d3f3709605ef703c5c41bc46f25ffb4@5441920c 99f663066b7d0dc6f6e355eefbc64726+13444650+E8f607654b8d1fb72109b2e3eb64645202111ef2e@5441920c 6804c29fd6b3ec351dc36bf66146610c+26266416+E106283d64058d0c8b15061eee6d2059095767f7d@5441920c c23c67b4d1123fee2d8ed636c4817fd5+16376964+E392625bf396b887186e8200d94d8c7e392352618@5441920c 3f7640ed561971609025b37696c38236+14116164+E55239788883085d7f854058e090177fd10436258@5441920c 4f4014cf7cf09694c6bc5050d08d6861+23692725+Eb40f77014747eb8756606581bb6cef6665bc1e92@5441920c 0f46b1e0e8e69d0ec0546666b21f1c23+10507763+E173fc49b601c3c699d7cfce8c8871e44b371e6cf@5441920c 
24385b164f3913fb234c6e3d8cbf6e55+27625276+Ed26e6d9e6eb59b6cf51c01d4b8909dc648338906@5441920c 0ec3f2ecf85f63886962b33d4785dd19+7026139+E43ec8f5ee2bf4f3b639ed66313c2363965702052@5441920c 674e2b084199c6be0566c29f512ce264+27711533+E1752f5c20c69cd33e669012632cfb2b93e1febf8@5441920c 8de5446ce99c95842b63dd62f2836e35+6793207+E808e94501ce9cf2f0b694f16ff261d42792dfc34@5441920c ecc3b274850405ec6531982414c634c2+15405916+E3c45d5ec865de3c34bb7e14e5577b7ec99d50268@5441920c 4c3b28e830f55707601378f6b314bb36+9160724+E6c42dd49736833326cfeb59003340d99d336b85c@5441920c f217e6338e5be409b309bc05768cd692+9467601+E33296cb0476d39648eb3518265241d2e58667c69@5441920c 1c33d278e00d838960c35365e8b211f3+7969532+E976bbcb318e35b425276d16640687cd30c0f6513@5441920c 45fdc6257f4601f5e6ddf2c3f3249453+24739014+E37fc9116462386d43647d43b1f24301fc2b3d2ff@5441920c 42c619bd934e4ee7876e6e62bb013c8d+26941562+E22061d93633689db860c97d09c2d428e0bc26318@5441920c cef567d31d5e889fc38f0b1c8e10603c+3036311+Eff049d2e8b04646603c7307d8427ec384dd5636e@5441920c 6d919324cfd4489696661b0c3bd2046e+7761096+E3d0ccb506d66c4621d1563e7f301d9de5e306ed0@5441920c 4631f15b56631ddf066623240ef60ecf+16709476+E125d603e61f05573e9bc6d15d64038548be25646@5441920c 6c897d794f5e90b15ee08634c3bfbef1+22602265+E65c0d239fe02411d4e688b0ff35b54b5fbf861e6@5441920c 26e1e7c8d16d0ec9335c8edb01556e74+23405696+Ed77c8c87b739992b6e2f4f0bd813e3877c029646@5441920c de5607856bc6965b3d689d9f6c739dc6+14457362+E16b373fe771865bec4e26e0c5b86e3241be55416@5441920c 9c96247f87d27cdf351d10424fb65154+11220750+E5666f47b25b3667bf32b17cf06202016edd96078@5441920c 6bb96d31bb0766150fbc94ff08ec1e50+16561466+Ef617977d6fc4b3b7606056e7744f61508e1f6dfd@5441920c 290806849f83631376637e012d63c055+15634314+Ef56d98c07c837800ef7653b9e74b1c868911c512@5441920c 917ff996f786819bc13747d05796db8d+26147265+Ebd9eb6985b39beb62d7cee1675dc88bc469786be@5441920c e3c8b5f953857082274364d3867fb56c+11193151+E39798993b68bcde100412e41e046f716cb576fd4@5441920c b0ce9f0bf1db246f83f961be4789b2db+9599462+E9d8bd12dc40e9e4665e4f33206ce9d4144b5c48e@5441920c 77d5f68866703cc369796f6d56c4d564+9625154+E6076126e1811c6e7b05c8959558fd35be4d9336e@5441920c 7b861b04ecef1e4260f42febc076dd48+46677445+E979196bd9bbd7456963e8f55564ecbe16ff3745f@5441920c ffb4f46254cfc652517e153438489038+12795653+E43e6ec68c5276d6422c66b077266230772849035@5441920c 7699462d29f00f611f35891127e16031+27123199+E09eeec5c1612c40246b21e26b65766ecc59bcc9b@5441920c df706e0400506e210565939e04539eb8+16632721+E3d404cd76de417682560ecf97b5c7f821c18148f@5441920c 1c9d96048b663c625fd02658f6f75c7f+12652756+E97cb664d41f2b9c69f9fe5667c12bcc266b6d492@5441920c ed360b6b945be71391e803353132c5fb+5706666+E7e4162c6cc3862322792cf91d76c719c84896c74@5441920c 24b7bf83c6b60fe6cf9746c8d16b86d6+12566075+E0d0b95ee04f865f5db70e2c80d35ed7742d20619@5441920c 9deef070820c1ecff87d109852443e97+16946677+E288515ff55d2b49754bffbde646d6b9f08981b66@5441920c 5e57630e60dd29658e61165360404fb5+12209370+E0762d4cee56b876c85ee0d2fd468649640561070@5441920c 61c7e19f7e96bcf59bff036887e5e755+17916606+E92d286ed713f8cb36d44f6b0346db71b5156648d@5441920c 878e7f227305c5c89ddc057bdc56ede5+24643337+E214637662b794717e65860d89ef5bc35f3f43d10@5441920c ef1514658c8f004fe640b59d376fdb06+3264756+E2b6eb6625c08c54758676006f634f9d09d9218b6@5441920c 485e4d6249b959b57226eec66268d074+4102134+E1118dbb1517f7323387bf970ddd5457c852353ef@5441920c 06d4b5ce44510d68dd154ff45203448c+19703325+E65bff4376436dff5c5601120e7c7138cc78eee61@5441920c 6d6616d27e10b3d0b562d154b6934eb7+11554223+E814476dfc3d4839453633b5538f76e11d365cdf2@5441920c 
f81f6f1ee2b866edf1e866c360c9decc+12130664+E3f3c05664668c4573244d3ce9ebb32356ec78d00@5441920c 66fb6db666667e6fe4b644d414643225+5642000+Ed3db35e5034c66e26323c3711b3bdd9e0c30b9e1@5441920c 5bedd5d1813136695b744e6696bd444b+17354621+Ed6c692158452b91b00e4f7065fb4d57945c6544f@5441920c 041391d37c47b66c064f216c76967c1d+7546724+E225d15c0700689d941be9216136d5159e57617bf@5441920c 0b3936e98635485dc5c39c091b1e141b+30306549+Ed8201dc4b2f19c6436b27200cc661160880f53e1@5441920c 87c955bc76e6dcd602074cd0b61ef669+19466657+Edce058995064b4c6d2ee4b5fd77634ef612fc4e2@5441920c 5863cf41b6d842606191f91266766ecf+19566732+E35547d8c39d6ddf6f0fd663ef6207d369121fd2c@5441920c 4b2cfe879bfdd4f5592b2948e1f12f80+16726166+E0c34f334513cfc42834f2f1b8bf3c2ec320bf9cc@5441920c 18fed9e859f59e23181668e4143c216d+7297044+E77384d2014fc7f1e460436175b45bb23678c0f70@5441920c dd1ee9df0750267ee5bc9ef6f29b0632+13453405+E45879d6d0f51bd868f7361809df00e383b2d83eb@5441920c f3e82d6578cc5172dd9f264f50d8bb42+20691242+E246dff090584102969751374c13e36510ef96feb@5441920c d68c62d920b706612d32f31727654479+13969727+E0428790ccc219305dd026886526fc5f41505ef67@5441920c 672f554d523e6939c88956610d8d66d9+15929956+Eb0468436beee5f8614d96765e75c628443d04832@5441920c 03690d1333904fdc508c57f33c715c3b+12006715+E3dfb288e160d2920cf92e3cef145d82d8636d807@5441920c d7d5d48c6ecbfff8edf63e21c8ee1680+6976746+Eee6cf6450806f2d68c7ff61d16ff0b9b09bee55b@5441920c b206cce6b38d71c626fc6260d60de055+16617309+E5bd96be2db6bc7692b8e7166fef6741635fe71c1@5441920c f82bc9fb241fc9bb1e9403660f31e963+26602130+E23677fb52377535f6f4d98371640701007467dd3@5441920c 60909d87315fc866ce54161907883f86+22761626+E222d02645d114b88836267760cc5599064dd8937@5441920c 5938d2c975658ed73f676cdf8e2df648+7096657+E6d5533fbcdc0f54dd094cf4de638c4cd3020bf04@5441920c 4b8c87889c09deee98b01bf9ec143964+26067196+Ebcb681616efd85c46893be63dd6663f5b45695c4@5441920c 4e7f06d06fd613f5d50dc3b9626d01de+10673992+E66fe9d65f3f18ef2fc74c6c766e04c6826060c21@5441920c e016be89b3607dc2c6d84703446096c6+14647560+E67d21749bf35c936546c2816e658c8ce4fd4863e@5441920c 65663576005d0735780d7783d27fd612+6567442+E3eeb256c414f59c671484666608019515b6d66e8@5441920c 8184bfb40466690c3c7bd33cf2001b7d+27369311+Ed3b2d4e52f16cf2c20b95e1650f0b69671b6767b@5441920c 28210e98e4bccfc0c8c881ee65dbccd7+9264693+E6780fef94c00c22364661b4df03db1894b65b279@5441920c 7d635728d6d3f0654491e73d06e2760b+16320752+E89b121f6c09e7f188397cedd9ce53064630e4197@5441920c c355555c484c0d41d31c1496bb0f88d4+4140293+Ed2ec40601643f992424e6042610ceeec4f926202@5441920c eee46de26c233081986fcc63036f6e87+17266099+E643f07bc7496eb97beb2bbdd74f78d9c7c40632e@5441920c 6bf27eb8b36619050c0246b26d541397+3060756+E9ed96e63725bb226e6717733062d92c38d0dd416@5441920c 17e7810c048bbbd3837c74253576c064+3260426+E660edf2b267bd1dfb1c70d25ce1173d99b572435@5441920c 633b2f33c40f13b691d59f8b64543ee9+26136225+E65975c79c76fedc2d8b92c2d8095845996c656c8@5441920c e5588b19938ee85458f1008b6155ff80+45662056+E5fe59f043d3b8e6f1ccc6d92e19ff6c6bd6e2d2c@5441920c 14b6ece5c233ed08c8343665bbc435fc+10447960+E6009d59e556cf6379ed6bc849f180d1cc33b3068@5441920c 1064ee1f9f687c0461c5bd686b612ce4+6564566+E7cbf7c65eb90855372605b5452b6265366e64841@5441920c c073866fd327e646c556d748027d6cc6+6396676+E8c404153f6d5010756968c6b9ff619bcddb1e1d7@5441920c 1dd987d82e5f8d23659cf23db99f6517+7956724+E18d666c504486712bddb5f8173658650c7708182@5441920c c4eb6d77298d6964f9e862e809463521+34269266+E1e466382fe93e2103395fedbb57bc5e2826f482f@5441920c 5c621f017e2e17260b15e13d6d6102be+13762411+E5293993d8891eed812c1829096775c9129d66d86@5441920c 
706beecbdb9f413d8456e05b6744f6eb+3947613+Ecce55b46196c75ccfb06eb9b392e53d9f1c71c18@5441920c d498f6f76978747843767963f5064309+5537714+E2885742de6412d62b47c33bec68d8d9f81f9c09c@5441920c 2266396b65b97e348973206358673f66+24305632+E2e0ec28566c629333dce5f41e47488f4d736f018@5441920c d91969572c86d6b14636f6e3460bcb24+17507515+E96fb6850f7fbb4d9c2e0954be44635896879976f@5441920c 11b46690ee6e9bfef0c4026d856f4670+32626524+E361d099f561efd303d2e24182ee09327ec51657f@5441920c 2361c32669d0564e52d336f85923b61e+1010299+E45038369c554e6b30b60f3ec580898792163d919@5441920c 858bd2ddeb56d69038b78d289ddfde15+23454636+Ebb767b2668b5f9f61c4de733265595f1c074e606@5441920c 91618b31768711ec4b19dbfcfc7bb98c+16017355+E876f5f62b67613de0f79e60f245cb0f02f017220@5441920c 1bb9feb4c6ecd90cf1d8e061fe9967b1+9792746+Ebee666de05c3811c76620f4d9f49cc7103f0690f@5441920c f76ed53563936eb324feb4fcf9d2e44d+533647+E59361b31266d7566c00ce339629b5d1d86863cb6@5441920c 47f61e664eb4d68364d94971f0446206+1064656+Ef226fc40f66666690e640c125f636b37c6e75682@5441920c 155b75f465771d25168cc2f723426908+27465637+Ef6d455ccdd7350f6d8eb036675b046bd531f694b@5441920c 189e6923d3e6810634475b6563ff42d0+12707353+E218987c1f65753c694feecf176253ccc353268e6@5441920c 345957000ebe671b86130e51896d8694+6632970+E76eb72461dffd0b03ebd0287b4bd4df60fff6019@5441920c bb8830d56f6e8b0463c1897f9c6c9b68+6746794+Ee569093960e68f65b8bfcf0660c0d51d8e316507@5441920c c1c82dbc3246d4994c7110536532bd3f+17732191+Efb0bdf49337261801bd36e7f961cc766bb258d6c@5441920c 3469b89f618cf43d6964c89cb7557360+15491375+Efb4f84bd36776264d5b66193cbe06700c9c36986@5441920c 1c6c8cdd2b55b59763484fc736fcb2cb+20295749+Efd1b1e16c26825e6be2f0086e5956ffc2cb86186@5441920c 425eeb625e0e6f78640cd646b81ff96c+27117670+E6c651bc6fbf0911c5f0cfb13cf46643234cfd962@5441920c 467b40e186cbe66e68e27b497c146989+14464752+E6661978e64f282c9673fbf76c8c28d447de95571@5441920c 215e9957c31b9786166166d3066dc8c1+22592925+E24ec6bec163688076c95e6d575cc43c4d2185d25@5441920c 8e6d9566f2e6b368629c336c9fd6e0c1+21043993+E60f9744737815de11b5cbbf7d2b9bc26197710c6@5441920c 6903b3ef7b72b5c437121c8d75ee5f00+6526756+Eed896e26d13830cd0de7271e986638655bf936f6@5441920c e99d862823e5647d428cf03c85510dff+4646274+E7f7e0d272568f9d8353432e1d1284c6e99179ee1@5441920c de8752933c71e8e4912367c396286d59+19571326+Ed6eb12d8d1ec809bc6636806c89f0fc31b76e49b@5441920c 42b9673e467681dd1b75622d5871022d+12923669+E6638266df36f80ccee9b177392378fe0174654ed@5441920c 6738766901e6522d122065eb602706f8+9921926+Ee0506f3116684358651481b6f6766b6d61e4df36@5441920c 25ed8c9f9b7fc61b3f66936f6d22e966+2695507+E24986eb797bd7e2ce75f8cd7fd13502bd1db0900@5441920c 5f63716d6964f6346be68e50eb3477fd+11292446+E6d40765c1ee54fd31d239e1e96c25d6d964e6e33@5441920c 646ed63541be7c4b197e74200fc58563+40629656+E3228f646ef6d86dfb63090bc1f4540534fb12809@5441920c 2bc96d464c08c774950465b994786463+4060756+Ef6418662f5bf612877bc0334972769d5c364bbbe@5441920c 074f412860c7143944662f3579e8cc96+16610667+E7d989e4216744576f348473d58cb5102cd3b57cb@5441920c fdf162c24e1b743db60644c910bfcf26+29170320+Ec6c6b955e0fe664690d2364446326c2f16279321@5441920c d1e6d9e6512687494cb66788d97d6b76+21574362+E9e9f63bb64f611c623604e6f6f0222e0c8105236@5441920c debdb22c0be9d5cf661539bfdd628421+3619563+Eb95f6d2052bbc63bb931d21fb518f89531168e2d@5441920c 1b3b785b6f585c9f46c8b932ce5ceb26+49161531+E2f15232081e450fd4efe9368bfd8bf8162046667@5441920c e336b53894f0543d59963105e9678367+19746144+Ebf3c79b229c275ee7e1201257605016278153d7d@5441920c 782f48c017169e53d2c726d046dcc6ec+10946735+E9e78046511c67ebe2b39f5b21622bddfb87069c5@5441920c 
fdeb6225b7463435cebe00e7f86df276+6376465+Ef4599c2d6e757f7f66579b373e9e6ef0ed74b62d@5441920c 32d626f756c4cdf566533c6b2df652f2+26661567+Ed4671f20388d6576565fd26bc00d53f0e38b6c51@5441920c 14c4e60bd3fbded9dc8d11d6e970f666+13661669+E0d589b83806594837ed672319ddfd74f3cc39ff9@5441920c 77886771777c50587e02dd08866b75eb+13501427+E01866f494dcd7dd4fbe7541df16529447e52ef6c@5441920c 8b3bf3e5f6b6be1d667f36d1784367eb+13677551+E6b241697c8d0c97c142fb695936589c1945e9ebe@5441920c e12686bd46818f07614c0143b68802fe+15666076+E24458761c577527694bb99ff659b96c954dbc3e4@5441920c c710454601fb0f6e4d03d6461fce5f17+7996490+E8e9cc9e865e420e3e0cb0987f106665e80e7184e@5441920c 316eb301c1ee9cd9b38c6544cb7bf941+6053236+E04118416885186189d00220842078fdd82b105bc@5441920c 1946863de487f91790e10ce2d63deb4f+10726254+E1613e538b89d50e662650196b2bb46060e46b325@5441920c 7e6debd8e9fe0f58f0c0ee19225e4664+11356746+E15749f35c8f636eb7666f8d62d32f179c7f2b443@5441920c 62d6d9202fc0cd2099157526b4977b6c+7600427+E5363fc1d6f6c9ec60576c454be6e0e026c638644@5441920c 80f767764063d69fb042e73741108330+20722736+E79223662b666f482c76c074de7c948d9b81e9eee@5441920c 7de230cb3c601ffdc306c656d729e766+13729019+Ed6839fff29b73d5b54c16855f0cb57ef1f0d5dee@5441920c 566eb88cf65d80f8def689999ef64367+20246913+E4868dc526d88506ced164b48b2cb6ce669820484@5441920c 27250e8f350f3b51c756d68e47e2c980+26945676+E8c606e26b483c6e93227776776b116e63c7b6607@5441920c bb9e9cd086ee769366229cd0b32b5c09+3364670+Ef63125e4676b66d764234e76f314863e7769e3f5@5441920c 50e50111ef9bfff37663d6932f9b72fd+16155754+E056360cc57665896b629cd38fe14715621363de6@5441920c 72e864cd512f786c54b9f07646e66e37+12762477+E6bd9bff5c2926b09dfd6b66c2e969dbce9f53669@5441920c c339c751cf7d5166c30b8b21dbefb69c+16572364+Ef279e41366b796bbfb333ee55631cd9dfb6e097f@5441920c cbb37c74cd1f688d1c9756cffbfee897+12456663+E523b778eb6355bb66c2f5d4773d775bc6df25dfb@5441920c 819066f13ed2c71947e3f647656b576f+14524669+E62b3c65fee64e372239593516c64d60fcb850d75@5441920c e3635e4290543563388e94e1e6109729+36661662+E767f7d2e1298f1ef565e967e6170f88f7d6ec9f1@5441920c 2ce76730ceec8d843946f809c16f6f46+3149045+E882e0ecf259166b860f68dc6fd844cdce3fe49f9@5441920c fb2814493d1c484625bee373d5369cb9+13700211+E6be1eee5409d867cf0327d762d7ede7fbc296f25@5441920c 6ef39899b0ce52e83964c55f466f7021+7529724+E3095671946451ef2d9b129106c26f1e9515eb60b@5441920c 36e914556f2c8d21b82b63578764e811+7950542+E329cd0c0b244ff75d31782f2dbb7741619b24861@5441920c 895c6d874d1245d8e66455604fc45d3d+14756600+E764966661b47eb9946f1964e5ed060f623240695@5441920c b66ff865ef7d09bb19966902e62429e5+16443596+Ee56f4778f3b067103eb6bb8e0249fed5133749b0@5441920c 3e76f1361961466b0b95d3b6f8ece285+20106669+E01c84b2e28e91ebfe917067bb6671061c8db49e2@5441920c 63953f84933eef8bc8bd15c5d560c522+20056363+E4c6bd626c3b008116064f13694d49844e6e656ff@5441920c 5964964ef7c947f1c185073125669465+2567406+E064d861f4630b32521588b17290264c70f3cd71d@5441920c 379733627e446179436f327832659951+30547504+E76c3833c4d3698066d4eb966d179b85bc889e628@5441920c 3358c02673c23b84c37d83e469c72f66+21562054+E6008936e0c5343533bfc19f5c81ff58c3e2925b3@5441920c 46cb194289db37ee376f4f3346de0e04+27395356+E6db539216c1b433314f27bded4c6cf0078bfce37@5441920c 94310de101827648d6b3bc3c89708c59+26365676+Ee319940fb28fc2b11801e3019bd84937e2248074@5441920c 42220345631c336b5194ce9b573ed40b+269200+Efe4d5267e1d56103455663b90c06d54622e0641d@5441920c 2263e6126061fd7681b1d7e22b9f6e14+5237174+E51317e2730be6fb316f2b2b6e31d2913f4f37676@5441920c 07642351234b816b15e150bb6bd637ec+29727146+E66325bb50e67ef4de1d94737653dbb98761c1e66@5441920c 
2fd5ccf86cbbd0e3c3f366d7bfee56de+30907674+Eedfe3e86d6243ddf6d5ede6c86604e7e310283d4@5441920c d6859cec4d9fb1c68e391840579b56de+1504656+E69c673e18f46659560ce19e24cd642d7ec4cb3b7@5441920c 49620b9c06ec234288fe02c59e694928+14943044+E4557ff4e2cd1800c94b296ee059f895660b0d38b@5441920c 1e7664d9f69c30178124676004c5622c+33721037+E16ec6ff518bc86565f4c9dcfc0656e38cf2d47fc@5441920c 17ebf9c6bc4ec665ce79750639272662+24605551+E636c8155632762d667d6c9004f6738f927dd5979@5441920c 342b663668c683fb488c62ce8568b618+29376907+E156b0293e6de6662cebb0703b9e2b37386fd116e@5441920c f0ff7321084e5fb26b047c29b787166f+12633635+Eb57428f2bbc765e1391c660e6592684e76f624f8@5441920c edfc2352776c326d1425c8f75206518b+14797426+E136b15d57166c3791c3cec25f2606868be3bbdc7@5441920c 19556e814b8696d174614d2635efce37+13760102+E8e64c18124f98b3f0d615b89b4bcc0db5345471c@5441920c 25764e17398bb530336f104fe1f16fe6+26794272+E6fc3ce18868166e546e46d72fe289455cfc70834@5441920c ed98ddfbf7181c16fc299ee261fbfc82+10201924+E2de330f0e91b386d0d779d21c3918e998cdde6ce@5441920c 77eecfed3522b3b96d26b645e3367fb1+24124636+E2332473f67efcc195ef87657368074fc7b600642@5441920c 9bc03661300986db109ef2626d3742c8+26615557+E68ed6cff0f9894c2ec3e940e0c676ccf99b6c0ff@5441920c 152316dbdb21124ed53e3eb985b94dc0+22145236+E658096d502e9136b69b1fcf50d5064613dcc7d0e@5441920c 561c751762166c7b8fb609601b9f2f48+7311346+E81d4d07984d6c5e974c15008d4f92d663c710388@5441920c 012c01572b943bb8466fb8116e57e60c+12577740+E1c98f4cd9f1760b062bbb20bcc0131eb9cbf5821@5441920c 4dd985e1e9728f9d676d9d526c0225cb+21506140+E7ef21dd62f372fbf66c17e6164064bc9c1283863@5441920c 626622416232e782cd0874f9fc41e170+52369+E8ec7e615f231dfe25b603f3c178460c06e624f6f@5441920c 6b7e084ec85bdb5633ee1355933517eb+5076969+E3251c561406ffccf6f6678054cc66308160672b9@5441920c d944332019b54e4213694d720652f837+31190176+E51c7c1b974617f8711d31f1ed3d554dd69708b92@5441920c bc35e4ec4f310481df053878c99e2028+41160366+E72e6fc6c8996446f8428889039d6382c3187ee57@5441920c 32b116162e37fe261fbf44699d161bdc+23615045+E7616236b140e626104830c0bf9b63c3632defc9c@5441920c 3260853d69d0f6b96ce5b079b1f1037c+34031699+E614c898376081ef614581fcf012196259b247f1c@5441920c 9024866876926291e291e983816cb080+13651503+E44d2c5f757e5ce51df4bed90d213e67280c08cbf@5441920c 7f7352234c5c86d70eed25447b6f6e51+1996046+E68d1c68b7d65e0697e6c47285061b36474bc9848@5441920c 0866e053769fee5e5eb4c9315d6bc5f2+22692591+E44f353b0622fe8378168c3cc6684ee351e0105cb@5441920c db74b6286949f3b1fc69be2083982e48+6672354+E2546bb731323d421439cd1c6e426dfdc0e6f3184@5441920c 1f6e9090bce4972b5371f66be3dbe365+13749361+E6276f45e81bdbc0eee34e591e76b38385ed87108@5441920c 8495966c987b24d64e8f23261e40773c+16660930+E6b7e063904d76d68b68ce542095408b362230e93@5441920c 222648c113cd8d52179954bf684d5626+11036031+Eb563b11617cf4f44d7c31e51e50d17e0f398f063@5441920c d878ec2cecd3470c7dbf4291653e6c90+13412650+E0dbd46d19e8b6f8c66064196cdceccf5b762727d@5441920c 7658c35e0b91464508f7133dce6e60cb+14313555+E54e6f8e766224090ec6c74f776d30ceccc3de46b@5441920c e8789734411f44661e0fc74c1c0d36f6+33635703+E470928dfe26c643e0603f7630526232621ddc4c7@5441920c 8825382969eb6c5066fe78997e0c7bbf+469634+Ed6dec1e7f6886d2bd1efbd8c6edfb22edff74bc1@5441920c f5552117005f6c9d736496e2f9030f5d+11377056+E0c2be5653d1776957700311e5de86764c636bdc8@5441920c de62f65e30719327fdeb326c2f16d698+16346545+E5d6e6d619f640363f167467b2de9c64347e63768@5441920c 3cc990452997b05b51ed170d291f9f1d+21127772+E281769d6ed0579760f4f2342c1f9bc76618c8cc3@5441920c 13c43c4e049c7d067f0f1dde01648303+1059366+E3f19eee97b53b375756ef3367b86deb6077c593c@5441920c 
7d62d6e36364e35252710e47b06c54ed+6964270+E8030563b53b8d7d6c4d127c2e527e6f2ef56e98f@5441920c 7255f3e557e3be60e6bd18054b360f99+20073973+E2d6e29ce1c66668b02f075d99194392b83bb67eb@5441920c 32289c50f7dee66d59260463d7b85c7c+15769669+Ecd9f070e6f3b0555848c8506610997600db07b15@5441920c 6603201c20e0c24b9602169b3547381c+9756229+E3268c74ff8f0f67d1cf1d10c01dd9e2332dcec21@5441920c 061c6b2528256682c7b205b0f0f9d69c+11469333+E960692b62d3d34902fc765048d36081bd58b0e75@5441920c 80b649545f654616348cf66f4dff90f0+11074951+E49c53fcd4deed6b62e3d292e66e2948716e7e1cc@5441920c e736c8b66c29160f42d7ee5bd649e636+26145091+E436426d265d3d4d65658e6b39405b82d308639fd@5441920c 961e212b3d7f9464c268692761090f6c+20545569+E4606f1cfe9cedef085404f8465b915190c8cce76@5441920c 761fe39e125f6f19585464b661706631+13562476+E33877770d62273e62e345f52b755f73fd56c59e7@5441920c 241691bc053966df9f226e308c46e36c+19737049+E86d7f2325737cbb78d6ff61b583ec96fc4c8d0f4@5441920c 6e367eef8cb34400d2b43368893c81b3+27529030+Edff31b6b50ccddb0954c28c8cf38ecbc86417510@5441920c 05555c5fb49bedcf63ff878f1cfbe3c8+15452164+Eefb61e71fb4066dc56c247904be42015ef755861@5441920c 354e0c970b39c6956fc9660eb7367b61+12062565+E27cdb80616591bf8781d75f2349c12c7261338b6@5441920c b7c109d474fecd5b568fd8e460e81d02+39769591+E66648208f40b52f1822c01c3d61c374b1b656055@5441920c 6b6b334d6fc6ce94572fdcc96dbbb204+15604669+E690739f9742699cd09db1fb6b7c8f864916663d6@5441920c 796452beff88c6c0b46efc4b93f14ee2+4141622+E864cee574b2995464159f65fcb48768275ec1649@5441920c 61b6165606d625f9e2f5d22966e9f6c6+67106664+Ef0ce0c9615bf03becf58b76695fcfbf57596d5d9@5441920c c3c76c86ffbdb4331c4d29705f7bf508+13102561+Ecc8d369181f0836cfc5964c61e1e36945eb163cd@5441920c 40b30d29c63466c5e6239f6be673e456+16343642+E2c9d43453c0772ddd2619efbec822e08dcc33967@5441920c 386dc864e33f0436b915d5fe99e568fe+16664730+E20655b581566fdfcc78c0210d212eebddf4ee191@5441920c 6310b937f32c88e68c99d1065dd562ed+13661616+Ee8f93e9678226b32596883f5283d6271b57cee3d@5441920c 9dfc7371d62085d018c01f6e734d7666+26472421+E6b56e878337cbd25dffd733e1613722630682615@5441920c 2b26b973d557149726460d0c84dce8ee+13161766+Efdf846b7114c9dbe0f464dd7fe5226600d66decd@5441920c 4822d05c1f1061302f5e90ed3e33eb32+26136564+E293c1b58e1dc2eefd8ecd3dd99357b837c2d1165@5441920c 802f5e0e9957fb6fef23f77d1826e5b5+23561374+E05b6f3952dffd11ecf83c61f3eb2dff941cf0d48@5441920c 6f81c5950c9bc67d7bc82566e8735fe5+22349651+Ecf7818e4574828536cf5416cc67b87e5233b1586@5441920c 969c688f7267f6e313e4f0fe1c97d3d4+9400437+E697ddb147c825d4b09d2f56466ed5e61cccb10c7@5441920c 23d33d2b86f5be5e60626f213453696e+6696401+E5476e70ce697f686bb63f6927c758653123c7926@5441920c 01fb6544531e50d5dc982f54c6945839+14463365+E796ffc2fb3492cd5f70b9e46b09e7b904e86c186@5441920c 5e10cce37c60cc768ece04794589b362+2797932+Ec7bc4352c6c25f73fe54b62f671673701b676488@5441920c d27e35b3168f6fe30d6446d469cfb82e+7140760+E8e0e1d27865edf69d6f162f262f418267864b716@5441920c 79ce0bcc5f565e689c44df3b2f299690+7956760+E6b405440347634c4d780d9cd2f751b1b74801821@5441920c 6429667e76cfd6c7049b9f2dc83d2e02+26100130+E1c67439fc75bbe8822c11f6be411228c75474346@5441920c f63d64c68edf1058f8042054d9e608c1+15570132+Ef59753bed1608c150b463db19e0b824c56180472@5441920c 3d76591c1fbc9b1cd43216b53037d3b8+12079936+E3659f239292e2cb4c86885b44c6669507140f5b9@5441920c f438cd1e753312868038166908b7746d+23646496+E2b187c62f3015562691904e717f0b766b1d119f4@5441920c 476b689f6d00f5d5c94d4bf89d2d6f26+7320072+E7e4d35700d55497f8cc8188559d256f046d0bf16@5441920c ccbf6b908e6d39954627695372c66646+10249929+Ec220ef724e48c90b31d0c396802df409203f44e6@5441920c 
e25bc8599399b2b9c174d0b866633d63+13622024+E736877b72407836e424889479e46e60506db8c6b@5441920c 573e08705ff70f79d328c60c0dfe1151+7329647+E978fffec456ecd2633409ee866f9bb9311d976be@5441920c 5f2772d86c6567de1c03fb9b1535e6b5+25915639+E26e094692d34cd8e6e51f964bb8f147be4825d0e@5441920c 6c5cc886928952bd46f1e0432e966c39+6902437+E22272c74f82664339e62651c6373fcd997684ebd@5441920c d7581c3bf65327e93bf6cb536650063c+19367309+Eb904b6e6c9337464e0bb3e3b1fbcd0bf4228726f@5441920c c4dd8646b372463c3ce23c3604418ebc+10334901+E680573c727b403b3c7d364e9076479e6c68ff635@5441920c 16fe696306debe5906c75fbfb4f35e82+15956391+E68cb974c31829f20f4381d605c396ddd9021502f@5441920c b457f19b1c560665968f580861bb5519+22361464+Ee0e5f7040fe15c3d1138046b4204e2d81ffb09ef@5441920c 6283c883239d206dc8d7bd20439ff2fe+26762910+Eb6598d1d22ec11840f06949940cd671e16f54d66@5441920c fec697c5e865cc4e4587d9e2bf4b1df4+27462517+E6cfee6c054636f17309efc8185cd86cd1d0f2f28@5441920c e369d98390996c5b6d124db79d188615+17696144+Eebb1069fe1f6f406c36e2bdd4ced45961d1f63fb@5441920c 6566d9d439d70e07e6590b7232bc6dc6+16115379+E4615bf36e6691866358c30874be71993dc04c491@5441920c 78ebc34f2f582b1e58e52b36cb9b9fd3+26603399+Eb868f96c8010eb08b8bf48fd6689d884962fe856@5441920c 7b306f84f006e652f346640314e565ef+42767332+E8696fcf20e694d7e3190d2263dd0013486d9e286@5441920c 6164704991bcf25741294e26fb6f1033+22519054+Ed30ed601783b6f824d96968157e0ff69d0199301@5441920c f7942548dd956c6c02c1eedfd2755947+15623994+E8dc622096c66e7e459680b0466c97c08e968247c@5441920c 55496f6870b58c20c61c69e329f86b18+50651137+E4d15666681f614d666ffc6033cb2564fd498b422@5441920c 511635872c2be5d2773ebd578167369b+2340763+Ee6f8d691449ec061efdb7db6e67e686446660060@5441920c 5e4db617b4b314863d3df7f5f7d40b46+12296366+Ec52619bfb7bed7e38283cec6c31c629f3b43609e@5441920c 2c1c155e211f8615f348f56cd4e2eb68+19160541+E4d61dff6db10bcdd89f30333c6f416c4dcb10050@5441920c 26793ecd9d648d83017188676d1e468f+21150112+Ebe666f5f9db78499070dd5cce17f1801f5856395@5441920c 5c19db5f2feb0ec6cc247077326132b7+15934102+Ef2c65268fb7556e5008c1ed147e6cb62fc23b8b6@5441920c 18924490df2fe7c8bf536710b6fd6766+9572247+E7606e9814ef7776e16c3661693f0490c94195225@5441920c 640e94c562bf36ddeb0dd226029eb0b9+37063925+E6ef54638f818d4fe8c3dd65c8f3366c7e5d74607@5441920c 14cd1cc7e24f6f166bb26dcfe4143ed9+26279656+E0e62c48482369497792441dd4672849654fb0616@5441920c 606fb1c0c699c7ccd315576b02e692bf+21312663+E05ee10d5f8cc07fcc3cc665d0efe3d1b297cc615@5441920c b42c6410199f3c4b0e54cbf94ee88980+17966553+E6b79e87c11e7fe96d5d960fd875261711e66f06c@5441920c c286916e594c40952556b7857d67e889+20502272+E1b528c0bb53c020dcd3581d629845bc1c25316d0@5441920c e285ce576d5090b707f24dd699667c27+10454346+E6384d44e091f0b6379d8523d6defc6cb6975eedc@5441920c d530986cfed06e608fecb1191df8c11e+26240932+Ee061638e4f42024ef17e01b02e67383f15c14593@5441920c 3536b5d45d919cb866d1569d96f9e939+11477343+E19f085e4dbd379e83c9856956386bccb26495d6b@5441920c 956fd18076397dd9602e5c01ef76623c+16121702+E976fc641f109ceb585672eb795e964c6b8f2f509@5441920c ef33ee876d98646e6fdcb3867518b6cf+21665969+Ebc9b108234b28642df30c976460016486d27f2c1@5441920c 7426c8c56917966f5e7d867133c104c4+2106601+E63d93fe162433e6744e8bf1f63613d3994d46615@5441920c d12b745d9bb4de069124635140d94e66+22234696+E39d77b9c6db4180d930e44d7e77594b7328cb8e6@5441920c 7cc94de0506c1800b23456081e828694+17466445+E3ee18b1031435b6c714cd132d53324f3ec004ee0@5441920c 1984fed8feecb6697671f6c7629736c6+27353500+E69652dcf6edd66d6f7f223c87526ee683550ebed@5441920c f2ff43078422e101efb31546d513d917+7951115+E08c6d30fce60f953131ee9639397d6b9f361b6f3@5441920c 
386b40bcf914276c970f642f66521be1+10132647+E69d7529f917e1874c44c21e3c1d391261690268f@5441920c e09636e09cd7d8c32b6663e95678f4b4+24122390+E115e9506efd385e3c03b51d274b136cc283cdb61@5441920c 5dcf989f58765256e745395de2c16d69+21750606+Efb7615104f94c7b4bf48ed8ec84e6ce1f884632e@5441920c 686e5915cd9858003f6822546d6b6d4d+15546705+Ee8d6933f60c51411f136b86962dd7b30c27f466b@5441920c 01486366fe6d0482971666c98fc70766+30792695+E49c08c45d856d386f968485e4505e36fc823ec2e@5441920c bed62e9e6bb42eb6006f7065e6990e18+17604912+E9cb886387c324b05c6be038882bd29434cc49e7c@5441920c e2859d677d7c237974c872784e13e6d3+5164960+Ec46db70c565633fc267dd6d133be6bb5891b6c4c@5441920c e757577865ddb690336d4cecc386c3e6+17296739+E7d1dc238f71762ccf46766627eb215be08b3d5b3@5441920c e2d2f7dd057617592cf9e4317535b11e+3301773+Ecd6413cb8c4e5b795dcc5680693d623b91744107@5441920c 0622825df321b6b36886ce672217feb9+3676756+E46d666c70e222477b3337606dc209e5f6cde7625@5441920c 23566113cc3c2891f84df717b6b5ceb5+13263209+E6f1d139cd24c47f6b5bdbe5636d49d2140745175@5441920c dee0ecf366b0e469126252646cd78667+5712724+E67405ebe84168df10534466699ff60c899055389@5441920c ebe97bb12f1656d3176bb8ce05dcf62d+10516666+E79769813ddcc30681b29180676df6666c06b5164@5441920c e16b6468eb876f7582d666b4538796d4+10144603+E6681f4fef94f71787c6bdf60f73ffc31dcecc444@5441920c 908963806d665f6692de6131cb688e3d+15620599+E19d63801835710d6fe726dfd3002226d59d1e6c2@5441920c e55d7c07c1351d8d52db166be6b8c09b+26940326+Ef7468138dee02cf621b8869c9b9e50476fec05bd@5441920c d4c83966648b99e8dcf6c6494d8d763c+6160746+E24c413dd9f4c938f3234023714c5dde6dde24d2e@5441920c 530f167ef4dc42e18714b4d6fc79bed3+11144267+Ebb553f3bd952cf396b654261465b55bbcf814826@5441920c 16c2c511d3066e6032f465bee26cb26c+1431977+Eb914580870bb2b6bd01dfdb14bd83331470484f2@5441920c 0b71c2164058821f852fc4456876c7b6+2244756+E6c07eef683e14f34fe3f7f066e33c3333304e6d3@5441920c e65eb96f4c91605b01158d56374663dc+9266561+E0b5bc104933b16464fb9d3f15555b6eb321ff820@5441920c bf69063c2fc34526666771506f68bf5d+41245659+Eed1f87b918f56236ecff73bde704699ef23d9fe2@5441920c 6056068b9e9989c1c3260c3501865930+15344510+E04e7df1e225c11c83512b4029fbd2c018b256c45@5441920c 3e5b8e59d577b16b6e84786242521806+24932791+E74c4d89582d84340cdf5465fec29706076667669@5441920c 634d6b1d6146338e38344547047643bd+22442446+Ed776175e050fd858036380649d6482d49287d096@5441920c fb5b4283359e7e5e366c3606cd8894b6+7752724+E680f4054419d6fbe710970d65d33bcc466613cec@5441920c 9b2911bb7fd67f6cd4f64337664d831f+26224360+E20cf4b6c243f160fd6f86253cc6377cdf46873d6@5441920c 63f6692b0fcbe33870031f8687547dbb+17304639+E92ec56f25f729945fb30562bef77f6684645658b@5441920c d96d6835e084f2c1eff67c52f566f6cf+16113075+E454fc1c125573183c69bd5e5cbe26f4bd4412670@5441920c 1d2eb0963b1fdfc11f6ff534162728d6+22233411+E60b2eb26e8d067f3d7612bd3cd6fffc46de1fdd9@5441920c 694b1b84ebddbe61749d6c7744e2e2f3+5524922+Ed932398d61660693e39554b50b2212f8d4960971@5441920c 0b42c92d97c0877b04d33666f22509c5+9664262+E9ce27760d3e7e05b965366f712b5e5f349638f54@5441920c 2cfe498b5b41ff5586b3c18fbf175d68+9160746+Ed2db55d98c2efbef816f30972eefb7f366705618@5441920c 6607b727ee38d0151e22927e8432e2f3+7956752+E20e43f0628df779e08c742dc2651861e4644b161@5441920c d782d2966fe60ceb61760be1891ee909+5100756+E35634fe29d03c35d26f2dc6c03f60272f0674160@5441920c 606030c626d5e94c4062618c3f652b38+9937902+E5662749f1f2e19249023941e760f73fd6df66334@5441920c d62d663092ce6f4d50361f36c0232049+19546232+E6316b6c1b16bc310863d18e7e387e35e4e001d27@5441920c 748d7e6865bd463de20915f53be86056+6663394+E7609edc173c34c9e36112f163563762933d1d284@5441920c 
cd0b2e572966bee981f066b967c25558+11752445+E224d2e284600166f66b0dd65562f01e7f6bb495d@5441920c ce1806850f36d94b326506b6d9763515+19256022+Eb4b964d8cf18b298376b5b42e1745c925fb6b568@5441920c 3496386d8279d2519c237764d914f862+12954653+E94e21b2f6c32195f270ec96567f6135e4c9d9f7f@5441920c 2eb661f4b584753660883614c14650e2+23233415+E22c6fb1b5e3d821378772485590ccf287b46153c@5441920c b9465b26065de0b6fb1fe66660060fc3+10667296+E95b5997d369dc93fc0bb8646217870cd50110f4b@5441920c 70485d53084d944674663bbe07336639+16966664+E5438d9ef89f2512b426cf230e9ed03461e490566@5441920c d29c2195fe6226f7cbc596264b1ecb9f+31566677+E9107c836bd1436d9d8f06e0fe58f74c36619eec8@5441920c 170d605b14e135f717cded781b3659c4+26966370+E2e596e0187c64ccdcdbb7ee379d59139eb84fe7b@5441920c 0836cfb1122130ecf01d5d2bf06def42+10993650+E4076b5402f7e21f1639dd22370286d76b7c67565@5441920c 851e496848b92598c551313836610426+33045521+Edb0688f6969e275c687f66bfc3be0318e674b13f@5441920c 6e4834f41842034f423626cbc8c2684f+14150927+Ec802066b016ec6ce60661b2b46c10e1663b0915d@5441920c 90e67d4209cbf05e9df318e63f645b54+23576635+E3b614e8f064e641468dc25c8340edf12d10cfe33@5441920c d933cbcedefd194be06338cd661d666b+14665552+Ebeb1c374bd74c56625c164d2e9660134d3623069@5441920c 76922bc107807f84b39fe3c763def0d0+10410131+E62f638476bffb582fd56696696ffddc344d55417@5441920c 7f523f4e5b7fe74b758f084c68f4cc3d+14156634+Eeeebe9b69ffb2424771669467f0c53ee18323294@5441920c bff64b2d1466d691b7967513cbd13dfb+7576172+E95bb1660e199d76d1fcc3bf756844d334bcb5ffb@5441920c 5346366f8e228259192b1fd25fb03174+44109465+E0cc4fc81f3e00e2626ecd5990388e38de4758611@5441920c 23c9d6b46c17b44f615072641d7f1ce4+31254935+E3b51142315f65f48b8e9cf29299712b55469bd9d@5441920c 18c9077006641766e2b0fe36b698011b+5169067+E083cf08445fb6c789417280f85938d6dff9ce4c3@5441920c db7d18f27edc3c6ddfee633731c2be53+10366921+E08619bee42652e512c63090e464049ec58f5502f@5441920c 6407825d48318f5626e310333b4b968e+29052271+Ec14b855cc4525b3d31542216d7b03c74d301068f@5441920c b96e133b4557374be68806e19132e43f+17612627+E775c6d28d896678b02f419e1215405db04db1dbe@5441920c 61f26b19764e01221b6f589b46f2589f+7641759+E4ebf4463513e7ec8326e77eef8b443b7deb6fd30@5441920c 580506ccc464195d925e3bd2d37c2b89+13411716+E534f2d36ec36702153d2ffebc88bde6d16681314@5441920c 0ff594047bfc075755fb6f6d368d4446+17757245+E686ff1689039794f81c901ddceb5bf96b002f471@5441920c ce16005db1fdd8fb664184e4897ed848+26954567+Eb1925567366e0b994d07507992215736ef795c6d@5441920c 668b8606e229bf34b265b5889cc2555e+23246223+Ef8b4096de374b658d17c366314fb2b78d604055d@5441920c 606b63de08276f2fe96e97839f80c725+19074161+E8864426fc98765090568d788fb706bf66eb2025d@5441920c 6f8436bd2f31836f58cec1bb0ee05636+39449695+Ee8933dbb69c24d6d37c456817370dc907b5c95ce@5441920c ec05d19ce5eb8f336eb13e2557d63124+11696577+E964d26d1560036f27bf8776572362c03e4e9f7f6@5441920c 44385cb347d456b6c56885e8de160e6f+13249663+E46f5be980854e6bfc58c55668db28c4090cc627e@5441920c 706fc99f3855ffb94b6d81c5c62b069d+6706592+E1053643769f364b1dc5f766789b747f0bd383d4f@5441920c 6652574701024f5e2f18edc6fd036681+29162964+Ee1b80f61e7971c1f4b3e55260337cd33266c0f00@5441920c 14763654521f8d4f6bc427d3b52e1121+10264945+E69bf6eb86179653066b92c30d288d34e698fc996@5441920c 8d6f406ec665e80c3986fb2d0dc39ce9+24601643+Ec7326d895814e06dcd041c099e25f26cd4e3c214@5441920c cb32b5ed637e57de216603c266184249+5951761+E16690fdf9283ebfd35e13f67307432b23448fe63@5441920c 565e8136fe7ed38f038b4236b645375e+23795506+Ed68ece3056f828c9e5d7fde3f240c404d1c472f3@5441920c 710dd03c65252f2187e76e7666d7d120+3150007+E19fc4021fdd7040bef16d9760fcb94c312665319@5441920c 
9bfdf090c2776f258c39e549391c1612+23077469+E7637186506886b0e54b1e3e32cc578ec49c6f3b0@5441920c 7792c138d461748e9e5627e9b0c76c09+25966072+E82c28076635440613cd55d6993fecb03580f4cb6@5441920c e8f20b6349d80e1107ebee500169b8c4+20640325+Eefd81f4895e4ed54edb560bc5586d8df43456678@5441920c 4b61f2ce70111127b33f6249665c3d47+31996632+E388eb8561b2724c225c873e14421df59ecf09fc6@5441920c 661fbce63c16de3520d44e033134c6bb+43632512+E6ebc3f333f55fd0f69c7c5bee2687d1228933e8c@5441920c 5e6df4d8cbb16dd620f373369bc8c9d8+13731959+E16052995d0bc657b3759b020207ff0e3e41369b2@5441920c 24f521b431f2d770f1338700bc6c6917+12656172+E6e287eb610cc483fec0218d27632b69c546d01d0@5441920c ce0c0138e32619861616966d61be5915+34247127+Eebe1b981633c8728f6c69815fd3b88678266ee96@5441920c d870092429833d18585b6f4ec01dc640+13266016+E2d1df1258701d32d0d01f7613b88cb196f410262@5441920c d0728b8d923894b266361b90f06c047d+13161256+E8f4ec81d6944833c3dbc3cb1de240cdcd5f32ddc@5441920c 4c434f76f60d949088278498e5512652+29663052+E7bde3f3f63c9ddd47d466442e2c7116bdd26c9ce@5441920c 953b273715e8c9c1e89155300fc76183+30634366+Ecf3b53ed7d6bf5fb62e0861446615623596fe359@5441920c 432cd2317e2e713880e73660431c3648+6075493+E4b8e179f8c779951f5697e454dedc3b2d5dc0498@5441920c 3251f5bb90f3e37db864b31661297db2+21661204+E69bb7d26df5775651fc098e530697067ef65e343@5441920c 1c6168d1547d01601c45dc5485d6c8d0+33606107+Edf5663b3b04e139dd68bbe6960d6761d14e7d96f@5441920c 0482dc465426bd763b346e8474e3306e+32791910+E716d0c2d56e2b3ebe74922786f5526cd85650c05@5441920c cff6ce7521e98176cd89b1996cc6b2b4+19669112+E33760661c08ff206847f837f6629f2246eb24f90@5441920c 66b606bf24b864bc3e165cdec26cb2b6+4741605+E4175e96fec0c423932d88eb21ec0c63f077e9683@5441920c 63c8d322dc1998841b3b5461070969b5+25904705+Ebce6d95c6e12c00ed08efe4de69856ff194fbeb7@5441920c 4c672b0f4d22e5b6eb0fce791981b5c4+23619321+Ef803e6ce60d662931e60c1e7eb0e0307cf719639@5441920c 212c8b6286520d28024171b6316919f0+25423194+E15c722ef20ee1217c4d25e7be6382e10887c474c@5441920c e9477cfd645d0e20e85ddfbc827b65e2+9119290+Ebf9394269e1967cb3206c86d19f7806bbe48786c@5441920c 9cff6d241881d3fce55ed434be59b717+30796914+E7e404f9edddd288648e0b656bcc8c651e42eff34@5441920c 80666518360e529e9d92c201ff716b4b+19924674+Eb76685753d3cf6e3f358c407e48f038d7c351613@5441920c bef263464b2c50b53b2f6869099c0461+11135309+E6951358e80152e40712796312f6643ee63017b3e@5441920c fe8d5b36b6408eb475c4e1e446649058+10940177+Eb66e396c194c25e7e9fe16f92e25521db80e4036@5441920c 68d36b133c7dc81f5934d70c0617575c+23560116+E2cf474592b605bf861651e09cff2f62e166b351b@5441920c 6332b32b6617659b3840e701167c8222+14661122+E5627d019f688cddeb6b8960629d88973232dddff@5441920c 4e4e6b6b40d792e7c70362b47c968b2e+22609615+E4db8cb76e66db83f0806649bc2101046d4e6b254@5441920c 94668c2206155f7949de7c58f61404b7+15046616+E0f9495622572b5ff0cfc988436bbdc76b6bec4f9@5441920c d69777eeec1b5cb599d6614629b5142e+22166262+E27b789796f685c194502ce65578e5b7149f65b1c@5441920c 95edb266c0f8249c9fd662bb2b15454b+2056060+E8e86762f71e0637c7ff02fe61ce0e4e75f07286c@5441920c 6262cd49c44b278f3dc3c26626811601+521252+E23d8b5757618266f67ce07c658f9b85832c2d162@5441920c 00d93614557937f44de6f05f32c52790+32234144+E0f9f4506067b8b03b2c341f52315fef5bdc7848f@5441920c 3e815000b466f885958480d629962711+6441932+E9736c4dcd0db18be05035b497394832e06c46263@5441920c bde54d38f7b66830c212f9fd8215dcbb+10946699+E1be619426673d73606614825b965d48c72335766@5441920c 6240c463730c510e1d4c78899ce6d1fb+21772696+E480d296eff2c33873c7636eb723e8731c6959ce2@5441920c 2643b11d023f4f0f7614363664ed94e7+27069700+E0634917365618b24d91c37c3297140430ccb2556@5441920c 
b8cc10de6847ec2063bdb661f54906ed+6545313+E4490384364cc607681f6977b50863dedc73607c6@5441920c e20c9288b641899dbf4cebfb56ccd9c8+31767795+E44cc68804efc112c269c26c4369c67ee6c3193c9@5441920c 3b20e469d187234df0465b86666599df+23275612+Ee611c6930c20756cb200b2cde45057c242457cb1@5441920c 255f4954bcdfcbd1d4e0defd92985d29+29739564+E63b62f61cbe87116d56397b35bcdc3b91114db38@5441920c 5e64c3dc3fbb136403f47c18b06d9cfd+20035093+Eddc50bf137ee36fbd1f1fe0546533e97536926d5@5441920c 600fc94c672bde2472e2447effc449ee+20162106+E47b5116de964dff76b87bdefd81e56666f020f98@5441920c 057e9f3925259bf349d351cd75e01146+3767564+Ec46b4b19673ec8e14b697f77fb76c9822e51b100@5441920c 6d3c76220e433618c237ef6dbee0b20e+13561503+Eb1d89b38199393599fc613ed23e359eecf5880e6@5441920c d227c3f6355fedec0507675b7103e86d+7002557+E461eb64220cc189b14f845c1c696e7020c63e1fc@5441920c dcd0ed86431d171f935c2f86d6102166+23576165+E381238e16b036dc6df7614b6ed2f87c4324c8fd7@5441920c 2666135948e62048f269cf807ee5039d+6615671+E724406f1702c8b9d4b54865b7b197de08e68d057@5441920c 0bf0328bf9c3e9ccc6c386febe043cb7+24662143+E11355cc83b85db6d6d743f608c9db4e3777378e7@5441920c 186647736b93302dd61610f424c8b366+4697534+E40386c60866141c75cc84c3ec39772920b7e0196@5441920c e4ce7d1409ee024c1646e2bd9116d96f+9636940+E88d6ce1c9b866c666e86336e9d406b678efe9802@5441920c 463bb99ff72e344514e00683836dd4c4+16496116+E16301e6dc3b6d434214ebe82fed7333d2d201661@5441920c 3c8e07176c59686e683938069424ff92+9016631+Ef746d69e963c8380dc3b93f3cece96f202893f9f@5441920c e35874b80f896ef662c8ef10509d8612+17929166+E60132dcbf4fc5851d3c387420d13255457dfe9cb@5441920c 0f6973694123d553842f312ee1e7f9e0+11594711+E6ec26bffb5c736d335bd77d57d8c8e2f1be866dd@5441920c 9000642526f472c77960bdf873fd01c1+20306666+E8d23013d5065109c13fffdf723bd66fbb608b949@5441920c 7b237e32669f8636b96ddc6bd4bf63e7+9030401+E3c2be036b14dc4d7ebbb5e86e08eb8075cccb8e5@5441920c 36b36df49e07763417f98881786d8559+14696627+E339168621266131471b452731dc9f62f73bf056d@5441920c dc5694ddff5394218744de5398156668+10006611+Efef3e681202d6b2c713e659ecdd26df28e73867b@5441920c 06f25d6f4944c65bff12b41f65b9ef6e+15979710+Ef66d40d2473b250e733bce068b416ce526cb3f53@5441920c 3e333726fb6849b943b4484c1861599b+10116166+E3e7426053bff164299d4d397165b0fd6220b8dec@5441920c 12de047e2ccc029d5e058db8fc27468c+17606797+Ee906d267962f6f0836c36e376deec2676189060b@5441920c 14beb6bccd174c0d46c298320e76bdcb+23227207+E234249e7c4f2bc3e62fd6f6b0443f0cc543f1147@5441920c d18bb654f006323febde3039f09553bf+10556009+E8817cf8fe57546d649f5d4505d43f7e3f179e3fc@5441920c 83f045513b4f3f4021f65e845bb6ddb5+15162933+Ec1dd1156d1363c11f887cfc8ed001bf86835b2c5@5441920c 648336f5f4936526b645d64725621826+21307590+E6de931671f89b44940b760352e2d0ee530442267@5441920c 08f67de336c072c6d662b6cf2c583861+32759712+E8d66d86d62c0cb53f7b7b45d2f5b6e363622e2f1@5441920c c80193521c5914f978569b404b5d641c+16676434+Ee05b6346211c4f606521ce55234127eb763d5bf9@5441920c 436322cbbf425f6618b10c6d369ede1f+15100163+E60064882f33e83e1c12473c20957cf176fb53e73@5441920c 0bc877878b00f4dc96e0b93be374d5c7+10434017+E1e5016dfe71330e966d163b325c738b126533ff6@5441920c e6b152369e78d29bf8251e0995bcbc17+7765476+E2e56fe08f21e08d1693903bd29722e3f679bb7e2@5441920c 4e6f2124dd087175cf26b53d70f2f816+10542610+E481819516d33f267c0811e007365904f907644e1@5441920c 61664890644031d2e269f642f2669466+6767004+E3ccc64d7245c916b473b8065418527f5def1209d@5441920c 358cf83e673e0b5c9b9ef4316367b369+37253134+Ed1329475fd3b9680fef7648dc3cd0b086b4fdbf7@5441920c 189308e56578d296d153b6d7cfc8ffd6+17163652+Ef2342c38299bd6c8033cd9066d94f9f0966216cc@5441920c 
480cc0e63c7d3c15621bd664fd67b509+25746921+E0c75c64e567f7cc675633894b80d2c1f55534c9f@5441920c 6967c4f89e8b866d18e8015d7786fe54+17296262+E6ccb6666326b056be3b82ce978b61656c55e81b8@5441920c cb619e0608d05d333ee66dd60133c26b+19044617+E582576631693e5f9f6463c79285c6399f10dfebd@5441920c 5dbcff65061e1859129467d669376e57+9216326+E66b33696790ee67567724fd77bb6f081ceb0b1b6@5441920c b5cd4c03fd56ce74f47534305f41966d+7709647+E369d45c56d9120b7f54c926cd465b6508c061168@5441920c 021c56603b593fe7b0b9341d3e69dbd5+9992471+E5904c358669ebf85d9672d96b1f05562be4cc1fd@5441920c 25e6d8e21638046d71fdd9236b5c3bbc+16105743+E0b235dd4d6ecf49ee503194de09e83995bbb8b37@5441920c 1035df4f548660343340651661d54861+23723049+E860501658363d21944c46d2861dcf27375cdbe2e@5441920c 66c958453bc3c0536053228807554242+26740659+E6bf768f4c64b76980e71f3986f653dc17dff3fb4@5441920c ee27ffe36861e4d610769c1ef36e81bf+39101465+E07f66cf945d7636bbcf48500ed84f6fede43066c@5441920c 366f8f5b1b4ecd36c3d4bc6e28454223+13179037+E0ed961bc4edefbdd47c9659b746fc485361b3866@5441920c 4545c0483335c548e5454620e8087531+23659026+E16019b3165d481b46fedf5506606dce182507e93@5441920c 27df881c9c906dc3fd04c2ff68d7f69b+6320674+E6e2799fed96fb6f5f8ebf32e18680b691d9528df@5441920c 28b9b320074163fc02b034e862246754+22624620+Ec26668860c2e9b956b8bdb06b69536b65f34d974@5441920c 5968de9783406e3cf1585824f3068095+19209706+Edc1ec66fc64dce19c967735840c19791e0c7b9d1@5441920c dd6b0299fe83e269b456540618bd4837+6364513+Ee18b3f6bfdc9e1ff8f927e61e1d3c9990bf259e4@5441920c 56cfe9b7296366840fbf3c9d0cb2bdb9+4766253+E5296479852716963f5749f7866dd919322e284c8@5441920c 368b66c5265db5c5613316566f7f9652+35016116+E8d446e66889974311f73c0b7cd682ed4dc4163be@5441920c cddb3f3bbf9d3c545fd68d76983b45bf+36549974+E6f26c14bf26fc4c57903f698475076cbdcdf1f07@5441920c 7cef19426bed80ff83f9d900c8178667+20373460+E5c6cb47d34cc6495fd887903d6d6666d9505d761@5441920c 8f32865c19f724438e2d9b648c6640ee+29919661+E16c07cb08b67feb34fb9c76b36206644f898976e@5441920c 09043bdd449b7946c4fec913e4217364+13493460+Eeefc5c2c41ceb676feb866380ef68062579196c6@5441920c b5d1be0d12754de15166479f927dd02f+16466490+E28ec14599eb0db52480483f68be739f4ec638686@5441920c f483fb8e54f6e1763799e9df42f08950+6660416+E962653dc7f63c1f1fbb6856633f1c2b857de4cf1@5441920c d4dbcb0d851764f4f94e4d62996d7261+7796021+E91f5483255146fb4f1eb66c2b797b6e924b8b108@5441920c 6691b09543c044060441936ee10221f5+14575657+E09fb9c6678805f0e7b29e290177f1d2f3916f0f8@5441920c b19b9c3869e39becd78c8053ff63c6fe+5634479+Ee35b3b397624685016de4586c9d96f57fec9fb4e@5441920c 94208330d58de63b7b603355845e2e9f+29716269+E0d811ef3d936670d06e74b9fe6fec5c86ed5004c@5441920c f024f736cff9618312339bd9847f08f0+7363995+E4b972de47eeff1038c3be68b28c652c6750cf1c1@5441920c 8b66166d236682687322174e707f5bf1+19715177+Effe9b54cf6b0b4156cc78334b71ed29cefe2fbce@5441920c 687925fb3f001f6eb17e262f7f3bf6c9+11922350+E6e45db64c158b168d9866928667ed8c5e400dc37@5441920c 811bc86929c6cbe690e68e712f81df76+34696356+E1d9dbd19b5b1f6d16697890d4136646e0b250567@5441920c f88343667b26669cebdb91160bde17e6+33645974+Ef0e4ccb520cdd1fd51f4008b596e370b8920fc63@5441920c b9e2c0b204645f0b5ee13776052de068+35567370+E78b5f0cc1d71b91ee13763613c715b5c0d946874@5441920c 516603449b0e68dbe1f10916626c66cf+15611642+E35b47868610850f2b866fbc566936872708ec8d0@5441920c 5ee193db01448f87063d7b854d07986c+27146461+E8c3b7df08f26c457d654c4d90c956b75d3856660@5441920c 928de1604d0f709c62e23fb2f6c1d3f6+26736354+Ec954bcb4e6951f3fd82e89f4d675d0d6655b5ff5@5441920c 17c99db9e4d850c53408ff6593bf4e6d+12053649+E6418566222886b6e5003c6804f92327c66059e3e@5441920c 
b28d67d5c60e0165d639695ccce06c60+45621670+E39f873b0cc620266d04f5b32482c68e3ff3fc15c@5441920c f7d21c881b4ecdc6105befd96983d442+10457142+Eb98210f27687e94d9f8921502c56bfd5b0606e8f@5441920c 0bf6beeb6097903930dbfb6f397363f7+27163032+Ec0d77f7bc0182f422df918f597e01f9e6b7715be@5441920c e7170d8075f74f96bb230515214907c2+6901657+E9e0b89feb40e2267f5d94df2d1993ec640268b53@5441920c 0e59c8753f30cc7cc9fd19f8e11dc5f6+13650247+E0b663b68366d8df921269fb04bb7f72770352066@5441920c 29f864d900551cf85dc33c850f49061f+23602906+Ed6f0120d02dd26216c5510c1e46bd109bebf6681@5441920c c8f4528bd47bddb5b26e4006d9cc89f0+33300672+Ef1b055439022dcb8e5b60721226028cf60b9660e@5441920c b84644525493d6b827f166d0edb616de+14622270+E56972e6be0dfd68fc0362332ce43cdc55f9c30be@5441920c 04f6c936ef65edd854c2105b246c7d0b+20760162+E8410cc9b133b6082efe33c6f42996d304708984d@5441920c ce1c666fc933026cfcf39c0221987462+29757577+E8b297663f7f1b63191103790dd7060374535f380@5441920c d6369df6628f2d7c48ecc5726e544004+9439391+E4d6b43d61290d3383d50668b68ed1b1d3b86cb9e@5441920c 3092d05f3b8f55ec765c8c95b6b40622+23690991+E269654b67d14c41bdd9920303500003f0e930cd8@5441920c 50b159ef9c1213116d947c92285d4983+6504376+E8e941656effc51485f2f6419e6f76d6b0619cd65@5441920c c8543b693d01c8fe6cb3728ddcdbdb22+31429424+E96e17b3cf08bb6494f841556d6037b6df5cb4842@5441920c 4846c99e6bb5b179de4fd46edf46e31b+20667266+E89858f30656fe456b6b4c2271fb1f5fd98b4e9dd@5441920c df48275087655f67867539949f52cc01+21542259+E6d2796f768f4621eb6b7b74c3322d1bd2b3d981c@5441920c e07398c19366fc4f876b23bf79049f1f+13635330+E24336044e23d35569f51f466c47b1c0e3090666d@5441920c d27072861c18d3d61663bb359e61d1e4+16645443+E57c58c03f56666fded7e95596e6017f6458f5e8e@5441920c 868b525fc41c185415fee9ede35c9b7f+33763672+Ed656e64e7f08cd2363648f29446f84f0013f3662@5441920c 634d4d1116c85199b4c8837667126628+43935944+E51dc654d2602ee26618e60c8842112225ec2bf48@5441920c 6e8cec6b84b340b746f63f7368339430+26173344+E7b16866c76fd11f50f6768172453cbe3c83385b6@5441920c 1c7746f9733e0ece7923ed3537dd2966+17960379+E2c7f5850549c1662d20c09e330fb173c243f4f47@5441920c 331557d6b124e16eb4de307655c40882+23669100+Ed8c6c496bef0c4fd0866922cf4d7762b2b9390e6@5441920c e200e83c9304eec0022b7521c3d8f256+21475297+Ee99dd49f41fec9566f6fb75301236673737db243@5441920c 9c76d58e16d65c05325d12318189b06d+16293653+E862e595b7100806e3036dd94df563646226bd766@5441920c 515e08e4b23d320644267cb4946d5e3e+1514377+E67d27dec368dd978e2fc48ee90f59711e5dbc2e0@5441920c 1d943f36d01f35f0f1bf9663b506e924+6509364+E59408bb9f6fd9f3c537000dd213e628f656ef976@5441920c 6d234083cee3e2efe81455d863cc5dc4+43017690+E6b1e7c6b5c44860e1fecbf135b7e9f662801cce6@5441920c d1922cc8dc266b6f6eb0c95cd8b2f417+20665117+E962b126b99c4c8cc216450937bc8c1dbd8e2d2dc@5441920c c3f2e5d04b3346545c2584cbcc9969f2+1591467+E6d7e5836b23ec1ff6336d62f6037e9d3cb92693d@5441920c 867de2fc66953e25bf15c61378ebb781+16146759+Ed18d2b753e63678546bce0bb1196b4ccc23207e6@5441920c d8c6f838e3d60b0f1f7dd7e9bc896cc2+6656200+E2db7f97705c6c32e14562c2776776bc80fc97d63@5441920c 0cdb1913f6fb98e1680d101dec9c07cc+20707621+E9178d6b652d68bc7f61dbbfc942673d523c7d86e@5441920c 413e0b5537cb3d5ce03f9e9cec4f62c6+9656450+E7f00c2344edbd7683c37d786c0c7cdb9168d1cec@5441920c f8083c6ec29669d7ee607223e3ed584d+16425621+E8083b2db35f09487c86c03c0165716144f68112d@5441920c d57b00fff01f31e839921b4109151f30+23196332+E663e94940799968e43e632cb56d75fee8b418677@5441920c c9d80bdb4b75c42ef1154bc13e11021e+7300691+E8681d5461b3d984ed09eed8fb41917b9e7bcbe17@5441920c e4d78db5894943cd403b6b3147c7321f+49692537+E12239bf4e933dfb24292001dcdb3b074969ded00@5441920c 
4902623e0f182b4f31fbbf6c1b917ec6+30721960+Ec98bb316ded2bdc765967b66218227f45e4ecdcf@5441920c 5f0238db354266526666793b0b228312+23666340+E0b679d71662d6387682430b11bbbc47737356e93@5441920c 8c0db777b2b8b95785766bc1b47733b6+9229611+E5869e889bf157f2612f20b6d765bdee03476e9c0@5441920c b53c6b59d74ec6dd58b56152519274e5+27421753+E42b4c33532ff2638983b21548b50f8d77b40cef9@5441920c 65bf786bf68c762e3fe62c2357896c7d+9699436+E8f2b22716ef79f09748948eec2c610118f7576eb@5441920c 894c39bec02f51f622e4b1bf202be8cd+6406659+E6b56fe4f277d784ce1d3d3c279763690f19d576b@5441920c 32dbd624968d15ccf65b3d26bcf3e0bb+16694996+E38435585ce3658c50de109653661fd661968fd2e@5441920c 5218162dc863d88d27c8088b7fe0db3e+11026527+E70b1e8d389f0b1b1e635bd5f0219635976f53586@5441920c 2664546541d516425ef812bd00e4e549+32909679+E56d0d0f602d8d2e240dc6ccc4d69d9353030ce9b@5441920c 24400617c269c4ddc9bef64256865245+30963436+E7eb3c301800f63d66ed0755b1858ebf488464166@5441920c 80e8650bf6f2101d6526e85cbf1669c1+17266919+Ec9e10be5668d905fcc4fed6e5856281c4e2d64b5@5441920c 659e40465fc1d4d93b9596d6902258b6+26996009+Ee6ebb57415fd1b8b668e276f34f9b5b891d3f526@5441920c 940b65f21799e622371662b8c543f280+16704607+E2eb498f04302367895cb3ec665eb7941bc62dd82@5441920c e5bdddf3051f3e66608008750c46f2d7+26045175+E6661b5d76f3253e1044d6b266174b6d27fd7b65b@5441920c c4810116de72ffbf10295ed9c07e7685+27916575+E6ef1302d90884fbf836712f1fd74d61f612f536e@5441920c 64fc98e6841b185fc0d82fb136b663f1+15050054+Ef795fd6e80365f9ff17767c6231327463433e9bd@5441920c 6456c7ef22d529c812b5622668f1f84d+15603577+E5577c12c3563684bb5600b4e9be014dec6b06c33@5441920c 687405d9d700bc374b30029cf8d4be59+27716393+Eb0b3748363bb867ddd6dc8c3c8c08105741864d4@5441920c d284d714348344645242506366129f16+22019757+Eee039cdcd2e2630126ed862ff4e697bb1b93637d@5441920c 69b48566c2663ef36086d9db2f990136+45797643+E1c80842642f746545dd1405229f35c3b3dc6b19d@5441920c e32e44b17e16fc2e2113466ff867e26c+22514360+E56d768865cbdc8b4c3c56965ed282e1fee305906@5441920c c8b2896f824744f6569b88fefdd216ce+19253951+E322903880e688b62d3bc146765c5c1750e43f45f@5441920c eb2576903cced0150e92eb028603f228+21229495+Ec266378d59606199c6e5294f1d400b196904859f@5441920c c9d128d476e5c463452d08d4ce0efe6f+17559372+E465f5926d3711b9b1dc8266666fb7ced402c9c78@5441920c 565cd1f686914644b63dfeb72e9d041f+25526673+E49e77665901ddf4f98fb5d61de73edd66b43fdcb@5441920c f293c29e91b111e9330209f3d94dec55+7096070+E3ef2322ce8517189616069206c266c66c16ce39c@5441920c c243edbd633d9795c9008457e7f64c24+23411651+E0fd1066e77be25015675fbf8e338364bd404d16e@5441920c 66959d9139f6de12ec00f9dd486fb30c+26119054+E04bd73dc60b645f68239df27e8707d342cc5be4f@5441920c 6b633b3567fcf12293e447f2f535f68e+23290349+E8e76c6686bbc756c2b966ed43e9fe1dd4f9bbbc8@5441920c 8c6b77dd767ff6e4e4bcb5645db9c267+11654057+Ec5e46e815801c11f9d0b13200539d5b8c05c6b90@5441920c 3377c8e76b7eb9f04d30063e468fe4e1+6496414+Eecd1131c353c78259036e2c36205d71e695ef6b5@5441920c 4026556790fdb1739541e13e97c58e9d+7220726+E01078c064ef4477876ee0d730ccb97c695f72d9b@5441920c 265fc5b76cbd9cbb3fb0ce49dd4ee2b6+15666346+E831444667901b15497b4b1850fb5df76f5098681@5441920c d6f158bdeec1c3cbe0df75466d5e0691+20565771+E66517714fb6121c25260c6d766080d107136b199@5441920c c99404c36f55be9285d6ce7f6c398728+29696076+E46bde61dd962c7659b6bf58c5f24f3c4b0295fc4@5441920c 3162d76defe7c44544707f52d67b4770+29661960+E88d05cc566b526ef6cb76626bd386ed468eccddf@5441920c ebcf6967d9e4c232e2876786f86e3cd8+5667660+E21dd3f48b8116e7824b2fd342e7d1300663ee33f@5441920c 7507d4647d3bf526be5e64f86fc24740+21602934+E6729c2617d186d5d1f828bf0126c67ce3c67534e@5441920c 
64745b39622be74bc8c6bf8d566f7f64+6125690+E9e94137358c821d701932363b415c35511b41009@5441920c 8ed366614cf657d3d654f181b698d28d+345265+Ef9f24b9b4b39e13c0859033f00ded590e89e9eb5@5441920c 10b4f10368dd17556465f21dc66c9d62+10003929+E4b200cf2ee068279d431940f687d300e4741c76d@5441920c 748847d8c44cc3c6ec068161d13d8269+6997133+E117d6fb869e6138b8c9cef842f2dc2f60b9b8cb3@5441920c 332d07334b64462499c6fd664e3ce8e4+550060+Ef1062eb63d03656f3368fb088c6e152667662c00@5441920c 386e6796b1756f14b906d6496667666b+35514556+E398d66e1be91928eb0f6725e40096f34c1566bc8@5441920c f699169634840b6c5c22032f04662885+30770003+E23066b0c9607b5c0c848716251bccfff57f57644@5441920c e3328568c69cb7833368c53660bf7778+41661599+E934b4b27473595d61ee61381b912dd1b15f69b3e@5441920c b68239278c162b35f59dd11b26c7bedb+5032660+E13f6f6039e1954d72bcfe87cf714144b71f3c9ec@5441920c 63b57755c3cd8069e5e2626dee6c93b7+15730167+Ee3c7d8676fc6461df3dd9b78c30931b29c569485@5441920c 18dc31ccb81062638626639d1c7bdf60+26696961+E61e3bf0e745b644443647e287252e84061f20838@5441920c 37ff6375ed894106e4365c5c6416d067+33670066+E6d626346726f50f7f186c6602f6cb1166dd7506f@5441920c 11365f54371b62654524fc65e34ee36f+5763371+E315283978f461c40e5641556f19e630c2256046b@5441920c d830c420616b1e0ceecd1361e07575fc+15201350+E8260833cce68c73846008c810d7e910821f6fffd@5441920c 93f16b759b1cb023344dcf15cc7b199c+26316506+E9fe3d33fc9c66091ddfd6eedf752442bb988254f@5441920c c23706e6fed66ded615945667300d388+47367411+E8b073cc5777d624909c6bd3e65b61ed303d5423e@5441920c e24b474d105f11b6573565fe54862860+19419515+E4cb466d568e663b386014080e11bdd9e22db32b6@5441920c 3632b61819853b163036e1f402638c44+1079105+E8db9e3652cc1780bebf800344e250feb52ff1f11@5441920c 22c064b8de458f72fb77e43f73cf3123+40594325+E672dbd4b2b496ef28b64ed6910948c68b607e491@5441920c 3c5710662d8d0e6f12b2071426d48644+5163249+Ec6517c5bd09f4b081dd95ebee4bd869f89b1441c@5441920c c3f52592b3f97416b23f689536e37693+17064012+Ecbcee67800458b6df98ff689187446966d821f39@5441920c 8456c6cfb316bf82c7934d3ced09b5b7+4703673+E916ece3d7117b6dce14e2e1621566cbd7766de3e@5441920c cbb9f866c562655729c4bf5f67666e46+20937649+E2d2396b6587f6f4ebf76295728070c835d55bfcb@5441920c 7eb39636607c3c8b726d67e928d0c950+19766577+E16676995d471f8d36806e8065063e9144e612d6c@5441920c 6873797eedef2fcd226686765b83cd84+16520799+E4de63c7082f1d2ccfc77457150423562d9346b62@5441920c 16c2450d3153b864de69e362c16ec6d4+20064956+E3b681fe29ee6c47e19ed1bc08947576d38c1b1c6@5441920c ce951e19024eb6ef606629527d03657f+14563555+Ebb1556509137131bec2c637946440d2e39f2dfeb@5441920c 968f09f24f240b0ee3de2615905d284c+17666235+Ec08966e756c198656d867620466617cd3d1021b7@5441920c 01264be9f7569fc6d446c6658c68c7ff+16200150+Efb257061168310d1334e51db7d064d13f053b7db@5441920c 3354d97c0c7368349cb167d463ffff3c+15699664+Ecdfe2e0b6c86dc12f8ed0c035056143dfdd16bc7@5441920c 685b0eb9860422e6c308197912f9cf89+26566964+Ee1c91b8b47f186dee67bc0bfc8581487f1734841@5441920c 10d6fde917d2f67974c342f2bdb99810+16666607+Ed9d736c3598219d786bccdc4480ccb6574fe65d4@5441920c 7436e9b3dc58d16c4f7fec9558e2e3f4+34299516+E84f366efbc7f687c37bd4006c80bf867606bbc2d@5441920c 0c7dc3f9be85bf7f02b6986369e15396+5167494+E3491f6f0157be9976e8f52f48f427068efef2041@5441920c 853860b5e3d7d68db870d64887eee036+9550459+E22296006714611dbf6ff2100006d14f7ee49274b@5441920c f4b3b1b8c22d36c1b2efdd626c3f7353+9425652+E502bd665962785f678f6e33e9b79ff5c09dbb892@5441920c 46dd6e718e7bc94962d460890d532d46+52257569+Ebe11c76489465f24695550366c829b11679d9f4c@5441920c b72f5e0756c4b5fd3ebdd71812f3ee56+6929925+E242c9f670807672f3b9cc681b47140529f436874@5441920c 
ed7f65db39984d581c595cd0e1e9d056+17556391+E8486351c5ec074e0b844c186d66fe701e44e3763@5441920c 63006056e077e0dc7716bf3425010ecc+14247713+E664b6f21cf6debc0095276164d3091f20b752597@5441920c d5116b69973d889d6f298f4738deb498+11066306+E3b4646b51c989f65567426664efde6e6c341f66c@5441920c 13e923c021e62ee066ef759d74e32d92+32067066+E3effb2cb94884161514b9cef413cc81b178be806@5441920c 65078f352fbbd13b8b56ebd0defb5cc1+10666222+E68f108063ebebded3649026b960f55f646c9b3f4@5441920c 3d4be63f60347bb56626be3969de967b+16626376+Ed8f748f6f073e373f71126f1c984815b26607dc3@5441920c f90b18dee1e00c275c2e238eb0393064+7956919+Ebd4f603319ed5e1f01c3e5875391688de2627899@5441920c b203671ec56f6d0bd72ff3f8415091e5+16713509+E5e929bb716e7dd51eef64530b23257d64dd06d64@5441920c 73b1ec2c6b3358b8190bf6c23e4569b0+16935900+E66185731649cf69f5d192b084b03dbef866ded63@5441920c 18697cdc7111d7dcf188dc222dc236d8+261032+E4b9dd594df44bdf4eb8850bb2f7dd1154b2fc5c8@5441920c 466b81f8768877dbc09fced3669fe11b+4269160+Ed51deef9b87c6c468b8cee2f1c7354f15117df62@5441920c 56719bc3f4db387ee926e85f9c017bb2+22617045+E182150854d000ed3316429530534337731b1c888@5441920c b6560f6d5e974464461f5d996cc16160+6953071+E26ddf53265f8146ed70f620d46f56667fb6e6411@5441920c 61280c9751d006c822044302870516f6+26475163+Ebe9b8b46c367107632bdf064fb80566ec8175e10@5441920c cd36ebee7f20ebb43dbe61351c9e33d8+21260557+E84b512df1b769c965f796560616566d36ed612d9@5441920c eeb7dd91c17fe167d870676648891ee1+47650592+Ed857b3e69858537d67766568e6e43d8b6487108c@5441920c 62ed43cd619e5307f96ec7294634ff81+9264520+Ef6ebc37b8877018c46c43f31b322f46b8676096f@5441920c 637d2d6c28466b2fbcc3596e4e48e925+15247646+E69510297f56571313d71633767be496d6ee5bce7@5441920c 1955419f9b01bd0e663edc61ef23fb44+6560616+E8eb1266643ff694fb8b2b2062469736302211e84@5441920c ed3b97847e816871458df4097897f666+26610427+E91d463cbc8639e2982d83e5ef6d6676130112c29@5441920c cc560418ec87624942f357bd6e349f11+27671122+E6326b03674bd7db8bd951d008d8e8617c64b959b@5441920c 346d8222e5e662b9d52d535e6354b571+3665630+E971cd85296175789601c66de54420bc0b04e58dc@5441920c 974beb90b949dc76258e7d73b6505e86+14940321+Ed73ffbc16e630c20b681536c7c6446c6fb6253ef@5441920c 6f88628c91767558e376f10d6eefb559+12957633+Eee6b1166f4c4fd01b9b39d84f85fc8bc68c7fb52@5441920c 50016d6fe13d9dc340cc27b6c20d6040+36096753+E6027b3b3d25bcb41de50469c6d8f6576613e6cf7@5441920c 66c0d207c66e6fbc509f52b8bf20b664+14674016+E206b24966155d3ce169076f32c91ef17c3bf7c85@5441920c 467e1966e64f17f6c68be65561c1dd6d+19901201+E8f59b255569d94803750e9c98e29c335218bde60@5441920c 3669c6c63e8ec00fde480790eff80647+14314479+E857690ec233c9c4b6436ef04590e21ceb26e7606@5441920c f924d760468f7dd3d28940f57f77193c+17691663+Ef8d9fbb6446528ff1c84bfbd61bfb4e2e9074fcf@5441920c 437442184168c16035eee82204cdf366+10632652+E715f84e7ff6f6d9d6b456d8854ec6b78403205f9@5441920c 4b2ebfccf47699f59192894db210d37e+5606647+Eb16336fe5036868c36db8cfbe0be660c4611676e@5441920c 9bd8ccfe078b86dc475656911667dc24+11677064+E1245c040830dc83b3e8ff5648296ff1e0bec36e7@5441920c 628384646666c65c0c67f9e671e67262+29615252+E3efcec0c36df19663d3c7240634740feb051d6cb@5441920c 04676d96e3b5dcf5e36f633c124b366c+24913006+E6461330125be80553808e043915b51c31567be17@5441920c 2e5cc96bf6d6c81674364bf74534d96d+35077014+E408515ceee93c3781b012517294560695b2d3ff8@5441920c 625d6dcc95bf40d68d42bc4f0d8c8135+23967236+Ef8f0869bdbfd658032d59d02692f6b4671d67b16@5441920c f7f176fd25d26d69057e4259f7164280+23454742+Eb05113506e03958136f1e77b659c805e481346b6@5441920c e0c006f2dfd4542e2b6b9d66c065975c+12476502+E6e617df6b90f7f1633b960cb710e5d639f507684@5441920c 
65476f45e197d0cb38bcb0e5c3eb88c6+21624407+E75496e0662883f622ceb1166426172b11f066049@5441920c 46d6d4e5356fff3df952c006c60e605b+14556946+E2478fe7b5334c38c666519e26085f8f879f2e5ff@5441920c c06b49d30b4c58d6c407d0f01d8c9134+23503963+E6f100f01e53944593d14668b674e766eff14d626@5441920c 9ee892068c2c07664f13e519e2356f65+23959972+E5e5566d7092060b6e573d9b6058569967936f8f1@5441920c 8063f4732047fd6290456863c75355f8+13023330+Ef5e6fd0bd4d9e3e619769122400e6699ef42ffb6@5441920c 6133e2fd32713cd037fee9ee60193360+4304022+Eb9181ee3eb8d773d3929b1e58f605ef46158b668@5441920c 06cb6265f8ec862ee6c405b9c5185ff5+32133100+E2d828480e29d35725418721e6d969066fe6310c3@5441920c f9ed6b5c26c126de295bf1232ccedbdb+11110146+E8c90f9ddd3b6cd1620f48e34b7490534c43f7e74@5441920c 6bb1e5ccf8c4ef2583efe96bccf70491+17322291+E196662fc86919b65b4d92890cb6d758510fe0d6b@5441920c 32543c512ede456e6d526780b88b6b15+3523440+E4c9067c126d27c21558d66b676b9050c020feccd@5441920c 3884892318510d6e5e52e6194686d19d+6666009+Ebdb05b4e5c10f28d5507ce38c81c268cd3457084@5441920c 42bf30ee736e0e254b04e9e6913e06be+21279449+E1b145961d7c6637bfb1916eb42f503b4680636d8@5441920c 05f010fe0e3687f66986159dd1916699+2064644+Ed52318d3e26290f188266514dbd9602e779bb4d3@5441920c 662c56b8642e26265093b826e0689288+14665421+E36cdfe7b5656d9b7158e8ee7883886d1d34d52f8@5441920c 6789d9df635d40dbdb42cf201796619c+31742006+E496c7611143e3960f1c1c6c82fb16d8620dc7cd3@5441920c dcfe9fd63c4f21c8eedcdf27d60477bb+3139055+E91e6d07fccb75cc809bf6d5bf06bb2585ff0e8eb@5441920c f69eb3f93d80bf4f7221794964081e46+20025220+Ee596640d0963153b29b2edf074fd8c431c4b3f90@5441920c 967d6d4ed233c610896fc1d98c3b28b0+10164650+E81e99e39e3557b64076e433547541397b1478685@5441920c 9039b674c5217c1c2cd912f6e5028439+22530516+Ebb526c19b663ce48e1c6016634928684b7374b63@5441920c 14d6d4915152e2ce983496774fcde566+13062626+E22f59b087fbbe5eb5de5f7f66176b6bdcecb4940@5441920c e7060435500d777946081cd8df43c78b+17612660+Ec32fdd296301d5673c5415ef79c25e7fcdf1dfde@5441920c 9e31b4720c6ed8d60c4757065491f965+23191965+E67b454334b6140627c3fe06fdbe6028499970f63@5441920c d26d94682c04132c5c42709463ccc26e+9532662+Eb2cc826866265f6b9d72606fd6f5b250c61e9c7e@5441920c 4077d037e9c6096d6388e967236fc9e3+23463610+E93c7660c4f0defb357f973e15663e50b016ed35c@5441920c 939efcf9d66d076f1f52dbee0b46d886+11316441+E193158f0e66c8369d87e5561c720f355ee77c987@5441920c 78edd8c79e4911de2266411947056c8d+33406606+E632b24bd669f2c39ef56540fdc4fd90f5460303c@5441920c 2ff0eb23186b6db3c3f296363de5180c+29555310+E0139db36506e657b6995fd4771019d393f82885c@5441920c 5b0fe6d6604951cd5f6c78f66f359c66+13315726+E295b6db03207f03143e00edc9964739cffc195ce@5441920c 22d56132b728471e2712561b5e683548+24175332+E426de3f93b2bcbde645b57970b0663c1b7fb665f@5441920c f5092e3541b32c9b8553c18fc75deb59+30526779+E5f26e773f219367c550c67c6902b7e929d65e7d3@5441920c d43ec61d1ff5e63b9076626671b0c038+22764166+E915f309339169f6e23c4336240c6e592e08313e6@5441920c e6f4908ec7ec6060c8d466beb076d87d+6501650+Ed2502bebd746dc46382c87cf196c4bb468939782@5441920c 3b0d1c2743570c67ef61cc5940e60bff+30459907+Ef4258964867743bcb19f8ec64e66d018e865f452@5441920c fd47e84763e40011d00b23eb19cfe0d6+7937153+Edfc4e6bf57c0f1df165661393f86b1d355bd4d44@5441920c 34987c6857bce32f07bf4ee618772df9+17300095+Ed03738e8fdf1041662244f4270066862ff1fb197@5441920c b5356f66d692218e690bb94561c047fd+25176211+E0643cebe06c4ebe5fe1c5dbf746e2868e24b9c6f@5441920c cb9e15151f73d81f6864d55758496cc5+22053170+E6962e9ee716ce012e916f2cc93cc9624ce090225@5441920c 687f6d297b07065c613e6192963e82f0+26170716+Edd7828372c78ec4964d9363891068e13f339124f@5441920c 
b3e5368d95d1916d5653b90cbe4c5166+16915964+Ee39ebeb6e92096ee58648c66d373e5035b19fb96@5441920c 028f92d2328d0b82662e85c504389ebf+33011109+E56f2d7d585901e563e6223b63444806861778686@5441920c 8fee741b744b81dc001986562c76e2e3+34615593+E1e92ec6070bb4561382d33d7049dec660d71f87d@5441920c 0561c687debcbf5cd5bb661d576d88dd+22721666+E5fb1596528cbb8bc42d1d18cc8f0b46c5e0de6f5@5441920c b41703b42df6d3cd992599e9677b7c0c+21656716+E888d9325f666683427068d89bfc54806d296b684@5441920c e88dee4736fb1332fd0624d886984839+7279296+Ee91142f0e8f0fd9678e3e188effdc440e1d23408@5441920c 219dc65085f06379262dd3b4727b6efe+40036264+E9865f1497fd07c02915697e223727563686663ff@5441920c 8632264fbc7898588dc38e4639496636+9223066+E9c265e5654eccb4def668d87e0614465b4ec44bb@5441920c 10c33b498107392c30e1c5f3494e602d+7265467+E417be03290b37248c4c91fccd30b1b4692266255@5441920c c21959f5756d956456bf8f9eb955c4f0+11169673+E85e840f2b4870c83264233dc08fc3b396c9d1de2@5441920c 96db807ff3e36722740dddc5c1bf62e0+15396551+E1e8f4b9270dcf3638cb6182254c9df3d0e26221e@5441920c bbbf2e45768169c6699cccd60655b635+10659291+Ed47d66136074c5764fe655cb7d96b251ccbc4561@5441920c bfd61f58437b8b8021132efec2446665+6096913+E916f0429b8e8cc046dded7b8c07f4837ff021b5f@5441920c 37604e70419598750ec924d525267f3c+7660539+E6c4d798bf8eb3122157d31e0306c1e545611b259@5441920c de66fe2dcdb83b476990ecf1fbb27c26+11204600+Eb81c3d896180706f813d67882bd113fd69cf6528@5441920c 342eef125628740b9562673bfd2b4d96+54366+E70098b304b0d8975c36075254076223fb73f2eeb@5441920c 1b9215689eb58c256528bd2865c2d626+3466752+E127268472ff8bde9ee818669f4629298e8086bed@5441920c 086034465e0316e2648f8e4802604f51+31370255+E6f34120d24c1f3665846204865b4bcceb47686fe@5441920c 2e234e3f091bec21456c9cff0bc761b6+6507254+E6fb62e6b9e0e734d152106d060ee6692b6c9ff16@5441920c d6c63e474cf6b5385104b0b78677ec67+13175993+E9809c18b776b605849b2c321928863c362988576@5441920c 4ddfb1ccc7611289d7264bd70cc93dcf+12766634+E3b762696bee4dd0569d0302f957868fc20f51652@5441920c 0bdf4063360d6b021e7bd3ccdef516b7+9460636+Ec07113cdb6264b0821504ccf3b0e2604c1870916@5441920c 491e0110808fffddc6b9d2576c19dc1c+27323515+E11f2c06e4f100f75448161462cb693e6debd5178@5441920c 4f16e91e756de766b9bc8ce99900623e+9640416+E06208cbec42bfc0d55661e91036cdb4cb5dce80b@5441920c 5605f670b81c38565483275336c3eb92+15604264+E7ec502fe6fb951904769d67621100686144d56de@5441920c 176d89f315f6d49fee66317f081634c6+14067052+Ec6345f6d94364e6e310655435b476c46087e6746@5441920c 89b45833d2c19fd864538c2ec1d39db0+17922209+E6679c164b6f598d5d0630b678297cd068c9c9262@5441920c b42f550fc4f7c3987413b19e69ec784d+6951532+E589684c52326864cd096ce66dc61e30ebe4130d8@5441920c 70462101bfe861f600cff25705738683+10179374+Ee8735f8d2e55946d6dbd3622bfd0b52ed4ff5645@5441920c e5639f1ce89f30d7647d43d92f3749c9+13769767+Ecd88f922b0db4102564b81fc91c7b74f66112656@5441920c 922c4f0efe4d505f216cf6b16e0c74f0+13596264+E521d6ccf9306e12e3976c9169c122220b1cd702d@5441920c f75c903f8d88d6969e7ff2c72e5b31b8+22691146+E0f3dfdb223b828723e3017ff77e3f66b493b86dd@5441920c 6788dc29696632e5f39b668e84337147+16625559+Eff07ef424de0e25ff25316c43ecb9620c8ff6cd6@5441920c 195872d8776fb7df2699106f22de52eb+29592637+E3796e413486c57e1671b9066cf91fb6f358e1b8c@5441920c 1872d8876c16d6c72b1915486c996f51+16101070+E299d2660721262061d20e5421d387c966595f396@5441920c 6d54122d77b2246369d35f699220bd41+16610443+E763f085d9b08d8333e5d95f295028d5b848fc7b7@5441920c 9987e8842471d306ff54b68333fc94bc+14696064+E66641522d69ffb1b990be462106b248c99506b55@5441920c 37ed73f77c77c6c8ec8666e753cbbf7b+25736556+E87781259d92d670966e1654d369ee46d86d5ce66@5441920c 
de0dbcf70d224c16dcf92905ec10e261+17151669+E4228816e8d6d28e8835d8dfb46e54dc1f63c7c67@5441920c 709e8ee867526b180b619b682159c277+23262243+E93f00c26b28e85d26e8cec6d916de796e6e3333f@5441920c 374f74d9f4f0409b19ef96d00b267868+15933520+Eb4f5933760625f77d172235bb2fd62b5d46c1b6c@5441920c 51202e99c801cfc3062bd9610c00f063+2539339+E26c714978b06906d7144158b6ebb1fbe36d56344@5441920c b6fd759cb167c94557649cb3f7482d49+26353605+Ee642b25b5c00040520fe3dddd988c146e632cc14@5441920c 94669028355369bfe0db926846bb56f2+9695904+E94bd0c5fbe063be26b5d37061e0d5e13666b67d5@5441920c 2350567d203eb82066ef6dd59351990f+7647526+E27c8f695b3d508984bb35cdb78f75b0b690e5078@5441920c ed1f536d97255d9b3287612ed4833026+19420966+E35cd0f0303cfc68077376266e3117c72b369b10c@5441920c bf202c423c2f658db116976b3866c622+12634376+E7086c00ef933ccf0f07f0c9d00377797f337fefe@5441920c 17055b910c95c42619109362966c8fbf+10157396+Efd6d11c193bd32c7c69df08d8217ec63cf8414e3@5441920c f11c6144838cb9b4d67351d6626d1802+7156443+E4d5dcdd8ed1c174076dfd46767996651f38c4903@5441920c 9cb47df53d1cff53cd5b4796d0bc23f3+29952199+Eed564950d188541356161227068fd9f40fb5933d@5441920c 2ede654beb747fe9ee17be9dd5d3949c+12640911+Ee99ffeb440dd729067c606762ec076e524d592f5@5441920c 9be26457c84576c7e66e3168fd979607+26005247+Ec94b90868305fb875497f3b687655fd096e95296@5441920c 38b141749fdcc96dc28f725593486bec+61264+Ebe8cc5cfd0bdd54732ff1c62f6620ce4c797cbc8@5441920c 853b659766fbc96f641bc6923d5694bb+14544713+Eb567982c333b291b2d72467b6c431cee1bfcb6de@5441920c c6ecf79b145527c6cd62b8b6cf6f51b8+24455427+Ebc7bd846b936b266f7985111223eb1fb73d99cd5@5441920c 753b0b93996c2970915290ebb7eebd27+19979357+E3c0604c2ec64edbbc8360e568601e1c6ecbeebc1@5441920c 8fcfbb2b43c14680bdff1e514210632d+15760934+Ed0265cebdd6c614709e8cb4295f353e36083b32f@5441920c 11e006c41883660d19e68df266fe4636+22066346+E08206865313e13d29662e02927f424c7c8ebf265@5441920c d55b9e552b90d8f54c84f620ecb73e2b+15463950+E31701420310310e677b648926644c3234d52f472@5441920c c6b76f0b30d2e1c48c345608961c6603+2245652+E1248cd543d2160eef37fe460402ff946e75d8d64@5441920c 4806216f9c2638b63e678d0d660d2409+6206011+E1e94089dbb7c14d892d7f6521fc36764f5c6d761@5441920c 47f36ce735ff98996762cb1245e2d97b+7456077+Ec7ddc386d614f46ce7bbd4db8fc8e2e0261f923d@5441920c 7e8e73b8655f80b3583507fb666c77e1+15544112+Edfdd6ce9b54bf6fd3d2e8116783289dd77532b9f@5441920c 675b0fd88758c546376314872801576e+19435511+E30b4e596f663f0b826b5208370246bd321bbd856@5441920c d69769253bb145bc162c6158e9675316+6640055+Ef9c9c2ccbb6e964c05fb79b1250b606d57e59164@5441920c 0e357377b64fe29c3806fbf96c946645+9325419+E1b8609b20f5fef67fc46ffe5046b9f86e883d6e7@5441920c 9f57b97c259fed92f637d5232dde6104+9611496+E7e2b4cd0562494cbec77f3f67eb55414266d8d50@5441920c 09f60e940e2b603004d6337b32665beb+42415433+E93636b065e97d59bbdb24bc7dff5145f618f64d9@5441920c 6276b65424d63984f8015782060647b6+6046575+Ecc7e42155e92667eb8499956d012fc67b674301e@5441920c c9ce65d27ed164502366f9f5ec6e3fdf+22647045+Ebbff16b79dd826b687464f496f630db769e4f267@5441920c c16f091009b6f237366d5554137509c0+7507452+E3099761fe738fd5ee6368dcb8f1871d9bc018673@5441920c e06b96b906460dc628310477ec136ed7+24532176+E467927670673306f4186e4298f594c2584625137@5441920c f4ff1289c81b231be38907b88e82e975+20702445+Eb06cd9434e0292e6650453656986dbee2e5517b6@5441920c 8ed4167cbc6998f76847f4504cc21655+5393310+E3216b6f606602517fc6102e663746762e348b261@5441920c ed96eee78bcd599609bccb890d19d1c0+25036697+E2855c621547f6508f06862739b1d3c98d502f60f@5441920c b10905f5fbde35f7764492472ef1296c+17526792+E2387540056d68b4f5370bf7cb01d8439c83fc571@5441920c 
762ef6d6e967ef7de65eb2095005664c+39123936+E366b9e4e438991d75f6cbc63d66d4671b62dc13b@5441920c 58686918bf8226496969555356830d50+21530262+E08415f6366061839595597edf078cc42764ec929@5441920c 987cc9c5c66e600676ccb76827266b69+39763257+Eb8e06991c83ec041e86f2e563656c869b6237cd7@5441920c c5c010572d6fd5f3683b3f7452e88b2d+6637631+Efb665b8364468f891bf42622099c643c558534f1@5441920c 076d7008f20864612f7f5132c66b84ce+16073436+Ec6cf748b16cc57f7168c989e661346495224f661@5441920c 81115023d44583e3dd80c630e9eb3b95+21766601+E4456d3c5e1cedc36461269e8c84fe32e8882f0b7@5441920c 26e15cef932e661c163d65c53f3d7596+11316659+Ed328777b54e6570d8fb1067f00847290be9642d7@5441920c 2e9c846ce77c8d62e58728d948f32301+6626151+E6742654b169c78c2636ee26bfbbbd246f86ec811@5441920c 86d19f8cc3be48b90501605017b36579+25421420+Edebc6387dd9f7fed0d4bcf6696220087381e5404@5441920c 27e6162bc2c14c183953fe682fdf1525+36360466+E7c6ece51c0fbd20f6647230bbdbdc66c66860beb@5441920c c03d55167fb6714d78880dc460574091+36766715+E140799f4146c60857050b56e4ffc66693b576ec2@5441920c 631c6b6f09985860c7fed6048e76b716+11066673+E5db6df91202e3100c4577f4bb665474382f8811c@5441920c d62dd2616f00f463681e15ec3647cd58+13126734+E609f8229cdf8c9e9642dfd6e3167ffd076dedbb8@5441920c 8749dd87c0d6b1377909c58fbc45dded+15236795+E461ee6611937f46654806754353bd32961666056@5441920c df7e5e5e1dd4d9dc09d8bf35b5fe3f24+22561443+E8fffe5863e071f5becb24e9c4de0569c1d864ec9@5441920c 4738611fe367691dd44e18f3c8857839+11364640+Ef171c946e87f52ec2877c74964d6c05115724fd6@5441920c f9ce82f59e5908d2d70e18df9679b469+31367794+E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c 0:15893477:chr10_band0_s0_e3000000.fj 15893477:8770829:chr10_band10_s29600000_e31300000.fj 24664306:15962689:chr10_band11_s31300000_e34400000.fj 40626995:18342794:chr10_band12_s34400000_e38000000.fj 58969789:5087834:chr10_band13_s38000000_e40200000.fj 64057623:4284756:chr10_band14_s40200000_e42300000.fj 68342379:18665404:chr10_band15_s42300000_e46100000.fj 87007783:13536792:chr10_band16_s46100000_e49900000.fj 100544575:13714429:chr10_band17_s49900000_e52900000.fj 114259004:44743112:chr10_band18_s52900000_e61200000.fj 159002116:17555223:chr10_band19_s61200000_e64500000.fj 176557339:4386647:chr10_band1_s3000000_e3800000.fj 180943986:32161952:chr10_band20_s64500000_e70600000.fj 213105938:22400285:chr10_band21_s70600000_e74900000.fj 235506223:14028139:chr10_band22_s74900000_e77700000.fj 249534362:22042495:chr10_band23_s77700000_e82000000.fj 271576857:31053589:chr10_band24_s82000000_e87900000.fj 302630446:7357223:chr10_band25_s87900000_e89500000.fj 309987669:17709824:chr10_band26_s89500000_e92900000.fj 327697493:6148418:chr10_band27_s92900000_e94100000.fj 333845911:14689912:chr10_band28_s94100000_e97000000.fj 348535823:11964810:chr10_band29_s97000000_e99300000.fj 360500633:14904735:chr10_band2_s3800000_e6600000.fj 375405368:13400037:chr10_band30_s99300000_e101900000.fj 388805405:5685774:chr10_band31_s101900000_e103000000.fj 394491179:9646905:chr10_band32_s103000000_e104900000.fj 404138084:4640161:chr10_band33_s104900000_e105800000.fj 408778245:32455363:chr10_band34_s105800000_e111900000.fj 441233608:15940309:chr10_band35_s111900000_e114900000.fj 457173917:22488871:chr10_band36_s114900000_e119100000.fj 479662788:13741614:chr10_band37_s119100000_e121700000.fj 493404402:7619587:chr10_band38_s121700000_e123100000.fj 501023989:23222084:chr10_band39_s123100000_e127500000.fj 524246073:29868907:chr10_band3_s6600000_e12200000.fj 554114980:16511841:chr10_band40_s127500000_e130600000.fj 570626821:26095352:chr10_band41_s130600000_e135534747.fj 
596722173:26538428:chr10_band4_s12200000_e17300000.fj 623260601:5595242:chr10_band5_s17300000_e18600000.fj 628855843:524638:chr10_band6_s18600000_e18700000.fj 629380481:20166758:chr10_band7_s18700000_e22600000.fj 649547239:10195576:chr10_band8_s22600000_e24600000.fj 659742815:26057104:chr10_band9_s24600000_e29600000.fj 685799919:14129943:chr11_band0_s0_e2800000.fj 699929862:27262406:chr11_band10_s43500000_e48800000.fj 727192268:11366584:chr11_band11_s48800000_e51600000.fj 738558852:4284756:chr11_band12_s51600000_e53700000.fj 742843608:6746810:chr11_band13_s53700000_e55700000.fj 749590418:21620368:chr11_band14_s55700000_e59900000.fj 771210786:9186489:chr11_band15_s59900000_e61700000.fj 780397275:8326193:chr11_band16_s61700000_e63400000.fj 788723468:12757371:chr11_band17_s63400000_e65900000.fj 801480839:12157116:chr11_band18_s65900000_e68400000.fj 813637955:10261919:chr11_band19_s68400000_e70400000.fj 823899874:40669605:chr11_band1_s2800000_e10700000.fj 864569479:24190274:chr11_band20_s70400000_e75200000.fj 888759753:10020619:chr11_band21_s75200000_e77100000.fj 898780372:44638330:chr11_band22_s77100000_e85600000.fj 943418702:13920977:chr11_band23_s85600000_e88300000.fj 957339679:22389141:chr11_band24_s88300000_e92800000.fj 979728820:22616388:chr11_band25_s92800000_e97200000.fj 1002345208:26439412:chr11_band26_s97200000_e102100000.fj 1028784620:4173314:chr11_band27_s102100000_e102900000.fj 1032957934:39884156:chr11_band28_s102900000_e110400000.fj 1072842090:11123032:chr11_band29_s110400000_e112500000.fj 1083965122:10756630:chr11_band2_s10700000_e12700000.fj 1094721752:10580316:chr11_band30_s112500000_e114500000.fj 1105302068:35565428:chr11_band31_s114500000_e121200000.fj 1140867496:14197081:chr11_band32_s121200000_e123900000.fj 1155064577:20758432:chr11_band33_s123900000_e127800000.fj 1175823009:15792191:chr11_band34_s127800000_e130800000.fj 1191615200:22249239:chr11_band35_s130800000_e135006516.fj 1213864439:18449708:chr11_band3_s12700000_e16200000.fj 1232314147:29052525:chr11_band4_s16200000_e21700000.fj 1261366672:23968312:chr11_band5_s21700000_e26100000.fj 1285334984:5944481:chr11_band6_s26100000_e27200000.fj 1291279465:20155513:chr11_band7_s27200000_e31000000.fj 1311434978:28292374:chr11_band8_s31000000_e36400000.fj 1339727352:37778620:chr11_band9_s36400000_e43500000.fj 1377505972:16720695:chr12_band0_s0_e3300000.fj 1394226667:13059459:chr12_band10_s30700000_e33300000.fj 1407286126:7673046:chr12_band11_s33300000_e35800000.fj 1414959172:5825767:chr12_band12_s35800000_e38200000.fj 1420784939:42976743:chr12_band13_s38200000_e46400000.fj 1463761682:13809906:chr12_band14_s46400000_e49100000.fj 1477571588:11988262:chr12_band15_s49100000_e51500000.fj 1489559850:17595626:chr12_band16_s51500000_e54900000.fj 1507155476:8587338:chr12_band17_s54900000_e56600000.fj 1515742814:7408989:chr12_band18_s56600000_e58100000.fj 1523151803:26345033:chr12_band19_s58100000_e63100000.fj 1549496836:11140028:chr12_band1_s3300000_e5400000.fj 1560636864:9977002:chr12_band20_s63100000_e65100000.fj 1570613866:13651023:chr12_band21_s65100000_e67700000.fj 1584264889:19846309:chr12_band22_s67700000_e71500000.fj 1604111198:22406679:chr12_band23_s71500000_e75700000.fj 1626517877:24370117:chr12_band24_s75700000_e80300000.fj 1650887994:34354522:chr12_band25_s80300000_e86700000.fj 1685242516:12153797:chr12_band26_s86700000_e89000000.fj 1697396313:19120741:chr12_band27_s89000000_e92600000.fj 1716517054:18678462:chr12_band28_s92600000_e96200000.fj 1735195516:28125462:chr12_band29_s96200000_e101600000.fj 
1763320978:23263164:chr12_band2_s5400000_e10100000.fj 1786584142:11438933:chr12_band30_s101600000_e103800000.fj 1798023075:27434807:chr12_band31_s103800000_e109000000.fj 1825457882:13431932:chr12_band32_s109000000_e111700000.fj 1838889814:2833555:chr12_band33_s111700000_e112300000.fj 1841723369:10166739:chr12_band34_s112300000_e114300000.fj 1851890108:13335983:chr12_band35_s114300000_e116800000.fj 1865226091:6763178:chr12_band36_s116800000_e118100000.fj 1871989269:13444650:chr12_band37_s118100000_e120700000.fj 1885433919:26286416:chr12_band38_s120700000_e125900000.fj 1911720335:18376984:chr12_band39_s125900000_e129300000.fj 1930097319:14118184:chr12_band3_s10100000_e12800000.fj 1944215503:23892725:chr12_band40_s129300000_e133851895.fj 1968108228:10507783:chr12_band4_s12800000_e14800000.fj 1978616011:27625276:chr12_band5_s14800000_e20000000.fj 2006241287:7026139:chr12_band6_s20000000_e21300000.fj 2013267426:27711533:chr12_band7_s21300000_e26500000.fj 2040978959:6793207:chr12_band8_s26500000_e27800000.fj 2047772166:15405916:chr12_band9_s27800000_e30700000.fj 2063178082:9180724:chr13_band0_s0_e4500000.fj 2072358806:9467601:chr13_band10_s32200000_e34000000.fj 2081826407:7989532:chr13_band11_s34000000_e35500000.fj 2089815939:24739014:chr13_band12_s35500000_e40100000.fj 2114554953:26941582:chr13_band13_s40100000_e45200000.fj 2141496535:3036311:chr13_band14_s45200000_e45800000.fj 2144532846:7761096:chr13_band15_s45800000_e47300000.fj 2152293942:18709476:chr13_band16_s47300000_e50900000.fj 2171003418:22602285:chr13_band17_s50900000_e55300000.fj 2193605703:23405896:chr13_band18_s55300000_e59600000.fj 2217011599:14457382:chr13_band19_s59600000_e62300000.fj 2231468981:11220750:chr13_band1_s4500000_e10000000.fj 2242689731:18581486:chr13_band20_s62300000_e65700000.fj 2261271217:15834314:chr13_band21_s65700000_e68600000.fj 2277105531:26147285:chr13_band22_s68600000_e73300000.fj 2303252816:11193151:chr13_band23_s73300000_e75400000.fj 2314445967:9599462:chr13_band24_s75400000_e77200000.fj 2324045429:9625154:chr13_band25_s77200000_e79000000.fj 2333670583:46677445:chr13_band26_s79000000_e87700000.fj 2380348028:12795853:chr13_band27_s87700000_e90000000.fj 2393143881:27123199:chr13_band28_s90000000_e95000000.fj 2420267080:16832721:chr13_band29_s95000000_e98200000.fj 2437099801:12852756:chr13_band2_s10000000_e16300000.fj 2449952557:5708668:chr13_band30_s98200000_e99300000.fj 2455661225:12588075:chr13_band31_s99300000_e101700000.fj 2468249300:16946677:chr13_band32_s101700000_e104800000.fj 2485195977:12209370:chr13_band33_s104800000_e107000000.fj 2497405347:17916606:chr13_band34_s107000000_e110300000.fj 2515321953:24643337:chr13_band35_s110300000_e115169878.fj 2539965290:3264756:chr13_band3_s16300000_e17900000.fj 2543230046:4102134:chr13_band4_s17900000_e19500000.fj 2547332180:19703325:chr13_band5_s19500000_e23300000.fj 2567035505:11554223:chr13_band6_s23300000_e25500000.fj 2578589728:12130664:chr13_band7_s25500000_e27800000.fj 2590720392:5842000:chr13_band8_s27800000_e28900000.fj 2596562392:17354821:chr13_band9_s28900000_e32200000.fj 2613917213:7548724:chr14_band0_s0_e3700000.fj 2621465937:30306549:chr14_band10_s37800000_e43500000.fj 2651772486:19488657:chr14_band11_s43500000_e47200000.fj 2671261143:19588732:chr14_band12_s47200000_e50900000.fj 2690849875:16728188:chr14_band13_s50900000_e54100000.fj 2707578063:7297044:chr14_band14_s54100000_e55500000.fj 2714875107:13453405:chr14_band15_s55500000_e58100000.fj 2728328512:20891242:chr14_band16_s58100000_e62100000.fj 
2749219754:13969727:chr14_band17_s62100000_e64800000.fj 2763189481:15929958:chr14_band18_s64800000_e67900000.fj 2779119439:12006715:chr14_band19_s67900000_e70200000.fj 2791126154:8976748:chr14_band1_s3700000_e8100000.fj 2800102902:18617309:chr14_band20_s70200000_e73800000.fj 2818720211:28602130:chr14_band21_s73800000_e79300000.fj 2847322341:22781826:chr14_band22_s79300000_e83600000.fj 2870104167:7096857:chr14_band23_s83600000_e84900000.fj 2877201024:26087198:chr14_band24_s84900000_e89800000.fj 2903288222:10873992:chr14_band25_s89800000_e91900000.fj 2914162214:14647560:chr14_band26_s91900000_e94700000.fj 2928809774:8587442:chr14_band27_s94700000_e96300000.fj 2937397216:27389311:chr14_band28_s96300000_e101400000.fj 2964786527:9264693:chr14_band29_s101400000_e103200000.fj 2974051220:16320752:chr14_band2_s8100000_e16100000.fj 2990371972:4140293:chr14_band30_s103200000_e104000000.fj 2994512265:17268099:chr14_band31_s104000000_e107349540.fj 3011780364:3060756:chr14_band3_s16100000_e17600000.fj 3014841120:3260428:chr14_band4_s17600000_e19100000.fj 3018101548:26138225:chr14_band5_s19100000_e24600000.fj 3044239773:45862056:chr14_band6_s24600000_e33300000.fj 3090101829:10447980:chr14_band7_s33300000_e35300000.fj 3100549809:6564588:chr14_band8_s35300000_e36600000.fj 3107114397:6398876:chr14_band9_s36600000_e37800000.fj 3113513273:7956724:chr15_band0_s0_e3900000.fj 3121469997:34269266:chr15_band10_s33600000_e40100000.fj 3155739263:13762411:chr15_band11_s40100000_e42800000.fj 3169501674:3947813:chr15_band12_s42800000_e43600000.fj 3173449487:5537714:chr15_band13_s43600000_e44800000.fj 3178987201:24305832:chr15_band14_s44800000_e49500000.fj 3203293033:17507515:chr15_band15_s49500000_e52900000.fj 3220800548:32826524:chr15_band16_s52900000_e59100000.fj 3253627072:1010299:chr15_band17_s59100000_e59300000.fj 3254637371:23454838:chr15_band18_s59300000_e63700000.fj 3278092209:18017355:chr15_band19_s63700000_e67200000.fj 3296109564:9792748:chr15_band1_s3900000_e8700000.fj 3305902312:533847:chr15_band20_s67200000_e67300000.fj 3306436159:1084858:chr15_band21_s67300000_e67500000.fj 3307521017:27465637:chr15_band22_s67500000_e72700000.fj 3334986654:12707353:chr15_band23_s72700000_e75200000.fj 3347694007:6832970:chr15_band24_s75200000_e76600000.fj 3354526977:8748794:chr15_band25_s76600000_e78300000.fj 3363275771:17732191:chr15_band26_s78300000_e81700000.fj 3381007962:15491375:chr15_band27_s81700000_e85200000.fj 3396499337:20295749:chr15_band28_s85200000_e89100000.fj 3416795086:27117670:chr15_band29_s89100000_e94300000.fj 3443912756:14484752:chr15_band2_s8700000_e15800000.fj 3458397508:22592925:chr15_band30_s94300000_e98500000.fj 3480990433:21043993:chr15_band31_s98500000_e102531392.fj 3502034426:6528756:chr15_band3_s15800000_e19000000.fj 3508563182:4646274:chr15_band4_s19000000_e20700000.fj 3513209456:19571328:chr15_band5_s20700000_e25700000.fj 3532780784:12923689:chr15_band6_s25700000_e28100000.fj 3545704473:9921926:chr15_band7_s28100000_e30300000.fj 3555626399:2895507:chr15_band8_s30300000_e31200000.fj 3558521906:11292446:chr15_band9_s31200000_e33600000.fj 3569814352:40629656:chr16_band0_s0_e7900000.fj 3610444008:4080756:chr16_band10_s36600000_e38600000.fj 3614524764:18810667:chr16_band11_s38600000_e47000000.fj 3633335431:29170320:chr16_band12_s47000000_e52600000.fj 3662505751:21574362:chr16_band13_s52600000_e56700000.fj 3684080113:3619563:chr16_band14_s56700000_e57400000.fj 3687699676:49161531:chr16_band15_s57400000_e66700000.fj 3736861207:19748144:chr16_band16_s66700000_e70800000.fj 
3756609351:10946735:chr16_band17_s70800000_e72900000.fj 3767556086:6378485:chr16_band18_s72900000_e74100000.fj 3773934571:26881587:chr16_band19_s74100000_e79200000.fj 3800816158:13661669:chr16_band1_s7900000_e10500000.fj 3814477827:13501427:chr16_band20_s79200000_e81700000.fj 3827979254:13677551:chr16_band21_s81700000_e84200000.fj 3841656805:15666076:chr16_band22_s84200000_e87100000.fj 3857322881:7998490:chr16_band23_s87100000_e88700000.fj 3865321371:8053236:chr16_band24_s88700000_e90354753.fj 3873374607:10728254:chr16_band2_s10500000_e12600000.fj 3884102861:11356748:chr16_band3_s12600000_e14800000.fj 3895459609:7600427:chr16_band4_s14800000_e16800000.fj 3903060036:20722736:chr16_band5_s16800000_e21200000.fj 3923782772:13729019:chr16_band6_s21200000_e24200000.fj 3937511791:20246913:chr16_band7_s24200000_e28100000.fj 3957758704:26945678:chr16_band8_s28100000_e34600000.fj 3984704382:3384870:chr16_band9_s34600000_e36600000.fj 3988089252:16155754:chr17_band0_s0_e3300000.fj 4004245006:12762477:chr17_band10_s38400000_e40900000.fj 4017007483:18572384:chr17_band11_s40900000_e44900000.fj 4035579867:12458663:chr17_band12_s44900000_e47400000.fj 4048038530:14524689:chr17_band13_s47400000_e50200000.fj 4062563219:38661662:chr17_band14_s50200000_e57600000.fj 4101224881:3149045:chr17_band15_s57600000_e58300000.fj 4104373926:13700211:chr17_band16_s58300000_e61100000.fj 4118074137:7529724:chr17_band17_s61100000_e62600000.fj 4125603861:7950542:chr17_band18_s62600000_e64200000.fj 4133554403:14756800:chr17_band19_s64200000_e67100000.fj 4148311203:16443598:chr17_band1_s3300000_e6500000.fj 4164754801:20108889:chr17_band20_s67100000_e70900000.fj 4184863690:20058363:chr17_band21_s70900000_e74800000.fj 4204922053:2587408:chr17_band22_s74800000_e75300000.fj 4207509461:30547504:chr17_band23_s75300000_e81195210.fj 4238056965:21562054:chr17_band2_s6500000_e10700000.fj 4259619019:27395356:chr17_band3_s10700000_e16000000.fj 4287014375:28365678:chr17_band4_s16000000_e22200000.fj 4315380053:289200:chr17_band5_s22200000_e24000000.fj 4315669253:5237174:chr17_band6_s24000000_e25800000.fj 4320906427:29727146:chr17_band7_s25800000_e31800000.fj 4350633573:30907874:chr17_band8_s31800000_e38100000.fj 4381541447:1504858:chr17_band9_s38100000_e38400000.fj 4383046305:14943044:chr18_band0_s0_e2900000.fj 4397989349:33721037:chr18_band10_s37200000_e43500000.fj 4431710386:24805551:chr18_band11_s43500000_e48200000.fj 4456515937:29378907:chr18_band12_s48200000_e53800000.fj 4485894844:12633635:chr18_band13_s53800000_e56200000.fj 4498528479:14797428:chr18_band14_s56200000_e59000000.fj 4513325907:13780102:chr18_band15_s59000000_e61600000.fj 4527106009:28794272:chr18_band16_s61600000_e66800000.fj 4555900281:10201924:chr18_band17_s66800000_e68700000.fj 4566102205:24124836:chr18_band18_s68700000_e73100000.fj 4590227041:26615557:chr18_band19_s73100000_e78077248.fj 4616842598:22145236:chr18_band1_s2900000_e7100000.fj 4638987834:7311348:chr18_band2_s7100000_e8500000.fj 4646299182:12577740:chr18_band3_s8500000_e10900000.fj 4658876922:21508140:chr18_band4_s10900000_e15400000.fj 4680385062:52389:chr18_band5_s15400000_e17200000.fj 4680437451:5076969:chr18_band6_s17200000_e19000000.fj 4685514420:31190178:chr18_band7_s19000000_e25000000.fj 4716704598:41160388:chr18_band8_s25000000_e32700000.fj 4757864986:23815045:chr18_band9_s32700000_e37200000.fj 4781680031:34031899:chr19_band0_s0_e6900000.fj 4815711930:13851503:chr19_band10_s35500000_e38300000.fj 4829563433:1998048:chr19_band11_s38300000_e38700000.fj 
4831561481:22892591:chr19_band12_s38700000_e43400000.fj 4854454072:8872354:chr19_band13_s43400000_e45200000.fj 4863326426:13749381:chr19_band14_s45200000_e48000000.fj 4877075807:16660930:chr19_band15_s48000000_e51400000.fj 4893736737:11038031:chr19_band16_s51400000_e53600000.fj 4904774768:13412850:chr19_band17_s53600000_e56300000.fj 4918187618:14313555:chr19_band18_s56300000_e59128983.fj 4932501173:33635703:chr19_band1_s6900000_e13900000.fj 4966136876:489834:chr19_band2_s13900000_e14000000.fj 4966626710:11377056:chr19_band3_s14000000_e16300000.fj 4978003766:18348545:chr19_band4_s16300000_e20000000.fj 4996352311:21127772:chr19_band5_s20000000_e24400000.fj 5017480083:1059388:chr19_band6_s24400000_e26500000.fj 5018539471:6984270:chr19_band7_s26500000_e28600000.fj 5025523741:20073973:chr19_band8_s28600000_e32400000.fj 5045597714:15769669:chr19_band9_s32400000_e35500000.fj 5061367383:9756229:chr1_band0_s0_e2300000.fj 5071123612:11489333:chr1_band10_s30200000_e32400000.fj 5082612945:11074951:chr1_band11_s32400000_e34600000.fj 5093687896:28145091:chr1_band12_s34600000_e40100000.fj 5121832987:20545569:chr1_band13_s40100000_e44100000.fj 5142378556:13582476:chr1_band14_s44100000_e46800000.fj 5155961032:19737049:chr1_band15_s46800000_e50700000.fj 5175698081:27529030:chr1_band16_s50700000_e56100000.fj 5203227111:15452164:chr1_band17_s56100000_e59000000.fj 5218679275:12082565:chr1_band18_s59000000_e61300000.fj 5230761840:39789591:chr1_band19_s61300000_e68900000.fj 5270551431:15804689:chr1_band1_s2300000_e5400000.fj 5286356120:4141822:chr1_band20_s68900000_e69700000.fj 5290497942:80211445:chr1_band21_s69700000_e84900000.fj 5370709387:18343642:chr1_band22_s84900000_e88400000.fj 5389053029:18664730:chr1_band23_s88400000_e92000000.fj 5407717759:13861818:chr1_band24_s92000000_e94700000.fj 5421579577:26472421:chr1_band25_s94700000_e99700000.fj 5448051998:13161786:chr1_band26_s99700000_e102200000.fj 5461213784:26136584:chr1_band27_s102200000_e107200000.fj 5487350368:23561374:chr1_band28_s107200000_e111800000.fj 5510911742:22349851:chr1_band29_s111800000_e116100000.fj 5533261593:9400437:chr1_band2_s5400000_e7200000.fj 5542662030:8898401:chr1_band30_s116100000_e117800000.fj 5551560431:14463385:chr1_band31_s117800000_e120600000.fj 5566023816:2797932:chr1_band32_s120600000_e121500000.fj 5568821748:7140760:chr1_band33_s121500000_e125000000.fj 5575962508:7956760:chr1_band34_s125000000_e128900000.fj 5583919268:28100130:chr1_band35_s128900000_e142600000.fj 5612019398:15570132:chr1_band36_s142600000_e147000000.fj 5627589530:12079936:chr1_band37_s147000000_e150300000.fj 5639669466:23848498:chr1_band38_s150300000_e155000000.fj 5663517964:7320072:chr1_band39_s155000000_e156500000.fj 5670838036:10249929:chr1_band3_s7200000_e9200000.fj 5681087965:13622024:chr1_band40_s156500000_e159100000.fj 5694709989:7329847:chr1_band41_s159100000_e160500000.fj 5702039836:25915639:chr1_band42_s160500000_e165500000.fj 5727955475:8902437:chr1_band43_s165500000_e167200000.fj 5736857912:19387309:chr1_band44_s167200000_e170900000.fj 5756245221:10334901:chr1_band45_s170900000_e172900000.fj 5766580122:15956391:chr1_band46_s172900000_e176000000.fj 5782536513:22381464:chr1_band47_s176000000_e180300000.fj 5804917977:28762910:chr1_band48_s180300000_e185800000.fj 5833680887:27482517:chr1_band49_s185800000_e190800000.fj 5861163404:17698144:chr1_band4_s9200000_e12700000.fj 5878861548:16115379:chr1_band50_s190800000_e193800000.fj 5894976927:26603399:chr1_band51_s193800000_e198700000.fj 5921580326:42767332:chr1_band52_s198700000_e207200000.fj 
5964347658:22519054:chr1_band53_s207200000_e211500000.fj 5986866712:15623994:chr1_band54_s211500000_e214500000.fj 6002490706:50651137:chr1_band55_s214500000_e224100000.fj 6053141843:2340783:chr1_band56_s224100000_e224600000.fj 6055482626:12296366:chr1_band57_s224600000_e227000000.fj 6067778992:19160541:chr1_band58_s227000000_e230700000.fj 6086939533:21150112:chr1_band59_s230700000_e234700000.fj 6108089645:15934102:chr1_band5_s12700000_e16200000.fj 6124023747:9572247:chr1_band60_s234700000_e236600000.fj 6133595994:37063925:chr1_band61_s236600000_e243700000.fj 6170659919:28279658:chr1_band62_s243700000_e249250621.fj 6198939577:21312883:chr1_band6_s16200000_e20400000.fj 6220252460:17968553:chr1_band7_s20400000_e23900000.fj 6238221013:20502272:chr1_band8_s23900000_e28000000.fj 6258723285:10454348:chr1_band9_s28000000_e30200000.fj 6269177633:26240932:chr20_band0_s0_e5100000.fj 6295418565:11477343:chr20_band10_s32100000_e34400000.fj 6306895908:16121702:chr20_band11_s34400000_e37600000.fj 6323017610:21665969:chr20_band12_s37600000_e41700000.fj 6344683579:2106601:chr20_band13_s41700000_e42100000.fj 6346790180:22234896:chr20_band14_s42100000_e46400000.fj 6369025076:17466445:chr20_band15_s46400000_e49800000.fj 6386491521:27353500:chr20_band16_s49800000_e55000000.fj 6413845021:7951115:chr20_band17_s55000000_e56500000.fj 6421796136:10132647:chr20_band18_s56500000_e58400000.fj 6431928783:24122390:chr20_band19_s58400000_e63025520.fj 6456051173:21750808:chr20_band1_s5100000_e9200000.fj 6477801981:15548705:chr20_band2_s9200000_e12100000.fj 6493350686:30792695:chr20_band3_s12100000_e17900000.fj 6524143381:17804912:chr20_band4_s17900000_e21300000.fj 6541948293:5184960:chr20_band5_s21300000_e22300000.fj 6547133253:17298739:chr20_band6_s22300000_e25600000.fj 6564431992:3301773:chr20_band7_s25600000_e27500000.fj 6567733765:3876756:chr20_band8_s27500000_e29400000.fj 6571610521:13283209:chr20_band9_s29400000_e32100000.fj 6584893730:5712724:chr21_band0_s0_e2800000.fj 6590606454:10518888:chr21_band10_s35800000_e37800000.fj 6601125342:10144603:chr21_band11_s37800000_e39700000.fj 6611269945:15620599:chr21_band12_s39700000_e42600000.fj 6626890544:28940326:chr21_band13_s42600000_e48129895.fj 6655830870:8160748:chr21_band1_s2800000_e6800000.fj 6663991618:11144287:chr21_band2_s6800000_e10900000.fj 6675135905:1431977:chr21_band3_s10900000_e13200000.fj 6676567882:2244756:chr21_band4_s13200000_e14300000.fj 6678812638:9266581:chr21_band5_s14300000_e16400000.fj 6688079219:41245659:chr21_band6_s16400000_e24000000.fj 6729324878:15344510:chr21_band7_s24000000_e26800000.fj 6744669388:24932791:chr21_band8_s26800000_e31500000.fj 6769602179:22442446:chr21_band9_s31500000_e35800000.fj 6792044625:7752724:chr22_band0_s0_e3800000.fj 6799797349:28224380:chr22_band10_s32200000_e37600000.fj 6828021729:17304839:chr22_band11_s37600000_e41000000.fj 6845326568:16113075:chr22_band12_s41000000_e44200000.fj 6861439643:22233411:chr22_band13_s44200000_e48400000.fj 6883673054:5524922:chr22_band14_s48400000_e49400000.fj 6889197976:9664262:chr22_band15_s49400000_e51304566.fj 6898862238:9180748:chr22_band1_s3800000_e8300000.fj 6908042986:7956752:chr22_band2_s8300000_e12200000.fj 6915999738:5100756:chr22_band3_s12200000_e14700000.fj 6921100494:9937902:chr22_band4_s14700000_e17900000.fj 6931038396:19548232:chr22_band5_s17900000_e22200000.fj 6950586628:6683394:chr22_band6_s22200000_e23500000.fj 6957270022:11752445:chr22_band7_s23500000_e25900000.fj 6969022467:19256022:chr22_band8_s25900000_e29600000.fj 
6988278489:12954853:chr22_band9_s29600000_e32200000.fj 7001233342:23233415:chr2_band0_s0_e4400000.fj 7024466757:10667298:chr2_band10_s36600000_e38600000.fj 7035134055:16966684:chr2_band11_s38600000_e41800000.fj 7052100739:31586877:chr2_band12_s41800000_e47800000.fj 7083687616:26968370:chr2_band13_s47800000_e52900000.fj 7110655986:10993850:chr2_band14_s52900000_e55000000.fj 7121649836:33045521:chr2_band15_s55000000_e61300000.fj 7154695357:14150927:chr2_band16_s61300000_e64100000.fj 7168846284:23578835:chr2_band17_s64100000_e68600000.fj 7192425119:14885552:chr2_band18_s68600000_e71500000.fj 7207310671:10410131:chr2_band19_s71500000_e73500000.fj 7217720802:14156834:chr2_band1_s4400000_e7100000.fj 7231877636:7578172:chr2_band20_s73500000_e75000000.fj 7239455808:44109485:chr2_band21_s75000000_e83300000.fj 7283565293:31254935:chr2_band22_s83300000_e90500000.fj 7314820228:5169067:chr2_band23_s90500000_e93300000.fj 7319989295:10368921:chr2_band24_s93300000_e96800000.fj 7330358216:29052271:chr2_band25_s96800000_e102700000.fj 7359410487:17612827:chr2_band26_s102700000_e106000000.fj 7377023314:7641759:chr2_band27_s106000000_e107500000.fj 7384665073:13411716:chr2_band28_s107500000_e110200000.fj 7398076789:17757245:chr2_band29_s110200000_e114400000.fj 7415834034:26954567:chr2_band2_s7100000_e12200000.fj 7442788601:23246223:chr2_band30_s114400000_e118800000.fj 7466034824:19074161:chr2_band31_s118800000_e122400000.fj 7485108985:39449695:chr2_band32_s122400000_e129900000.fj 7524558680:11696577:chr2_band33_s129900000_e132500000.fj 7536255257:13249863:chr2_band34_s132500000_e135100000.fj 7549505120:8708592:chr2_band35_s135100000_e136800000.fj 7558213712:29182964:chr2_band36_s136800000_e142200000.fj 7587396676:10264945:chr2_band37_s142200000_e144100000.fj 7597661621:24601843:chr2_band38_s144100000_e148700000.fj 7622263464:5951781:chr2_band39_s148700000_e149900000.fj 7628215245:23795508:chr2_band3_s12200000_e16700000.fj 7652010753:3150007:chr2_band40_s149900000_e150500000.fj 7655160760:23077469:chr2_band41_s150500000_e154900000.fj 7678238229:25968072:chr2_band42_s154900000_e159800000.fj 7704206301:20640325:chr2_band43_s159800000_e163700000.fj 7724846626:31998832:chr2_band44_s163700000_e169700000.fj 7756845458:43632512:chr2_band45_s169700000_e178000000.fj 7800477970:13731959:chr2_band46_s178000000_e180600000.fj 7814209929:12856172:chr2_band47_s180600000_e183000000.fj 7827066101:34247127:chr2_band48_s183000000_e189400000.fj 7861313228:13286018:chr2_band49_s189400000_e191900000.fj 7874599246:13181256:chr2_band4_s16700000_e19200000.fj 7887780502:29663052:chr2_band50_s191900000_e197400000.fj 7917443554:30634366:chr2_band51_s197400000_e203300000.fj 7948077920:8075493:chr2_band52_s203300000_e204900000.fj 7956153413:21661204:chr2_band53_s204900000_e209000000.fj 7977814617:33806107:chr2_band54_s209000000_e215300000.fj 8011620724:32791910:chr2_band55_s215300000_e221500000.fj 8044412634:19689112:chr2_band56_s221500000_e225200000.fj 8064101746:4741805:chr2_band57_s225200000_e226100000.fj 8068843551:25904705:chr2_band58_s226100000_e231000000.fj 8094748256:23619321:chr2_band59_s231000000_e235600000.fj 8118367577:25423194:chr2_band5_s19200000_e24000000.fj 8143790771:9119290:chr2_band60_s235600000_e237300000.fj 8152910061:30796914:chr2_band61_s237300000_e243199373.fj 8183706975:19924674:chr2_band6_s24000000_e27900000.fj 8203631649:11135309:chr2_band7_s27900000_e30000000.fj 8214766958:10940177:chr2_band8_s30000000_e32100000.fj 8225707135:23560118:chr2_band9_s32100000_e36600000.fj 8249267253:14861122:chr3_band0_s0_e2800000.fj 
8264128375:22809815:chr3_band10_s32100000_e36500000.fj 8286938190:15046818:chr3_band11_s36500000_e39400000.fj 8301985008:22186262:chr3_band12_s39400000_e43700000.fj 8324171270:2058080:chr3_band13_s43700000_e44100000.fj 8326229350:521252:chr3_band14_s44100000_e44200000.fj 8326750602:32234144:chr3_band15_s44200000_e50600000.fj 8358984746:8441932:chr3_band16_s50600000_e52300000.fj 8367426678:10948899:chr3_band17_s52300000_e54400000.fj 8378375577:21772898:chr3_band18_s54400000_e58600000.fj 8400148475:27069700:chr3_band19_s58600000_e63700000.fj 8427218175:6545313:chr3_band1_s2800000_e4000000.fj 8433763488:31787795:chr3_band20_s63700000_e69800000.fj 8465551283:23275812:chr3_band21_s69800000_e74200000.fj 8488827095:29739564:chr3_band22_s74200000_e79800000.fj 8518566659:20035093:chr3_band23_s79800000_e83500000.fj 8538601752:20162108:chr3_band24_s83500000_e87200000.fj 8558763860:3767584:chr3_band25_s87200000_e87900000.fj 8562531444:13581503:chr3_band26_s87900000_e91000000.fj 8576112947:7002557:chr3_band27_s91000000_e93900000.fj 8583115504:23576185:chr3_band28_s93900000_e98300000.fj 8606691689:8815871:chr3_band29_s98300000_e100000000.fj 8615507560:24882143:chr3_band2_s4000000_e8700000.fj 8640389703:4697534:chr3_band30_s100000000_e100900000.fj 8645087237:9838940:chr3_band31_s100900000_e102800000.fj 8654926177:18496118:chr3_band32_s102800000_e106200000.fj 8673422295:9018631:chr3_band33_s106200000_e107900000.fj 8682440926:17929166:chr3_band34_s107900000_e111300000.fj 8700370092:11594711:chr3_band35_s111300000_e113500000.fj 8711964803:20308668:chr3_band36_s113500000_e117300000.fj 8732273471:9030401:chr3_band37_s117300000_e119000000.fj 8741303872:14898827:chr3_band38_s119000000_e121900000.fj 8756202699:10008811:chr3_band39_s121900000_e123800000.fj 8766211510:15979710:chr3_band3_s8700000_e11800000.fj 8782191220:10116188:chr3_band40_s123800000_e125800000.fj 8792307408:17806797:chr3_band41_s125800000_e129200000.fj 8810114205:23227207:chr3_band42_s129200000_e133700000.fj 8833341412:10556009:chr3_band43_s133700000_e135700000.fj 8843897421:15182933:chr3_band44_s135700000_e138700000.fj 8859080354:21307590:chr3_band45_s138700000_e142800000.fj 8880387944:32759712:chr3_band46_s142800000_e148900000.fj 8913147656:16878434:chr3_band47_s148900000_e152100000.fj 8930026090:15100163:chr3_band48_s152100000_e155000000.fj 8945126253:10434017:chr3_band49_s155000000_e157000000.fj 8955560270:7785476:chr3_band4_s11800000_e13300000.fj 8963345746:10542610:chr3_band50_s157000000_e159000000.fj 8973888356:8787004:chr3_band51_s159000000_e160700000.fj 8982675360:37253134:chr3_band52_s160700000_e167600000.fj 9019928494:17183652:chr3_band53_s167600000_e170900000.fj 9037112146:25746921:chr3_band54_s170900000_e175700000.fj 9062859067:17296262:chr3_band55_s175700000_e179000000.fj 9080155329:19044817:chr3_band56_s179000000_e182700000.fj 9099200146:9216326:chr3_band57_s182700000_e184500000.fj 9108416472:7709847:chr3_band58_s184500000_e186000000.fj 9116126319:9992471:chr3_band59_s186000000_e187900000.fj 9126118790:16105743:chr3_band5_s13300000_e16400000.fj 9142224533:23723049:chr3_band60_s187900000_e192300000.fj 9165947582:28740659:chr3_band61_s192300000_e198022430.fj 9194688241:39101485:chr3_band6_s16400000_e23900000.fj 9233789726:13179037:chr3_band7_s23900000_e26400000.fj 9246968763:23659026:chr3_band8_s26400000_e30900000.fj 9270627789:6320874:chr3_band9_s30900000_e32100000.fj 9276948663:22624820:chr4_band0_s0_e4500000.fj 9299573483:19209706:chr4_band10_s44600000_e48200000.fj 9318783189:6384513:chr4_band11_s48200000_e50400000.fj 
9325167702:4766253:chr4_band12_s50400000_e52700000.fj 9329933955:35018116:chr4_band13_s52700000_e59500000.fj 9364952071:38549974:chr4_band14_s59500000_e66600000.fj 9403502045:20373460:chr4_band15_s66600000_e70500000.fj 9423875505:29919881:chr4_band16_s70500000_e76300000.fj 9453795386:13493480:chr4_band17_s76300000_e78900000.fj 9467288866:18466490:chr4_band18_s78900000_e82400000.fj 9485755356:8860418:chr4_band19_s82400000_e84100000.fj 9494615774:7798021:chr4_band1_s4500000_e6000000.fj 9502413795:14575657:chr4_band20_s84100000_e86900000.fj 9516989452:5634479:chr4_band21_s86900000_e88000000.fj 9522623931:29718269:chr4_band22_s88000000_e93700000.fj 9552342200:7383995:chr4_band23_s93700000_e95100000.fj 9559726195:19715177:chr4_band24_s95100000_e98800000.fj 9579441372:11922350:chr4_band25_s98800000_e101100000.fj 9591363722:34698356:chr4_band26_s101100000_e107700000.fj 9626062078:33645974:chr4_band27_s107700000_e114100000.fj 9659708052:35587370:chr4_band28_s114100000_e120800000.fj 9695295422:15811642:chr4_band29_s120800000_e123800000.fj 9711107064:27146461:chr4_band2_s6000000_e11300000.fj 9738253525:26736354:chr4_band30_s123800000_e128800000.fj 9764989879:12053649:chr4_band31_s128800000_e131100000.fj 9777043528:45621870:chr4_band32_s131100000_e139500000.fj 9822665398:10457142:chr4_band33_s139500000_e141500000.fj 9833122540:27183032:chr4_band34_s141500000_e146800000.fj 9860305572:8901657:chr4_band35_s146800000_e148500000.fj 9869207229:13650247:chr4_band36_s148500000_e151100000.fj 9882857476:23802908:chr4_band37_s151100000_e155600000.fj 9906660384:33300872:chr4_band38_s155600000_e161800000.fj 9939961256:14822270:chr4_band39_s161800000_e164500000.fj 9954783526:20780182:chr4_band3_s11300000_e15200000.fj 9975563708:29757577:chr4_band40_s164500000_e170100000.fj 10005321285:9439391:chr4_band41_s170100000_e171900000.fj 10014760676:23890991:chr4_band42_s171900000_e176300000.fj 10038651667:6504378:chr4_band43_s176300000_e177500000.fj 10045156045:31429424:chr4_band44_s177500000_e183200000.fj 10076585469:20867286:chr4_band45_s183200000_e187100000.fj 10097452755:21542259:chr4_band46_s187100000_e191154276.fj 10118995014:13635330:chr4_band4_s15200000_e17800000.fj 10132630344:18645443:chr4_band5_s17800000_e21300000.fj 10151275787:33763872:chr4_band6_s21300000_e27700000.fj 10185039659:43935944:chr4_band7_s27700000_e35800000.fj 10228975603:28173344:chr4_band8_s35800000_e41200000.fj 10257148947:17960379:chr4_band9_s41200000_e44600000.fj 10275109326:23869100:chr5_band0_s0_e4500000.fj 10298978426:21475297:chr5_band10_s38400000_e42500000.fj 10320453723:18293853:chr5_band11_s42500000_e46100000.fj 10338747576:1514377:chr5_band12_s46100000_e48400000.fj 10340261953:8509364:chr5_band13_s48400000_e50700000.fj 10348771317:43017890:chr5_band14_s50700000_e58900000.fj 10391789207:20665117:chr5_band15_s58900000_e62900000.fj 10412454324:1591467:chr5_band16_s62900000_e63200000.fj 10414045791:18148759:chr5_band17_s63200000_e66700000.fj 10432194550:8856200:chr5_band18_s66700000_e68400000.fj 10441050750:20707621:chr5_band19_s68400000_e73300000.fj 10461758371:9656450:chr5_band1_s4500000_e6300000.fj 10471414821:18425621:chr5_band20_s73300000_e76900000.fj 10489840442:23196332:chr5_band21_s76900000_e81400000.fj 10513036774:7300891:chr5_band22_s81400000_e82800000.fj 10520337665:49892537:chr5_band23_s82800000_e92300000.fj 10570230202:30721980:chr5_band24_s92300000_e98200000.fj 10600952182:23888340:chr5_band25_s98200000_e102800000.fj 10624840522:9229611:chr5_band26_s102800000_e104500000.fj 
10634070133:27421753:chr5_band27_s104500000_e109600000.fj 10661491886:9899436:chr5_band28_s109600000_e111500000.fj 10671391322:8406659:chr5_band29_s111500000_e113100000.fj 10679797981:18694996:chr5_band2_s6300000_e9800000.fj 10698492977:11028527:chr5_band30_s113100000_e115200000.fj 10709521504:32909679:chr5_band31_s115200000_e121400000.fj 10742431183:30963436:chr5_band32_s121400000_e127300000.fj 10773394619:17266919:chr5_band33_s127300000_e130600000.fj 10790661538:28998009:chr5_band34_s130600000_e136200000.fj 10819659547:16704607:chr5_band35_s136200000_e139500000.fj 10836364154:26045175:chr5_band36_s139500000_e144500000.fj 10862409329:27918575:chr5_band37_s144500000_e149800000.fj 10890327904:15050054:chr5_band38_s149800000_e152700000.fj 10905377958:15603577:chr5_band39_s152700000_e155700000.fj 10920981535:27716393:chr5_band3_s9800000_e15000000.fj 10948697928:22019757:chr5_band40_s155700000_e159900000.fj 10970717685:45797643:chr5_band41_s159900000_e168500000.fj 11016515328:22514380:chr5_band42_s168500000_e172800000.fj 11039029708:19253951:chr5_band43_s172800000_e176600000.fj 11058283659:21229495:chr5_band44_s176600000_e180915260.fj 11079513154:17559372:chr5_band4_s15000000_e18400000.fj 11097072526:25526673:chr5_band5_s18400000_e23300000.fj 11122599199:7096070:chr5_band6_s23300000_e24600000.fj 11129695269:23411851:chr5_band7_s24600000_e28900000.fj 11153107120:26119054:chr5_band8_s28900000_e33800000.fj 11179226174:23290349:chr5_band9_s33800000_e38400000.fj 11202516523:11854057:chr6_band0_s0_e2300000.fj 11214370580:8496414:chr6_band10_s30400000_e32100000.fj 11222866994:7220728:chr6_band11_s32100000_e33500000.fj 11230087722:15866348:chr6_band12_s33500000_e36600000.fj 11245954070:20565771:chr6_band13_s36600000_e40500000.fj 11266519841:29696078:chr6_band14_s40500000_e46200000.fj 11296215919:29661980:chr6_band15_s46200000_e51800000.fj 11325877899:5687860:chr6_band16_s51800000_e52900000.fj 11331565759:21802934:chr6_band17_s52900000_e57000000.fj 11353368693:8125890:chr6_band18_s57000000_e58700000.fj 11361494583:345265:chr6_band19_s58700000_e61000000.fj 11361839848:10003929:chr6_band1_s2300000_e4200000.fj 11371843777:8997133:chr6_band20_s61000000_e63300000.fj 11380840910:550060:chr6_band21_s63300000_e63400000.fj 11381390970:35514558:chr6_band22_s63400000_e70000000.fj 11416905528:30770003:chr6_band23_s70000000_e75900000.fj 11447675531:41661599:chr6_band24_s75900000_e83900000.fj 11489337130:5032680:chr6_band25_s83900000_e84900000.fj 11494369810:15730167:chr6_band26_s84900000_e88000000.fj 11510099977:26698981:chr6_band27_s88000000_e93100000.fj 11536798958:33870086:chr6_band28_s93100000_e99500000.fj 11570669044:5783371:chr6_band29_s99500000_e100600000.fj 11576452415:15201350:chr6_band2_s4200000_e7100000.fj 11591653765:26318508:chr6_band30_s100600000_e105500000.fj 11617972273:47367411:chr6_band31_s105500000_e114600000.fj 11665339684:19419515:chr6_band32_s114600000_e118300000.fj 11684759199:1079105:chr6_band33_s118300000_e118500000.fj 11685838304:40594325:chr6_band34_s118500000_e126100000.fj 11726432629:5183249:chr6_band35_s126100000_e127100000.fj 11731615878:17064012:chr6_band36_s127100000_e130300000.fj 11748679890:4703673:chr6_band37_s130300000_e131200000.fj 11753383563:20937849:chr6_band38_s131200000_e135200000.fj 11774321412:19768577:chr6_band39_s135200000_e139000000.fj 11794089989:18520799:chr6_band3_s7100000_e10600000.fj 11812610788:20084958:chr6_band40_s139000000_e142800000.fj 11832695746:14583555:chr6_band41_s142800000_e145600000.fj 11847279301:17888235:chr6_band42_s145600000_e149000000.fj 
11865167536:18200150:chr6_band43_s149000000_e152500000.fj 11883367686:15899684:chr6_band44_s152500000_e155500000.fj 11899267370:28588964:chr6_band45_s155500000_e161000000.fj 11927856334:18688807:chr6_band46_s161000000_e164500000.fj 11946545141:34299518:chr6_band47_s164500000_e171115067.fj 11980844659:5187494:chr6_band4_s10600000_e11600000.fj 11986032153:9550459:chr6_band5_s11600000_e13400000.fj 11995582612:9425852:chr6_band6_s13400000_e15200000.fj 12005008464:52257569:chr6_band7_s15200000_e25200000.fj 12057266033:8929925:chr6_band8_s25200000_e27000000.fj 12066195958:17556391:chr6_band9_s27000000_e30400000.fj 12083752349:14247713:chr7_band0_s0_e2800000.fj 12098000062:11066306:chr7_band10_s35000000_e37200000.fj 12109066368:32087088:chr7_band11_s37200000_e43300000.fj 12141153456:10668222:chr7_band12_s43300000_e45400000.fj 12151821678:18626376:chr7_band13_s45400000_e49000000.fj 12170448054:7958919:chr7_band14_s49000000_e50500000.fj 12178406973:18713509:chr7_band15_s50500000_e54000000.fj 12197120482:18935900:chr7_band16_s54000000_e58000000.fj 12216056382:261032:chr7_band17_s58000000_e59900000.fj 12216317414:4289180:chr7_band18_s59900000_e61700000.fj 12220606594:22817045:chr7_band19_s61700000_e67000000.fj 12243423639:8953071:chr7_band1_s2800000_e4500000.fj 12252376710:26475183:chr7_band20_s67000000_e72200000.fj 12278851893:21260557:chr7_band21_s72200000_e77500000.fj 12300112450:47850592:chr7_band22_s77500000_e86400000.fj 12347963042:9284520:chr7_band23_s86400000_e88200000.fj 12357247562:15247848:chr7_band24_s88200000_e91100000.fj 12372495410:8580818:chr7_band25_s91100000_e92800000.fj 12381076228:26810427:chr7_band26_s92800000_e98000000.fj 12407886655:27671122:chr7_band27_s98000000_e103800000.fj 12435557777:3665630:chr7_band28_s103800000_e104500000.fj 12439223407:14940321:chr7_band29_s104500000_e107400000.fj 12454163728:12957633:chr7_band2_s4500000_e7300000.fj 12467121361:38098753:chr7_band30_s107400000_e114600000.fj 12505220114:14874016:chr7_band31_s114600000_e117400000.fj 12520094130:19901201:chr7_band32_s117400000_e121100000.fj 12539995331:14314479:chr7_band33_s121100000_e123800000.fj 12554309810:17691683:chr7_band34_s123800000_e127100000.fj 12572001493:10632852:chr7_band35_s127100000_e129200000.fj 12582634345:5806847:chr7_band36_s129200000_e130400000.fj 12588441192:11677084:chr7_band37_s130400000_e132600000.fj 12600118276:29615252:chr7_band38_s132600000_e138200000.fj 12629733528:24913008:chr7_band39_s138200000_e143100000.fj 12654646536:35077014:chr7_band3_s7300000_e13800000.fj 12689723550:23967238:chr7_band40_s143100000_e147900000.fj 12713690788:23454742:chr7_band41_s147900000_e152600000.fj 12737145530:12478502:chr7_band42_s152600000_e155100000.fj 12749624032:21624407:chr7_band43_s155100000_e159138663.fj 12771248439:14556948:chr7_band4_s13800000_e16500000.fj 12785805387:23503963:chr7_band5_s16500000_e20900000.fj 12809309350:23959972:chr7_band6_s20900000_e25500000.fj 12833269322:13023330:chr7_band7_s25500000_e28000000.fj 12846292652:4304022:chr7_band8_s28000000_e28800000.fj 12850596674:32133100:chr7_band9_s28800000_e35000000.fj 12882729774:11110146:chr8_band0_s0_e2200000.fj 12893839920:17322291:chr8_band10_s39700000_e43100000.fj 12911162211:3523440:chr8_band11_s43100000_e45600000.fj 12914685651:8688009:chr8_band12_s45600000_e48100000.fj 12923373660:21279449:chr8_band13_s48100000_e52200000.fj 12944653109:2084644:chr8_band14_s52200000_e52600000.fj 12946737753:14865421:chr8_band15_s52600000_e55500000.fj 12961603174:31742008:chr8_band16_s55500000_e61600000.fj 
12993345182:3139055:chr8_band17_s61600000_e62200000.fj 12996484237:20025220:chr8_band18_s62200000_e66000000.fj 13016509457:10184650:chr8_band19_s66000000_e68000000.fj 13026694107:22530516:chr8_band1_s2200000_e6200000.fj 13049224623:13062826:chr8_band20_s68000000_e70500000.fj 13062287449:17612880:chr8_band21_s70500000_e73900000.fj 13079900329:23191965:chr8_band22_s73900000_e78300000.fj 13103092294:9532882:chr8_band23_s78300000_e80100000.fj 13112625176:23483810:chr8_band24_s80100000_e84600000.fj 13136108986:11316441:chr8_band25_s84600000_e86900000.fj 13147425427:33406608:chr8_band26_s86900000_e93300000.fj 13180832035:29555310:chr8_band27_s93300000_e99000000.fj 13210387345:13315728:chr8_band28_s99000000_e101600000.fj 13223703073:24175332:chr8_band29_s101600000_e106200000.fj 13247878405:30528779:chr8_band2_s6200000_e12700000.fj 13278407184:22764188:chr8_band30_s106200000_e110500000.fj 13301171372:8501850:chr8_band31_s110500000_e112100000.fj 13309673222:30459907:chr8_band32_s112100000_e117700000.fj 13340133129:7937153:chr8_band33_s117700000_e119200000.fj 13348070282:17300095:chr8_band34_s119200000_e122500000.fj 13365370377:25178211:chr8_band35_s122500000_e127300000.fj 13390548588:22053170:chr8_band36_s127300000_e131500000.fj 13412601758:26170716:chr8_band37_s131500000_e136400000.fj 13438772474:18915984:chr8_band38_s136400000_e139900000.fj 13457688458:33011109:chr8_band39_s139900000_e146364022.fj 13490699567:34615593:chr8_band3_s12700000_e19000000.fj 13525315160:22721686:chr8_band4_s19000000_e23300000.fj 13548036846:21858716:chr8_band5_s23300000_e27400000.fj 13569895562:7279298:chr8_band6_s27400000_e28800000.fj 13577174860:40036264:chr8_band7_s28800000_e36500000.fj 13617211124:9223086:chr8_band8_s36500000_e38300000.fj 13626434210:7285487:chr8_band9_s38300000_e39700000.fj 13633719697:11189873:chr9_band0_s0_e2200000.fj 13644909570:15398551:chr9_band10_s33200000_e36300000.fj 13660308121:10859291:chr9_band11_s36300000_e38400000.fj 13671167412:8098913:chr9_band12_s38400000_e41000000.fj 13679266325:7680539:chr9_band13_s41000000_e43600000.fj 13686946864:11204600:chr9_band14_s43600000_e47300000.fj 13698151464:54388:chr9_band15_s47300000_e49000000.fj 13698205852:3468752:chr9_band16_s49000000_e50700000.fj 13701674604:31370255:chr9_band17_s50700000_e65900000.fj 13733044859:8507254:chr9_band18_s65900000_e68700000.fj 13741552113:13175993:chr9_band19_s68700000_e72200000.fj 13754728106:12788834:chr9_band1_s2200000_e4600000.fj 13767516940:9460838:chr9_band20_s72200000_e74000000.fj 13776977778:27323515:chr9_band21_s74000000_e79200000.fj 13804301293:9840418:chr9_band22_s79200000_e81100000.fj 13814141711:15804284:chr9_band23_s81100000_e84100000.fj 13829945995:14087052:chr9_band24_s84100000_e86900000.fj 13844033047:17922209:chr9_band25_s86900000_e90400000.fj 13861955256:6951532:chr9_band26_s90400000_e91800000.fj 13868906788:10179374:chr9_band27_s91800000_e93900000.fj 13879086162:13769787:chr9_band28_s93900000_e96600000.fj 13892855949:13598284:chr9_band29_s96600000_e99300000.fj 13906454233:22891146:chr9_band2_s4600000_e9000000.fj 13929345379:16625559:chr9_band30_s99300000_e102600000.fj 13945970938:29592637:chr9_band31_s102600000_e108200000.fj 13975563575:16101070:chr9_band32_s108200000_e111300000.fj 13991664645:18810443:chr9_band33_s111300000_e114900000.fj 14010475088:14696084:chr9_band34_s114900000_e117700000.fj 14025171172:25738558:chr9_band35_s117700000_e122500000.fj 14050909730:17151669:chr9_band36_s122500000_e125800000.fj 14068061399:23282243:chr9_band37_s125800000_e130300000.fj 
14091343642:15933520:chr9_band38_s130300000_e133500000.fj 14107277162:2539339:chr9_band39_s133500000_e134000000.fj 14109816501:28353605:chr9_band3_s9000000_e14200000.fj 14138170106:9895904:chr9_band40_s134000000_e135900000.fj 14148066010:7847526:chr9_band41_s135900000_e137400000.fj 14155913536:19420968:chr9_band42_s137400000_e141213431.fj 14175334504:12634378:chr9_band4_s14200000_e16600000.fj 14187968882:10157396:chr9_band5_s16600000_e18500000.fj 14198126278:7156443:chr9_band6_s18500000_e19900000.fj 14205282721:29952199:chr9_band7_s19900000_e25600000.fj 14235234920:12640911:chr9_band8_s25600000_e28000000.fj 14247875831:28005247:chr9_band9_s28000000_e33200000.fj 14275881078:61284:chrM_band0_s0_e16571.fj 14275942362:14544713:chrX_band0_s0_e4300000.fj 14290487075:24455427:chrX_band10_s37600000_e42400000.fj 14314942502:19979357:chrX_band11_s42400000_e46400000.fj 14334921859:15780934:chrX_band12_s46400000_e49800000.fj 14350702793:22068346:chrX_band13_s49800000_e54800000.fj 14372771139:15483950:chrX_band14_s54800000_e58100000.fj 14388255089:2245852:chrX_band15_s58100000_e60600000.fj 14390500941:8206011:chrX_band16_s60600000_e63000000.fj 14398706952:7456077:chrX_band17_s63000000_e64600000.fj 14406163029:15544112:chrX_band18_s64600000_e67800000.fj 14421707141:19435511:chrX_band19_s67800000_e71800000.fj 14441142652:8840055:chrX_band1_s4300000_e6000000.fj 14449982707:9325419:chrX_band20_s71800000_e73900000.fj 14459308126:9811498:chrX_band21_s73900000_e76000000.fj 14469119624:42415433:chrX_band22_s76000000_e84600000.fj 14511535057:8048575:chrX_band23_s84600000_e86200000.fj 14519583632:22647045:chrX_band24_s86200000_e91800000.fj 14542230677:7507452:chrX_band25_s91800000_e93500000.fj 14549738129:24532176:chrX_band26_s93500000_e98300000.fj 14574270305:20702445:chrX_band27_s98300000_e102600000.fj 14594972750:5393310:chrX_band28_s102600000_e103700000.fj 14600366060:25038697:chrX_band29_s103700000_e108700000.fj 14625404757:17528792:chrX_band2_s6000000_e9500000.fj 14642933549:39123936:chrX_band30_s108700000_e116500000.fj 14682057485:21530282:chrX_band31_s116500000_e120900000.fj 14703587767:39763257:chrX_band32_s120900000_e128700000.fj 14743351024:8637631:chrX_band33_s128700000_e130400000.fj 14751988655:16073438:chrX_band34_s130400000_e133600000.fj 14768062093:21768801:chrX_band35_s133600000_e138000000.fj 14789830894:11318859:chrX_band36_s138000000_e140300000.fj 14801149753:8828151:chrX_band37_s140300000_e142100000.fj
diff --git a/sdk/go/manifest/testdata/short_manifest b/sdk/go/manifest/testdata/short_manifest
deleted file mode 100644
index e8a0e43c05..0000000000
--- a/sdk/go/manifest/testdata/short_manifest
+++ /dev/null
@@ -1 +0,0 @@
-. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 0:15693477:chr10_band0_s0_e3000000.fj
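For context, each line in the deleted testdata above and in the long fixture earlier is a single Keep manifest stream: a stream name ("." for the top level), one or more block locators of the form <md5>+<size>[+<hints>], and then <offset>:<length>:<filename> file tokens that map files onto the concatenated blocks. A minimal parsing sketch under that reading (not the SDK's actual manifest parser; the hint suffix is omitted from the sample line for brevity):

    import java.util.ArrayList;
    import java.util.List;

    public class ManifestLineSketch {
        public static void main(String[] args) {
            // The single line from the deleted short_manifest fixture, hints dropped.
            String line = ". b746e3d2104645f2f64cd3cc69dd895d+15693477 0:15693477:chr10_band0_s0_e3000000.fj";
            String[] tokens = line.split(" ");
            String streamName = tokens[0];             // "." = top-level stream
            List<String> locators = new ArrayList<>(); // "<md5>+<size>[+<hints>]"
            List<String> files = new ArrayList<>();    // "<offset>:<length>:<filename>"
            for (int i = 1; i < tokens.length; i++) {
                // Block locators start with a 32-hex-digit md5 followed by "+".
                if (tokens[i].matches("[0-9a-f]{32}\\+.*")) {
                    locators.add(tokens[i]);
                } else {
                    files.add(tokens[i]);
                }
            }
            System.out.println(streamName + " blocks=" + locators + " files=" + files);
        }
    }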
diff --git a/sdk/java-v2/build.gradle b/sdk/java-v2/build.gradle
index 5b09db948a..bfb3bc59c8 100644
--- a/sdk/java-v2/build.gradle
+++ b/sdk/java-v2/build.gradle
@@ -21,7 +21,7 @@ dependencies {
api 'com.typesafe:config:1.3.2'
testImplementation 'junit:junit:4.12'
- testImplementation 'org.mockito:mockito-core:3.3.3'
+ testImplementation 'org.mockito:mockito-core:5.17.0'
testImplementation 'org.assertj:assertj-core:3.8.0'
testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'
}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
index 51f2f4a81b..960f397311 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
@@ -45,7 +45,7 @@ abstract class BaseApiClient {
Request.Builder getRequestBuilder() {
return new Request.Builder()
- .addHeader("authorization", String.format("OAuth2 %s", config.getApiToken()))
+ .addHeader("authorization", String.format("Bearer %s", config.getApiToken()))
.addHeader("cache-control", "no-cache");
}
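The change above moves the Java SDK from the legacy "OAuth2" authorization scheme to the standard HTTP Bearer scheme; Arvados servers accept both, so for clients this is a drop-in default change. A self-contained okhttp sketch of the resulting request shape (host and token are placeholders):

    import okhttp3.OkHttpClient;
    import okhttp3.Request;
    import okhttp3.Response;

    public class BearerAuthSketch {
        public static void main(String[] args) throws Exception {
            String apiToken = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; // placeholder token
            Request request = new Request.Builder()
                    .url("https://arvados.example.com/arvados/v1/users/current") // hypothetical cluster
                    .addHeader("authorization", String.format("Bearer %s", apiToken))
                    .addHeader("cache-control", "no-cache")
                    .build();
            try (Response response = new OkHttpClient().newCall(request).execute()) {
                System.out.println(response.code());
            }
        }
    }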
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java
index 75aa9ca309..0e6517f0c0 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java
@@ -14,6 +14,7 @@ import okhttp3.RequestBody;
import org.arvados.client.api.model.Group;
import org.arvados.client.api.model.GroupList;
import org.arvados.client.api.model.argument.ContentsGroup;
+import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.api.model.argument.UntrashGroup;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;
@@ -36,6 +37,15 @@ public class GroupsApiClient extends BaseStandardApiClient {
return callForList(request);
}
+ public GroupList contents(ListArgument listArguments) {
+ this.log.debug("Get {} contents", this.getType().getSimpleName());
+ HttpUrl.Builder urlBuilder = this.getUrlBuilder().addPathSegment("contents");
+ this.addQueryParameters(urlBuilder, listArguments);
+ HttpUrl url = urlBuilder.build();
+ Request request = this.getRequestBuilder().url(url).build();
+ return callForList(request);
+ }
+
public Group untrash(UntrashGroup untrashGroup) {
log.debug("Untrash {} by UUID {}", getType().getSimpleName(), untrashGroup.getUuid());
HttpUrl url = getUrlBuilder().addPathSegment(untrashGroup.getUuid()).addPathSegment("untrash").build();
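A hypothetical call to the new contents overload might look like the sketch below. It assumes ListArgument exposes a lombok-style builder with a limit field, as the SDK's other list endpoints do; adjust the field names if your version differs:

    import org.arvados.client.api.client.GroupsApiClient;
    import org.arvados.client.api.model.GroupList;
    import org.arvados.client.api.model.argument.ListArgument;

    class GroupContentsExample {
        static GroupList firstPageOfContents(GroupsApiClient groupsApiClient) {
            ListArgument listArguments = ListArgument.builder()
                    .limit(50)   // return at most 50 contained items
                    .build();
            return groupsApiClient.contents(listArguments);
        }
    }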
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java
index be30e57843..959fc83ae9 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java
@@ -16,12 +16,10 @@ import java.time.LocalDateTime;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonPropertyOrder({ "href", "kind", "etag", "uuid", "owner_uuid", "created_at", "modified_by_client_uuid",
+@JsonPropertyOrder({ "kind", "etag", "uuid", "owner_uuid", "created_at", "modified_by_client_uuid",
"modified_by_user_uuid", "modified_at", "updated_at" })
public abstract class Item {
- @JsonProperty("href")
- private String href;
@JsonProperty("kind")
private String kind;
@JsonProperty("etag")
@@ -41,10 +39,6 @@ public abstract class Item {
@JsonProperty("updated_at")
private LocalDateTime updatedAt;
- public String getHref() {
- return this.href;
- }
-
public String getKind() {
return this.kind;
}
@@ -81,10 +75,6 @@ public abstract class Item {
return this.updatedAt;
}
- public void setHref(String href) {
- this.href = href;
- }
-
public void setKind(String kind) {
this.kind = kind;
}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java
index b15a3628f2..91b919b50e 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java
@@ -14,15 +14,13 @@ import com.fasterxml.jackson.annotation.JsonPropertyOrder;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonPropertyOrder({ "kind", "etag", "self_link", "offset", "limit", "items_available" })
+@JsonPropertyOrder({ "kind", "etag", "offset", "limit", "items_available" })
public class ItemList {
@JsonProperty("kind")
private String kind;
@JsonProperty("etag")
private String etag;
- @JsonProperty("self_link")
- private String selfLink;
@JsonProperty("offset")
private Object offset;
@JsonProperty("limit")
@@ -38,10 +36,6 @@ public class ItemList {
return this.etag;
}
- public String getSelfLink() {
- return this.selfLink;
- }
-
public Object getOffset() {
return this.offset;
}
@@ -62,10 +56,6 @@ public class ItemList {
this.etag = etag;
}
- public void setSelfLink(String selfLink) {
- this.selfLink = selfLink;
- }
-
public void setOffset(Object offset) {
this.offset = offset;
}
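Removing href and self_link stays compatible with older servers because Item and ItemList are annotated @JsonIgnoreProperties(ignoreUnknown = true): responses that still carry those fields deserialize cleanly and the extra fields are dropped. A small sketch under that assumption (a bare ObjectMapper suffices here because the sample payload has no date fields):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.arvados.client.api.model.Collection;

    class LegacyFieldExample {
        public static void main(String[] args) throws Exception {
            // An old-style payload that still includes "href".
            String legacyJson = "{\"href\":\"/collections/xxxxx-4zz18-000000000000000\","
                    + "\"kind\":\"arvados#collection\",\"uuid\":\"xxxxx-4zz18-000000000000000\"}";
            Collection c = new ObjectMapper().readValue(legacyJson, Collection.class);
            System.out.println(c.getUuid());  // "href" was ignored; uuid parsed normally
        }
    }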
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java
index 5c86a07bdf..19c85c0cab 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java
@@ -17,7 +17,7 @@ import java.util.List;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "email", "username", "full_name", "first_name", "last_name", "identity_url", "is_active", "is_admin", "is_invited",
- "prefs", "writable_by", "default_owner_uuid" })
+ "prefs", "writable_by" })
public class User extends Item {
@JsonProperty("email")
@@ -42,8 +42,6 @@ public class User extends Item {
private Object prefs;
@JsonProperty("writable_by")
private List writableBy;
- @JsonProperty("default_owner_uuid")
- private Boolean defaultOwnerUuid;
public String getEmail() {
return this.email;
@@ -89,10 +87,6 @@ public class User extends Item {
return this.writableBy;
}
- public Boolean getDefaultOwnerUuid() {
- return this.defaultOwnerUuid;
- }
-
public void setEmail(String email) {
this.email = email;
}
@@ -137,11 +131,7 @@ public class User extends Item {
this.writableBy = writableBy;
}
- public void setDefaultOwnerUuid(Boolean defaultOwnerUuid) {
- this.defaultOwnerUuid = defaultOwnerUuid;
- }
-
public String toString() {
- return "User(email=" + this.getEmail() + ", username=" + this.getUsername() + ", fullName=" + this.getFullName() + ", firstName=" + this.getFirstName() + ", lastName=" + this.getLastName() + ", identityUrl=" + this.getIdentityUrl() + ", isActive=" + this.getIsActive() + ", isAdmin=" + this.getIsAdmin() + ", isInvited=" + this.getIsInvited() + ", prefs=" + this.getPrefs() + ", writableBy=" + this.getWritableBy() + ", defaultOwnerUuid=" + this.getDefaultOwnerUuid() + ")";
+ return "User(email=" + this.getEmail() + ", username=" + this.getUsername() + ", fullName=" + this.getFullName() + ", firstName=" + this.getFirstName() + ", lastName=" + this.getLastName() + ", identityUrl=" + this.getIdentityUrl() + ", isActive=" + this.getIsActive() + ", isAdmin=" + this.getIsAdmin() + ", isInvited=" + this.getIsInvited() + ", prefs=" + this.getPrefs() + ", writableBy=" + this.getWritableBy() + ")";
}
}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
index 8b65cebc59..366c74d37d 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
@@ -320,6 +320,25 @@ public class ArvadosFacade {
return createdProject;
}
+
+ /**
+ * Creates new project that will be a subproject of "home" for the specified owner.
+ *
+ * @param ownerUuid uuid of owner for subproject
+ * @param projectName name for the newly created subproject
+ * @return Group object containing information about created project
+ * (mapped from JSON returned from server after creating the project)
+ */
+ public Group createNewSubProject(String ownerUuid, String projectName) {
+ Group project = new Group();
+ project.setName(projectName);
+ project.setGroupClass(PROJECT);
+ project.setOwnerUuid(ownerUuid);
+ Group createdProject = groupsApiClient.create(project);
+ this.log.debug("Project " + createdProject.getName() + " created with UUID: " + createdProject.getUuid());
+ return createdProject;
+ }
+
/**
* Deletes collection with specified uuid.
*
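A hypothetical use of the new helper, with a placeholder owner UUID (any user or project UUID the caller can write to):

    import org.arvados.client.api.model.Group;
    import org.arvados.client.facade.ArvadosFacade;

    class SubProjectExample {
        static Group makeResultsFolder(ArvadosFacade facade) {
            String ownerUuid = "xxxxx-tpzed-000000000000000"; // placeholder
            // The new project is owned by ownerUuid, i.e. it lands under that owner's "home".
            return facade.createNewSubProject(ownerUuid, "analysis-results");
        }
    }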
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java
index cc409983ba..2530d7b73e 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java
@@ -120,7 +120,7 @@ public class KeepClient {
private List mapNewServices(Map rootsMap, KeepLocator locator,
boolean forceRebuild, boolean needWritable, Map headers) {
- headers.putIfAbsent("Authorization", String.format("OAuth2 %s", config.getApiToken()));
+ headers.putIfAbsent("Authorization", String.format("Bearer %s", config.getApiToken()));
List localRoots = weightedServiceRoots(locator, forceRebuild, needWritable);
for (String root : localRoots) {
FileTransferHandler keepServiceLocal = new FileTransferHandler(root, headers, config);
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java
index ac7dd02795..8f7a6d022b 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java
@@ -32,7 +32,7 @@ public final class ApiClientTestUtils {
}
public static void assertAuthorizationHeader(RecordedRequest request) {
- assertThat(request.getHeader("authorization")).isEqualTo("OAuth2 " + new FileConfigProvider().getApiToken());
+ assertThat(request.getHeader("authorization")).isEqualTo("Bearer " + new FileConfigProvider().getApiToken());
}
public static void assertRequestPath(RecordedRequest request, String subPath) {
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json
index 68dce30206..eabed4a86e 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json
@@ -1,5 +1,4 @@
{
- "href": "/collections/112ci-4zz18-12tncxzptzbec1p",
"kind": "arvados#collection",
"etag": "bqoujj7oybdx0jybwvtsebj7y",
"uuid": "112ci-4zz18-12tncxzptzbec1p",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json
index 57a2ee5a5b..d5994a5f11 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json
@@ -1,5 +1,4 @@
{
- "href": "/collections/112ci-4zz18-12tncxzptzbec1p",
"kind": "arvados#collection",
"etag": "bqoujj7oybdx0jybwvtsebj7y",
"uuid": "112ci-4zz18-12tncxzptzbec1p",
@@ -19,4 +18,4 @@
"delete_at": null,
"trash_at": null,
"is_trashed": false
-}
\ No newline at end of file
+}
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json
index 1fed3832b0..2b7392a291 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json
@@ -1,5 +1,4 @@
{
- "href": "/collections/ardev-4zz18-jk5vo4uo9u5vj52",
"kind": "arvados#collection",
"etag": "2vm76dxmzr23u9774iguuxsrg",
"uuid": "ardev-4zz18-jk5vo4uo9u5vj52",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json
index e8fdd83e71..8361c14eb4 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json
@@ -1,5 +1,4 @@
{
- "href": "/collections/112ci-4zz18-p51w7z3fpopo6sm",
"kind": "arvados#collection",
"etag": "52tk5yg024cwhkkcidu3zcmj2",
"uuid": "112ci-4zz18-p51w7z3fpopo6sm",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json
index 86a3bdafbb..9d219e5048 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#collectionList",
"etag": "",
- "self_link": "",
"offset": 0,
"limit": 100,
"items": [
{
- "href": "/collections/112ci-4zz18-x6xfmvz0chnkzgv",
"kind": "arvados#collection",
"etag": "8xyiwnih5b5vzmj5sa33348a7",
"uuid": "112ci-4zz18-x6xfmvz0chnkzgv",
@@ -27,7 +25,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-p51w7z3fpopo6sm",
"kind": "arvados#collection",
"etag": "8cmhep8aixe4p42pxjoct5502",
"uuid": "112ci-4zz18-p51w7z3fpopo6sm",
@@ -48,7 +45,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-xb6gf2yraln7cwa",
"kind": "arvados#collection",
"etag": "de2ol2dyvsba3mn46al760cyg",
"uuid": "112ci-4zz18-xb6gf2yraln7cwa",
@@ -69,7 +65,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-r5jfktpn3a9o0ap",
"kind": "arvados#collection",
"etag": "dby68gd0vatvi090cu0axvtq3",
"uuid": "112ci-4zz18-r5jfktpn3a9o0ap",
@@ -90,7 +85,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-nqxk8xjn6mtskzt",
"kind": "arvados#collection",
"etag": "2b34uzau862w862a2rv36agv6",
"uuid": "112ci-4zz18-nqxk8xjn6mtskzt",
@@ -111,7 +105,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-rs9bcf5qnyfjrkm",
"kind": "arvados#collection",
"etag": "60aywazztwfspnasltufcjxpa",
"uuid": "112ci-4zz18-rs9bcf5qnyfjrkm",
@@ -132,7 +125,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-af656lee4kv7q2m",
"kind": "arvados#collection",
"etag": "1jward6snif3tsjzftxh8hvwh",
"uuid": "112ci-4zz18-af656lee4kv7q2m",
@@ -153,7 +145,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-y2zqix7k9an7nro",
"kind": "arvados#collection",
"etag": "zs2n4zliu6nb5yk3rw6h5ugw",
"uuid": "112ci-4zz18-y2zqix7k9an7nro",
@@ -174,7 +165,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-wq77jfi62u5i4rv",
"kind": "arvados#collection",
"etag": "eijhemzgy44ofmu0dtrowl604",
"uuid": "112ci-4zz18-wq77jfi62u5i4rv",
@@ -195,7 +185,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-unaeckkjgeg7ui0",
"kind": "arvados#collection",
"etag": "1oq7ye0gfbf3ih6y864w3n683",
"uuid": "112ci-4zz18-unaeckkjgeg7ui0",
@@ -216,7 +205,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-5y6atonkxq55lms",
"kind": "arvados#collection",
"etag": "4qmqlro878yx8q7ikhilo8qwn",
"uuid": "112ci-4zz18-5y6atonkxq55lms",
@@ -237,7 +225,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-b3fjqd01pxjvseo",
"kind": "arvados#collection",
"etag": "91v698hngoz241c38bbmh0ogc",
"uuid": "112ci-4zz18-b3fjqd01pxjvseo",
@@ -258,7 +245,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-cwfxl8h41q18n65",
"kind": "arvados#collection",
"etag": "215t842ckrrgjpxrxr4j0gsui",
"uuid": "112ci-4zz18-cwfxl8h41q18n65",
@@ -279,7 +265,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-uv4xu08739tn1vy",
"kind": "arvados#collection",
"etag": "90z6i3oqv197osng3wvjjir3t",
"uuid": "112ci-4zz18-uv4xu08739tn1vy",
@@ -300,7 +285,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-pzisn8c5mefzczv",
"kind": "arvados#collection",
"etag": "5lcf6wvc3wypwobswdz22wen",
"uuid": "112ci-4zz18-pzisn8c5mefzczv",
@@ -321,7 +305,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-mj24uwtnqqrno27",
"kind": "arvados#collection",
"etag": "98s08xew49avui1gy3mzit8je",
"uuid": "112ci-4zz18-mj24uwtnqqrno27",
@@ -342,7 +325,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-oco162516upgqng",
"kind": "arvados#collection",
"etag": "a09wnvl4i51xqx7u9yf4qbi94",
"uuid": "112ci-4zz18-oco162516upgqng",
@@ -363,7 +345,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-tlze7dgczsdwkep",
"kind": "arvados#collection",
"etag": "4ee2xudbc5rkr597drgu9tg10",
"uuid": "112ci-4zz18-tlze7dgczsdwkep",
@@ -384,7 +365,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-nq0kxi9d7w64la1",
"kind": "arvados#collection",
"etag": "5aa3evnbceo3brnps2e1sq8ts",
"uuid": "112ci-4zz18-nq0kxi9d7w64la1",
@@ -405,7 +385,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-fks9mewtw155pvx",
"kind": "arvados#collection",
"etag": "97vicgogv8bovmk4s2jymsdq",
"uuid": "112ci-4zz18-fks9mewtw155pvx",
@@ -426,7 +405,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-kp356e0q2wdl2df",
"kind": "arvados#collection",
"etag": "btktwjclv063s1rd6duvk51v3",
"uuid": "112ci-4zz18-kp356e0q2wdl2df",
@@ -447,7 +425,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-0ey8ob38xf7surq",
"kind": "arvados#collection",
"etag": "bob83na42pufqli1a5buxryvm",
"uuid": "112ci-4zz18-0ey8ob38xf7surq",
@@ -468,7 +445,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-wu2n0fv3cewna1n",
"kind": "arvados#collection",
"etag": "7pl1x327eeutqtsjppdj284g8",
"uuid": "112ci-4zz18-wu2n0fv3cewna1n",
@@ -489,7 +465,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-hyybo6yuvkx4hrm",
"kind": "arvados#collection",
"etag": "2wg1wn2o18ubrgbhbqwwsslhf",
"uuid": "112ci-4zz18-hyybo6yuvkx4hrm",
@@ -510,7 +485,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-h3gjq7gzd4syanw",
"kind": "arvados#collection",
"etag": "8jk0at4e69cwjyjamvm4wz2oj",
"uuid": "112ci-4zz18-h3gjq7gzd4syanw",
@@ -531,7 +505,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-jinwyyaeigjs1yg",
"kind": "arvados#collection",
"etag": "be57zhzufz2hp1tbdwidoro5j",
"uuid": "112ci-4zz18-jinwyyaeigjs1yg",
@@ -552,7 +525,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-etf8aghyxlfxvo1",
"kind": "arvados#collection",
"etag": "29lj2roie4cygo5ffgrduflly",
"uuid": "112ci-4zz18-etf8aghyxlfxvo1",
@@ -573,7 +545,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-jtbn4edpkkhbm9b",
"kind": "arvados#collection",
"etag": "6div78e1nhusii4x1xkp3rg2v",
"uuid": "112ci-4zz18-jtbn4edpkkhbm9b",
@@ -594,7 +565,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-whdleimp34hiqp6",
"kind": "arvados#collection",
"etag": "12wlbsxlmy3sze4v2m0ua7ake",
"uuid": "112ci-4zz18-whdleimp34hiqp6",
@@ -615,7 +585,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-kj8dz72zpo5kbtm",
"kind": "arvados#collection",
"etag": "9bv1bw9afb3w84gu55uzcgd6h",
"uuid": "112ci-4zz18-kj8dz72zpo5kbtm",
@@ -636,7 +605,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-tr306nau9hrr437",
"kind": "arvados#collection",
"etag": "683d77tvlhe97etk9bk2bx8ds",
"uuid": "112ci-4zz18-tr306nau9hrr437",
@@ -657,7 +625,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-oxuk69569mxztp0",
"kind": "arvados#collection",
"etag": "1m34v9jbna2v7gv7auio54i8w",
"uuid": "112ci-4zz18-oxuk69569mxztp0",
@@ -678,7 +645,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-wf8sl6xbyfwjyer",
"kind": "arvados#collection",
"etag": "7l2a9fhqmxg7ghn7osx0s19v4",
"uuid": "112ci-4zz18-wf8sl6xbyfwjyer",
@@ -699,7 +665,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-drpia2es1hp9ydi",
"kind": "arvados#collection",
"etag": "33dw426fhs2vlb50b6301ukn0",
"uuid": "112ci-4zz18-drpia2es1hp9ydi",
@@ -720,7 +685,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-5b4px2i2dwyidfi",
"kind": "arvados#collection",
"etag": "2437tnhn2gmti52lpm8nfq9ct",
"uuid": "112ci-4zz18-5b4px2i2dwyidfi",
@@ -741,7 +705,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-94oslnwnxe1f9wp",
"kind": "arvados#collection",
"etag": "7e0k48zu93o57zudxjp1yrgjq",
"uuid": "112ci-4zz18-94oslnwnxe1f9wp",
@@ -762,7 +725,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-2fk0d5d4jjc1fmq",
"kind": "arvados#collection",
"etag": "cuirr803f54e89reakuq50oaq",
"uuid": "112ci-4zz18-2fk0d5d4jjc1fmq",
@@ -783,7 +745,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-xp9pu81xyc5h422",
"kind": "arvados#collection",
"etag": "3bi5xd8ezxrazk5266cwzn4s4",
"uuid": "112ci-4zz18-xp9pu81xyc5h422",
@@ -804,7 +765,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-znb4lo0if2as58c",
"kind": "arvados#collection",
"etag": "59uaoxy6uh82i6lrvr3ht8gz1",
"uuid": "112ci-4zz18-znb4lo0if2as58c",
@@ -825,7 +785,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-6pvl5ea5u932qzi",
"kind": "arvados#collection",
"etag": "dksrh8jznxoaidl29i1vv5904",
"uuid": "112ci-4zz18-6pvl5ea5u932qzi",
@@ -846,7 +805,6 @@
"is_trashed": false
},
{
- "href": "/collections/112ci-4zz18-wq5pyrxfv1t9isu",
"kind": "arvados#collection",
"etag": "1w1rhhd6oql4ceb7h9t16sf0q",
"uuid": "112ci-4zz18-wq5pyrxfv1t9isu",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json
index f1834e749c..e69b493761 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json
@@ -1,5 +1,4 @@
{
- "href": "/groups/ardev-j7d0g-bmg3pfqtx3ivczp",
"kind": "arvados#group",
"etag": "3hw0vk4mbl0ofvia5k6x4dwrx",
"uuid": "ardev-j7d0g-bmg3pfqtx3ivczp",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json
index fa74e1cb53..6a18d37143 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#groupList",
"etag": "",
- "self_link": "",
"offset": 0,
"limit": 100,
"items": [
{
- "href": "/groups/ardev-j7d0g-ylx7wnu1moge2di",
"kind": "arvados#group",
"etag": "68vubv3iw7663763bozxebmyf",
"uuid": "ardev-j7d0g-ylx7wnu1moge2di",
@@ -27,7 +25,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-mnzhga726itrbrq",
"kind": "arvados#group",
"etag": "68q7r8r37u9hckr2zsynvton3",
"uuid": "ardev-j7d0g-mnzhga726itrbrq",
@@ -48,7 +45,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-0w9m1sz46ljtdnm",
"kind": "arvados#group",
"etag": "ef4vzx5gyudkrg9zml0zdv6qu",
"uuid": "ardev-j7d0g-0w9m1sz46ljtdnm",
@@ -69,7 +65,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-r20iem5ou6h5wao",
"kind": "arvados#group",
"etag": "6h6h4ta6yyf9058delxk8fnqs",
"uuid": "ardev-j7d0g-r20iem5ou6h5wao",
@@ -90,7 +85,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-j7drd8yikkp6evd",
"kind": "arvados#group",
"etag": "6se2y8f9o7uu06pbopgq56xds",
"uuid": "ardev-j7d0g-j7drd8yikkp6evd",
@@ -111,7 +105,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-kh1g7i5va870xt0",
"kind": "arvados#group",
"etag": "2si26vaig3vig9266pqkqh2gy",
"uuid": "ardev-j7d0g-kh1g7i5va870xt0",
@@ -132,7 +125,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-sclkdyuwm4h2m78",
"kind": "arvados#group",
"etag": "edgnz6q0vt2u3o13ujtfohb75",
"uuid": "ardev-j7d0g-sclkdyuwm4h2m78",
@@ -153,7 +145,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-593khc577zuyyhe",
"kind": "arvados#group",
"etag": "39ig9ttgec6lbe096uetn2cb9",
"uuid": "ardev-j7d0g-593khc577zuyyhe",
@@ -174,7 +165,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-iotds0tm559dbz7",
"kind": "arvados#group",
"etag": "1dpr8v6tx6pta0fozq93eyeou",
"uuid": "ardev-j7d0g-iotds0tm559dbz7",
@@ -195,7 +185,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-gbqay74778tonb8",
"kind": "arvados#group",
"etag": "dizbavs2opfe1wpx6thocfki0",
"uuid": "ardev-j7d0g-gbqay74778tonb8",
@@ -216,7 +205,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-fmq1t0jlznehbdm",
"kind": "arvados#group",
"etag": "6xue8m3lx9qpptfvdf13val5t",
"uuid": "ardev-j7d0g-fmq1t0jlznehbdm",
@@ -237,7 +225,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-vxju56ch64u51gq",
"kind": "arvados#group",
"etag": "2gqix9e4m023usi9exhrsjx6z",
"uuid": "ardev-j7d0g-vxju56ch64u51gq",
@@ -258,7 +245,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-g8m4w0d22gv6fbj",
"kind": "arvados#group",
"etag": "73n8x82814o6ihld0kltf468d",
"uuid": "ardev-j7d0g-g8m4w0d22gv6fbj",
@@ -279,7 +265,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-lstqed4y78khaqm",
"kind": "arvados#group",
"etag": "91f7uwq7pj3d3ez1u4smjg3ch",
"uuid": "ardev-j7d0g-lstqed4y78khaqm",
@@ -300,7 +285,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-0jbezvnq8i07l7p",
"kind": "arvados#group",
"etag": "7dbxhvbcfaogwnvo8k4mtqthk",
"uuid": "ardev-j7d0g-0jbezvnq8i07l7p",
@@ -321,7 +305,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-72dxer22g6iltqz",
"kind": "arvados#group",
"etag": "dhfu203rckzdzvx832wm7jv59",
"uuid": "ardev-j7d0g-72dxer22g6iltqz",
@@ -342,7 +325,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-nebzwquxtq1v3o5",
"kind": "arvados#group",
"etag": "7l9oxbdf4e1m9ddnujokf7czz",
"uuid": "ardev-j7d0g-nebzwquxtq1v3o5",
@@ -363,7 +345,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-5589c8dmxevecqh",
"kind": "arvados#group",
"etag": "83862x2o4453mja2rvypjl5gv",
"uuid": "ardev-j7d0g-5589c8dmxevecqh",
@@ -384,7 +365,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-bmg3pfqtx3ivczp",
"kind": "arvados#group",
"etag": "3hw0vk4mbl0ofvia5k6x4dwrx",
"uuid": "ardev-j7d0g-bmg3pfqtx3ivczp",
@@ -405,7 +385,6 @@
"is_trashed": false
},
{
- "href": "/groups/ardev-j7d0g-mfitz2oa4rpycou",
"kind": "arvados#group",
"etag": "6p9xbxpttj782mpqs537gfvc6",
"uuid": "ardev-j7d0g-mfitz2oa4rpycou",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json
index d5bd0d83d1..04077ac39d 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#keepServiceList",
"etag": "",
- "self_link": "",
"offset": null,
"limit": null,
"items": [
{
- "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
"kind": "arvados#keepService",
"etag": "bjzh7og2d9z949lbd38vnnslt",
"uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
@@ -22,7 +20,6 @@
"read_only": false
},
{
- "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
"kind": "arvados#keepService",
"etag": "7m64l69kko4bytpsykf8cay7t",
"uuid": "112ci-bi6l4-f0r03wrqymotwql",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json
index 3d95cf932f..e3b7fa8df6 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#keepServiceList",
"etag": "",
- "self_link": "",
"offset": null,
"limit": null,
"items": [
{
- "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
"kind": "arvados#keepService",
"etag": "bjzh7og2d9z949lbd38vnnslt",
"uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
@@ -22,7 +20,6 @@
"read_only": false
},
{
- "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
"kind": "arvados#keepService",
"etag": "7m64l69kko4bytpsykf8cay7t",
"uuid": "112ci-bi6l4-f0r03wrqymotwql",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json
index f3c289497c..78e1938f6a 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json
@@ -1,5 +1,4 @@
{
- "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
"kind": "arvados#keepService",
"etag": "bjzh7og2d9z949lbd38vnnslt",
"uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json
index 90ba91631e..410ca66985 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#keepServiceList",
"etag": "",
- "self_link": "",
"offset": 0,
"limit": 100,
"items": [
{
- "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
"kind": "arvados#keepService",
"etag": "7m64l69kko4bytpsykf8cay7t",
"uuid": "112ci-bi6l4-f0r03wrqymotwql",
@@ -22,7 +20,6 @@
"read_only": false
},
{
- "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
"kind": "arvados#keepService",
"etag": "bjzh7og2d9z949lbd38vnnslt",
"uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
@@ -38,7 +35,6 @@
"read_only": false
},
{
- "href": "/keep_services/112ci-bi6l4-ko27cfbsf2ssx2m",
"kind": "arvados#keepService",
"etag": "4be61qkpt6nzdfff4vj9nkpmj",
"uuid": "112ci-bi6l4-ko27cfbsf2ssx2m",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json
index c930ee2ce1..8e3117401d 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json
@@ -1,7 +1,6 @@
{
"kind": "arvados#keepServiceList",
"etag": "",
- "self_link": "",
"offset": null,
"limit": null,
"items": [],
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json
index 0664d886f1..aab534d6c6 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json
@@ -1,5 +1,4 @@
{
- "href": "/links/arkau-o0j2j-huxuaxbi46s1yml",
"kind": "arvados#link",
"etag": "zw1rlnbig0kpm9btw8us3pn9",
"uuid": "arkau-o0j2j-huxuaxbi46s1yml",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json
index 25f63bda65..e2392df23a 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json
@@ -1,5 +1,4 @@
{
- "href": "/links/arkau-o0j2j-huxuaxbi46s1yml",
"kind": "arvados#link",
"etag": "zw1rlnbig0kpm9btw8us3pn9",
"uuid": "arkau-o0j2j-huxuaxbi46s1yml",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json
index e720ecf49c..fccabdf47c 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json
@@ -1,12 +1,10 @@
{
"kind": "arvados#linkList",
"etag": "",
- "self_link": "",
"offset": 0,
"limit": 100,
"items": [
{
- "href": "/links/arkau-o0j2j-x2b4rdadxs2fizn",
"kind": "arvados#link",
"etag": "dkhtr9tvp9zfy0d90xjn7w1t7",
"uuid": "arkau-o0j2j-x2b4rdadxs2fizn",
@@ -24,7 +22,6 @@
"properties": {}
},
{
- "href": "/links/arkau-o0j2j-r5am4lz9gnu488k",
"kind": "arvados#link",
"etag": "9nt0c2xn5oz1jzjzawlycmehz",
"uuid": "arkau-o0j2j-r5am4lz9gnu488k",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json
index 87d09ab961..2d3b19172e 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json
@@ -1,5 +1,4 @@
{
- "href": "/users/ardev-tpzed-q6dvn7sby55up1b",
"kind": "arvados#user",
"etag": "b21emst9eu9u1wdpqcz6la583",
"uuid": "ardev-tpzed-q6dvn7sby55up1b",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json
index 87d09ab961..2d3b19172e 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json
@@ -1,5 +1,4 @@
{
- "href": "/users/ardev-tpzed-q6dvn7sby55up1b",
"kind": "arvados#user",
"etag": "b21emst9eu9u1wdpqcz6la583",
"uuid": "ardev-tpzed-q6dvn7sby55up1b",
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json
index 2ff1ded00f..88140e2e32 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json
@@ -1,7 +1,6 @@
{
"kind": "arvados#userList",
"etag": "",
- "self_link": "",
"offset": 0,
"limit": 100,
"items": [
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json
index 38441c588d..5d2972415e 100644
--- a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json
@@ -1,5 +1,4 @@
{
- "href": "/users/ardev-tpzed-000000000000000",
"kind": "arvados#user",
"etag": "2ehmra38iwfuexvz1cjno5xua",
"uuid": "ardev-tpzed-000000000000000",
diff --git a/sdk/java-v2/test-in-docker.sh b/sdk/java-v2/test-in-docker.sh
index 7af3d32c49..d43ae90ab3 100755
--- a/sdk/java-v2/test-in-docker.sh
+++ b/sdk/java-v2/test-in-docker.sh
@@ -1,10 +1,12 @@
-#!/bin/bash -x
+#!/bin/bash
#
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
-#
+
set -e
+set -u
+set -o pipefail
commit_at_dir() {
git log -n1 --format=%H .
@@ -14,7 +16,7 @@ build_version() {
# Output the version being built, or if we're building a
# dev/prerelease, output a version number based on the git log for
# the current working directory.
- if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+ if [[ -n "${ARVADOS_BUILDING_VERSION:-}" ]]; then
echo "$ARVADOS_BUILDING_VERSION"
return
fi
@@ -22,4 +24,8 @@ build_version() {
$WORKSPACE/build/version-at-commit.sh $(commit_at_dir)
}
-exec docker run --rm --user $UID -v $PWD:$PWD -w $PWD gradle:5.3.1 /bin/sh -c 'gradle clean && gradle test && gradle jar install '"-Pversion=$(build_version) $gradle_upload"
+exec docker run --rm \
+ --user "$(id -u)" \
+ --volume "$PWD:/home/arvados-java" \
+ --workdir /home/arvados-java \
+ gradle:6 ./test-inside-docker.sh "-Pversion=$(build_version)" "$@"
diff --git a/sdk/java-v2/test-inside-docker.sh b/sdk/java-v2/test-inside-docker.sh
new file mode 100755
index 0000000000..1da3c43bc2
--- /dev/null
+++ b/sdk/java-v2/test-inside-docker.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+
+set -e
+set -u
+set -o pipefail
+
+declare -a gradle_opts=()
+declare -a gradle_tasks=(clean test jar install)
+
+if ! grep -E '^signing\.[[:alpha:]]+=[^[:space:]]' gradle.properties >/dev/null
+then
+ gradle_opts+=(--exclude-task=signArchives)
+fi
+
+for arg in "$@"
+do
+ case "$arg" in
+ -*) gradle_opts+=("$arg") ;;
+ *) gradle_tasks+=("$arg") ;;
+ esac
+done
+
+set -x
+exec gradle "${gradle_opts[@]}" "${gradle_tasks[@]}"
diff --git a/sdk/python/README.rst b/sdk/python/README.rst
index e40866c624..0a2db99d01 100644
--- a/sdk/python/README.rst
+++ b/sdk/python/README.rst
@@ -46,24 +46,43 @@ You can test the change by running::
arv-get --version
-Installing on Debian systems
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-1. Add this Arvados repository to your sources list::
-
- deb http://apt.arvados.org/ buster main
-
-2. Update your package list.
-
-3. Install the ``python3-arvados-python-client`` package.
+Installing on Debian and Ubuntu systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arvados publishes packages for Debian 11 "bullseye," Debian 12 "bookworm," Ubuntu 20.04 "focal," and Ubuntu 22.04 "jammy." You can install the Python SDK package on any of these distributions by running the following commands::
+
+ sudo install -d /etc/apt/keyrings
+ sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
+ sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF
+ Types: deb
+ URIs: https://apt.arvados.org/$(lsb_release -cs)
+ Suites: $(lsb_release -cs)
+ Components: main
+ Signed-By: /etc/apt/keyrings/arvados.asc
+ EOF
+ sudo apt update
+ sudo apt install python3-arvados-python-client
+
+Installing on Red Hat, AlmaLinux, and Rocky Linux systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arvados publishes packages for Red Hat Enterprise Linux 8 and distributions based on it. You can install the Python SDK package on any of these distributions by running the following commands::
+
+ sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'
+ [arvados]
+ name=Arvados
+ baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/
+ gpgcheck=1
+ gpgkey=https://rpm.arvados.org/RHEL/RPM-GPG-KEY-arvados
+ EOF
+ sudo dnf install python3-arvados-python-client
Configuration
-------------
This client software needs two pieces of information to connect to
Arvados: the DNS name of the API server, and an API authorization
-token. You can set these in environment variables, or the file
-``$HOME/.config/arvados/settings.conf``. `The Arvados user
+token. `The Arvados user
documentation
<https://doc.arvados.org/user/reference/api-tokens.html>`_ describes
how to find this information in the Arvados Workbench, and install it
diff --git a/sdk/python/arvados-v1-discovery.json b/sdk/python/arvados-v1-discovery.json
index 232c88d067..b1fd3112be 100644
--- a/sdk/python/arvados-v1-discovery.json
+++ b/sdk/python/arvados-v1-discovery.json
@@ -50,59 +50,65 @@
},
"protocol": "rest",
"resources": {
- "api_clients": {
+ "api_client_authorizations": {
"methods": {
"get": {
- "id": "arvados.api_clients.get",
- "path": "api_clients/{uuid}",
+ "id": "arvados.api_client_authorizations.get",
+ "path": "api_client_authorizations/{uuid}",
"httpMethod": "GET",
- "description": "Gets a ApiClient's metadata by UUID.",
+ "description": "Get a ApiClientAuthorization record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClient in question.",
+ "description": "The UUID of the ApiClientAuthorization to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.api_clients.list",
- "path": "api_clients",
+ "list": {
+ "id": "arvados.api_client_authorizations.list",
+ "path": "api_client_authorizations",
"httpMethod": "GET",
- "description": "List ApiClients.\n\n The list
method returns a\n resource list of\n matching ApiClients. For example:\n\n \n {\n \"kind\":\"arvados#apiClientList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a ApiClientAuthorizationList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -110,45 +116,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "ApiClientList"
+ "$ref": "ApiClientAuthorizationList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -156,27 +163,27 @@
]
},
"create": {
- "id": "arvados.api_clients.create",
- "path": "api_clients",
+ "id": "arvados.api_client_authorizations.create",
+ "path": "api_client_authorizations",
"httpMethod": "POST",
- "description": "Create a new ApiClient.",
+ "description": "Create a new ApiClientAuthorization.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
}
@@ -184,33 +191,33 @@
"request": {
"required": true,
"properties": {
- "api_client": {
- "$ref": "ApiClient"
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
}
}
},
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.api_clients.update",
- "path": "api_clients/{uuid}",
+ "id": "arvados.api_client_authorizations.update",
+ "path": "api_client_authorizations/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing ApiClient.",
+ "description": "Update attributes of an existing ApiClientAuthorization.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClient in question.",
+ "description": "The UUID of the ApiClientAuthorization to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -218,158 +225,67 @@
"request": {
"required": true,
"properties": {
- "api_client": {
- "$ref": "ApiClient"
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
}
}
},
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.api_clients.delete",
- "path": "api_clients/{uuid}",
+ "id": "arvados.api_client_authorizations.delete",
+ "path": "api_client_authorizations/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing ApiClient.",
+ "description": "Delete an existing ApiClientAuthorization.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClient in question.",
+ "description": "The UUID of the ApiClientAuthorization to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "list": {
- "id": "arvados.api_clients.list",
- "path": "api_clients",
- "httpMethod": "GET",
- "description": "List ApiClients.\n\n The list
method returns a\n resource list of\n matching ApiClients. For example:\n\n \n {\n \"kind\":\"arvados#apiClientList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "ApiClientList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.api_clients.show",
- "path": "api_clients/{uuid}",
- "httpMethod": "GET",
- "description": "show api_clients",
+ "create_system_auth": {
+ "id": "arvados.api_client_authorizations.create_system_auth",
+ "path": "api_client_authorizations/create_system_auth",
+ "httpMethod": "POST",
+ "description": "Create a token for the system (\"root\") user.",
"parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
+ "scopes": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
"required": false,
+ "default": "[\"all\"]",
+ "description": "An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n",
"location": "query"
}
},
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.api_clients.destroy",
- "path": "api_clients/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy api_clients",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
+ "current": {
+ "id": "arvados.api_client_authorizations.current",
+ "path": "api_client_authorizations/current",
+ "httpMethod": "GET",
+ "description": "Return all metadata for the token used to authorize this request.",
+ "parameters": {},
"response": {
- "$ref": "ApiClient"
+ "$ref": "ApiClientAuthorization"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -377,59 +293,65 @@
}
}
},
- "api_client_authorizations": {
+ "authorized_keys": {
"methods": {
"get": {
- "id": "arvados.api_client_authorizations.get",
- "path": "api_client_authorizations/{uuid}",
+ "id": "arvados.authorized_keys.get",
+ "path": "authorized_keys/{uuid}",
"httpMethod": "GET",
- "description": "Gets a ApiClientAuthorization's metadata by UUID.",
+ "description": "Get a AuthorizedKey record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClientAuthorization in question.",
+ "description": "The UUID of the AuthorizedKey to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "AuthorizedKey"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.api_client_authorizations.list",
- "path": "api_client_authorizations",
+ "list": {
+ "id": "arvados.authorized_keys.list",
+ "path": "authorized_keys",
"httpMethod": "GET",
- "description": "List ApiClientAuthorizations.\n\n The list
method returns a\n resource list of\n matching ApiClientAuthorizations. For example:\n\n \n {\n \"kind\":\"arvados#apiClientAuthorizationList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a AuthorizedKeyList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -437,45 +359,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "ApiClientAuthorizationList"
+ "$ref": "AuthorizedKeyList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -483,27 +406,27 @@
]
},
"create": {
- "id": "arvados.api_client_authorizations.create",
- "path": "api_client_authorizations",
+ "id": "arvados.authorized_keys.create",
+ "path": "authorized_keys",
"httpMethod": "POST",
- "description": "Create a new ApiClientAuthorization.",
+ "description": "Create a new AuthorizedKey.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
}
@@ -511,33 +434,33 @@
"request": {
"required": true,
"properties": {
- "api_client_authorization": {
- "$ref": "ApiClientAuthorization"
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
}
}
},
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "AuthorizedKey"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.api_client_authorizations.update",
- "path": "api_client_authorizations/{uuid}",
+ "id": "arvados.authorized_keys.update",
+ "path": "authorized_keys/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing ApiClientAuthorization.",
+ "description": "Update attributes of an existing AuthorizedKey.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClientAuthorization in question.",
+ "description": "The UUID of the AuthorizedKey to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -545,104 +468,106 @@
"request": {
"required": true,
"properties": {
- "api_client_authorization": {
- "$ref": "ApiClientAuthorization"
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
}
}
},
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "AuthorizedKey"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.api_client_authorizations.delete",
- "path": "api_client_authorizations/{uuid}",
+ "id": "arvados.authorized_keys.delete",
+ "path": "authorized_keys/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing ApiClientAuthorization.",
+ "description": "Delete an existing AuthorizedKey.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ApiClientAuthorization in question.",
+ "description": "The UUID of the AuthorizedKey to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "AuthorizedKey"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
- },
- "create_system_auth": {
- "id": "arvados.api_client_authorizations.create_system_auth",
- "path": "api_client_authorizations/create_system_auth",
- "httpMethod": "POST",
- "description": "create_system_auth api_client_authorizations",
+ }
+ }
+ },
+ "collections": {
+ "methods": {
+ "get": {
+ "id": "arvados.collections.get",
+ "path": "collections/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Collection record by UUID.",
"parameters": {
- "api_client_id": {
- "type": "integer",
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
- "description": "",
"location": "query"
},
- "scopes": {
- "type": "array",
+ "include_trash": {
+ "type": "boolean",
"required": false,
- "description": "",
+ "default": "false",
+ "description": "Show collection even if its `is_trashed` attribute is true.",
"location": "query"
}
},
+ "parameterOrder": [
+ "uuid"
+ ],
"response": {
- "$ref": "ApiClientAuthorization"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "current": {
- "id": "arvados.api_client_authorizations.current",
- "path": "api_client_authorizations/current",
- "httpMethod": "GET",
- "description": "current api_client_authorizations",
- "parameters": {},
- "response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "Collection"
},
"scopes": [
- "https://api.arvados.org/auth/arvados"
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
]
},
"list": {
- "id": "arvados.api_client_authorizations.list",
- "path": "api_client_authorizations",
+ "id": "arvados.collections.list",
+ "path": "collections",
"httpMethod": "GET",
- "description": "List ApiClientAuthorizations.\n\n The list
method returns a\n resource list of\n matching ApiClientAuthorizations. For example:\n\n \n {\n \"kind\":\"arvados#apiClientAuthorizationList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a CollectionList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -650,92 +575,276 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include collections whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include past collection versions.",
"location": "query"
}
},
"response": {
- "$ref": "ApiClientAuthorizationList"
+ "$ref": "CollectionList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "show": {
- "id": "arvados.api_client_authorizations.show",
- "path": "api_client_authorizations/{uuid}",
- "httpMethod": "GET",
- "description": "show api_client_authorizations",
+ "create": {
+ "id": "arvados.collections.create",
+ "path": "collections",
+ "httpMethod": "POST",
+ "description": "Create a new Collection.",
"parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
"select": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
- }
- },
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "replace_segments": {
+ "type": "object",
+ "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "Collection"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.api_client_authorizations.destroy",
- "path": "api_client_authorizations/{uuid}",
+ "update": {
+ "id": "arvados.collections.update",
+ "path": "collections/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "replace_segments": {
+ "type": "object",
+ "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.collections.delete",
+ "path": "collections/{uuid}",
"httpMethod": "DELETE",
- "description": "destroy api_client_authorizations",
+ "description": "Delete an existing Collection.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Collection to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "ApiClientAuthorization"
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "provenance": {
+ "id": "arvados.collections.provenance",
+ "path": "collections/{uuid}/provenance",
+ "httpMethod": "GET",
+ "description": "Detail the provenance of a given collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "used_by": {
+ "id": "arvados.collections.used_by",
+ "path": "collections/{uuid}/used_by",
+ "httpMethod": "GET",
+ "description": "Detail where a given collection has been used.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "trash": {
+ "id": "arvados.collections.trash",
+ "path": "collections/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "Trash a collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "untrash": {
+ "id": "arvados.collections.untrash",
+ "path": "collections/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "Untrash a collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -743,59 +852,129 @@
}
}
},
- "authorized_keys": {
+ "computed_permissions": {
+ "methods": {
+ "list": {
+ "id": "arvados.computed_permissions.list",
+ "path": "computed_permissions",
+ "httpMethod": "GET",
+ "description": "Retrieve a ComputedPermissionList.",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ComputedPermissionList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
+ "containers": {
"methods": {
"get": {
- "id": "arvados.authorized_keys.get",
- "path": "authorized_keys/{uuid}",
+ "id": "arvados.containers.get",
+ "path": "containers/{uuid}",
"httpMethod": "GET",
- "description": "Gets a AuthorizedKey's metadata by UUID.",
+ "description": "Get a Container record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the AuthorizedKey in question.",
+ "description": "The UUID of the Container to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.authorized_keys.list",
- "path": "authorized_keys",
+ "list": {
+ "id": "arvados.containers.list",
+ "path": "containers",
"httpMethod": "GET",
- "description": "List AuthorizedKeys.\n\n The list
method returns a\n resource list of\n matching AuthorizedKeys. For example:\n\n \n {\n \"kind\":\"arvados#authorizedKeyList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a ContainerList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -803,45 +982,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "AuthorizedKeyList"
+ "$ref": "ContainerList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -849,27 +1029,27 @@
]
},
"create": {
- "id": "arvados.authorized_keys.create",
- "path": "authorized_keys",
+ "id": "arvados.containers.create",
+ "path": "containers",
"httpMethod": "POST",
- "description": "Create a new AuthorizedKey.",
+ "description": "Create a new Container.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
}
@@ -877,33 +1057,33 @@
"request": {
"required": true,
"properties": {
- "authorized_key": {
- "$ref": "AuthorizedKey"
+ "container": {
+ "$ref": "Container"
}
}
},
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.authorized_keys.update",
- "path": "authorized_keys/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing AuthorizedKey.",
+ "id": "arvados.containers.update",
+ "path": "containers/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Container.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the AuthorizedKey in question.",
+ "description": "The UUID of the Container to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -911,158 +1091,146 @@
"request": {
"required": true,
"properties": {
- "authorized_key": {
- "$ref": "AuthorizedKey"
+ "container": {
+ "$ref": "Container"
}
}
},
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.authorized_keys.delete",
- "path": "authorized_keys/{uuid}",
+ "id": "arvados.containers.delete",
+ "path": "containers/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing AuthorizedKey.",
+ "description": "Delete an existing Container.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the AuthorizedKey in question.",
+ "description": "The UUID of the Container to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "list": {
- "id": "arvados.authorized_keys.list",
- "path": "authorized_keys",
+ "auth": {
+ "id": "arvados.containers.auth",
+ "path": "containers/{uuid}/auth",
"httpMethod": "GET",
- "description": "List AuthorizedKeys.\n\n The list
method returns a\n resource list of\n matching AuthorizedKeys. For example:\n\n \n {\n \"kind\":\"arvados#authorizedKeyList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Get the API client authorization token associated with this container.",
"parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
+ "uuid": {
"type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
+ "description": "The UUID of the Container to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "lock": {
+ "id": "arvados.containers.lock",
+ "path": "containers/{uuid}/lock",
+ "httpMethod": "POST",
+ "description": "Lock a container (for a dispatcher to begin running it).",
+ "parameters": {
+ "uuid": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
+ "description": "The UUID of the Container to update.",
+ "required": true,
+ "location": "path"
}
},
"response": {
- "$ref": "AuthorizedKeyList"
+ "$ref": "Container"
},
"scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
+ "https://api.arvados.org/auth/arvados"
]
},
- "show": {
- "id": "arvados.authorized_keys.show",
- "path": "authorized_keys/{uuid}",
- "httpMethod": "GET",
- "description": "show authorized_keys",
+ "unlock": {
+ "id": "arvados.containers.unlock",
+ "path": "containers/{uuid}/unlock",
+ "httpMethod": "POST",
+ "description": "Unlock a container (for a dispatcher to stop running it).",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Container to update.",
"required": true,
"location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
}
},
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.authorized_keys.destroy",
- "path": "authorized_keys/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy authorized_keys",
+ "update_priority": {
+ "id": "arvados.containers.update_priority",
+ "path": "containers/{uuid}/update_priority",
+ "httpMethod": "POST",
+ "description": "Recalculate and return the priority of a given container.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Container to update.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "AuthorizedKey"
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "secret_mounts": {
+ "id": "arvados.containers.secret_mounts",
+ "path": "containers/{uuid}/secret_mounts",
+ "httpMethod": "GET",
+ "description": "Return secret mount information for the container associated with the API token authorizing this request.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.containers.current",
+ "path": "containers/current",
+ "httpMethod": "GET",
+ "description": "Return the container record associated with the API token authorizing this request.",
+ "parameters": {},
+ "response": {
+ "$ref": "Container"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -1070,59 +1238,72 @@
}
}
},
- "collections": {
+ "container_requests": {
"methods": {
"get": {
- "id": "arvados.collections.get",
- "path": "collections/{uuid}",
+ "id": "arvados.container_requests.get",
+ "path": "container_requests/{uuid}",
"httpMethod": "GET",
- "description": "Gets a Collection's metadata by UUID.",
+ "description": "Get a ContainerRequest record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Collection in question.",
+ "description": "The UUID of the ContainerRequest to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show container request even if its owner project is trashed.",
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "Collection"
+ "$ref": "ContainerRequest"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.collections.list",
- "path": "collections",
+ "list": {
+ "id": "arvados.container_requests.list",
+ "path": "container_requests",
"httpMethod": "GET",
- "description": "List Collections.\n\n The list
method returns a\n resource list of\n matching Collections. For example:\n\n \n {\n \"kind\":\"arvados#collectionList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a ContainerRequestList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -1130,59 +1311,53 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- },
- "include_trash": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include collections whose is_trashed attribute is true.",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
},
- "include_old_versions": {
+ "include_trash": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include past collection versions.",
+ "description": "Include container requests whose owner project is trashed.",
"location": "query"
}
},
"response": {
- "$ref": "CollectionList"
+ "$ref": "ContainerRequestList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -1190,227 +1365,181 @@
]
},
"create": {
- "id": "arvados.collections.create",
- "path": "collections",
+ "id": "arvados.container_requests.create",
+ "path": "container_requests",
"httpMethod": "POST",
- "description": "Create a new Collection.",
+ "description": "Create a new ContainerRequest.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
- },
- "replace_files": {
- "type": "object",
- "description": "Files and directories to initialize/replace with content from other collections.",
- "required": false,
- "location": "query",
- "properties": {},
- "additionalProperties": {
- "type": "string"
- }
}
},
"request": {
"required": true,
"properties": {
- "collection": {
- "$ref": "Collection"
+ "container_request": {
+ "$ref": "ContainerRequest"
}
}
},
"response": {
- "$ref": "Collection"
+ "$ref": "ContainerRequest"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.collections.update",
- "path": "collections/{uuid}",
+ "id": "arvados.container_requests.update",
+ "path": "container_requests/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing Collection.",
+ "description": "Update attributes of an existing ContainerRequest.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Collection in question.",
+ "description": "The UUID of the ContainerRequest to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
- },
- "replace_files": {
- "type": "object",
- "description": "Files and directories to initialize/replace with content from other collections.",
- "required": false,
- "location": "query",
- "properties": {},
- "additionalProperties": {
- "type": "string"
- }
}
},
"request": {
"required": true,
"properties": {
- "collection": {
- "$ref": "Collection"
+ "container_request": {
+ "$ref": "ContainerRequest"
}
}
},
"response": {
- "$ref": "Collection"
+ "$ref": "ContainerRequest"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.collections.delete",
- "path": "collections/{uuid}",
+ "id": "arvados.container_requests.delete",
+ "path": "container_requests/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing Collection.",
+ "description": "Delete an existing ContainerRequest.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Collection in question.",
+ "description": "The UUID of the ContainerRequest to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Collection"
+ "$ref": "ContainerRequest"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "provenance": {
- "id": "arvados.collections.provenance",
- "path": "collections/{uuid}/provenance",
+ "container_status": {
+ "id": "arvados.container_requests.container_status",
+ "path": "container_requests/{uuid}/container_status",
"httpMethod": "GET",
- "description": "provenance collections",
+ "description": "Return scheduling details for a container request.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
"required": true,
- "location": "path"
+ "description": "The UUID of the container request to query.",
+ "location": "query"
}
},
"response": {
- "$ref": "Collection"
+ "$ref": "ContainerRequest"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
- },
- "used_by": {
- "id": "arvados.collections.used_by",
- "path": "collections/{uuid}/used_by",
- "httpMethod": "GET",
- "description": "used_by collections",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Collection"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "trash": {
- "id": "arvados.collections.trash",
- "path": "collections/{uuid}/trash",
- "httpMethod": "POST",
- "description": "trash collections",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Collection"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "untrash": {
- "id": "arvados.collections.untrash",
- "path": "collections/{uuid}/untrash",
- "httpMethod": "POST",
- "description": "untrash collections",
+ }
+ }
+ },
+ "credentials": {
+ "methods": {
+ "get": {
+ "id": "arvados.credentials.get",
+ "path": "credentials/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Credential record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Credential to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
+ "parameterOrder": [
+ "uuid"
+ ],
"response": {
- "$ref": "Collection"
+ "$ref": "Credential"
},
"scopes": [
- "https://api.arvados.org/auth/arvados"
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
]
},
"list": {
- "id": "arvados.collections.list",
- "path": "collections",
+ "id": "arvados.credentials.list",
+ "path": "credentials",
"httpMethod": "GET",
- "description": "List Collections.\n\n The list
method returns a\n resource list of\n matching Collections. For example:\n\n \n {\n \"kind\":\"arvados#collectionList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a CredentialList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -1418,120 +1547,162 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
- },
- "include_trash": {
- "type": "boolean",
+ }
+ },
+ "response": {
+ "$ref": "CredentialList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.credentials.create",
+ "path": "credentials",
+ "httpMethod": "POST",
+ "description": "Create a new Credential.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
- "default": "false",
- "description": "Include collections whose is_trashed attribute is true.",
"location": "query"
},
- "include_old_versions": {
+ "ensure_unique_name": {
"type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
"required": false,
- "default": "false",
- "description": "Include past collection versions.",
- "location": "query"
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "credential": {
+ "$ref": "Credential"
+ }
}
},
"response": {
- "$ref": "CollectionList"
+ "$ref": "Credential"
},
"scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
+ "https://api.arvados.org/auth/arvados"
]
},
- "show": {
- "id": "arvados.collections.show",
- "path": "collections/{uuid}",
- "httpMethod": "GET",
- "description": "show collections",
+ "update": {
+ "id": "arvados.credentials.update",
+ "path": "credentials/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Credential.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Credential to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- },
- "include_trash": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "Show collection even if its is_trashed attribute is true.",
- "location": "query"
- },
- "include_old_versions": {
- "type": "boolean",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
- "default": "true",
- "description": "Include past collection versions.",
"location": "query"
}
},
+ "request": {
+ "required": true,
+ "properties": {
+ "credential": {
+ "$ref": "Credential"
+ }
+ }
+ },
"response": {
- "$ref": "Collection"
+ "$ref": "Credential"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.collections.destroy",
- "path": "collections/{uuid}",
+ "delete": {
+ "id": "arvados.credentials.delete",
+ "path": "credentials/{uuid}",
"httpMethod": "DELETE",
- "description": "destroy collections",
+ "description": "Delete an existing Credential.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Credential to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Collection"
+ "$ref": "Credential"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "secret": {
+ "id": "arvados.credentials.secret",
+ "path": "credentials/{uuid}/secret",
+ "httpMethod": "GET",
+ "description": "Fetch the secret part of the credential (can only be invoked by running containers).",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Credential to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Credential"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -1539,59 +1710,72 @@
}
}
},
- "containers": {
+ "groups": {
"methods": {
"get": {
- "id": "arvados.containers.get",
- "path": "containers/{uuid}",
+ "id": "arvados.groups.get",
+ "path": "groups/{uuid}",
"httpMethod": "GET",
- "description": "Gets a Container's metadata by UUID.",
+ "description": "Get a Group record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Container in question.",
+ "description": "The UUID of the Group to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Return group/project even if its `is_trashed` attribute is true.",
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.containers.list",
- "path": "containers",
+ "list": {
+ "id": "arvados.groups.list",
+ "path": "groups",
"httpMethod": "GET",
- "description": "List Containers.\n\n The list
method returns a\n resource list of\n matching Containers. For example:\n\n \n {\n \"kind\":\"arvados#containerList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a GroupList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -1599,45 +1783,53 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
"location": "query"
}
},
"response": {
- "$ref": "ContainerList"
+ "$ref": "GroupList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -1645,240 +1837,261 @@
]
},
"create": {
- "id": "arvados.containers.create",
- "path": "containers",
+ "id": "arvados.groups.create",
+ "path": "groups",
"httpMethod": "POST",
- "description": "Create a new Container.",
+ "description": "Create a new Group.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "If true, cluster permission will not be updated immediately, but instead at the next configured update interval."
}
},
"request": {
"required": true,
"properties": {
- "container": {
- "$ref": "Container"
+ "group": {
+ "$ref": "Group"
}
}
},
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.containers.update",
- "path": "containers/{uuid}",
+ "id": "arvados.groups.update",
+ "path": "groups/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing Container.",
+ "description": "Update attributes of an existing Group.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Container in question.",
+ "description": "The UUID of the Group to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "If true, cluster permission will not be updated immediately, but instead at the next configured update interval."
}
},
"request": {
"required": true,
"properties": {
- "container": {
- "$ref": "Container"
+ "group": {
+ "$ref": "Group"
}
}
},
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.containers.delete",
- "path": "containers/{uuid}",
+ "id": "arvados.groups.delete",
+ "path": "groups/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing Container.",
+ "description": "Delete an existing Group.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Container in question.",
+ "description": "The UUID of the Group to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "auth": {
- "id": "arvados.containers.auth",
- "path": "containers/{uuid}/auth",
+ "contents": {
+ "id": "arvados.groups.contents",
+ "path": "groups/contents",
"httpMethod": "GET",
- "description": "auth containers",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Container"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "lock": {
- "id": "arvados.containers.lock",
- "path": "containers/{uuid}/lock",
- "httpMethod": "POST",
- "description": "lock containers",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Container"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "unlock": {
- "id": "arvados.containers.unlock",
- "path": "containers/{uuid}/unlock",
- "httpMethod": "POST",
- "description": "unlock containers",
+ "description": "List objects that belong to a group.",
"parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Container"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update_priority": {
- "id": "arvados.containers.update_priority",
- "path": "containers/{uuid}/update_priority",
- "httpMethod": "POST",
- "description": "update_priority containers",
- "parameters": {
- "uuid": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return from each matching object.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
+ "location": "query"
+ },
+ "count": {
"type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Container"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "secret_mounts": {
- "id": "arvados.containers.secret_mounts",
- "path": "containers/{uuid}/secret_mounts",
- "httpMethod": "GET",
- "description": "secret_mounts containers",
- "parameters": {
+ "required": false,
+ "default": "exact",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster to return objects from",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
"uuid": {
"type": "string",
- "description": "",
- "required": true,
- "location": "path"
+ "required": false,
+ "default": "",
+ "description": "If given, limit the listing to objects owned by the\nuser or group with this UUID.",
+ "location": "query"
+ },
+ "recursive": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, include contents from child groups recursively.",
+ "location": "query"
+ },
+ "include": {
+ "type": "array",
+ "required": false,
+ "description": "An array of referenced objects to include in the `included` field of the response. Supported values in the array are:\n\n * `\"container_uuid\"`\n * `\"owner_uuid\"`\n * `\"collection_uuid\"`\n\n",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, include past versions of collections in the listing.",
+ "location": "query"
+ },
+ "exclude_home_project": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "If true, exclude contents of the user's home project from the listing.\nCalling this method with this flag set is how clients enumerate objects shared\nwith the current user.",
+ "location": "query"
}
},
"response": {
- "$ref": "Container"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "current": {
- "id": "arvados.containers.current",
- "path": "containers/current",
- "httpMethod": "GET",
- "description": "current containers",
- "parameters": {},
- "response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "list": {
- "id": "arvados.containers.list",
- "path": "containers",
+ "shared": {
+ "id": "arvados.groups.shared",
+ "path": "groups/shared",
"httpMethod": "GET",
- "description": "List Containers.\n\n The list
method returns a\n resource list of\n matching Containers. For example:\n\n \n {\n \"kind\":\"arvados#containerList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "List groups that the current user can access via permission links.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -1886,92 +2099,99 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose `is_trashed` attribute is true.",
+ "location": "query"
+ },
+ "include": {
+ "type": "string",
+ "required": false,
+ "description": "A string naming referenced objects to include in the `included` field of the response. Supported values are:\n\n * `\"owner_uuid\"`\n\n",
"location": "query"
}
},
"response": {
- "$ref": "ContainerList"
+ "$ref": "Group"
},
"scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
+ "https://api.arvados.org/auth/arvados"
]
},
- "show": {
- "id": "arvados.containers.show",
- "path": "containers/{uuid}",
- "httpMethod": "GET",
- "description": "show containers",
+ "trash": {
+ "id": "arvados.groups.trash",
+ "path": "groups/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "Trash a group.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Group to update.",
"required": true,
"location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
}
},
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.containers.destroy",
- "path": "containers/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy containers",
+ "untrash": {
+ "id": "arvados.groups.untrash",
+ "path": "groups/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "Untrash a group.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Group to update.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Container"
+ "$ref": "Group"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -1979,59 +2199,65 @@
}
}
},
- "container_requests": {
+ "keep_services": {
"methods": {
"get": {
- "id": "arvados.container_requests.get",
- "path": "container_requests/{uuid}",
+ "id": "arvados.keep_services.get",
+ "path": "keep_services/{uuid}",
"httpMethod": "GET",
- "description": "Gets a ContainerRequest's metadata by UUID.",
+ "description": "Get a KeepService record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ContainerRequest in question.",
+ "description": "The UUID of the KeepService to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "KeepService"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.container_requests.list",
- "path": "container_requests",
+ "list": {
+ "id": "arvados.keep_services.list",
+ "path": "keep_services",
"httpMethod": "GET",
- "description": "List ContainerRequests.\n\n The list
method returns a\n resource list of\n matching ContainerRequests. For example:\n\n \n {\n \"kind\":\"arvados#containerRequestList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a KeepServiceList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -2039,52 +2265,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- },
- "include_trash": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include container requests whose owner project is trashed.",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "ContainerRequestList"
+ "$ref": "KeepServiceList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -2092,27 +2312,27 @@
]
},
"create": {
- "id": "arvados.container_requests.create",
- "path": "container_requests",
+ "id": "arvados.keep_services.create",
+ "path": "keep_services",
"httpMethod": "POST",
- "description": "Create a new ContainerRequest.",
+ "description": "Create a new KeepService.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
}
@@ -2120,33 +2340,33 @@
"request": {
"required": true,
"properties": {
- "container_request": {
- "$ref": "ContainerRequest"
+ "keep_service": {
+ "$ref": "KeepService"
}
}
},
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "KeepService"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.container_requests.update",
- "path": "container_requests/{uuid}",
+ "id": "arvados.keep_services.update",
+ "path": "keep_services/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing ContainerRequest.",
+ "description": "Update attributes of an existing KeepService.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ContainerRequest in question.",
+ "description": "The UUID of the KeepService to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -2154,85 +2374,112 @@
"request": {
"required": true,
"properties": {
- "container_request": {
- "$ref": "ContainerRequest"
+ "keep_service": {
+ "$ref": "KeepService"
}
}
},
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "KeepService"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.container_requests.delete",
- "path": "container_requests/{uuid}",
+ "id": "arvados.keep_services.delete",
+ "path": "keep_services/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing ContainerRequest.",
+ "description": "Delete an existing KeepService.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the ContainerRequest in question.",
+ "description": "The UUID of the KeepService to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "KeepService"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "container_status": {
- "id": "arvados.container_requests.container_status",
- "path": "container_requests/{uuid}/container_status",
+ "accessible": {
+ "id": "arvados.keep_services.accessible",
+ "path": "keep_services/accessible",
"httpMethod": "GET",
- "description": "container_status container_requests",
- "parameters": {
- "uuid": {
- "type": "string",
- "required": true,
- "description": "The UUID of the ContainerRequest in question.",
- "location": "query"
- }
- },
+ "description": "List Keep services that the current client can access.",
+ "parameters": {},
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "KeepService"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
- },
- "list": {
- "id": "arvados.container_requests.list",
- "path": "container_requests",
- "httpMethod": "GET",
- "description": "List ContainerRequests.\n\n The list
method returns a\n resource list of\n matching ContainerRequests. For example:\n\n \n {\n \"kind\":\"arvados#containerRequestList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ }
+ }
+ },
+ "links": {
+ "methods": {
+ "get": {
+ "id": "arvados.links.get",
+ "path": "links/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a Link record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "list": {
+ "id": "arvados.links.list",
+ "path": "links",
+ "httpMethod": "GET",
+ "description": "Retrieve a LinkList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -2240,106 +2487,162 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- },
- "include_trash": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include container requests whose owner project is trashed.",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "ContainerRequestList"
+ "$ref": "LinkList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "show": {
- "id": "arvados.container_requests.show",
- "path": "container_requests/{uuid}",
- "httpMethod": "GET",
- "description": "show container_requests",
+ "create": {
+ "id": "arvados.links.create",
+ "path": "links",
+ "httpMethod": "POST",
+ "description": "Create a new Link.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.links.update",
+ "path": "links/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Link.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Link to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- },
- "include_trash": {
- "type": "boolean",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
- "default": "false",
- "description": "Show container request even if its owner project is trashed.",
"location": "query"
}
},
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "Link"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.container_requests.destroy",
- "path": "container_requests/{uuid}",
+ "delete": {
+ "id": "arvados.links.delete",
+ "path": "links/{uuid}",
"httpMethod": "DELETE",
- "description": "destroy container_requests",
+ "description": "Delete an existing Link.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the Link to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "ContainerRequest"
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_permissions": {
+ "id": "arvados.links.get_permissions",
+ "path": "permissions/{uuid}",
+ "httpMethod": "GET",
+ "description": "List permissions granted on an Arvados object.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link to query.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -2347,59 +2650,65 @@
}
}
},
- "groups": {
+ "logs": {
"methods": {
"get": {
- "id": "arvados.groups.get",
- "path": "groups/{uuid}",
+ "id": "arvados.logs.get",
+ "path": "logs/{uuid}",
"httpMethod": "GET",
- "description": "Gets a Group's metadata by UUID.",
+ "description": "Get a Log record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Group in question.",
+ "description": "The UUID of the Log to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "Group"
+ "$ref": "Log"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.groups.list",
- "path": "groups",
+ "list": {
+ "id": "arvados.logs.list",
+ "path": "logs",
"httpMethod": "GET",
- "description": "List Groups.\n\n The list
method returns a\n resource list of\n matching Groups. For example:\n\n \n {\n \"kind\":\"arvados#groupList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a LogList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -2407,52 +2716,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- },
- "include_trash": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include items whose is_trashed attribute is true.",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "GroupList"
+ "$ref": "LogList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -2460,141 +2763,161 @@
]
},
"create": {
- "id": "arvados.groups.create",
- "path": "groups",
+ "id": "arvados.logs.create",
+ "path": "logs",
"httpMethod": "POST",
- "description": "Create a new Group.",
+ "description": "Create a new Log.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
- },
- "async": {
- "required": false,
- "type": "boolean",
- "location": "query",
- "default": "false",
- "description": "defer permissions update"
}
},
"request": {
"required": true,
"properties": {
- "group": {
- "$ref": "Group"
+ "log": {
+ "$ref": "Log"
}
}
},
"response": {
- "$ref": "Group"
+ "$ref": "Log"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.groups.update",
- "path": "groups/{uuid}",
+ "id": "arvados.logs.update",
+ "path": "logs/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing Group.",
+ "description": "Update attributes of an existing Log.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Group in question.",
+ "description": "The UUID of the Log to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
- },
- "async": {
- "required": false,
- "type": "boolean",
- "location": "query",
- "default": "false",
- "description": "defer permissions update"
}
},
"request": {
"required": true,
"properties": {
- "group": {
- "$ref": "Group"
+ "log": {
+ "$ref": "Log"
}
}
},
"response": {
- "$ref": "Group"
+ "$ref": "Log"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.groups.delete",
- "path": "groups/{uuid}",
+ "id": "arvados.logs.delete",
+ "path": "logs/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing Group.",
+ "description": "Delete an existing Log.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Group in question.",
+ "description": "The UUID of the Log to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Group"
+ "$ref": "Log"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
+ }
+ }
+ },
+ "users": {
+ "methods": {
+ "get": {
+ "id": "arvados.users.get",
+ "path": "users/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a User record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
},
- "contents": {
- "id": "arvados.groups.contents",
- "path": "groups/contents",
+ "list": {
+ "id": "arvados.users.list",
+ "path": "users",
"httpMethod": "GET",
- "description": "contents groups",
+ "description": "Retrieve a UserList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -2602,409 +2925,371 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- },
- "include_trash": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "Include items whose is_trashed attribute is true.",
- "location": "query"
- },
- "uuid": {
- "type": "string",
- "required": false,
- "default": "",
- "description": "",
- "location": "query"
- },
- "recursive": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include contents from child groups recursively.",
- "location": "query"
- },
- "include": {
- "type": "string",
- "required": false,
- "description": "Include objects referred to by listed field in \"included\" (only owner_uuid).",
- "location": "query"
- },
- "include_old_versions": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "Include past collection versions.",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "Group"
+ "$ref": "UserList"
},
"scopes": [
- "https://api.arvados.org/auth/arvados"
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
]
},
- "shared": {
- "id": "arvados.groups.shared",
- "path": "groups/shared",
- "httpMethod": "GET",
- "description": "shared groups",
+ "create": {
+ "id": "arvados.users.create",
+ "path": "users",
+ "httpMethod": "POST",
+ "description": "Create a new User.",
"parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
- "distinct": {
+ "ensure_unique_name": {
"type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
"required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
+ "default": "false"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.users.update",
+ "path": "users/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing User.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to update.",
+ "required": true,
+ "location": "path"
},
- "bypass_federation": {
- "type": "boolean",
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
"location": "query"
},
- "include_trash": {
+ "bypass_federation": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "Include items whose is_trashed attribute is true.",
- "location": "query"
- },
- "include": {
- "type": "string",
- "required": false,
- "description": "",
+ "description": "If true, do not try to update the user on any other clusters in the federation,\nonly the cluster that received the request.\nYou must be an administrator to use this flag.",
"location": "query"
}
},
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
"response": {
- "$ref": "Group"
+ "$ref": "User"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "trash": {
- "id": "arvados.groups.trash",
- "path": "groups/{uuid}/trash",
- "httpMethod": "POST",
- "description": "trash groups",
+ "delete": {
+ "id": "arvados.users.delete",
+ "path": "users/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing User.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the User to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Group"
+ "$ref": "User"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "untrash": {
- "id": "arvados.groups.untrash",
- "path": "groups/{uuid}/untrash",
+ "current": {
+ "id": "arvados.users.current",
+ "path": "users/current",
+ "httpMethod": "GET",
+ "description": "Return the user record associated with the API token authorizing this request.",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "system": {
+ "id": "arvados.users.system",
+ "path": "users/system",
+ "httpMethod": "GET",
+ "description": "Return this cluster's system (\"root\") user record.",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "activate": {
+ "id": "arvados.users.activate",
+ "path": "users/{uuid}/activate",
"httpMethod": "POST",
- "description": "untrash groups",
+ "description": "Set the `is_active` flag on a user record.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the User to update.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Group"
+ "$ref": "User"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "list": {
- "id": "arvados.groups.list",
- "path": "groups",
- "httpMethod": "GET",
- "description": "List Groups.\n\n The list
method returns a\n resource list of\n matching Groups. For example:\n\n \n {\n \"kind\":\"arvados#groupList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "setup": {
+ "id": "arvados.users.setup",
+ "path": "users/setup",
+ "httpMethod": "POST",
+ "description": "Convenience method to \"fully\" set up a user record with a virtual machine login and notification email.",
"parameters": {
- "filters": {
- "type": "array",
+ "uuid": {
+ "type": "string",
"required": false,
- "description": "",
+ "description": "UUID of an existing user record to set up.",
"location": "query"
},
- "where": {
+ "user": {
"type": "object",
"required": false,
- "description": "",
+ "description": "Attributes of a new user record to set up.",
"location": "query"
},
- "order": {
- "type": "array",
+ "repo_name": {
+ "type": "string",
"required": false,
- "description": "",
+ "description": "This parameter is obsolete and ignored.",
"location": "query"
},
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
+ "vm_uuid": {
+ "type": "string",
"required": false,
+ "description": "If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.",
"location": "query"
},
- "distinct": {
+ "send_notification_email": {
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If true, send an email to the user notifying them they can now access this Arvados cluster.",
"location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "unsetup": {
+ "id": "arvados.users.unsetup",
+ "path": "users/{uuid}/unsetup",
+ "httpMethod": "POST",
+ "description": "Unset a user's active flag and delete associated records.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User to update.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "merge": {
+ "id": "arvados.users.merge",
+ "path": "users/merge",
+ "httpMethod": "POST",
+ "description": "Transfer ownership of one user's data to another.",
+ "parameters": {
+ "new_owner_uuid": {
+ "type": "string",
+ "required": true,
+ "description": "UUID of the user or group that will take ownership of data owned by the old user.",
"location": "query"
},
- "offset": {
- "type": "integer",
+ "new_user_token": {
+ "type": "string",
"required": false,
- "default": "0",
- "description": "",
+ "description": "Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.",
"location": "query"
},
- "count": {
- "type": "string",
+ "redirect_to_new_user": {
+ "type": "boolean",
"required": false,
- "default": "exact",
- "description": "",
+ "default": "false",
+ "description": "If true, authorization attempts for the old user will be redirected to the new user.",
"location": "query"
},
- "cluster_id": {
+ "old_user_uuid": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "description": "UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.",
"location": "query"
},
- "include_trash": {
- "type": "boolean",
+ "new_user_uuid": {
+ "type": "string",
"required": false,
- "default": "false",
- "description": "Include items whose is_trashed attribute is true.",
+ "description": "UUID of the user receiving ownership. You must be an admin to use this option.",
"location": "query"
}
},
"response": {
- "$ref": "GroupList"
+ "$ref": "User"
},
"scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
+ "https://api.arvados.org/auth/arvados"
]
- },
- "show": {
- "id": "arvados.groups.show",
- "path": "groups/{uuid}",
+ }
+ }
+ },
+ "user_agreements": {
+ "methods": {
+ "get": {
+ "id": "arvados.user_agreements.get",
+ "path": "user_agreements/{uuid}",
"httpMethod": "GET",
- "description": "show groups",
+ "description": "Get a UserAgreement record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the UserAgreement to return.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
- },
- "include_trash": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "Show group/project even if its is_trashed attribute is true.",
- "location": "query"
- }
- },
- "response": {
- "$ref": "Group"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.groups.destroy",
- "path": "groups/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy groups",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Group"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "humans": {
- "methods": {
- "get": {
- "id": "arvados.humans.get",
- "path": "humans/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Human's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Human in question.",
- "required": true,
- "location": "path"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "Human"
+ "$ref": "UserAgreement"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.humans.list",
- "path": "humans",
+ "list": {
+ "id": "arvados.user_agreements.list",
+ "path": "user_agreements",
"httpMethod": "GET",
- "description": "List Humans.\n\n The list
method returns a\n resource list of\n matching Humans. For example:\n\n \n {\n \"kind\":\"arvados#humanList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
+ "description": "Retrieve a UserAgreementList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -3012,45 +3297,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "HumanList"
+ "$ref": "UserAgreementList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -3058,27 +3344,27 @@
]
},
"create": {
- "id": "arvados.humans.create",
- "path": "humans",
+ "id": "arvados.user_agreements.create",
+ "path": "user_agreements",
"httpMethod": "POST",
- "description": "Create a new Human.",
+ "description": "Create a new UserAgreement.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
}
@@ -3086,33 +3372,33 @@
"request": {
"required": true,
"properties": {
- "human": {
- "$ref": "Human"
+ "user_agreement": {
+ "$ref": "UserAgreement"
}
}
},
"response": {
- "$ref": "Human"
+ "$ref": "UserAgreement"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.humans.update",
- "path": "humans/{uuid}",
+ "id": "arvados.user_agreements.update",
+ "path": "user_agreements/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing Human.",
+ "description": "Update attributes of an existing UserAgreement.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Human in question.",
+ "description": "The UUID of the UserAgreement to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -3120,65 +3406,125 @@
"request": {
"required": true,
"properties": {
- "human": {
- "$ref": "Human"
+ "user_agreement": {
+ "$ref": "UserAgreement"
}
}
},
"response": {
- "$ref": "Human"
+ "$ref": "UserAgreement"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.humans.delete",
- "path": "humans/{uuid}",
+ "id": "arvados.user_agreements.delete",
+ "path": "user_agreements/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing Human.",
+ "description": "Delete an existing UserAgreement.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Human in question.",
+ "description": "The UUID of the UserAgreement to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Human"
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "signatures": {
+ "id": "arvados.user_agreements.signatures",
+ "path": "user_agreements/signatures",
+ "httpMethod": "GET",
+ "description": "List all user agreement signature links from a user.",
+ "description": "List all user agreement signature links from the current user.",
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "sign": {
+ "id": "arvados.user_agreements.sign",
+ "path": "user_agreements/sign",
+ "httpMethod": "POST",
+ "description": "Create a signature link from the current user for a given user agreement.",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
+ }
+ }
+ },
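Editor's aside: the `sign` and `signatures` methods added above take no query parameters; the acting user comes from the request credentials. A minimal sketch of exercising them through the discovery-built Python SDK client (credentials are assumed to be in the standard ARVADOS_API_HOST/ARVADOS_API_TOKEN environment variables; this is illustrative, not canonical project code):

import arvados

# Build a client from this discovery document; the SDK reads
# ARVADOS_API_HOST and ARVADOS_API_TOKEN from the environment.
api = arvados.api('v1')

# Enumerate the agreements defined on the cluster.
for ua in api.user_agreements().list().execute()['items']:
    print(ua['uuid'])

# Record the current user's signature, then list this user's signature links.
api.user_agreements().sign().execute()
print(api.user_agreements().signatures().execute())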
+ "virtual_machines": {
+ "methods": {
+ "get": {
+ "id": "arvados.virtual_machines.get",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "GET",
+ "description": "Get a VirtualMachine record by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to return.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
},
"list": {
- "id": "arvados.humans.list",
- "path": "humans",
+ "id": "arvados.virtual_machines.list",
+ "path": "virtual_machines",
"httpMethod": "GET",
- "description": "List Humans.\n\n The list method returns a\n resource list of\n matching Humans. For example:\n\n \n {\n \"kind\":\"arvados#humanList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "description": "Retrieve a VirtualMachineList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -3186,92 +3532,175 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "HumanList"
+ "$ref": "VirtualMachineList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "show": {
- "id": "arvados.humans.show",
- "path": "humans/{uuid}",
- "httpMethod": "GET",
- "description": "show humans",
+ "create": {
+ "id": "arvados.virtual_machines.create",
+ "path": "virtual_machines",
+ "httpMethod": "POST",
+ "description": "Create a new VirtualMachine.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.virtual_machines.update",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing VirtualMachine.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the VirtualMachine to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
},
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
"response": {
- "$ref": "Human"
+ "$ref": "VirtualMachine"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
- "destroy": {
- "id": "arvados.humans.destroy",
- "path": "humans/{uuid}",
+ "delete": {
+ "id": "arvados.virtual_machines.delete",
+ "path": "virtual_machines/{uuid}",
"httpMethod": "DELETE",
- "description": "destroy humans",
+ "description": "Delete an existing VirtualMachine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine to delete.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "logins": {
+ "id": "arvados.virtual_machines.logins",
+ "path": "virtual_machines/{uuid}/logins",
+ "httpMethod": "GET",
+ "description": "List login permission links for a given virtual machine.",
"parameters": {
"uuid": {
"type": "string",
- "description": "",
+ "description": "The UUID of the VirtualMachine to query.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Human"
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_all_logins": {
+ "id": "arvados.virtual_machines.get_all_logins",
+ "path": "virtual_machines/get_all_logins",
+ "httpMethod": "GET",
+ "description": "List login permission links for all virtual machines.",
+ "parameters": {},
+ "response": {
+ "$ref": "VirtualMachine"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
@@ -3279,59 +3708,65 @@
}
}
},
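Editor's aside: per the descriptions above, `logins` reports login permission links for one virtual machine while `get_all_logins` covers every virtual machine on the cluster. A hedged sketch with the Python SDK; the UUID is a placeholder:

import arvados

api = arvados.api('v1')

# Login permission links for a single VM (placeholder UUID).
vm_uuid = 'zzzzz-2x53u-zzzzzzzzzzzzzzz'
print(api.virtual_machines().logins(uuid=vm_uuid).execute())

# Login permission links across all VMs (an administrator-oriented view).
print(api.virtual_machines().get_all_logins().execute())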
- "jobs": {
+ "workflows": {
"methods": {
"get": {
- "id": "arvados.jobs.get",
- "path": "jobs/{uuid}",
+ "id": "arvados.workflows.get",
+ "path": "workflows/{uuid}",
"httpMethod": "GET",
- "description": "Gets a Job's metadata by UUID.",
+ "description": "Get a Workflow record by UUID.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Job in question.",
+ "description": "The UUID of the Workflow to return.",
"required": true,
"location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "An array of names of attributes to return in the response.",
+ "required": false,
+ "location": "query"
}
},
"parameterOrder": [
"uuid"
],
"response": {
- "$ref": "Job"
+ "$ref": "Workflow"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
},
- "index": {
- "id": "arvados.jobs.list",
- "path": "jobs",
+ "list": {
+ "id": "arvados.workflows.list",
+ "path": "workflows",
"httpMethod": "GET",
- "description": "List Jobs.\n\n The list method returns a\n resource list of\n matching Jobs. For example:\n\n \n {\n \"kind\":\"arvados#jobList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "description": "Retrieve a WorkflowList.",
"parameters": {
"filters": {
"type": "array",
"required": false,
- "description": "",
+ "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n",
"location": "query"
},
"where": {
"type": "object",
"required": false,
- "description": "",
+ "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n",
"location": "query"
},
"order": {
"type": "array",
"required": false,
- "description": "",
+ "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n",
"location": "query"
},
"select": {
"type": "array",
- "description": "Attributes of each object to return in the response.",
+ "description": "An array of names of attributes to return from each matching object.",
"required": false,
"location": "query"
},
@@ -3339,45 +3774,46 @@
"type": "boolean",
"required": false,
"default": "false",
- "description": "",
+ "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n",
"location": "query"
},
"limit": {
"type": "integer",
"required": false,
"default": "100",
- "description": "",
+ "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n",
"location": "query"
},
"offset": {
"type": "integer",
"required": false,
"default": "0",
- "description": "",
+ "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n",
"location": "query"
},
"count": {
"type": "string",
"required": false,
"default": "exact",
- "description": "",
+ "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n",
"location": "query"
},
"cluster_id": {
"type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster to return objects from",
"location": "query",
"required": false
},
"bypass_federation": {
"type": "boolean",
"required": false,
- "description": "bypass federation behavior, list items from local instance database only",
+ "default": "false",
+ "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n",
"location": "query"
}
},
"response": {
- "$ref": "JobList"
+ "$ref": "WorkflowList"
},
"scopes": [
"https://api.arvados.org/auth/arvados",
@@ -3385,86 +3821,61 @@
]
},
"create": {
- "id": "arvados.jobs.create",
- "path": "jobs",
+ "id": "arvados.workflows.create",
+ "path": "workflows",
"httpMethod": "POST",
- "description": "Create a new Job.",
+ "description": "Create a new Workflow.",
"parameters": {
"select": {
"type": "array",
- "description": "Attributes of the new object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
},
"ensure_unique_name": {
"type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
"location": "query",
"required": false,
"default": "false"
},
"cluster_id": {
"type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
+ "description": "Cluster ID of a federated cluster where this object should be created.",
"location": "query",
"required": false
- },
- "find_or_create": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "minimum_script_version": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "exclude_script_versions": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
}
},
"request": {
"required": true,
"properties": {
- "job": {
- "$ref": "Job"
+ "workflow": {
+ "$ref": "Workflow"
}
}
},
"response": {
- "$ref": "Job"
+ "$ref": "Workflow"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"update": {
- "id": "arvados.jobs.update",
- "path": "jobs/{uuid}",
+ "id": "arvados.workflows.update",
+ "path": "workflows/{uuid}",
"httpMethod": "PUT",
- "description": "Update attributes of an existing Job.",
+ "description": "Update attributes of an existing Workflow.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Job in question.",
+ "description": "The UUID of the Workflow to update.",
"required": true,
"location": "path"
},
"select": {
"type": "array",
- "description": "Attributes of the updated object to return in the response.",
+ "description": "An array of names of attributes to return in the response.",
"required": false,
"location": "query"
}
@@ -3472,7499 +3883,1165 @@
"request": {
"required": true,
"properties": {
- "job": {
- "$ref": "Job"
+ "workflow": {
+ "$ref": "Workflow"
}
}
},
"response": {
- "$ref": "Job"
+ "$ref": "Workflow"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
},
"delete": {
- "id": "arvados.jobs.delete",
- "path": "jobs/{uuid}",
+ "id": "arvados.workflows.delete",
+ "path": "workflows/{uuid}",
"httpMethod": "DELETE",
- "description": "Delete an existing Job.",
+ "description": "Delete an existing Workflow.",
"parameters": {
"uuid": {
"type": "string",
- "description": "The UUID of the Job in question.",
+ "description": "The UUID of the Workflow to delete.",
"required": true,
"location": "path"
}
},
"response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "queue": {
- "id": "arvados.jobs.queue",
- "path": "jobs/queue",
- "httpMethod": "GET",
- "description": "queue jobs",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "Job"
+ "$ref": "Workflow"
},
"scopes": [
"https://api.arvados.org/auth/arvados"
]
- },
- "queue_size": {
- "id": "arvados.jobs.queue_size",
- "path": "jobs/queue_size",
+ }
+ }
+ },
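Editor's aside: the list parameters documented above (`filters`, `order`, `limit`, `offset`, `count`) compose in the usual way. A sketch of paging through workflows with the Python SDK, using only behavior the discovery document describes; the name filter is a placeholder:

import arvados

api = arvados.api('v1')
flt = [['name', 'like', '%CWL%']]  # placeholder filter

# First page: count matches exactly, newest first.
page = api.workflows().list(
    filters=flt,
    order=['created_at desc'],  # format: ATTRIBUTE DIRECTION
    limit=50,
).execute()
print(page['items_available'], 'matching workflows')

# Later pages: count="none" skips the recount for speed.
offset = len(page['items'])
while page['items']:
    page = api.workflows().list(
        filters=flt, order=['created_at desc'],
        limit=50, offset=offset, count='none',
    ).execute()
    offset += len(page['items'])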
+ "configs": {
+ "methods": {
+ "get": {
+ "id": "arvados.configs.get",
+ "path": "config",
"httpMethod": "GET",
- "description": "queue_size jobs",
+ "description": "Get this cluster's public configuration settings.",
"parameters": {},
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "cancel": {
- "id": "arvados.jobs.cancel",
- "path": "jobs/{uuid}/cancel",
- "httpMethod": "POST",
- "description": "cancel jobs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "lock": {
- "id": "arvados.jobs.lock",
- "path": "jobs/{uuid}/lock",
- "httpMethod": "POST",
- "description": "lock jobs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.jobs.list",
- "path": "jobs",
- "httpMethod": "GET",
- "description": "List Jobs.\n\n The list method returns a\n resource list of\n matching Jobs. For example:\n\n \n {\n \"kind\":\"arvados#jobList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "JobList"
- },
+ "parameterOrder": [],
+ "response": {},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
- },
- "show": {
- "id": "arvados.jobs.show",
- "path": "jobs/{uuid}",
- "httpMethod": "GET",
- "description": "show jobs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.jobs.destroy",
- "path": "jobs/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy jobs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
}
}
},
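Editor's aside: `configs.get` is unusual in that its path is the bare `config`, it takes no parameters, and it returns a plain JSON object rather than a `$ref` schema. A minimal sketch; `ClusterID` is one of the exported public settings, but treat the exact layout as illustrative:

import arvados

api = arvados.api('v1')

# The response is the cluster's public configuration as a plain dict.
config = api.configs().get().execute()
print(config.get('ClusterID'))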
- "job_tasks": {
+ "vocabularies": {
"methods": {
"get": {
- "id": "arvados.job_tasks.get",
- "path": "job_tasks/{uuid}",
+ "id": "arvados.vocabularies.get",
+ "path": "vocabulary",
"httpMethod": "GET",
- "description": "Gets a JobTask's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the JobTask in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "JobTask"
- },
+ "description": "Get this cluster's configured vocabulary definition.\n\nRefer to [metadata vocabulary documentation][] for details.\n\n[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html\n\n",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
"scopes": [
"https://api.arvados.org/auth/arvados",
"https://api.arvados.org/auth/arvados.readonly"
]
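Editor's aside: `vocabularies.get` likewise returns a plain JSON object, the cluster's metadata vocabulary. A hedged sketch, assuming the `tags`/`labels` layout from the vocabulary format in the linked admin documentation:

import arvados

api = arvados.api('v1')

# Fetch the vocabulary; an empty result means no vocabulary is configured.
vocab = api.vocabularies().get().execute()
for key, tag in vocab.get('tags', {}).items():
    print(key, [l.get('label') for l in tag.get('labels', [])])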
- },
- "index": {
- "id": "arvados.job_tasks.list",
- "path": "job_tasks",
- "httpMethod": "GET",
- "description": "List JobTasks.\n\n The list method returns a\n resource list of\n matching JobTasks. For example:\n\n \n {\n \"kind\":\"arvados#jobTaskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "JobTaskList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.job_tasks.create",
- "path": "job_tasks",
- "httpMethod": "POST",
- "description": "Create a new JobTask.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "job_task": {
- "$ref": "JobTask"
- }
- }
- },
- "response": {
- "$ref": "JobTask"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.job_tasks.update",
- "path": "job_tasks/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing JobTask.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the JobTask in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "job_task": {
- "$ref": "JobTask"
- }
- }
- },
- "response": {
- "$ref": "JobTask"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.job_tasks.delete",
- "path": "job_tasks/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing JobTask.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the JobTask in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "JobTask"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.job_tasks.list",
- "path": "job_tasks",
- "httpMethod": "GET",
- "description": "List JobTasks.\n\n The list method returns a\n resource list of\n matching JobTasks. For example:\n\n \n {\n \"kind\":\"arvados#jobTaskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "JobTaskList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.job_tasks.show",
- "path": "job_tasks/{uuid}",
- "httpMethod": "GET",
- "description": "show job_tasks",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "JobTask"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.job_tasks.destroy",
- "path": "job_tasks/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy job_tasks",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "JobTask"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "keep_disks": {
- "methods": {
- "get": {
- "id": "arvados.keep_disks.get",
- "path": "keep_disks/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a KeepDisk's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepDisk in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.keep_disks.list",
- "path": "keep_disks",
- "httpMethod": "GET",
- "description": "List KeepDisks.\n\n The list method returns a\n resource list of\n matching KeepDisks. For example:\n\n \n {\n \"kind\":\"arvados#keepDiskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepDiskList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.keep_disks.create",
- "path": "keep_disks",
- "httpMethod": "POST",
- "description": "Create a new KeepDisk.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "keep_disk": {
- "$ref": "KeepDisk"
- }
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.keep_disks.update",
- "path": "keep_disks/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing KeepDisk.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepDisk in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "keep_disk": {
- "$ref": "KeepDisk"
- }
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.keep_disks.delete",
- "path": "keep_disks/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing KeepDisk.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepDisk in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "ping": {
- "id": "arvados.keep_disks.ping",
- "path": "keep_disks/ping",
- "httpMethod": "POST",
- "description": "ping keep_disks",
- "parameters": {
- "uuid": {
- "required": false,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "ping_secret": {
- "required": true,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "node_uuid": {
- "required": false,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "filesystem_uuid": {
- "required": false,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "service_host": {
- "required": false,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "service_port": {
- "required": true,
- "type": "string",
- "description": "",
- "location": "query"
- },
- "service_ssl_flag": {
- "required": true,
- "type": "string",
- "description": "",
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.keep_disks.list",
- "path": "keep_disks",
- "httpMethod": "GET",
- "description": "List KeepDisks.\n\n The list method returns a\n resource list of\n matching KeepDisks. For example:\n\n \n {\n \"kind\":\"arvados#keepDiskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepDiskList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.keep_disks.show",
- "path": "keep_disks/{uuid}",
- "httpMethod": "GET",
- "description": "show keep_disks",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.keep_disks.destroy",
- "path": "keep_disks/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy keep_disks",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "KeepDisk"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "keep_services": {
- "methods": {
- "get": {
- "id": "arvados.keep_services.get",
- "path": "keep_services/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a KeepService's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepService in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.keep_services.list",
- "path": "keep_services",
- "httpMethod": "GET",
- "description": "List KeepServices.\n\n The list method returns a\n resource list of\n matching KeepServices. For example:\n\n \n {\n \"kind\":\"arvados#keepServiceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepServiceList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.keep_services.create",
- "path": "keep_services",
- "httpMethod": "POST",
- "description": "Create a new KeepService.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "keep_service": {
- "$ref": "KeepService"
- }
- }
- },
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.keep_services.update",
- "path": "keep_services/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing KeepService.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepService in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "keep_service": {
- "$ref": "KeepService"
- }
- }
- },
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.keep_services.delete",
- "path": "keep_services/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing KeepService.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the KeepService in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "accessible": {
- "id": "arvados.keep_services.accessible",
- "path": "keep_services/accessible",
- "httpMethod": "GET",
- "description": "accessible keep_services",
- "parameters": {},
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.keep_services.list",
- "path": "keep_services",
- "httpMethod": "GET",
- "description": "List KeepServices.\n\n The list method returns a\n resource list of\n matching KeepServices. For example:\n\n \n {\n \"kind\":\"arvados#keepServiceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepServiceList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.keep_services.show",
- "path": "keep_services/{uuid}",
- "httpMethod": "GET",
- "description": "show keep_services",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.keep_services.destroy",
- "path": "keep_services/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy keep_services",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "KeepService"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "links": {
- "methods": {
- "get": {
- "id": "arvados.links.get",
- "path": "links/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Link's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Link in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.links.list",
- "path": "links",
- "httpMethod": "GET",
- "description": "List Links.\n\n The list method returns a\n resource list of\n matching Links. For example:\n\n \n {\n \"kind\":\"arvados#linkList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "LinkList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.links.create",
- "path": "links",
- "httpMethod": "POST",
- "description": "Create a new Link.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "link": {
- "$ref": "Link"
- }
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.links.update",
- "path": "links/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Link.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Link in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "link": {
- "$ref": "Link"
- }
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.links.delete",
- "path": "links/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Link.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Link in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.links.list",
- "path": "links",
- "httpMethod": "GET",
- "description": "List Links.\n\n The list method returns a\n resource list of\n matching Links. For example:\n\n \n {\n \"kind\":\"arvados#linkList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "LinkList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.links.show",
- "path": "links/{uuid}",
- "httpMethod": "GET",
- "description": "show links",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.links.destroy",
- "path": "links/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy links",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "get_permissions": {
- "id": "arvados.links.get_permissions",
- "path": "permissions/{uuid}",
- "httpMethod": "GET",
- "description": "get_permissions links",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Link"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
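Every resource in this legacy discovery document follows the same CRUD pattern shown above for links. As a rough sketch, a client generated from this document (for example the Arvados Python SDK) maps arvados.links.list onto a method call like the following; the filter and UUID values are placeholders for illustration.

import arvados

# Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the
# environment; arvados.api() builds a client from the discovery document.
api = arvados.api('v1')

# 'filters', 'limit', and the other list parameters declared above map
# one-to-one onto keyword arguments. The tail_uuid below is made up.
links = api.links().list(
    filters=[['link_class', '=', 'permission'],
             ['tail_uuid', '=', 'zzzzz-tpzed-xxxxxxxxxxxxxxx']],
    limit=50,
).execute()
for link in links['items']:
    print(link['uuid'], link['link_class'], link['name'])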
- "logs": {
- "methods": {
- "get": {
- "id": "arvados.logs.get",
- "path": "logs/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Log's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Log in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.logs.list",
- "path": "logs",
- "httpMethod": "GET",
- "description": "List Logs.\n\n The list
method returns a\n resource list of\n matching Logs. For example:\n\n \n {\n \"kind\":\"arvados#logList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "LogList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.logs.create",
- "path": "logs",
- "httpMethod": "POST",
- "description": "Create a new Log.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "log": {
- "$ref": "Log"
- }
- }
- },
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.logs.update",
- "path": "logs/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Log.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Log in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "log": {
- "$ref": "Log"
- }
- }
- },
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.logs.delete",
- "path": "logs/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Log.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Log in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.logs.list",
- "path": "logs",
- "httpMethod": "GET",
- "description": "List Logs.\n\n The list
method returns a\n resource list of\n matching Logs. For example:\n\n \n {\n \"kind\":\"arvados#logList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "LogList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.logs.show",
- "path": "logs/{uuid}",
- "httpMethod": "GET",
- "description": "show logs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.logs.destroy",
- "path": "logs/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy logs",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Log"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
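The limit/offset/count parameters behave identically on every list endpoint above. A hedged sketch of paging through logs with the same assumed Python client, using count='none' to skip the items_available tally:

import arvados

api = arvados.api('v1')

offset = 0
while True:
    # count='none' tells the server not to compute items_available,
    # which makes deep paging cheaper on large log tables.
    page = api.logs().list(limit=100, offset=offset, count='none',
                           order=['created_at desc']).execute()
    for log in page['items']:
        print(log['uuid'], log['event_type'])
    if len(page['items']) < 100:
        break
    offset += 100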
- "nodes": {
- "methods": {
- "get": {
- "id": "arvados.nodes.get",
- "path": "nodes/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Node's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Node in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.nodes.list",
- "path": "nodes",
- "httpMethod": "GET",
- "description": "List Nodes.\n\n The list
method returns a\n resource list of\n matching Nodes. For example:\n\n \n {\n \"kind\":\"arvados#nodeList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "NodeList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.nodes.create",
- "path": "nodes",
- "httpMethod": "POST",
- "description": "Create a new Node.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "assign_slot": {
- "required": false,
- "type": "boolean",
- "description": "assign slot and hostname",
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "node": {
- "$ref": "Node"
- }
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.nodes.update",
- "path": "nodes/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Node.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Node in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- },
- "assign_slot": {
- "required": false,
- "type": "boolean",
- "description": "assign slot and hostname",
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "node": {
- "$ref": "Node"
- }
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.nodes.delete",
- "path": "nodes/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Node.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Node in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "ping": {
- "id": "arvados.nodes.ping",
- "path": "nodes/{uuid}/ping",
- "httpMethod": "POST",
- "description": "ping nodes",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "ping_secret": {
- "required": true,
- "type": "string",
- "description": "",
- "location": "query"
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.nodes.list",
- "path": "nodes",
- "httpMethod": "GET",
- "description": "List Nodes.\n\n The list
method returns a\n resource list of\n matching Nodes. For example:\n\n \n {\n \"kind\":\"arvados#nodeList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "NodeList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.nodes.show",
- "path": "nodes/{uuid}",
- "httpMethod": "GET",
- "description": "show nodes",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.nodes.destroy",
- "path": "nodes/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy nodes",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Node"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
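The select parameter accepted by list and show trims each returned object to the named attributes. A sketch against the legacy nodes API above; the attribute names are taken from the old Node schema and are illustrative:

import arvados

api = arvados.api('v1')

# Only uuid, hostname, and slot_number are serialized per node,
# keeping the response small on clusters with many compute nodes.
nodes = api.nodes().list(
    select=['uuid', 'hostname', 'slot_number']).execute()
for node in nodes['items']:
    print(node['uuid'], node.get('hostname'), node.get('slot_number'))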
- "pipeline_instances": {
- "methods": {
- "get": {
- "id": "arvados.pipeline_instances.get",
- "path": "pipeline_instances/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a PipelineInstance's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineInstance in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.pipeline_instances.list",
- "path": "pipeline_instances",
- "httpMethod": "GET",
- "description": "List PipelineInstances.\n\n The list
method returns a\n resource list of\n matching PipelineInstances. For example:\n\n \n {\n \"kind\":\"arvados#pipelineInstanceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineInstanceList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.pipeline_instances.create",
- "path": "pipeline_instances",
- "httpMethod": "POST",
- "description": "Create a new PipelineInstance.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "pipeline_instance": {
- "$ref": "PipelineInstance"
- }
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.pipeline_instances.update",
- "path": "pipeline_instances/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing PipelineInstance.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineInstance in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "pipeline_instance": {
- "$ref": "PipelineInstance"
- }
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.pipeline_instances.delete",
- "path": "pipeline_instances/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing PipelineInstance.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineInstance in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "cancel": {
- "id": "arvados.pipeline_instances.cancel",
- "path": "pipeline_instances/{uuid}/cancel",
- "httpMethod": "POST",
- "description": "cancel pipeline_instances",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.pipeline_instances.list",
- "path": "pipeline_instances",
- "httpMethod": "GET",
- "description": "List PipelineInstances.\n\n The list
method returns a\n resource list of\n matching PipelineInstances. For example:\n\n \n {\n \"kind\":\"arvados#pipelineInstanceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineInstanceList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.pipeline_instances.show",
- "path": "pipeline_instances/{uuid}",
- "httpMethod": "GET",
- "description": "show pipeline_instances",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.pipeline_instances.destroy",
- "path": "pipeline_instances/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy pipeline_instances",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "PipelineInstance"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
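Besides plain CRUD, pipeline_instances declares a custom cancel action (POST to pipeline_instances/{uuid}/cancel). Assuming the same generated client, the call would look like this sketch; the UUID is a placeholder and the whole pipeline API is legacy:

import arvados

api = arvados.api('v1')

# POSTs to pipeline_instances/{uuid}/cancel and returns the updated
# PipelineInstance object.
cancelled = api.pipeline_instances().cancel(
    uuid='zzzzz-d1hrv-xxxxxxxxxxxxxxx').execute()
print(cancelled['state'])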
- "pipeline_templates": {
- "methods": {
- "get": {
- "id": "arvados.pipeline_templates.get",
- "path": "pipeline_templates/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a PipelineTemplate's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineTemplate in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.pipeline_templates.list",
- "path": "pipeline_templates",
- "httpMethod": "GET",
- "description": "List PipelineTemplates.\n\n The list
method returns a\n resource list of\n matching PipelineTemplates. For example:\n\n \n {\n \"kind\":\"arvados#pipelineTemplateList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineTemplateList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.pipeline_templates.create",
- "path": "pipeline_templates",
- "httpMethod": "POST",
- "description": "Create a new PipelineTemplate.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "pipeline_template": {
- "$ref": "PipelineTemplate"
- }
- }
- },
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.pipeline_templates.update",
- "path": "pipeline_templates/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing PipelineTemplate.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineTemplate in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "pipeline_template": {
- "$ref": "PipelineTemplate"
- }
- }
- },
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.pipeline_templates.delete",
- "path": "pipeline_templates/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing PipelineTemplate.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the PipelineTemplate in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.pipeline_templates.list",
- "path": "pipeline_templates",
- "httpMethod": "GET",
- "description": "List PipelineTemplates.\n\n The list
method returns a\n resource list of\n matching PipelineTemplates. For example:\n\n \n {\n \"kind\":\"arvados#pipelineTemplateList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineTemplateList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.pipeline_templates.show",
- "path": "pipeline_templates/{uuid}",
- "httpMethod": "GET",
- "description": "show pipeline_templates",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.pipeline_templates.destroy",
- "path": "pipeline_templates/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy pipeline_templates",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "PipelineTemplate"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "repositories": {
- "methods": {
- "get": {
- "id": "arvados.repositories.get",
- "path": "repositories/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Repository's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Repository in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.repositories.list",
- "path": "repositories",
- "httpMethod": "GET",
- "description": "List Repositories.\n\n The list
method returns a\n resource list of\n matching Repositories. For example:\n\n \n {\n \"kind\":\"arvados#repositoryList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "RepositoryList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.repositories.create",
- "path": "repositories",
- "httpMethod": "POST",
- "description": "Create a new Repository.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "repository": {
- "$ref": "Repository"
- }
- }
- },
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.repositories.update",
- "path": "repositories/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Repository.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Repository in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "repository": {
- "$ref": "Repository"
- }
- }
- },
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.repositories.delete",
- "path": "repositories/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Repository.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Repository in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "get_all_permissions": {
- "id": "arvados.repositories.get_all_permissions",
- "path": "repositories/get_all_permissions",
- "httpMethod": "GET",
- "description": "get_all_permissions repositories",
- "parameters": {},
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.repositories.list",
- "path": "repositories",
- "httpMethod": "GET",
- "description": "List Repositories.\n\n The list
method returns a\n resource list of\n matching Repositories. For example:\n\n \n {\n \"kind\":\"arvados#repositoryList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "RepositoryList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.repositories.show",
- "path": "repositories/{uuid}",
- "httpMethod": "GET",
- "description": "show repositories",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.repositories.destroy",
- "path": "repositories/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy repositories",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Repository"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
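The create methods above all accept ensure_unique_name, which renames the new object on an (owner_uuid, name) collision instead of failing. A sketch for repositories, with a hypothetical repository name:

import arvados

api = arvados.api('v1')

# With ensure_unique_name=True the server appends a suffix to the name
# on collision rather than returning a validation error.
repo = api.repositories().create(
    body={'repository': {'name': 'tutorial'}},
    ensure_unique_name=True,
).execute()
print(repo['uuid'], repo['name'])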
- "specimens": {
- "methods": {
- "get": {
- "id": "arvados.specimens.get",
- "path": "specimens/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Specimen's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Specimen in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.specimens.list",
- "path": "specimens",
- "httpMethod": "GET",
- "description": "List Specimens.\n\n The list
method returns a\n resource list of\n matching Specimens. For example:\n\n \n {\n \"kind\":\"arvados#specimenList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "SpecimenList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.specimens.create",
- "path": "specimens",
- "httpMethod": "POST",
- "description": "Create a new Specimen.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "specimen": {
- "$ref": "Specimen"
- }
- }
- },
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.specimens.update",
- "path": "specimens/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Specimen.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Specimen in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "specimen": {
- "$ref": "Specimen"
- }
- }
- },
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.specimens.delete",
- "path": "specimens/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Specimen.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Specimen in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.specimens.list",
- "path": "specimens",
- "httpMethod": "GET",
- "description": "List Specimens.\n\n The list
method returns a\n resource list of\n matching Specimens. For example:\n\n \n {\n \"kind\":\"arvados#specimenList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "SpecimenList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.specimens.show",
- "path": "specimens/{uuid}",
- "httpMethod": "GET",
- "description": "show specimens",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.specimens.destroy",
- "path": "specimens/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy specimens",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Specimen"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
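Single-object get calls raise an HTTP error when the UUID does not resolve. A sketch of defensive retrieval through the assumed Python client (googleapiclient is the transport layer the SDK builds on; the specimen UUID is a placeholder):

import arvados
import googleapiclient.errors

api = arvados.api('v1')

try:
    specimen = api.specimens().get(
        uuid='zzzzz-j58dm-xxxxxxxxxxxxxxx').execute()
    print(specimen['uuid'], specimen.get('properties'))
except googleapiclient.errors.HttpError as err:
    # 404 for a missing UUID, 403 when the token lacks permission.
    print('lookup failed:', err)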
- "traits": {
- "methods": {
- "get": {
- "id": "arvados.traits.get",
- "path": "traits/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Trait's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Trait in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.traits.list",
- "path": "traits",
- "httpMethod": "GET",
- "description": "List Traits.\n\n The list
method returns a\n resource list of\n matching Traits. For example:\n\n \n {\n \"kind\":\"arvados#traitList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "TraitList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.traits.create",
- "path": "traits",
- "httpMethod": "POST",
- "description": "Create a new Trait.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "trait": {
- "$ref": "Trait"
- }
- }
- },
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.traits.update",
- "path": "traits/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Trait.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Trait in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "trait": {
- "$ref": "Trait"
- }
- }
- },
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.traits.delete",
- "path": "traits/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Trait.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Trait in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.traits.list",
- "path": "traits",
- "httpMethod": "GET",
- "description": "List Traits.\n\n The list
method returns a\n resource list of\n matching Traits. For example:\n\n \n {\n \"kind\":\"arvados#traitList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "TraitList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.traits.show",
- "path": "traits/{uuid}",
- "httpMethod": "GET",
- "description": "show traits",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.traits.destroy",
- "path": "traits/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy traits",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Trait"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "users": {
- "methods": {
- "get": {
- "id": "arvados.users.get",
- "path": "users/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a User's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the User in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.users.list",
- "path": "users",
- "httpMethod": "GET",
- "description": "List Users.\n\n The list
method returns a\n resource list of\n matching Users. For example:\n\n \n {\n \"kind\":\"arvados#userList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "UserList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.users.create",
- "path": "users",
- "httpMethod": "POST",
- "description": "Create a new User.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "user": {
- "$ref": "User"
- }
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.users.update",
- "path": "users/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing User.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the User in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "user": {
- "$ref": "User"
- }
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.users.delete",
- "path": "users/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing User.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the User in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "current": {
- "id": "arvados.users.current",
- "path": "users/current",
- "httpMethod": "GET",
- "description": "current users",
- "parameters": {},
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "system": {
- "id": "arvados.users.system",
- "path": "users/system",
- "httpMethod": "GET",
- "description": "system users",
- "parameters": {},
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "activate": {
- "id": "arvados.users.activate",
- "path": "users/{uuid}/activate",
- "httpMethod": "POST",
- "description": "activate users",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "setup": {
- "id": "arvados.users.setup",
- "path": "users/setup",
- "httpMethod": "POST",
- "description": "setup users",
- "parameters": {
- "uuid": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "user": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "repo_name": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "vm_uuid": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "send_notification_email": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "unsetup": {
- "id": "arvados.users.unsetup",
- "path": "users/{uuid}/unsetup",
- "httpMethod": "POST",
- "description": "unsetup users",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "merge": {
- "id": "arvados.users.merge",
- "path": "users/merge",
- "httpMethod": "POST",
- "description": "merge users",
- "parameters": {
- "new_owner_uuid": {
- "type": "string",
- "required": true,
- "description": "",
- "location": "query"
- },
- "new_user_token": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "redirect_to_new_user": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "old_user_uuid": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- },
- "new_user_uuid": {
- "type": "string",
- "required": false,
- "description": "",
- "location": "query"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.users.list",
- "path": "users",
- "httpMethod": "GET",
- "description": "List Users.\n\n The list
method returns a\n resource list of\n matching Users. For example:\n\n \n {\n \"kind\":\"arvados#userList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "UserList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.users.show",
- "path": "users/{uuid}",
- "httpMethod": "GET",
- "description": "show users",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.users.destroy",
- "path": "users/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy users",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "User"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
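The method entries above are exactly what a discovery-based client turns into callable stubs. A minimal sketch with the Arvados Python SDK (which builds its client from this document), assuming ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the environment:

import arvados

api = arvados.api('v1')

# arvados.users.current: GET users/current returns the authenticated user.
me = api.users().current().execute()
print(me['uuid'])

# arvados.users.list: GET users, with the query parameters documented above.
page = api.users().list(limit=10, order=['created_at desc']).execute()
for user in page['items']:
    print(user['uuid'])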
- "user_agreements": {
- "methods": {
- "get": {
- "id": "arvados.user_agreements.get",
- "path": "user_agreements/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a UserAgreement's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the UserAgreement in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.user_agreements.list",
- "path": "user_agreements",
- "httpMethod": "GET",
- "description": "List UserAgreements.\n\n The list
method returns a\n resource list of\n matching UserAgreements. For example:\n\n \n {\n \"kind\":\"arvados#userAgreementList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "UserAgreementList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.user_agreements.create",
- "path": "user_agreements",
- "httpMethod": "POST",
- "description": "Create a new UserAgreement.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "user_agreement": {
- "$ref": "UserAgreement"
- }
- }
- },
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.user_agreements.update",
- "path": "user_agreements/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing UserAgreement.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the UserAgreement in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "user_agreement": {
- "$ref": "UserAgreement"
- }
- }
- },
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.user_agreements.delete",
- "path": "user_agreements/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing UserAgreement.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the UserAgreement in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "signatures": {
- "id": "arvados.user_agreements.signatures",
- "path": "user_agreements/signatures",
- "httpMethod": "GET",
- "description": "signatures user_agreements",
- "parameters": {},
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "sign": {
- "id": "arvados.user_agreements.sign",
- "path": "user_agreements/sign",
- "httpMethod": "POST",
- "description": "sign user_agreements",
- "parameters": {},
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.user_agreements.list",
- "path": "user_agreements",
- "httpMethod": "GET",
- "description": "List UserAgreements.\n\n The list
method returns a\n resource list of\n matching UserAgreements. For example:\n\n \n {\n \"kind\":\"arvados#userAgreementList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "UserAgreementList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "new": {
- "id": "arvados.user_agreements.new",
- "path": "user_agreements/new",
- "httpMethod": "GET",
- "description": "new user_agreements",
- "parameters": {},
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "show": {
- "id": "arvados.user_agreements.show",
- "path": "user_agreements/{uuid}",
- "httpMethod": "GET",
- "description": "show user_agreements",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.user_agreements.destroy",
- "path": "user_agreements/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy user_agreements",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "UserAgreement"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "virtual_machines": {
- "methods": {
- "get": {
- "id": "arvados.virtual_machines.get",
- "path": "virtual_machines/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a VirtualMachine's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the VirtualMachine in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.virtual_machines.list",
- "path": "virtual_machines",
- "httpMethod": "GET",
- "description": "List VirtualMachines.\n\n The list
method returns a\n resource list of\n matching VirtualMachines. For example:\n\n \n {\n \"kind\":\"arvados#virtualMachineList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "VirtualMachineList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.virtual_machines.create",
- "path": "virtual_machines",
- "httpMethod": "POST",
- "description": "Create a new VirtualMachine.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "virtual_machine": {
- "$ref": "VirtualMachine"
- }
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.virtual_machines.update",
- "path": "virtual_machines/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing VirtualMachine.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the VirtualMachine in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "virtual_machine": {
- "$ref": "VirtualMachine"
- }
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.virtual_machines.delete",
- "path": "virtual_machines/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing VirtualMachine.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the VirtualMachine in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "logins": {
- "id": "arvados.virtual_machines.logins",
- "path": "virtual_machines/{uuid}/logins",
- "httpMethod": "GET",
- "description": "logins virtual_machines",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "get_all_logins": {
- "id": "arvados.virtual_machines.get_all_logins",
- "path": "virtual_machines/get_all_logins",
- "httpMethod": "GET",
- "description": "get_all_logins virtual_machines",
- "parameters": {},
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.virtual_machines.list",
- "path": "virtual_machines",
- "httpMethod": "GET",
- "description": "List VirtualMachines.\n\n The list
method returns a\n resource list of\n matching VirtualMachines. For example:\n\n \n {\n \"kind\":\"arvados#virtualMachineList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "VirtualMachineList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.virtual_machines.show",
- "path": "virtual_machines/{uuid}",
- "httpMethod": "GET",
- "description": "show virtual_machines",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.virtual_machines.destroy",
- "path": "virtual_machines/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy virtual_machines",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "VirtualMachine"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
- "workflows": {
- "methods": {
- "get": {
- "id": "arvados.workflows.get",
- "path": "workflows/{uuid}",
- "httpMethod": "GET",
- "description": "Gets a Workflow's metadata by UUID.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Workflow in question.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "uuid"
- ],
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "index": {
- "id": "arvados.workflows.list",
- "path": "workflows",
- "httpMethod": "GET",
- "description": "List Workflows.\n\n The list
method returns a\n resource list of\n matching Workflows. For example:\n\n \n {\n \"kind\":\"arvados#workflowList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "WorkflowList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "create": {
- "id": "arvados.workflows.create",
- "path": "workflows",
- "httpMethod": "POST",
- "description": "Create a new Workflow.",
- "parameters": {
- "select": {
- "type": "array",
- "description": "Attributes of the new object to return in the response.",
- "required": false,
- "location": "query"
- },
- "ensure_unique_name": {
- "type": "boolean",
- "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
- "location": "query",
- "required": false,
- "default": "false"
- },
- "cluster_id": {
- "type": "string",
- "description": "Create object on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- }
- },
- "request": {
- "required": true,
- "properties": {
- "workflow": {
- "$ref": "Workflow"
- }
- }
- },
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "update": {
- "id": "arvados.workflows.update",
- "path": "workflows/{uuid}",
- "httpMethod": "PUT",
- "description": "Update attributes of an existing Workflow.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Workflow in question.",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the updated object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "request": {
- "required": true,
- "properties": {
- "workflow": {
- "$ref": "Workflow"
- }
- }
- },
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "delete": {
- "id": "arvados.workflows.delete",
- "path": "workflows/{uuid}",
- "httpMethod": "DELETE",
- "description": "Delete an existing Workflow.",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "The UUID of the Workflow in question.",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "list": {
- "id": "arvados.workflows.list",
- "path": "workflows",
- "httpMethod": "GET",
- "description": "List Workflows.\n\n The list
method returns a\n resource list of\n matching Workflows. For example:\n\n \n {\n \"kind\":\"arvados#workflowList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n
",
- "parameters": {
- "filters": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "where": {
- "type": "object",
- "required": false,
- "description": "",
- "location": "query"
- },
- "order": {
- "type": "array",
- "required": false,
- "description": "",
- "location": "query"
- },
- "select": {
- "type": "array",
- "description": "Attributes of each object to return in the response.",
- "required": false,
- "location": "query"
- },
- "distinct": {
- "type": "boolean",
- "required": false,
- "default": "false",
- "description": "",
- "location": "query"
- },
- "limit": {
- "type": "integer",
- "required": false,
- "default": "100",
- "description": "",
- "location": "query"
- },
- "offset": {
- "type": "integer",
- "required": false,
- "default": "0",
- "description": "",
- "location": "query"
- },
- "count": {
- "type": "string",
- "required": false,
- "default": "exact",
- "description": "",
- "location": "query"
- },
- "cluster_id": {
- "type": "string",
- "description": "List objects on a remote federated cluster instead of the current one.",
- "location": "query",
- "required": false
- },
- "bypass_federation": {
- "type": "boolean",
- "required": false,
- "description": "bypass federation behavior, list items from local instance database only",
- "location": "query"
- }
- },
- "response": {
- "$ref": "WorkflowList"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- },
- "show": {
- "id": "arvados.workflows.show",
- "path": "workflows/{uuid}",
- "httpMethod": "GET",
- "description": "show workflows",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- },
- "select": {
- "type": "array",
- "description": "Attributes of the object to return in the response.",
- "required": false,
- "location": "query"
- }
- },
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- "destroy": {
- "id": "arvados.workflows.destroy",
- "path": "workflows/{uuid}",
- "httpMethod": "DELETE",
- "description": "destroy workflows",
- "parameters": {
- "uuid": {
- "type": "string",
- "description": "",
- "required": true,
- "location": "path"
- }
- },
- "response": {
- "$ref": "Workflow"
- },
- "scopes": [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- },
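Every list method above repeats the same query parameters (filters, where, order, select, distinct, limit, offset, count). A short sketch of how they combine, using workflows as the example resource and the same Python SDK client as before; the '%demo%' pattern is only an illustration:

import arvados

api = arvados.api('v1')
page = api.workflows().list(
    filters=[['name', 'like', '%demo%']],    # "filters": array of [attribute, operator, operand]
    select=['uuid', 'name', 'modified_at'],  # return only these attributes
    limit=5,                                 # schema default is 100
).execute()
print(page['items_available'], 'matching workflows')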
- "configs": {
- "methods": {
- "get": {
- "id": "arvados.configs.get",
- "path": "config",
- "httpMethod": "GET",
- "description": "Get public config",
- "parameters": {},
- "parameterOrder": [],
- "response": {},
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- }
- }
- },
- "vocabularies": {
- "methods": {
- "get": {
- "id": "arvados.vocabularies.get",
- "path": "vocabulary",
- "httpMethod": "GET",
- "description": "Get vocabulary definition",
- "parameters": {},
- "parameterOrder": [],
- "response": {},
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- }
- }
- },
- "sys": {
- "methods": {
- "get": {
- "id": "arvados.sys.trash_sweep",
- "path": "sys/trash_sweep",
- "httpMethod": "POST",
- "description": "apply scheduled trash and delete operations",
- "parameters": {},
- "parameterOrder": [],
- "response": {},
- "scopes": [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
- }
- }
- }
- },
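Clients fetch this whole document from the cluster before making any of the calls above. A small sketch, assuming the Google-style discovery endpoint that Arvados serves for this file:

import json
import urllib.request

api_host = 'xxxxx.arvadosapi.com'  # hypothetical cluster hostname
url = f'https://{api_host}/discovery/v1/apis/arvados/v1/rest'
with urllib.request.urlopen(url) as resp:
    discovery = json.load(resp)

print(discovery['revision'])           # e.g. "20250402" after this change
print(sorted(discovery['resources']))  # configs, sys, users, workflows, ...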
- "revision": "20231117",
- "schemas": {
- "ApiClientList": {
- "id": "ApiClientList",
- "description": "ApiClient list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#apiClientList.",
- "default": "arvados#apiClientList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of ApiClients.",
- "items": {
- "$ref": "ApiClient"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of ApiClients."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of ApiClients."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "ApiClient": {
- "id": "ApiClient",
- "description": "ApiClient",
- "type": "object",
- "uuidPrefix": "ozdt8",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "name": {
- "type": "string"
- },
- "url_prefix": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
- },
- "is_trusted": {
- "type": "boolean"
- }
- }
- },
- "ApiClientAuthorizationList": {
- "id": "ApiClientAuthorizationList",
- "description": "ApiClientAuthorization list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#apiClientAuthorizationList.",
- "default": "arvados#apiClientAuthorizationList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of ApiClientAuthorizations.",
- "items": {
- "$ref": "ApiClientAuthorization"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of ApiClientAuthorizations."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of ApiClientAuthorizations."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "ApiClientAuthorization": {
- "id": "ApiClientAuthorization",
- "description": "ApiClientAuthorization",
- "type": "object",
- "uuidPrefix": "gj3su",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "api_token": {
- "type": "string"
- },
- "api_client_id": {
- "type": "integer"
- },
- "user_id": {
- "type": "integer"
- },
- "created_by_ip_address": {
- "type": "string"
- },
- "last_used_by_ip_address": {
- "type": "string"
- },
- "last_used_at": {
- "type": "datetime"
- },
- "expires_at": {
- "type": "datetime"
- },
- "created_at": {
- "type": "datetime"
- },
- "default_owner_uuid": {
- "type": "string"
- },
- "scopes": {
- "type": "Array"
- }
- }
- },
- "AuthorizedKeyList": {
- "id": "AuthorizedKeyList",
- "description": "AuthorizedKey list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#authorizedKeyList.",
- "default": "arvados#authorizedKeyList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of AuthorizedKeys.",
- "items": {
- "$ref": "AuthorizedKey"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of AuthorizedKeys."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of AuthorizedKeys."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "AuthorizedKey": {
- "id": "AuthorizedKey",
- "description": "AuthorizedKey",
- "type": "object",
- "uuidPrefix": "fngyi",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "name": {
- "type": "string"
- },
- "key_type": {
- "type": "string"
- },
- "authorized_user_uuid": {
- "type": "string"
- },
- "public_key": {
- "type": "text"
- },
- "expires_at": {
- "type": "datetime"
- },
- "created_at": {
- "type": "datetime"
- }
- }
- },
- "CollectionList": {
- "id": "CollectionList",
- "description": "Collection list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#collectionList.",
- "default": "arvados#collectionList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of Collections.",
- "items": {
- "$ref": "Collection"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Collections."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Collections."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
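The *List schemas here all share the same envelope (kind, etag, items, items_available). A sketch of offset-based paging over that shape, reusing the Python SDK client from the earlier examples:

import arvados

api = arvados.api('v1')
seen = 0
while True:
    page = api.collections().list(limit=100, offset=seen,
                                  select=['uuid', 'name']).execute()
    if not page['items']:
        break
    for c in page['items']:
        print(c['uuid'], c['name'])
    seen += len(page['items'])
print('total listed:', seen)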
- "Collection": {
- "id": "Collection",
- "description": "Collection",
- "type": "object",
- "uuidPrefix": "4zz18",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "portable_data_hash": {
- "type": "string"
- },
- "replication_desired": {
- "type": "integer"
- },
- "replication_confirmed_at": {
- "type": "datetime"
- },
- "replication_confirmed": {
- "type": "integer"
- },
- "manifest_text": {
- "type": "text"
- },
- "name": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "properties": {
- "type": "Hash"
- },
- "delete_at": {
- "type": "datetime"
- },
- "trash_at": {
- "type": "datetime"
- },
- "is_trashed": {
- "type": "boolean"
- },
- "storage_classes_desired": {
- "type": "Array"
- },
- "storage_classes_confirmed": {
- "type": "Array"
- },
- "storage_classes_confirmed_at": {
- "type": "datetime"
- },
- "current_version_uuid": {
- "type": "string"
- },
- "version": {
- "type": "integer"
- },
- "preserve_version": {
- "type": "boolean"
- },
- "file_count": {
- "type": "integer"
- },
- "file_size_total": {
- "type": "integer"
- }
- }
- },
- "ContainerList": {
- "id": "ContainerList",
- "description": "Container list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#containerList.",
- "default": "arvados#containerList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of Containers.",
- "items": {
- "$ref": "Container"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Containers."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Containers."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "Container": {
- "id": "Container",
- "description": "Container",
- "type": "object",
- "uuidPrefix": "dz642",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
- },
- "modified_at": {
- "type": "datetime"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "state": {
- "type": "string"
- },
- "started_at": {
- "type": "datetime"
- },
- "finished_at": {
- "type": "datetime"
- },
- "log": {
- "type": "string"
- },
- "environment": {
- "type": "Hash"
- },
- "cwd": {
- "type": "string"
- },
- "command": {
- "type": "Array"
- },
- "output_path": {
- "type": "string"
- },
- "mounts": {
- "type": "Hash"
- },
- "runtime_constraints": {
- "type": "Hash"
- },
- "output": {
- "type": "string"
- },
- "container_image": {
- "type": "string"
- },
- "progress": {
- "type": "float"
- },
- "priority": {
- "type": "integer"
- },
- "exit_code": {
- "type": "integer"
- },
- "auth_uuid": {
- "type": "string"
- },
- "locked_by_uuid": {
- "type": "string"
- },
- "scheduling_parameters": {
- "type": "Hash"
- },
- "runtime_status": {
- "type": "Hash"
- },
- "runtime_user_uuid": {
- "type": "text"
- },
- "runtime_auth_scopes": {
- "type": "Array"
- },
- "lock_count": {
- "type": "integer"
- },
- "gateway_address": {
- "type": "string"
- },
- "interactive_session_started": {
- "type": "boolean"
- },
- "output_storage_classes": {
- "type": "Array"
- },
- "output_properties": {
- "type": "Hash"
- },
- "cost": {
- "type": "float"
- },
- "subrequests_cost": {
- "type": "float"
- }
- }
- },
- "ContainerRequestList": {
- "id": "ContainerRequestList",
- "description": "ContainerRequest list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#containerRequestList.",
- "default": "arvados#containerRequestList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of ContainerRequests.",
- "items": {
- "$ref": "ContainerRequest"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of ContainerRequests."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of ContainerRequests."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "ContainerRequest": {
- "id": "ContainerRequest",
- "description": "ContainerRequest",
- "type": "object",
- "uuidPrefix": "xvhdp",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
- },
- "modified_at": {
- "type": "datetime"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "description": {
- "type": "text"
- },
- "properties": {
- "type": "Hash"
- },
- "state": {
- "type": "string"
- },
- "requesting_container_uuid": {
- "type": "string"
- },
- "container_uuid": {
- "type": "string"
- },
- "container_count_max": {
- "type": "integer"
- },
- "mounts": {
- "type": "Hash"
- },
- "runtime_constraints": {
- "type": "Hash"
- },
- "container_image": {
- "type": "string"
- },
- "environment": {
- "type": "Hash"
- },
- "cwd": {
- "type": "string"
- },
- "command": {
- "type": "Array"
- },
- "output_path": {
- "type": "string"
- },
- "priority": {
- "type": "integer"
- },
- "expires_at": {
- "type": "datetime"
- },
- "filters": {
- "type": "text"
- },
- "container_count": {
- "type": "integer"
- },
- "use_existing": {
- "type": "boolean"
- },
- "scheduling_parameters": {
- "type": "Hash"
- },
- "output_uuid": {
- "type": "string"
- },
- "log_uuid": {
- "type": "string"
- },
- "output_name": {
- "type": "string"
- },
- "output_ttl": {
- "type": "integer"
- },
- "output_storage_classes": {
- "type": "Array"
- },
- "output_properties": {
- "type": "Hash"
- },
- "cumulative_cost": {
- "type": "float"
- }
- }
- },
- "GroupList": {
- "id": "GroupList",
- "description": "Group list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#groupList.",
- "default": "arvados#groupList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of Groups.",
- "items": {
- "$ref": "Group"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Groups."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Groups."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "Group": {
- "id": "Group",
- "description": "Group",
- "type": "object",
- "uuidPrefix": "j7d0g",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "name": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "group_class": {
- "type": "string"
- },
- "trash_at": {
- "type": "datetime"
- },
- "is_trashed": {
- "type": "boolean"
- },
- "delete_at": {
- "type": "datetime"
- },
- "properties": {
- "type": "Hash"
- },
- "frozen_by_uuid": {
- "type": "string"
- }
- }
- },
- "HumanList": {
- "id": "HumanList",
- "description": "Human list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#humanList.",
- "default": "arvados#humanList"
- },
- "etag": {
- "type": "string",
- "description": "List version."
- },
- "items": {
- "type": "array",
- "description": "The list of Humans.",
- "items": {
- "$ref": "Human"
- }
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Humans."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Humans."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Human": {
- "id": "Human",
- "description": "Human",
- "type": "object",
- "uuidPrefix": "7a9it",
- "properties": {
- "uuid": {
- "type": "string"
- },
- "etag": {
- "type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "properties": {
- "type": "Hash"
- },
- "created_at": {
- "type": "datetime"
+ "sys": {
+ "methods": {
+ "get": {
+ "id": "arvados.sys.trash_sweep",
+ "path": "sys/trash_sweep",
+ "httpMethod": "POST",
+ "description": "Run scheduled data trash and sweep operations across this cluster's Keep services.",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
}
}
- },
- "JobList": {
- "id": "JobList",
- "description": "Job list",
+ }
+ },
+ "revision": "20250402",
+ "schemas": {
+ "ApiClientAuthorizationList": {
+ "id": "ApiClientAuthorizationList",
+ "description": "A list of ApiClientAuthorization objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#jobList.",
- "default": "arvados#jobList"
+ "description": "Object type. Always arvados#apiClientAuthorizationList.",
+ "default": "arvados#apiClientAuthorizationList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Jobs.",
+ "description": "An array of matching ApiClientAuthorization objects.",
"items": {
- "$ref": "Job"
+ "$ref": "ApiClientAuthorization"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Jobs."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Jobs."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Job": {
- "id": "Job",
- "description": "Job",
+ "ApiClientAuthorization": {
+ "id": "ApiClientAuthorization",
+ "description": "Arvados API client authorization token\n\nThis resource represents an API token a user may use to authenticate an\nArvados API request.",
"type": "object",
- "uuidPrefix": "8i9sb",
+ "uuidPrefix": "gj3su",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
- },
- "owner_uuid": {
- "type": "string"
- },
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
- "modified_at": {
- "type": "datetime"
- },
- "submit_id": {
- "type": "string"
- },
- "script": {
- "type": "string"
+ "description": "Object cache version."
},
- "script_version": {
+ "api_token": {
+ "description": "The secret token that can be used to authorize Arvados API requests.",
"type": "string"
},
- "script_parameters": {
- "type": "Hash"
- },
- "cancelled_by_client_uuid": {
+ "created_by_ip_address": {
+ "description": "The IP address of the client that created this token.",
"type": "string"
},
- "cancelled_by_user_uuid": {
+ "last_used_by_ip_address": {
+ "description": "The IP address of the client that last used this token.",
"type": "string"
},
- "cancelled_at": {
- "type": "datetime"
- },
- "started_at": {
+ "last_used_at": {
+ "description": "The last time this token was used to authorize a request. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "finished_at": {
+ "expires_at": {
+ "description": "The time after which this token is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "running": {
- "type": "boolean"
- },
- "success": {
- "type": "boolean"
- },
- "output": {
- "type": "string"
- },
"created_at": {
+ "description": "The time this API client authorization was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "is_locked_by_uuid": {
- "type": "string"
- },
- "log": {
- "type": "string"
- },
- "tasks_summary": {
- "type": "Hash"
- },
- "runtime_constraints": {
- "type": "Hash"
- },
- "nondeterministic": {
- "type": "boolean"
- },
- "repository": {
- "type": "string"
- },
- "supplied_script_version": {
- "type": "string"
- },
- "docker_image_locator": {
- "type": "string"
- },
- "priority": {
- "type": "integer"
- },
- "description": {
- "type": "string"
- },
- "state": {
- "type": "string"
- },
- "arvados_sdk_version": {
- "type": "string"
+ "scopes": {
+ "description": "An array of strings identifying HTTP methods and API paths this token is\nauthorized to use. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n\n",
+ "type": "Array"
},
- "components": {
- "type": "Hash"
+ "uuid": {
+ "type": "string",
+ "description": "This API client authorization's Arvados UUID, like `zzzzz-gj3su-12345abcde67890`."
}
}
},
- "JobTaskList": {
- "id": "JobTaskList",
- "description": "JobTask list",
+ "AuthorizedKeyList": {
+ "id": "AuthorizedKeyList",
+ "description": "A list of AuthorizedKey objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#jobTaskList.",
- "default": "arvados#jobTaskList"
+ "description": "Object type. Always arvados#authorizedKeyList.",
+ "default": "arvados#authorizedKeyList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of JobTasks.",
+ "description": "An array of matching AuthorizedKey objects.",
"items": {
- "$ref": "JobTask"
+ "$ref": "AuthorizedKey"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of JobTasks."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of JobTasks."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "JobTask": {
- "id": "JobTask",
- "description": "JobTask",
+ "AuthorizedKey": {
+ "id": "AuthorizedKey",
+ "description": "Arvados authorized public key\n\nThis resource represents a public key a user may use to authenticate themselves\nto services on the cluster. Its primary use today is to store SSH keys for\nvirtual machines (\"shell nodes\"). It may be extended to store other keys in\nthe future.",
"type": "object",
- "uuidPrefix": "ot0gb",
+ "uuidPrefix": "fngyi",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
- "owner_uuid": {
- "type": "string"
+ "uuid": {
+ "type": "string",
+ "description": "This authorized key's Arvados UUID, like `zzzzz-fngyi-12345abcde67890`."
},
- "modified_by_client_uuid": {
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this authorized key.",
"type": "string"
},
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this authorized key.",
"type": "string"
},
"modified_at": {
+ "description": "The time this authorized key was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "job_uuid": {
+ "name": {
+ "description": "The name of this authorized key assigned by a user.",
"type": "string"
},
- "sequence": {
- "type": "integer"
- },
- "parameters": {
- "type": "Hash"
- },
- "output": {
- "type": "text"
- },
- "progress": {
- "type": "float"
- },
- "success": {
- "type": "boolean"
- },
- "created_at": {
- "type": "datetime"
+ "key_type": {
+ "description": "A string identifying what type of service uses this key. Supported values are:\n\n * `\"SSH\"`\n\n",
+ "type": "string"
},
- "created_by_job_task_uuid": {
+ "authorized_user_uuid": {
+ "description": "The UUID of the Arvados user that is authorized by this key.",
"type": "string"
},
- "qsequence": {
- "type": "integer"
+ "public_key": {
+ "description": "The full public key, in the format referenced by `key_type`.",
+ "type": "text"
},
- "started_at": {
+ "expires_at": {
+ "description": "The time after which this key is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "finished_at": {
+ "created_at": {
+ "description": "The time this authorized key was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
}
}
},
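For illustration, a minimal sketch of registering an SSH public key via the Arvados Python SDK (assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment; the key path and label are placeholders):

import arvados

api = arvados.api('v1')
user = api.users().current().execute()

# Read a public key to store for use on shell nodes.
with open('/home/example/.ssh/id_ed25519.pub') as f:  # placeholder path
    public_key = f.read()

key = api.authorized_keys().create(body={'authorized_key': {
    'name': 'example-laptop',  # placeholder label
    'key_type': 'SSH',
    'authorized_user_uuid': user['uuid'],
    'public_key': public_key,
}}).execute()
print(key['uuid'])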
- "KeepDiskList": {
- "id": "KeepDiskList",
- "description": "KeepDisk list",
+ "CollectionList": {
+ "id": "CollectionList",
+ "description": "A list of Collection objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#keepDiskList.",
- "default": "arvados#keepDiskList"
+ "description": "Object type. Always arvados#collectionList.",
+ "default": "arvados#collectionList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of KeepDisks.",
+ "description": "An array of matching Collection objects.",
"items": {
- "$ref": "KeepDisk"
+ "$ref": "Collection"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of KeepDisks."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of KeepDisks."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "KeepDisk": {
- "id": "KeepDisk",
- "description": "KeepDisk",
+ "Collection": {
+ "id": "Collection",
+ "description": "Arvados data collection\n\nA collection describes how a set of files is stored in data blocks in Keep,\nalong with associated metadata.",
"type": "object",
- "uuidPrefix": "penuu",
+ "uuidPrefix": "4zz18",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this collection.",
"type": "string"
},
- "modified_by_client_uuid": {
- "type": "string"
+ "created_at": {
+ "description": "The time this collection was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this collection.",
"type": "string"
},
"modified_at": {
+ "description": "The time this collection was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "node_uuid": {
- "type": "string"
- },
- "filesystem_uuid": {
+ "portable_data_hash": {
+ "description": "The portable data hash of this collection. This string provides a unique\nand stable reference to these contents.",
"type": "string"
},
- "bytes_total": {
- "type": "integer"
- },
- "bytes_free": {
+ "replication_desired": {
+ "description": "The number of copies that should be made for data in this collection.",
"type": "integer"
},
- "is_readable": {
- "type": "boolean"
- },
- "is_writable": {
- "type": "boolean"
- },
- "last_read_at": {
- "type": "datetime"
- },
- "last_write_at": {
- "type": "datetime"
- },
- "last_ping_at": {
+ "replication_confirmed_at": {
+ "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this collection. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "created_at": {
- "type": "datetime"
+ "replication_confirmed": {
+ "description": "The number of copies of data in this collection that the cluster has confirmed\nexist in storage.",
+ "type": "integer"
},
- "keep_service_uuid": {
- "type": "string"
- }
- }
- },
- "KeepServiceList": {
- "id": "KeepServiceList",
- "description": "KeepService list",
- "type": "object",
- "properties": {
- "kind": {
+ "uuid": {
"type": "string",
- "description": "Object type. Always arvados#keepServiceList.",
- "default": "arvados#keepServiceList"
+ "description": "This collection's Arvados UUID, like `zzzzz-4zz18-12345abcde67890`."
},
- "etag": {
- "type": "string",
- "description": "List version."
+ "manifest_text": {
+ "description": "The manifest text that describes how files are constructed from data blocks\nin this collection. Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n",
+ "type": "text"
},
- "items": {
- "type": "array",
- "description": "The list of KeepServices.",
- "items": {
- "$ref": "KeepService"
- }
+ "name": {
+ "description": "The name of this collection assigned by a user.",
+ "type": "string"
},
- "next_link": {
- "type": "string",
- "description": "A link to the next page of KeepServices."
+ "description": {
+ "description": "A longer HTML description of this collection assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "string"
},
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of KeepServices."
+ "properties": {
+ "description": "A hash of arbitrary metadata for this collection.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
},
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "KeepService": {
- "id": "KeepService",
- "description": "KeepService",
- "type": "object",
- "uuidPrefix": "bi6l4",
- "properties": {
- "uuid": {
- "type": "string"
+ "delete_at": {
+ "description": "The time this collection will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
- "etag": {
- "type": "string",
- "description": "Object version."
+ "trash_at": {
+ "description": "The time this collection will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
- "owner_uuid": {
- "type": "string"
+ "is_trashed": {
+ "description": "A boolean flag to indicate whether or not this collection is trashed.",
+ "type": "boolean"
},
- "modified_by_client_uuid": {
- "type": "string"
+ "storage_classes_desired": {
+ "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this collection. Storage classes are configured by the cluster administrator.",
+ "type": "Array"
},
- "modified_by_user_uuid": {
- "type": "string"
+ "storage_classes_confirmed": {
+ "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this collection's data.",
+ "type": "Array"
},
- "modified_at": {
+ "storage_classes_confirmed_at": {
+ "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "service_host": {
+ "current_version_uuid": {
+ "description": "The UUID of the current version of this collection.",
"type": "string"
},
- "service_port": {
+ "version": {
+ "description": "An integer that counts which version of a collection this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n",
"type": "integer"
},
- "service_ssl_flag": {
+ "preserve_version": {
+ "description": "A boolean flag to indicate whether this specific version of this collection\nshould be persisted in cluster storage.",
"type": "boolean"
},
- "service_type": {
- "type": "string"
- },
- "created_at": {
- "type": "datetime"
+ "file_count": {
+ "description": "The number of files represented in this collection's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
},
- "read_only": {
- "type": "boolean"
+ "file_size_total": {
+ "description": "The total size in bytes of files represented in this collection's `manifest_text`.\nThis attribute is read-only.",
+ "type": "integer"
}
}
},
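As a usage sketch (assuming the Arvados Python SDK is installed and configured), the collection attributes above can be exercised like this; the file name and collection name are placeholders:

import arvados

api = arvados.api('v1')

# Write one file into a new collection and save it under a name.
coll = arvados.collection.Collection(api_client=api)
with coll.open('hello.txt', 'w') as f:
    f.write('hello world\n')
coll.save_new(name='example collection')  # placeholder name

record = api.collections().get(uuid=coll.manifest_locator()).execute()
print(record['portable_data_hash'], record['file_count'], record['file_size_total'])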
- "LinkList": {
- "id": "LinkList",
- "description": "Link list",
+ "ComputedPermissionList": {
+ "id": "ComputedPermissionList",
+ "description": "A list of ComputedPermission objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#linkList.",
- "default": "arvados#linkList"
+ "description": "Object type. Always arvados#computedPermissionList.",
+ "default": "arvados#computedPermissionList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Links.",
+ "description": "An array of matching ComputedPermission objects.",
"items": {
- "$ref": "Link"
+ "$ref": "ComputedPermission"
}
+ }
+ }
+ },
+ "ComputedPermission": {
+ "id": "ComputedPermission",
+ "description": "Arvados computed permission\n\nComputed permissions do not correspond directly to any Arvados resource, but\nprovide a simple way to query the entire graph of permissions granted to\nusers and groups.",
+ "type": "object",
+ "properties": {
+ "user_uuid": {
+ "description": "The UUID of the Arvados user who has this permission.",
+ "type": "string"
},
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Links."
+ "target_uuid": {
+ "description": "The UUID of the Arvados object the user has access to.",
+ "type": "string"
},
- "next_page_token": {
+ "perm_level": {
+ "description": "A string representing the user's level of access to the target object.\nPossible values are:\n\n * `\"can_read\"`\n * `\"can_write\"`\n * `\"can_manage\"`\n\n",
+ "type": "string"
+ }
+ }
+ },
+ "ContainerList": {
+ "id": "ContainerList",
+ "description": "A list of Container objects.",
+ "type": "object",
+ "properties": {
+ "kind": {
"type": "string",
- "description": "The page token for the next page of Links."
+ "description": "Object type. Always arvados#containerList.",
+ "default": "arvados#containerList"
},
- "selfLink": {
+ "etag": {
"type": "string",
- "description": "A link back to this list."
+ "description": "List cache version."
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of matching Container objects.",
+ "items": {
+ "$ref": "Container"
+ }
}
}
},
- "Link": {
- "id": "Link",
- "description": "Link",
+ "Container": {
+ "id": "Container",
+ "description": "Arvados container record\n\nA container represents compute work that has been or should be dispatched,\nalong with its results. A container can satisfy one or more container requests.",
"type": "object",
- "uuidPrefix": "o0j2j",
+ "uuidPrefix": "dz642",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This container's Arvados UUID, like `zzzzz-dz642-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this container.",
"type": "string"
},
"created_at": {
+ "description": "The time this container was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
+ "modified_at": {
+ "description": "The time this container was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this container.",
"type": "string"
},
- "modified_at": {
+ "state": {
+ "description": "A string representing the container's current execution status. Possible\nvalues are:\n\n * `\"Queued\"` --- This container has not been dispatched yet.\n * `\"Locked\"` --- A dispatcher has claimed this container in preparation to run it.\n * `\"Running\"` --- A dispatcher is running this container.\n * `\"Cancelled\"` --- Container execution has been cancelled by user request.\n * `\"Complete\"` --- A dispatcher ran this container to completion and recorded the results.\n\n",
+ "type": "string"
+ },
+ "started_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "tail_uuid": {
+ "finished_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "log": {
+ "description": "The portable data hash of the Arvados collection that contains this\ncontainer's logs.",
"type": "string"
},
- "link_class": {
+ "environment": {
+ "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.",
+ "type": "Hash"
+ },
+ "cwd": {
+ "description": "A string that the defines the working directory that the dispatcher should\nuse when it executes the command inside this container.",
"type": "string"
},
- "name": {
+ "command": {
+ "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.",
+ "type": "Array"
+ },
+ "output_path": {
+ "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.",
"type": "string"
},
- "head_uuid": {
+ "mounts": {
+ "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n",
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n",
+ "type": "Hash"
+ },
+ "output": {
+ "description": "The portable data hash of the Arvados collection that contains this\ncontainer's output file(s).",
"type": "string"
},
- "properties": {
+ "container_image": {
+ "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.",
+ "type": "string"
+ },
+ "progress": {
+ "description": "A float between 0.0 and 1.0 (inclusive) that represents the container's\nexecution progress. This attribute is not implemented yet.",
+ "type": "float"
+ },
+ "priority": {
+ "description": "An integer between 0 and 1000 (inclusive) that represents this container's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n",
+ "type": "integer"
+ },
+ "exit_code": {
+ "description": "An integer that records the Unix exit code of the `command` from a\nfinished container.",
+ "type": "integer"
+ },
+ "auth_uuid": {
+ "description": "The UUID of the Arvados API client authorization token that a dispatcher\nshould use to set up this container. This token is automatically created by\nArvados and this attribute automatically assigned unless a container is\ncreated with `runtime_token`.",
+ "type": "string"
+ },
+ "locked_by_uuid": {
+ "description": "The UUID of the Arvados API client authorization token that successfully\nlocked this container in preparation to execute it.",
+ "type": "string"
+ },
+ "scheduling_parameters": {
+ "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n",
+ "type": "Hash"
+ },
+ "runtime_status": {
+ "description": "A hash with status updates from a running container.\nRefer to the [runtime status reference][] for details.\n\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\n\n",
+ "type": "Hash"
+ },
+ "runtime_user_uuid": {
+ "description": "The UUID of the Arvados user associated with the API client authorization\ntoken used to run this container.",
+ "type": "text"
+ },
+ "runtime_auth_scopes": {
+ "description": "The `scopes` from the API client authorization token used to run this container.",
+ "type": "Array"
+ },
+ "lock_count": {
+ "description": "The number of times this container has been locked by a dispatcher. This\nmay be greater than 1 if a dispatcher locks a container but then execution is\ninterrupted for any reason.",
+ "type": "integer"
+ },
+ "gateway_address": {
+ "description": "A string with the address of the Arvados gateway server, in `HOST:PORT`\nformat. This is for internal use only.",
+ "type": "string"
+ },
+ "interactive_session_started": {
+ "description": "This flag is set true if any user starts an interactive shell inside the\nrunning container.",
+ "type": "boolean"
+ },
+ "output_storage_classes": {
+ "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container. Storage classes are configured by\nthe cluster administrator.",
+ "type": "Array"
+ },
+ "output_properties": {
+ "description": "A hash of arbitrary metadata to set on the output collection of this container.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
+ },
+ "cost": {
+ "description": "A float with the estimated cost of the cloud instance used to run this\ncontainer. The value is `0` if cost estimation is not available on this cluster.",
+ "type": "float"
+ },
+ "subrequests_cost": {
+ "description": "A float with the estimated cost of all cloud instances used to run this\ncontainer and all its subrequests. The value is `0` if cost estimation is not\navailable on this cluster.",
+ "type": "float"
+ },
+ "output_glob": {
+ "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n",
+ "type": "Array"
+ },
+ "service": {
+ "description": "A boolean flag. If set, it informs the system that this is a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.",
+ "type": "boolean"
+ },
+ "published_ports": {
+ "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
+ "type": "jsonb"
}
}
},
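The @state@ lifecycle above can be observed by polling; a sketch (Python SDK; the container UUID is a placeholder):

import time
import arvados

api = arvados.api('v1')
uuid = 'zzzzz-dz642-12345abcde67890'  # placeholder container UUID

# Poll until the container reaches a terminal state.
while True:
    c = api.containers().get(uuid=uuid).execute()
    print(c['state'], c.get('progress'))
    if c['state'] in ('Complete', 'Cancelled'):
        break
    time.sleep(10)

print('exit code:', c.get('exit_code'), 'output:', c.get('output'))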
- "LogList": {
- "id": "LogList",
- "description": "Log list",
+ "ContainerRequestList": {
+ "id": "ContainerRequestList",
+ "description": "A list of ContainerRequest objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#logList.",
- "default": "arvados#logList"
+ "description": "Object type. Always arvados#containerRequestList.",
+ "default": "arvados#containerRequestList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Logs.",
+ "description": "An array of matching ContainerRequest objects.",
"items": {
- "$ref": "Log"
+ "$ref": "ContainerRequest"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Logs."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Logs."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Log": {
- "id": "Log",
- "description": "Log",
+ "ContainerRequest": {
+ "id": "ContainerRequest",
+ "description": "Arvados container request\n\nA container request represents a user's request that Arvados do some compute\nwork, along with full details about what work should be done. Arvados will\nattempt to fulfill the request by mapping it to a matching container record,\nrunning the work on demand if necessary.",
"type": "object",
- "uuidPrefix": "57u5n",
+ "uuidPrefix": "xvhdp",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
- "id": {
- "type": "integer"
+ "uuid": {
+ "type": "string",
+ "description": "This container request's Arvados UUID, like `zzzzz-xvhdp-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this container request.",
"type": "string"
},
- "modified_by_client_uuid": {
- "type": "string"
+ "created_at": {
+ "description": "The time this container request was created. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
- "modified_by_user_uuid": {
- "type": "string"
+ "modified_at": {
+ "description": "The time this container request was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
},
- "object_uuid": {
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this container request.",
"type": "string"
},
- "event_at": {
- "type": "datetime"
- },
- "event_type": {
+ "name": {
+ "description": "The name of this container request assigned by a user.",
"type": "string"
},
- "summary": {
+ "description": {
+ "description": "A longer HTML description of this container request assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
"type": "text"
},
"properties": {
+ "description": "A hash of arbitrary metadata for this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
},
- "created_at": {
- "type": "datetime"
+ "state": {
+ "description": "A string indicating where this container request is in its lifecycle.\nPossible values are:\n\n * `\"Uncommitted\"` --- The container request has not been finalized and can still be edited.\n * `\"Committed\"` --- The container request is ready to be fulfilled.\n * `\"Final\"` --- The container request has been fulfilled or cancelled.\n\n",
+ "type": "string"
},
- "modified_at": {
- "type": "datetime"
+ "requesting_container_uuid": {
+ "description": "The UUID of the container that created this container request, if any.",
+ "type": "string"
},
- "object_owner_uuid": {
+ "container_uuid": {
+ "description": "The UUID of the container that fulfills this container request, if any.",
"type": "string"
- }
- }
- },
- "NodeList": {
- "id": "NodeList",
- "description": "Node list",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "description": "Object type. Always arvados#nodeList.",
- "default": "arvados#nodeList"
},
- "etag": {
- "type": "string",
- "description": "List version."
+ "container_count_max": {
+ "description": "An integer that defines the maximum number of times Arvados should attempt\nto dispatch a container to fulfill this container request.",
+ "type": "integer"
},
- "items": {
- "type": "array",
- "description": "The list of Nodes.",
- "items": {
- "$ref": "Node"
- }
+ "mounts": {
+ "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n",
+ "type": "Hash"
},
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Nodes."
+ "runtime_constraints": {
+ "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n",
+ "type": "Hash"
},
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Nodes."
+ "container_image": {
+ "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.",
+ "type": "string"
},
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
- }
- }
- },
- "Node": {
- "id": "Node",
- "description": "Node",
- "type": "object",
- "uuidPrefix": "7ekkf",
- "properties": {
- "uuid": {
+ "environment": {
+ "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.",
+ "type": "Hash"
+ },
+ "cwd": {
+ "description": "A string that the defines the working directory that the dispatcher should\nuse when it executes the command inside this container.",
"type": "string"
},
- "etag": {
- "type": "string",
- "description": "Object version."
+ "command": {
+ "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.",
+ "type": "Array"
},
- "owner_uuid": {
+ "output_path": {
+ "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.",
"type": "string"
},
- "created_at": {
+ "priority": {
+ "description": "An integer between 0 and 1000 (inclusive) that represents this container request's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n",
+ "type": "integer"
+ },
+ "expires_at": {
+ "description": "The time after which this container request will no longer be fulfilled. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
+ "filters": {
+ "description": "Filters that limit which existing containers are eligible to satisfy this\ncontainer request. This attribute is not implemented yet and should be null.",
+ "type": "text"
+ },
+ "container_count": {
+ "description": "An integer that records how many times Arvados has attempted to dispatch\na container to fulfill this container request.",
+ "type": "integer"
+ },
+ "use_existing": {
+ "description": "A boolean flag. If set, Arvados may choose to satisfy this container\nrequest with an eligible container that already exists. Otherwise, Arvados will\nsatisfy this container request with a newer container, which will usually result\nin the container running again.",
+ "type": "boolean"
+ },
+ "scheduling_parameters": {
+ "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n",
+ "type": "Hash"
+ },
+ "output_uuid": {
+ "description": "The UUID of the Arvados collection that contains output for all the\ncontainer(s) that were dispatched to fulfill this container request.",
"type": "string"
},
- "modified_by_user_uuid": {
+ "log_uuid": {
+ "description": "The UUID of the Arvados collection that contains logs for all the\ncontainer(s) that were dispatched to fulfill this container request.",
"type": "string"
},
- "modified_at": {
- "type": "datetime"
+ "output_name": {
+ "description": "The name to set on the output collection of this container request.",
+ "type": "string"
},
- "slot_number": {
+ "output_ttl": {
+ "description": "An integer in seconds. If greater than zero, when an output collection is\ncreated for this container request, its `expires_at` attribute will be set this\nfar in the future.",
"type": "integer"
},
- "hostname": {
- "type": "string"
+ "output_storage_classes": {
+ "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container request. Storage classes are configured by\nthe cluster administrator.",
+ "type": "Array"
+ },
+ "output_properties": {
+ "description": "A hash of arbitrary metadata to set on the output collection of this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
+ "type": "Hash"
},
- "domain": {
- "type": "string"
+ "cumulative_cost": {
+ "description": "A float with the estimated cost of all cloud instances used to run\ncontainer(s) to fulfill this container request and their subrequests.\nThe value is `0` if cost estimation is not available on this cluster.",
+ "type": "float"
},
- "ip_address": {
- "type": "string"
+ "output_glob": {
+ "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n",
+ "type": "Array"
},
- "last_ping_at": {
- "type": "datetime"
+ "service": {
+ "description": "A boolean flag. If set, it informs the system that this request is for a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.",
+ "type": "boolean"
},
- "properties": {
+ "published_ports": {
+ "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
"type": "Hash"
- },
- "job_uuid": {
- "type": "string"
}
}
},
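Pulling the fields above together, a minimal committed container request might look like this (a sketch; the image and command are placeholders, and @mounts@ / @runtime_constraints@ follow the references cited in the schema):

import arvados

api = arvados.api('v1')

cr = api.container_requests().create(body={'container_request': {
    'name': 'example request',          # placeholder name
    'state': 'Committed',
    'container_image': 'arvados/jobs',  # placeholder image
    'command': ['echo', 'hello'],
    'cwd': '/',
    'output_path': '/out',
    'mounts': {'/out': {'kind': 'tmp', 'capacity': 1 << 20}},
    'runtime_constraints': {'vcpus': 1, 'ram': 256 << 20},
    'priority': 500,
}}).execute()
print(cr['uuid'], cr['state'], cr['container_uuid'])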
- "PipelineInstanceList": {
- "id": "PipelineInstanceList",
- "description": "PipelineInstance list",
+ "CredentialList": {
+ "id": "CredentialList",
+ "description": "A list of Credential objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#pipelineInstanceList.",
- "default": "arvados#pipelineInstanceList"
+ "description": "Object type. Always arvados#credentialList.",
+ "default": "arvados#credentialList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of PipelineInstances.",
+ "description": "An array of matching Credential objects.",
"items": {
- "$ref": "PipelineInstance"
+ "$ref": "Credential"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of PipelineInstances."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of PipelineInstances."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "PipelineInstance": {
- "id": "PipelineInstance",
- "description": "PipelineInstance",
+ "Credential": {
+ "id": "Credential",
+ "description": "Arvados credential.",
"type": "object",
- "uuidPrefix": "d1hrv",
+ "uuidPrefix": "oss07",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This credential's Arvados UUID, like `zzzzz-oss07-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this credential.",
"type": "string"
},
"created_at": {
+ "description": "The time this credential was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
- "modified_by_user_uuid": {
- "type": "string"
- },
"modified_at": {
+ "description": "The time this credential was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "pipeline_template_uuid": {
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this credential.",
"type": "string"
},
"name": {
+ "description": "The name of this credential assigned by a user.",
"type": "string"
},
- "components": {
- "type": "Hash"
- },
- "properties": {
- "type": "Hash"
+ "description": {
+ "description": "A longer HTML description of this credential assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "type": "text"
},
- "state": {
+ "credential_class": {
+ "description": "The type of credential being stored.",
"type": "string"
},
- "components_summary": {
- "type": "Hash"
+ "scopes": {
+ "description": "The resources the credential applies to or should be used with.",
+ "type": "Array"
},
- "started_at": {
- "type": "datetime"
+ "external_id": {
+ "description": "The non-secret external identifier associated with a credential, e.g. a username.",
+ "type": "string"
},
- "finished_at": {
+ "expires_at": {
+ "description": "Date after which the credential_secret field is no longer valid. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
- },
- "description": {
- "type": "string"
}
}
},
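Assuming the new credentials resource follows the same discovery conventions as the other resources (unverified; this endpoint is introduced by this patch), a hypothetical listing sketch:

import arvados

api = arvados.api('v1')

# Inspect the non-secret metadata fields defined in the schema above.
page = api.credentials().list(limit=20).execute()
for cred in page['items']:
    print(cred['name'], cred['credential_class'], cred['external_id'], cred['expires_at'])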
- "PipelineTemplateList": {
- "id": "PipelineTemplateList",
- "description": "PipelineTemplate list",
+ "GroupList": {
+ "id": "GroupList",
+ "description": "A list of Group objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#pipelineTemplateList.",
- "default": "arvados#pipelineTemplateList"
+ "description": "Object type. Always arvados#groupList.",
+ "default": "arvados#groupList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of PipelineTemplates.",
+ "description": "An array of matching Group objects.",
"items": {
- "$ref": "PipelineTemplate"
+ "$ref": "Group"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of PipelineTemplates."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of PipelineTemplates."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "PipelineTemplate": {
- "id": "PipelineTemplate",
- "description": "PipelineTemplate",
+ "Group": {
+ "id": "Group",
+ "description": "Arvados group\n\nGroups provide a way to organize users or data together, depending on their\n`group_class`.",
"type": "object",
- "uuidPrefix": "p5p6p",
+ "uuidPrefix": "j7d0g",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This group's Arvados UUID, like `zzzzz-j7d0g-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this group.",
"type": "string"
},
"created_at": {
+ "description": "The time this group was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this group.",
"type": "string"
},
"modified_at": {
+ "description": "The time this group was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"name": {
+ "description": "The name of this group assigned by a user.",
+ "type": "string"
+ },
+ "description": {
+ "description": "A longer HTML description of this group assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
"type": "string"
},
- "components": {
+ "group_class": {
+ "description": "A string representing which type of group this is. One of:\n\n * `\"filter\"` --- A virtual project whose contents are selected dynamically by filters.\n * `\"project\"` --- An Arvados project that can contain collections,\n container records, workflows, and subprojects.\n * `\"role\"` --- A group of users that can be granted permissions in Arvados.\n\n",
+ "type": "string"
+ },
+ "trash_at": {
+ "description": "The time this group will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "description": "A boolean flag to indicate whether or not this group is trashed.",
+ "type": "boolean"
+ },
+ "delete_at": {
+ "description": "The time this group will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "properties": {
+ "description": "A hash of arbitrary metadata for this group.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
},
- "description": {
+ "frozen_by_uuid": {
+ "description": "The UUID of the user that has frozen this group, if any. Frozen projects\ncannot have their contents or metadata changed, even by admins.",
"type": "string"
}
}
},
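For example, creating a @project@-class group (Python SDK sketch; the name and description are placeholders):

import arvados

api = arvados.api('v1')

# Create a project owned by the current user.
project = api.groups().create(body={'group': {
    'name': 'Example analyses',  # placeholder name
    'group_class': 'project',
    'description': '<p>Scratch project for testing.</p>',
}}).execute()
print(project['uuid'])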
- "RepositoryList": {
- "id": "RepositoryList",
- "description": "Repository list",
+ "KeepServiceList": {
+ "id": "KeepServiceList",
+ "description": "A list of KeepService objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#repositoryList.",
- "default": "arvados#repositoryList"
+ "description": "Object type. Always arvados#keepServiceList.",
+ "default": "arvados#keepServiceList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Repositories.",
+ "description": "An array of matching KeepService objects.",
"items": {
- "$ref": "Repository"
+ "$ref": "KeepService"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Repositories."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Repositories."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Repository": {
- "id": "Repository",
- "description": "Repository",
+ "KeepService": {
+ "id": "KeepService",
+ "description": "Arvados Keep service\n\nThis resource stores information about a single Keep service in this Arvados\ncluster that clients can contact to retrieve and store data.",
"type": "object",
- "uuidPrefix": "s0uqq",
+ "uuidPrefix": "bi6l4",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
- "owner_uuid": {
- "type": "string"
+ "uuid": {
+ "type": "string",
+ "description": "This Keep service's Arvados UUID, like `zzzzz-bi6l4-12345abcde67890`."
},
- "modified_by_client_uuid": {
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this Keep service.",
"type": "string"
},
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this Keep service.",
"type": "string"
},
"modified_at": {
+ "description": "The time this Keep service was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "name": {
+ "service_host": {
+ "description": "The DNS hostname of this Keep service.",
+ "type": "string"
+ },
+ "service_port": {
+ "description": "The TCP port where this Keep service listens.",
+ "type": "integer"
+ },
+ "service_ssl_flag": {
+ "description": "A boolean flag that indicates whether or not this Keep service uses TLS/SSL.",
+ "type": "boolean"
+ },
+ "service_type": {
+ "description": "A string that describes which type of Keep service this is. One of:\n\n * `\"disk\"` --- A service that stores blocks on a local filesystem.\n * `\"blob\"` --- A service that stores blocks in a cloud object store.\n * `\"proxy\"` --- A keepproxy service.\n\n",
"type": "string"
},
"created_at": {
+ "description": "The time this Keep service was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
+ },
+ "read_only": {
+ "description": "A boolean flag. If set, this Keep service does not accept requests to write data\nblocks; it only serves blocks it already has.",
+ "type": "boolean"
}
}
},
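A short sketch that lists the Keep services registered on a cluster and formats the fields above:

import arvados

api = arvados.api('v1')

for svc in api.keep_services().list().execute()['items']:
    scheme = 'https' if svc['service_ssl_flag'] else 'http'
    mode = 'read-only' if svc['read_only'] else 'read-write'
    print(svc['service_type'], '%s://%s:%d' % (scheme, svc['service_host'], svc['service_port']), mode)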
- "SpecimenList": {
- "id": "SpecimenList",
- "description": "Specimen list",
+ "LinkList": {
+ "id": "LinkList",
+ "description": "A list of Link objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#specimenList.",
- "default": "arvados#specimenList"
+ "description": "Object type. Always arvados#linkList.",
+ "default": "arvados#linkList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Specimens.",
+ "description": "An array of matching Link objects.",
"items": {
- "$ref": "Specimen"
+ "$ref": "Link"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Specimens."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Specimens."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Specimen": {
- "id": "Specimen",
- "description": "Specimen",
+ "Link": {
+ "id": "Link",
+ "description": "Arvados object link\n\nA link provides a way to define relationships between Arvados objects,\ndepending on their `link_class`.",
"type": "object",
- "uuidPrefix": "j58dm",
+ "uuidPrefix": "o0j2j",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This link's Arvados UUID, like `zzzzz-o0j2j-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this link.",
"type": "string"
},
"created_at": {
+ "description": "The time this link was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this link.",
"type": "string"
},
"modified_at": {
+ "description": "The time this link was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "material": {
+ "tail_uuid": {
+ "description": "The UUID of the Arvados object that is the target of this relationship.",
+ "type": "string"
+ },
+ "link_class": {
+ "description": "A string that defines which kind of link this is. One of:\n\n * `\"permission\"` --- This link grants a permission to the user or group\n referenced by `head_uuid` to the object referenced by `tail_uuid`. The\n access level is set by `name`.\n * `\"star\"` --- This link represents a \"favorite.\" The user referenced\n by `head_uuid` wants quick access to the object referenced by `tail_uuid`.\n * `\"tag\"` --- This link represents an unstructured metadata tag. The object\n referenced by `tail_uuid` has the tag defined by `name`.\n\n",
+ "type": "string"
+ },
+ "name": {
+ "description": "The primary value of this link. For `\"permission\"` links, this is one of\n`\"can_read\"`, `\"can_write\"`, or `\"can_manage\"`.",
+ "type": "string"
+ },
+ "head_uuid": {
+ "description": "The UUID of the Arvados object that is the originator or actor in this\nrelationship. May be null.",
"type": "string"
},
"properties": {
+ "description": "A hash of arbitrary metadata for this link.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
}
}
},
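A sketch that finds objects carrying a given @tag@ link and shows both endpoints (Python SDK; the tag name is a placeholder):

import arvados

api = arvados.api('v1')

# Find "tag" links with a particular name.
page = api.links().list(filters=[
    ['link_class', '=', 'tag'],
    ['name', '=', 'validated'],  # placeholder tag name
]).execute()
for link in page['items']:
    print(link['head_uuid'], link['tail_uuid'], link['properties'])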
- "TraitList": {
- "id": "TraitList",
- "description": "Trait list",
+ "LogList": {
+ "id": "LogList",
+ "description": "A list of Log objects.",
"type": "object",
"properties": {
"kind": {
"type": "string",
- "description": "Object type. Always arvados#traitList.",
- "default": "arvados#traitList"
+ "description": "Object type. Always arvados#logList.",
+ "default": "arvados#logList"
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Traits.",
+ "description": "An array of matching Log objects.",
"items": {
- "$ref": "Trait"
+ "$ref": "Log"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Traits."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Traits."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
- "Trait": {
- "id": "Trait",
- "description": "Trait",
+ "Log": {
+ "id": "Log",
+ "description": "Arvados log record\n\nThis resource represents a single log record about an event in this Arvados\ncluster. Some individual Arvados services create log records. Users can also\ncreate custom logs.",
"type": "object",
- "uuidPrefix": "q1cn2",
+ "uuidPrefix": "57u5n",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "id": {
+ "description": "The serial number of this log. You can use this in filters to query logs\nthat were created before/after another.",
+ "type": "integer"
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This log's Arvados UUID, like `zzzzz-57u5n-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this log.",
"type": "string"
},
- "modified_by_client_uuid": {
+ "modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this log.",
"type": "string"
},
- "modified_by_user_uuid": {
+ "object_uuid": {
+ "description": "The UUID of the Arvados object that this log pertains to, such as a user\nor container.",
"type": "string"
},
- "modified_at": {
+ "event_at": {
+ "description": " The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "name": {
+ "event_type": {
+ "description": "An arbitrary short string that classifies what type of log this is.",
"type": "string"
},
+ "summary": {
+ "description": "A text string that describes the logged event. This is the primary\nattribute for simple logs.",
+ "type": "text"
+ },
"properties": {
+ "description": "A hash of arbitrary metadata for this log.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
},
"created_at": {
+ "description": "The time this log was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
+ },
+ "modified_at": {
+ "description": "The time this log was last updated. The string encodes a UTC date and time in ISO 8601 format.",
+ "type": "datetime"
+ },
+ "object_owner_uuid": {
+ "description": "The `owner_uuid` of the object referenced by `object_uuid` at the time\nthis log was created.",
+ "type": "string"
}
}
},
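The serial @id@ field supports incremental polling, as its description notes; a sketch:

import arvados

api = arvados.api('v1')

# Fetch logs created after a known record, ordered by serial id.
last_seen_id = 0
page = api.logs().list(
    filters=[['id', '>', last_seen_id]],
    order=['id asc'],
    limit=100).execute()
for log in page['items']:
    print(log['id'], log['event_type'], log['object_uuid'], log['summary'])
    last_seen_id = log['id']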
"UserList": {
"id": "UserList",
- "description": "User list",
+ "description": "A list of User objects.",
"type": "object",
"properties": {
"kind": {
@@ -10974,86 +5051,84 @@
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Users.",
+ "description": "An array of matching User objects.",
"items": {
"$ref": "User"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Users."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Users."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
"User": {
"id": "User",
- "description": "User",
+ "description": "Arvados user\n\nA user represents a single individual or role who may be authorized to access\nthis Arvados cluster.",
"type": "object",
"uuidPrefix": "tpzed",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This user's Arvados UUID, like `zzzzz-tpzed-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this user.",
"type": "string"
},
"created_at": {
+ "description": "The time this user was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this user.",
"type": "string"
},
"modified_at": {
+ "description": "The time this user was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"email": {
+ "description": "This user's email address.",
"type": "string"
},
"first_name": {
+ "description": "This user's first name.",
"type": "string"
},
"last_name": {
+ "description": "This user's last name.",
"type": "string"
},
"identity_url": {
+ "description": "A URL that represents this user with the cluster's identity provider.",
"type": "string"
},
"is_admin": {
+ "description": "A boolean flag. If set, this user is an administrator of the Arvados\ncluster, and automatically passes most permissions checks.",
"type": "boolean"
},
"prefs": {
+ "description": "A hash that stores cluster-wide user preferences.",
"type": "Hash"
},
"is_active": {
+ "description": "A boolean flag. If unset, this user is not permitted to make any Arvados\nAPI requests.",
"type": "boolean"
},
"username": {
+ "description": "This user's Unix username on virtual machines.",
"type": "string"
}
}
},
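A one-call sketch that inspects the account behind the current API token using the fields above:

import arvados

api = arvados.api('v1')

me = api.users().current().execute()
print(me['uuid'], me['username'], me['email'],
      'admin' if me['is_admin'] else 'regular user')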
"UserAgreementList": {
"id": "UserAgreementList",
- "description": "UserAgreement list",
+ "description": "A list of UserAgreement objects.",
"type": "object",
"properties": {
"kind": {
@@ -11063,119 +5138,128 @@
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of UserAgreements.",
+ "description": "An array of matching UserAgreement objects.",
"items": {
"$ref": "UserAgreement"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of UserAgreements."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of UserAgreements."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
"UserAgreement": {
"id": "UserAgreement",
- "description": "UserAgreement",
+ "description": "Arvados user agreement\n\nA user agreement is a collection with terms that users must agree to before\nthey can use this Arvados cluster.",
"type": "object",
"uuidPrefix": "gv0sa",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this user agreement.",
"type": "string"
},
"created_at": {
+ "description": "The time this user agreement was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this user agreement.",
"type": "string"
},
"modified_at": {
+ "description": "The time this user agreement was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"portable_data_hash": {
+ "description": "The portable data hash of this user agreement. This string provides a unique\nand stable reference to these contents.",
"type": "string"
},
"replication_desired": {
+ "description": "The number of copies that should be made for data in this user agreement.",
"type": "integer"
},
"replication_confirmed_at": {
+ "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this user agreement. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"replication_confirmed": {
+ "description": "The number of copies of data in this user agreement that the cluster has confirmed\nexist in storage.",
"type": "integer"
},
+ "uuid": {
+ "type": "string",
+ "description": "This user agreement's Arvados UUID, like `zzzzz-gv0sa-12345abcde67890`."
+ },
"manifest_text": {
+ "description": "The manifest text that describes how files are constructed from data blocks\nin this user agreement. Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n",
"type": "text"
},
"name": {
+ "description": "The name of this user agreement assigned by a user.",
"type": "string"
},
"description": {
+ "description": "A longer HTML description of this user agreement assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
"type": "string"
},
"properties": {
+ "description": "A hash of arbitrary metadata for this user agreement.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n",
"type": "Hash"
},
"delete_at": {
+ "description": "The time this user agreement will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"trash_at": {
+ "description": "The time this user agreement will be trashed. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"is_trashed": {
+ "description": "A boolean flag to indicate whether or not this user agreement is trashed.",
"type": "boolean"
},
"storage_classes_desired": {
+ "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this user agreement. Storage classes are configured by the cluster administrator.",
"type": "Array"
},
"storage_classes_confirmed": {
+ "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this user agreement's data.",
"type": "Array"
},
"storage_classes_confirmed_at": {
+ "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"current_version_uuid": {
+ "description": "The UUID of the current version of this user agreement.",
"type": "string"
},
"version": {
+ "description": "An integer that counts which version of a user agreement this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n",
"type": "integer"
},
"preserve_version": {
+ "description": "A boolean flag to indicate whether this specific version of this user agreement\nshould be persisted in cluster storage.",
"type": "boolean"
},
"file_count": {
+ "description": "The number of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.",
"type": "integer"
},
"file_size_total": {
+ "description": "The total size in bytes of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.",
"type": "integer"
}
}
},
"VirtualMachineList": {
"id": "VirtualMachineList",
- "description": "VirtualMachine list",
+ "description": "A list of VirtualMachine objects.",
"type": "object",
"properties": {
"kind": {
@@ -11185,65 +5269,56 @@
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of VirtualMachines.",
+ "description": "An array of matching VirtualMachine objects.",
"items": {
"$ref": "VirtualMachine"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of VirtualMachines."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of VirtualMachines."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
"VirtualMachine": {
"id": "VirtualMachine",
- "description": "VirtualMachine",
+ "description": "Arvados virtual machine (\"shell node\")\n\nThis resource stores information about a virtual machine or \"shell node\"\nhosted on this Arvados cluster where users can log in and use preconfigured\nArvados client tools.",
"type": "object",
"uuidPrefix": "2x53u",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
},
- "owner_uuid": {
- "type": "string"
+ "uuid": {
+ "type": "string",
+ "description": "This virtual machine's Arvados UUID, like `zzzzz-2x53u-12345abcde67890`."
},
- "modified_by_client_uuid": {
+ "owner_uuid": {
+ "description": "The UUID of the user or group that owns this virtual machine.",
"type": "string"
},
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this virtual machine.",
"type": "string"
},
"modified_at": {
+ "description": "The time this virtual machine was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"hostname": {
+ "description": "The DNS hostname where users should access this virtual machine.",
"type": "string"
},
"created_at": {
+ "description": "The time this virtual machine was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
}
}
},
"WorkflowList": {
"id": "WorkflowList",
- "description": "Workflow list",
+ "description": "A list of Workflow objects.",
"type": "object",
"properties": {
"kind": {
@@ -11253,65 +5328,62 @@
},
"etag": {
"type": "string",
- "description": "List version."
+ "description": "List cache version."
},
"items": {
"type": "array",
- "description": "The list of Workflows.",
+ "description": "An array of matching Workflow objects.",
"items": {
"$ref": "Workflow"
}
- },
- "next_link": {
- "type": "string",
- "description": "A link to the next page of Workflows."
- },
- "next_page_token": {
- "type": "string",
- "description": "The page token for the next page of Workflows."
- },
- "selfLink": {
- "type": "string",
- "description": "A link back to this list."
}
}
},
"Workflow": {
"id": "Workflow",
- "description": "Workflow",
+ "description": "Arvados workflow\n\nA workflow contains workflow definition source code that Arvados can execute\nalong with associated metadata for users.",
"type": "object",
"uuidPrefix": "7fd4e",
"properties": {
- "uuid": {
- "type": "string"
- },
"etag": {
"type": "string",
- "description": "Object version."
+ "description": "Object cache version."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "This workflow's Arvados UUID, like `zzzzz-7fd4e-12345abcde67890`."
},
"owner_uuid": {
+ "description": "The UUID of the user or group that owns this workflow.",
"type": "string"
},
"created_at": {
+ "description": "The time this workflow was created. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
"modified_at": {
+ "description": "The time this workflow was last updated. The string encodes a UTC date and time in ISO 8601 format.",
"type": "datetime"
},
- "modified_by_client_uuid": {
- "type": "string"
- },
"modified_by_user_uuid": {
+ "description": "The UUID of the user that last updated this workflow.",
"type": "string"
},
"name": {
+ "description": "The name of this workflow assigned by a user.",
"type": "string"
},
"description": {
+ "description": "A longer HTML description of this workflow assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
"type": "text"
},
"definition": {
+ "description": "A string with the CWL source of this workflow.",
"type": "text"
+ },
+ "collection_uuid": {
+ "description": "The collection this workflow is linked to, containing the definition of the workflow.",
+ "type": "string"
}
}
}
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
index 83f658201c..6b5427970c 100644
--- a/sdk/python/arvados/__init__.py
+++ b/sdk/python/arvados/__init__.py
@@ -6,8 +6,16 @@
This module provides the entire Python SDK for Arvados. The most useful modules
include:
-* arvados.api - After you `import arvados`, you can call `arvados.api` as a
- shortcut to the client constructor function `arvados.api.api`.
+* arvados.api - This module provides the `arvados.api.api` function to
+ construct an Arvados REST API client, as well as other classes and functions
+ that support it. You can call the `arvados.api` module just like a function
+ as a shortcut for calling `arvados.api.api`.
+
+* arvados.api_resources - The methods on an Arvados REST API client are
+ generated dynamically at runtime. This module documents those methods and
+ return values for the current version of Arvados. This module does not
+ implement anything so you don't need to import it, but it's a helpful
+ reference to understand how to use the Arvados REST API client.
* arvados.collection - The `arvados.collection.Collection` class provides a
high-level interface to read and write collections. It coordinates sending
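(For illustration: a minimal sketch of the module-call shortcut described
above, assuming ARVADOS_API_HOST and ARVADOS_API_TOKEN are configured.)

    import arvados

    # `arvados.api` is a module, but calling it invokes `arvados.api.api`:
    api = arvados.api('v1')
    # The client's REST methods are generated at runtime; see
    # arvados.api_resources for their documentation.
    current_user = api.users().current().execute()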
@@ -28,13 +36,17 @@ from collections import UserDict
from . import api, errors, util
from .api import api_from_config, http_cache
-from .collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
+from .collection import CollectionReader
from arvados.keep import *
-from arvados.stream import *
-from .arvfile import StreamFileReader
from .logging import log_format, log_date_format, log_handler
from .retry import RetryLoop
+# Backwards compatibility shims: these modules used to get pulled in after
+# `import arvados` with previous versions of the SDK. We must keep the names
+# accessible even though there's no longer any functional need for them.
+from . import cache
+from . import safeapi
+
# Previous versions of the PySDK used to say `from .api import api`. This
# made it convenient to call the API client constructor, but difficult to
# access the rest of the `arvados.api` module. The magic below fixes that
@@ -55,136 +67,3 @@ logger = stdliblog.getLogger('arvados')
logger.addHandler(log_handler)
logger.setLevel(stdliblog.DEBUG if config.get('ARVADOS_DEBUG')
else stdliblog.WARNING)
-
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def task_set_output(self, s, num_retries=5):
- for tries_left in RetryLoop(num_retries=num_retries, backoff_start=0):
- try:
- return api('v1').job_tasks().update(
- uuid=self['uuid'],
- body={
- 'output':s,
- 'success':True,
- 'progress':1.0
- }).execute()
- except errors.ApiError as error:
- if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
- logger.debug("task_set_output: job_tasks().update() raised {}, retrying with {} tries left".format(repr(error),tries_left))
- else:
- raise
-
-_current_task = None
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def current_task(num_retries=5):
- global _current_task
- if _current_task:
- return _current_task
-
- for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
- try:
- task = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
- task = UserDict(task)
- task.set_output = types.MethodType(task_set_output, task)
- task.tmpdir = os.environ['TASK_WORK']
- _current_task = task
- return task
- except errors.ApiError as error:
- if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
- logger.debug("current_task: job_tasks().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
- else:
- raise
-
-_current_job = None
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def current_job(num_retries=5):
- global _current_job
- if _current_job:
- return _current_job
-
- for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
- try:
- job = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
- job = UserDict(job)
- job.tmpdir = os.environ['JOB_WORK']
- _current_job = job
- return job
- except errors.ApiError as error:
- if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
- logger.debug("current_job: jobs().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
- else:
- raise
-
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def getjobparam(*args):
- return current_job()['script_parameters'].get(*args)
-
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def get_job_param_mount(*args):
- return os.path.join(os.environ['TASK_KEEPMOUNT'], current_job()['script_parameters'].get(*args))
-
-@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
-def get_task_param_mount(*args):
- return os.path.join(os.environ['TASK_KEEPMOUNT'], current_task()['parameters'].get(*args))
-
-class JobTask(object):
- @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
- def __init__(self, parameters=dict(), runtime_constraints=dict()):
- print("init jobtask %s %s" % (parameters, runtime_constraints))
-
-class job_setup(object):
- @staticmethod
- @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
- def one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=False, api_client=None):
- if if_sequence != current_task()['sequence']:
- return
-
- if not api_client:
- api_client = api('v1')
-
- job_input = current_job()['script_parameters']['input']
- cr = CollectionReader(job_input, api_client=api_client)
- cr.normalize()
- for s in cr.all_streams():
- for f in s.all_files():
- if input_as_path:
- task_input = os.path.join(job_input, s.name(), f.name())
- else:
- task_input = f.as_manifest()
- new_task_attrs = {
- 'job_uuid': current_job()['uuid'],
- 'created_by_job_task_uuid': current_task()['uuid'],
- 'sequence': if_sequence + 1,
- 'parameters': {
- 'input':task_input
- }
- }
- api_client.job_tasks().create(body=new_task_attrs).execute()
- if and_end_task:
- api_client.job_tasks().update(uuid=current_task()['uuid'],
- body={'success':True}
- ).execute()
- exit(0)
-
- @staticmethod
- @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
- def one_task_per_input_stream(if_sequence=0, and_end_task=True):
- if if_sequence != current_task()['sequence']:
- return
- job_input = current_job()['script_parameters']['input']
- cr = CollectionReader(job_input)
- for s in cr.all_streams():
- task_input = s.tokens()
- new_task_attrs = {
- 'job_uuid': current_job()['uuid'],
- 'created_by_job_task_uuid': current_task()['uuid'],
- 'sequence': if_sequence + 1,
- 'parameters': {
- 'input':task_input
- }
- }
- api('v1').job_tasks().create(body=new_task_attrs).execute()
- if and_end_task:
- api('v1').job_tasks().update(uuid=current_task()['uuid'],
- body={'success':True}
- ).execute()
- exit(0)
diff --git a/sdk/python/arvados/_internal/__init__.py b/sdk/python/arvados/_internal/__init__.py
new file mode 100644
index 0000000000..1a2f0a7323
--- /dev/null
+++ b/sdk/python/arvados/_internal/__init__.py
@@ -0,0 +1,114 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Arvados internal utilities
+
+Everything in `arvados._internal` is support code for the Arvados Python SDK
+and tools. Nothing in this module is intended to be part of the public-facing
+SDK API. Classes and functions in this module may be changed or removed at any
+time.
+"""
+
+import functools
+import operator
+import re
+import time
+import warnings
+
+import typing as t
+
+HT = t.TypeVar('HT', bound=t.Hashable)
+
+class Timer:
+ def __init__(self, verbose=False):
+ self.verbose = verbose
+
+ def __enter__(self):
+ self.start = time.time()
+ return self
+
+ def __exit__(self, *args):
+ self.end = time.time()
+ self.secs = self.end - self.start
+ self.msecs = self.secs * 1000 # millisecs
+ if self.verbose:
+ print('elapsed time: %f ms' % self.msecs)
+
+
+def deprecated(version=None, preferred=None):
+ """Mark a callable as deprecated in the SDK
+
+    This will wrap the callable to emit a DeprecationWarning when called,
+    and add a deprecation notice to its docstring.
+
+ If the following arguments are given, they'll be included in the
+ notices:
+
+ * preferred: str | None --- The name of an alternative that users should
+ use instead.
+
+ * version: str | None --- The version of Arvados when the callable is
+ scheduled to be removed.
+ """
+ if version is None:
+ version = ''
+ else:
+ version = f' and scheduled to be removed in Arvados {version}'
+ if preferred is None:
+ preferred = ''
+ else:
+ preferred = f' Prefer {preferred} instead.'
+ def deprecated_decorator(func):
+ fullname = f'{func.__module__}.{func.__qualname__}'
+ parent, _, name = fullname.rpartition('.')
+ if name == '__init__':
+ fullname = parent
+ warning_msg = f'{fullname} is deprecated{version}.{preferred}'
+ @functools.wraps(func)
+ def deprecated_wrapper(*args, **kwargs):
+ warnings.warn(warning_msg, DeprecationWarning, 2)
+ return func(*args, **kwargs)
+ # Get func's docstring without any trailing newline or empty lines.
+ func_doc = re.sub(r'\n\s*$', '', func.__doc__ or '')
+ match = re.search(r'\n([ \t]+)\S', func_doc)
+ indent = '' if match is None else match.group(1)
+ warning_doc = f'\n\n{indent}.. WARNING:: Deprecated\n{indent} {warning_msg}'
+ # Make the deprecation notice the second "paragraph" of the
+ # docstring if possible. Otherwise append it.
+ docstring, count = re.subn(
+ rf'\n[ \t]*\n{indent}',
+ f'{warning_doc}\n\n{indent}',
+ func_doc,
+ count=1,
+ )
+ if not count:
+ docstring = f'{func_doc.lstrip()}{warning_doc}'
+ deprecated_wrapper.__doc__ = docstring
+ return deprecated_wrapper
+ return deprecated_decorator
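(Illustrative sketch: `old_helper` and `new_helper` are hypothetical names.)

    @deprecated(version='4.0', preferred='new_helper')
    def old_helper():
        """Do the thing the old way."""

    # Calling old_helper() now emits a DeprecationWarning with the message:
    # "<module>.old_helper is deprecated and scheduled to be removed in
    # Arvados 4.0. Prefer new_helper instead."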
+
+
+def parse_seq(
+ s: str,
+ split: t.Callable[[str], t.Iterable[str]]=operator.methodcaller('split', ','),
+ clean: t.Callable[[str], str]=operator.methodcaller('strip'),
+ check: t.Callable[[str], bool]=bool,
+) -> t.Iterator[str]:
+ """Split, clean, and filter a string into multiple items
+
+ The default arguments split on commas, strip substrings, and skip empty
+ items.
+ """
+ return (word for substr in split(s) if check(word := clean(substr)))
+
+
+def uniq(it: t.Iterable[HT]) -> t.Iterator[HT]:
+ """Yield only unique items from an iterable
+
+ The items must be hashable.
+ """
+ seen = set()
+ for item in it:
+ if item not in seen:
+ seen.add(item)
+ yield item
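(A quick sketch of both helpers with their default arguments:)

    list(parse_seq('a, b,, c'))   # ['a', 'b', 'c']
    list(uniq(['x', 'y', 'x']))   # ['x', 'y']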
diff --git a/sdk/python/arvados/_internal/basedirs.py b/sdk/python/arvados/_internal/basedirs.py
new file mode 100644
index 0000000000..91546fcd36
--- /dev/null
+++ b/sdk/python/arvados/_internal/basedirs.py
@@ -0,0 +1,188 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Base directories utility module
+
+This module provides a set of classes useful to search and manipulate the base
+directories defined by systemd and the XDG specification. Most users will just
+instantiate and use `BaseDirectories`.
+"""
+
+import dataclasses
+import enum
+import itertools
+import logging
+import os
+import shlex
+import stat
+
+from pathlib import Path, PurePath
+from typing import (
+ Iterator,
+ Mapping,
+ Optional,
+ Union,
+)
+
+logger = logging.getLogger('arvados')
+
+@dataclasses.dataclass
+class BaseDirectorySpec:
+ """Parse base directories
+
+ A BaseDirectorySpec defines all the environment variable keys and defaults
+ related to a set of base directories (cache, config, state, etc.). It
+ provides pure methods to parse environment settings into valid paths.
+ """
+ systemd_key: str
+ xdg_home_key: str
+ xdg_home_default: PurePath
+ xdg_dirs_key: Optional[str] = None
+ xdg_dirs_default: str = ''
+
+ @staticmethod
+ def _abspath_from_env(env: Mapping[str, str], key: str) -> Optional[Path]:
+ try:
+ path = Path(env[key])
+ except (KeyError, ValueError):
+ ok = False
+ else:
+ ok = path.is_absolute()
+ return path if ok else None
+
+ @staticmethod
+ def _iter_abspaths(value: str) -> Iterator[Path]:
+ for path_s in value.split(':'):
+ path = Path(path_s)
+ if path.is_absolute():
+ yield path
+
+ def iter_systemd(self, env: Mapping[str, str]) -> Iterator[Path]:
+ return self._iter_abspaths(env.get(self.systemd_key, ''))
+
+ def iter_xdg(self, env: Mapping[str, str], subdir: PurePath) -> Iterator[Path]:
+ yield self.xdg_home(env, subdir)
+ if self.xdg_dirs_key is not None:
+ for path in self._iter_abspaths(env.get(self.xdg_dirs_key) or self.xdg_dirs_default):
+ yield path / subdir
+
+ def xdg_home(self, env: Mapping[str, str], subdir: PurePath) -> Path:
+ return (
+ self._abspath_from_env(env, self.xdg_home_key)
+ or self.xdg_home_default_path(env)
+ ) / subdir
+
+ def xdg_home_default_path(self, env: Mapping[str, str]) -> Path:
+ return (self._abspath_from_env(env, 'HOME') or Path.home()) / self.xdg_home_default
+
+ def xdg_home_is_customized(self, env: Mapping[str, str]) -> bool:
+ xdg_home = self._abspath_from_env(env, self.xdg_home_key)
+ return xdg_home is not None and xdg_home != self.xdg_home_default_path(env)
+
+
+class BaseDirectorySpecs(enum.Enum):
+ """Base directory specifications
+
+ This enum provides easy access to the standard base directory settings.
+ """
+ CACHE = BaseDirectorySpec(
+ 'CACHE_DIRECTORY',
+ 'XDG_CACHE_HOME',
+ PurePath('.cache'),
+ )
+ CONFIG = BaseDirectorySpec(
+ 'CONFIGURATION_DIRECTORY',
+ 'XDG_CONFIG_HOME',
+ PurePath('.config'),
+ 'XDG_CONFIG_DIRS',
+ '/etc/xdg',
+ )
+ STATE = BaseDirectorySpec(
+ 'STATE_DIRECTORY',
+ 'XDG_STATE_HOME',
+ PurePath('.local', 'state'),
+ )
+
+
+class BaseDirectories:
+ """Resolve paths from a base directory spec
+
+ Given a BaseDirectorySpec, this class provides stateful methods to find
+ existing files and return the most-preferred directory for writing.
+ """
+ _STORE_MODE = stat.S_IFDIR | stat.S_IWUSR
+
+ def __init__(
+ self,
+ spec: Union[BaseDirectorySpec, BaseDirectorySpecs, str],
+ env: Mapping[str, str]=os.environ,
+ xdg_subdir: Union[os.PathLike, str]='arvados',
+ ) -> None:
+ if isinstance(spec, str):
+ spec = BaseDirectorySpecs[spec].value
+ elif isinstance(spec, BaseDirectorySpecs):
+ spec = spec.value
+ self._spec = spec
+ self._env = env
+ self._xdg_subdir = PurePath(xdg_subdir)
+
+ def search_paths(self) -> Iterator[Path]:
+ return itertools.chain(
+ self._spec.iter_systemd(self._env),
+ self._spec.iter_xdg(self._env, self._xdg_subdir))
+
+ def search(self, name: str) -> Iterator[Path]:
+ any_found = False
+ for search_path in self.search_paths():
+ path = search_path / name
+ if path.exists():
+ yield path
+ any_found = True
+ # The rest of this function is dedicated to warning the user if they
+ # have a custom XDG_*_HOME value that prevented the search from
+ # succeeding. This should be rare.
+ if any_found or not self._spec.xdg_home_is_customized(self._env):
+ return
+ default_home = self._spec.xdg_home_default_path(self._env)
+ default_path = Path(self._xdg_subdir / name)
+ if not (default_home / default_path).exists():
+ return
+ if self._spec.xdg_dirs_key is None:
+ suggest_key = self._spec.xdg_home_key
+ suggest_value = default_home
+ else:
+ suggest_key = self._spec.xdg_dirs_key
+ cur_value = self._env.get(suggest_key, '')
+ value_sep = ':' if cur_value else ''
+ suggest_value = f'{cur_value}{value_sep}{default_home}'
+ logger.warning(
+ "\
+%s was not found under your configured $%s (%s), \
+but does exist at the default location (%s) - \
+consider running this program with the environment setting %s=%s\
+",
+ default_path,
+ self._spec.xdg_home_key,
+ self._spec.xdg_home(self._env, ''),
+ default_home,
+ suggest_key,
+ shlex.quote(suggest_value),
+ )
+
+ def storage_path(
+ self,
+ subdir: Union[str, os.PathLike]=PurePath(),
+ mode: int=0o700,
+ ) -> Path:
+ for path in self._spec.iter_systemd(self._env):
+ try:
+ mode = path.stat().st_mode
+ except OSError:
+ continue
+ if (mode & self._STORE_MODE) == self._STORE_MODE:
+ break
+ else:
+ path = self._spec.xdg_home(self._env, self._xdg_subdir)
+ path /= subdir
+ path.mkdir(parents=True, exist_ok=True, mode=mode)
+ return path
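(For illustration, a sketch of typical use; the 'discovery' subdirectory
name is just an example.)

    dirs = BaseDirectories('CACHE')
    # Yield existing files named 'discovery' across the search paths:
    for path in dirs.search('discovery'):
        print(path)
    # Return (creating if necessary) the preferred writable directory:
    cache_dir = dirs.storage_path('discovery')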
diff --git a/sdk/python/arvados/diskcache.py b/sdk/python/arvados/_internal/diskcache.py
similarity index 100%
rename from sdk/python/arvados/diskcache.py
rename to sdk/python/arvados/_internal/diskcache.py
diff --git a/sdk/python/arvados/_internal/downloaderbase.py b/sdk/python/arvados/_internal/downloaderbase.py
new file mode 100644
index 0000000000..19334a80f2
--- /dev/null
+++ b/sdk/python/arvados/_internal/downloaderbase.py
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import abc
+
+class DownloaderBase(abc.ABC):
+ def __init__(self):
+ self.collection = None
+ self.target = None
+ self.name = None
+
+ @abc.abstractmethod
+ def head(self, url):
+ ...
+
+ @abc.abstractmethod
+ def download(self, url, headers):
+ ...
diff --git a/sdk/python/arvados/_internal/http_to_keep.py b/sdk/python/arvados/_internal/http_to_keep.py
new file mode 100644
index 0000000000..28f0adba7a
--- /dev/null
+++ b/sdk/python/arvados/_internal/http_to_keep.py
@@ -0,0 +1,187 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import datetime
+import logging
+import re
+import time
+import urllib.parse
+
+import pycurl
+
+import arvados
+import arvados.collection
+import arvados._internal
+
+from .downloaderbase import DownloaderBase
+from .pycurl import PyCurlHelper
+from .to_keep_util import (Response, url_to_keep, generic_check_cached_url)
+
+logger = logging.getLogger('arvados.http_import')
+
+class _Downloader(DownloaderBase, PyCurlHelper):
+ # Wait up to 60 seconds for connection
+ # How long it can be in "low bandwidth" state before it gives up
+ # Low bandwidth threshold is 32 KiB/s
+ DOWNLOADER_TIMEOUT = (60, 300, 32768)
+
+ def __init__(self, apiclient):
+ DownloaderBase.__init__(self)
+ PyCurlHelper.__init__(self, title_case_headers=True)
+ self.curl = pycurl.Curl()
+ self.curl.setopt(pycurl.NOSIGNAL, 1)
+ self.curl.setopt(pycurl.OPENSOCKETFUNCTION,
+ lambda *args, **kwargs: self._socket_open(*args, **kwargs))
+ self.apiclient = apiclient
+
+ def head(self, url):
+ get_headers = {'Accept': 'application/octet-stream'}
+ self._headers = {}
+
+ self.curl.setopt(pycurl.URL, url.encode('utf-8'))
+ self.curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in get_headers.items()])
+
+ self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
+ self.curl.setopt(pycurl.NOBODY, True)
+ self.curl.setopt(pycurl.FOLLOWLOCATION, True)
+
+ self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, True)
+
+ try:
+ self.curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ finally:
+ if self._socket:
+ self._socket.close()
+ self._socket = None
+
+ return Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
+
+ def download(self, url, headers):
+ self.count = 0
+ self.start = time.time()
+ self.checkpoint = self.start
+ self._headers = {}
+ self._first_chunk = True
+ self.collection = None
+ self.parsedurl = urllib.parse.urlparse(url)
+
+ get_headers = {'Accept': 'application/octet-stream'}
+ get_headers.update(headers)
+
+ self.curl.setopt(pycurl.URL, url.encode('utf-8'))
+ self.curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in get_headers.items()])
+
+ self.curl.setopt(pycurl.WRITEFUNCTION, self.body_write)
+ self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+
+ self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
+ self.curl.setopt(pycurl.HTTPGET, True)
+ self.curl.setopt(pycurl.FOLLOWLOCATION, True)
+
+ self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, False)
+
+ try:
+ self.curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ finally:
+ if self._socket:
+ self._socket.close()
+ self._socket = None
+
+ return Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
+
+ def headers_received(self):
+ self.collection = arvados.collection.Collection(api_client=self.apiclient)
+
+ if "Content-Length" in self._headers:
+ self.contentlength = int(self._headers["Content-Length"])
+ logger.info("File size is %s bytes", self.contentlength)
+ else:
+ self.contentlength = None
+
+ if self._headers.get("Content-Disposition"):
+ grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))',
+ self._headers["Content-Disposition"])
+            if grp is None:
+                # No filename= parameter in the header; fall back to the URL path.
+                self.name = self.parsedurl.path.split("/")[-1]
+            elif grp.group(2):
+                self.name = grp.group(2)
+            else:
+                self.name = grp.group(4)
+ else:
+ self.name = self.parsedurl.path.split("/")[-1]
+
+ # Can't call curl.getinfo(pycurl.RESPONSE_CODE) until
+ # perform() is done but we need to know the status before that
+ # so we have to parse the status line ourselves.
+ mt = re.match(r'^HTTP\/(\d(\.\d)?) ([1-5]\d\d) ([^\r\n\x00-\x08\x0b\x0c\x0e-\x1f\x7f]*)\r\n$', self._headers["x-status-line"])
+ code = int(mt.group(3))
+
+ if not self.name:
+ logger.error("Cannot determine filename from URL or headers")
+ return
+
+ if code == 200:
+ self.target = self.collection.open(self.name, "wb")
+
+ def body_write(self, chunk):
+ if self._first_chunk:
+ self.headers_received()
+ self._first_chunk = False
+
+ self.count += len(chunk)
+
+ if self.target is None:
+ # "If this number is not equal to the size of the byte
+ # string, this signifies an error and libcurl will abort
+ # the request."
+ return 0
+
+ self.target.write(chunk)
+ loopnow = time.time()
+ if (loopnow - self.checkpoint) < 20:
+ return
+
+ bps = self.count / (loopnow - self.start)
+ if self.contentlength is not None:
+ logger.info("%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left",
+ ((self.count * 100) / self.contentlength),
+ (bps / (1024.0*1024.0)),
+ ((self.contentlength-self.count) // bps))
+ else:
+ logger.info("%d downloaded, %6.2f MiB/s", self.count, (bps / (1024.0*1024.0)))
+ self.checkpoint = loopnow
+
+
+def check_cached_url(api, project_uuid, url, etags,
+ utcnow=datetime.datetime.utcnow,
+ varying_url_params="",
+ prefer_cached_downloads=False):
+ return generic_check_cached_url(api, _Downloader(api),
+ project_uuid, url, etags,
+ utcnow=utcnow,
+ varying_url_params=varying_url_params,
+ prefer_cached_downloads=prefer_cached_downloads)
+
+
+def http_to_keep(api, project_uuid, url,
+ utcnow=datetime.datetime.utcnow, varying_url_params="",
+ prefer_cached_downloads=False):
+    """Download a file over HTTP and upload it to Keep, with HTTP headers as metadata.
+
+    Before downloading the URL, checks to see if the URL already
+    exists in Keep and applies the HTTP caching policy, along with the
+    varying_url_params and prefer_cached_downloads flags, to decide
+    whether to use the version in Keep or re-download it.
+ """
+
+ return url_to_keep(api, _Downloader(api),
+ project_uuid, url,
+ utcnow,
+ varying_url_params,
+ prefer_cached_downloads)
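(A minimal usage sketch; the project UUID and URL are hypothetical.)

    import arvados
    from arvados._internal.http_to_keep import http_to_keep

    api = arvados.api('v1')
    result = http_to_keep(api, 'zzzzz-j7d0g-12345abcde67890',
                          'https://example.com/genome.fa')
    print(result.portable_data_hash, result.file_name)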
diff --git a/sdk/python/arvados/_pycurlhelper.py b/sdk/python/arvados/_internal/pycurl.py
similarity index 100%
rename from sdk/python/arvados/_pycurlhelper.py
rename to sdk/python/arvados/_internal/pycurl.py
diff --git a/sdk/python/arvados/_internal/report_template.py b/sdk/python/arvados/_internal/report_template.py
new file mode 100644
index 0000000000..2c93e1cbc6
--- /dev/null
+++ b/sdk/python/arvados/_internal/report_template.py
@@ -0,0 +1,114 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+try:
+ from html import escape
+except ImportError:
+ from cgi import escape
+
+import json
+from typing import ItemsView
+
+class ReportTemplate(object):
+ """Base class for HTML reports produced by Arvados reporting tools.
+
+ Used by crunchstat-summary and cluster-activity.
+
+ """
+
+    STYLE = '''
+    <style>
+    </style>
+    '''
+
+ def __init__(self, label):
+ self.label = label
+ self.cards = []
+
+ def cardlist(self, items):
+ if not isinstance(items, list):
+ items = [items]
+
+        return "\n".join(
+            """
+            <div class="card">
+            {}
+            </div>
+            """.format(i) for i in items)
+
+ def html(self):
+ return '''
+
+
+ {label}
+
+{js}
+
+{style}
+
+{header}
+
+
+
+
+
+
+{cards}
+
+
+
+ '''.format(label=escape(self.label),
+ js=self.js(),
+ style=self.style(),
+ header=self.headHTML(),
+ cards=self.cardlist(self.cards))
+
+ def js(self):
+ return ''
+
+ def style(self):
+ return self.STYLE
+
+ def headHTML(self):
+ """Return extra HTML text to include in HEAD."""
+ return ''
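(A sketch of how a reporting tool might subclass this; `ActivityReport` is
a hypothetical name.)

    class ActivityReport(ReportTemplate):
        def headHTML(self):
            return '<meta name="generator" content="cluster-activity">'

    report = ActivityReport('Cluster activity for 2024-01')
    report.cards.append('<p>42 containers completed.</p>')
    print(report.html())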
diff --git a/sdk/python/arvados/_internal/s3_to_keep.py b/sdk/python/arvados/_internal/s3_to_keep.py
new file mode 100644
index 0000000000..27c25f7344
--- /dev/null
+++ b/sdk/python/arvados/_internal/s3_to_keep.py
@@ -0,0 +1,129 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import datetime
+import logging
+import time
+import urllib.parse
+
+import arvados
+import arvados.collection
+
+import boto3
+import boto3.s3.transfer
+
+from .downloaderbase import DownloaderBase
+from .to_keep_util import (Response, url_to_keep, generic_check_cached_url)
+
+logger = logging.getLogger('arvados.s3_import')
+
+
+class _Downloader(DownloaderBase):
+ def __init__(self, apiclient, botoclient):
+ super().__init__()
+ self.apiclient = apiclient
+ self.botoclient = botoclient
+ self.headresult = None
+
+ def head(self, url):
+ self.parsedurl = urllib.parse.urlparse(url)
+
+ extraArgs = {}
+ versionId = urllib.parse.parse_qs(self.parsedurl.query).get("versionId", [False])[0]
+ if versionId:
+ extraArgs["VersionId"] = versionId
+ extraArgs["ResponseCacheControl"] = "immutable"
+ response = self.botoclient.head_object(
+ Bucket=self.parsedurl.netloc,
+ Key=self.parsedurl.path.lstrip('/'),
+ **extraArgs
+ )
+ return Response(response['ResponseMetadata']['HTTPStatusCode'],
+ {k.title(): v for k,v in response['ResponseMetadata']['HTTPHeaders'].items()})
+
+ def download(self, url, headers):
+ self.collection = arvados.collection.Collection(api_client=self.apiclient)
+
+ self.count = 0
+ self.start = time.time()
+ self.checkpoint = self.start
+ self.contentlength = None
+ self.target = None
+
+ self.parsedurl = urllib.parse.urlparse(url)
+ extraArgs = {}
+ versionId = urllib.parse.parse_qs(self.parsedurl.query).get("versionId", [None])[0]
+ if versionId:
+ extraArgs["VersionId"] = versionId
+
+ self.name = self.parsedurl.path.split("/")[-1]
+ self.target = self.collection.open(self.name, "wb")
+
+ objectMeta = self.head(url)
+ self.contentlength = int(objectMeta.headers["Content-Length"])
+
+ self.botoclient.download_fileobj(
+ Bucket=self.parsedurl.netloc,
+ Key=self.parsedurl.path.lstrip('/'),
+ Fileobj=self.target,
+ ExtraArgs=extraArgs,
+ Callback=self.data_received,
+ Config=boto3.s3.transfer.TransferConfig(
+ multipart_threshold=64*1024*1024,
+ multipart_chunksize=64*1024*1024,
+ use_threads=False,
+ ))
+
+ return objectMeta
+
+ def data_received(self, count):
+ self.count += count
+
+ loopnow = time.time()
+ if (loopnow - self.checkpoint) < 20:
+ return
+
+ bps = self.count / (loopnow - self.start)
+ if self.contentlength is not None:
+ logger.info("%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left",
+ ((self.count * 100) / self.contentlength),
+ (bps / (1024.0*1024.0)),
+ ((self.contentlength-self.count) // bps))
+ else:
+ logger.info("%d downloaded, %6.2f MiB/s", self.count, (bps / (1024.0*1024.0)))
+ self.checkpoint = loopnow
+
+def get_botoclient(botosession, unsigned_requests):
+ if unsigned_requests:
+ from botocore import UNSIGNED
+ from botocore.config import Config
+ return botosession.client('s3', config=Config(signature_version=UNSIGNED))
+ else:
+ return botosession.client('s3')
+
+
+def check_cached_url(api, botosession, project_uuid, url, etags,
+ utcnow=datetime.datetime.utcnow,
+ prefer_cached_downloads=False,
+ unsigned_requests=False):
+
+ return generic_check_cached_url(api, _Downloader(api, get_botoclient(botosession, unsigned_requests)),
+ project_uuid, url, etags,
+ utcnow=utcnow,
+ prefer_cached_downloads=prefer_cached_downloads)
+
+def s3_to_keep(api, botosession, project_uuid, url,
+ utcnow=datetime.datetime.utcnow,
+ prefer_cached_downloads=False,
+ unsigned_requests=False):
+    """Download a file from S3 and upload it to Keep, with HTTP headers as metadata.
+
+ Because simple S3 object fetches are just HTTP underneath, we can
+ reuse most of the HTTP downloading infrastructure.
+ """
+
+ return url_to_keep(api, _Downloader(api, get_botoclient(botosession, unsigned_requests)),
+ project_uuid, url,
+ utcnow=utcnow,
+ prefer_cached_downloads=prefer_cached_downloads)
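(A minimal usage sketch; the bucket, key, and project UUID are hypothetical.)

    import arvados
    import boto3
    from arvados._internal.s3_to_keep import s3_to_keep

    api = arvados.api('v1')
    session = boto3.Session()
    result = s3_to_keep(api, session, 'zzzzz-j7d0g-12345abcde67890',
                        's3://example-bucket/genome.fa',
                        unsigned_requests=True)
    print(result.portable_data_hash, result.file_name)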
diff --git a/sdk/python/arvados/_ranges.py b/sdk/python/arvados/_internal/streams.py
similarity index 80%
rename from sdk/python/arvados/_ranges.py
rename to sdk/python/arvados/_internal/streams.py
index bb245ab2bf..fad21a4917 100644
--- a/sdk/python/arvados/_ranges.py
+++ b/sdk/python/arvados/_internal/streams.py
@@ -2,16 +2,17 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import division
-from builtins import object
import logging
+import re
-_logger = logging.getLogger('arvados.ranges')
+from .. import config
+
+_logger = logging.getLogger('arvados.streams')
# Log level below 'debug' !
RANGES_SPAM = 9
-class Range(object):
+class Range:
__slots__ = ("locator", "range_start", "range_size", "segment_offset")
def __init__(self, locator, range_start, range_size, segment_offset=0):
@@ -29,6 +30,26 @@ class Range(object):
self.range_size == other.range_size and
self.segment_offset == other.segment_offset)
+
+class LocatorAndRange:
+ __slots__ = ("locator", "block_size", "segment_offset", "segment_size")
+
+ def __init__(self, locator, block_size, segment_offset, segment_size):
+ self.locator = locator
+ self.block_size = block_size
+ self.segment_offset = segment_offset
+ self.segment_size = segment_size
+
+ def __eq__(self, other):
+ return (self.locator == other.locator and
+ self.block_size == other.block_size and
+ self.segment_offset == other.segment_offset and
+ self.segment_size == other.segment_size)
+
+ def __repr__(self):
+ return "LocatorAndRange(%r, %r, %r, %r)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
+
+
def first_block(data_locators, range_start):
block_start = 0
@@ -60,24 +81,6 @@ def first_block(data_locators, range_start):
return i
-class LocatorAndRange(object):
- __slots__ = ("locator", "block_size", "segment_offset", "segment_size")
-
- def __init__(self, locator, block_size, segment_offset, segment_size):
- self.locator = locator
- self.block_size = block_size
- self.segment_offset = segment_offset
- self.segment_size = segment_size
-
- def __eq__(self, other):
- return (self.locator == other.locator and
- self.block_size == other.block_size and
- self.segment_offset == other.segment_offset and
- self.segment_size == other.segment_size)
-
- def __repr__(self):
- return "LocatorAndRange(%r, %r, %r, %r)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
-
def locators_and_ranges(data_locators, range_start, range_size, limit=None):
"""Get blocks that are covered by a range.
@@ -225,3 +228,60 @@ def replace_range(data_locators, new_range_start, new_range_size, new_locator, n
data_locators[i] = Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_end-old_segment_start))
return
i += 1
+
+def escape(path):
+ return re.sub(r'[\\:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
+
+def normalize_stream(stream_name, stream):
+ """Take manifest stream and return a list of tokens in normalized format.
+
+ :stream_name:
+ The name of the stream.
+
+ :stream:
+    A dict mapping each filename to a list of `LocatorAndRange` objects.
+
+ """
+
+ stream_name = escape(stream_name)
+ stream_tokens = [stream_name]
+ sortedfiles = list(stream.keys())
+ sortedfiles.sort()
+
+ blocks = {}
+ streamoffset = 0
+ # Go through each file and add each referenced block exactly once.
+ for streamfile in sortedfiles:
+ for segment in stream[streamfile]:
+ if segment.locator not in blocks:
+ stream_tokens.append(segment.locator)
+ blocks[segment.locator] = streamoffset
+ streamoffset += segment.block_size
+
+ # Add the empty block if the stream is otherwise empty.
+ if len(stream_tokens) == 1:
+ stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
+ for streamfile in sortedfiles:
+ # Add in file segments
+ current_span = None
+ fout = escape(streamfile)
+ for segment in stream[streamfile]:
+ # Collapse adjacent segments
+ streamoffset = blocks[segment.locator] + segment.segment_offset
+ if current_span is None:
+ current_span = [streamoffset, streamoffset + segment.segment_size]
+ else:
+ if streamoffset == current_span[1]:
+ current_span[1] += segment.segment_size
+ else:
+ stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+ current_span = [streamoffset, streamoffset + segment.segment_size]
+
+ if current_span is not None:
+ stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+
+ if not stream[streamfile]:
+ stream_tokens.append(u"0:0:{0}".format(fout))
+
+ return stream_tokens
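(A small sketch of the normalized output; the block locator is hypothetical.)

    loc = 'a1b2c3d4e5f67890a1b2c3d4e5f67890+100'
    stream = {'out.txt': [LocatorAndRange(loc, 100, 0, 100)]}
    print(' '.join(normalize_stream('.', stream)))
    # . a1b2c3d4e5f67890a1b2c3d4e5f67890+100 0:100:out.txt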
diff --git a/sdk/python/arvados/_internal/to_keep_util.py b/sdk/python/arvados/_internal/to_keep_util.py
new file mode 100644
index 0000000000..bb6df999ba
--- /dev/null
+++ b/sdk/python/arvados/_internal/to_keep_util.py
@@ -0,0 +1,237 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import collections
+import dataclasses
+import typing
+import logging
+import email.utils
+import calendar
+import datetime
+import re
+import urllib.parse
+import arvados
+import arvados.collection
+import arvados._internal
+
+logger = logging.getLogger('arvados.file_import')
+
+CheckCacheResult = collections.namedtuple('CheckCacheResult',
+ ['portable_data_hash', 'file_name',
+ 'uuid', 'clean_url', 'now'])
+
+@dataclasses.dataclass
+class Response:
+ status_code: int
+ headers: typing.Mapping[str, str]
+
+def _my_formatdate(dt):
+ return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
+ localtime=False, usegmt=True)
+
+def _my_parsedate(text):
+ parsed = email.utils.parsedate_tz(text)
+ if parsed:
+ if parsed[9]:
+ # Adjust to UTC
+ return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
+ else:
+ # TZ is zero or missing, assume UTC.
+ return datetime.datetime(*parsed[:6])
+ else:
+ return datetime.datetime(1970, 1, 1)
+
+def _fresh_cache(url, properties, now):
+ pr = properties[url]
+ expires = None
+
+ logger.debug("Checking cache freshness for %s using %s", url, pr)
+
+ if "Cache-Control" in pr:
+ if re.match(r"immutable", pr["Cache-Control"]):
+ return True
+
+ g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
+ if g:
+ expires = _my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
+
+ if expires is None and "Expires" in pr:
+ expires = _my_parsedate(pr["Expires"])
+
+ if expires is None:
+ # Use a default cache time of 24 hours if upstream didn't set
+ # any cache headers, to reduce redundant downloads.
+ expires = _my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
+
+ if not expires:
+ return False
+
+ return (now < expires)
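(A worked illustration of the policy above, with hypothetical header values:)

    # properties[url] == {"Date": "Mon, 01 Jan 2024 00:00:00 GMT",
    #                     "Cache-Control": "max-age=3600"}
    # => fresh until 2024-01-01 01:00:00 UTC, so _fresh_cache() returns True
    # before that moment and False after it. A Cache-Control value of
    # "immutable" is always fresh; with no cache headers at all, the default
    # lifetime is 24 hours from the recorded Date.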
+
+def remember_headers(url, properties, headers, now):
+ properties.setdefault(url, {})
+ for h in ("Cache-Control", "Etag", "Expires", "Date", "Content-Length"):
+ if h in headers:
+ properties[url][h] = headers[h]
+ if "Date" not in headers:
+ properties[url]["Date"] = _my_formatdate(now)
+
+def _changed(url, clean_url, properties, now, downloader):
+ req = downloader.head(url)
+
+ if req.status_code != 200:
+ # Sometimes endpoints are misconfigured and will deny HEAD but
+ # allow GET so instead of failing here, we'll try GET If-None-Match
+ return True
+
+ # previous version of this code used "ETag", now we are
+ # normalizing to "Etag", check for both.
+ etag = properties[url].get("Etag") or properties[url].get("ETag")
+
+ if url in properties:
+ del properties[url]
+ remember_headers(clean_url, properties, req.headers, now)
+
+ if "Etag" in req.headers and etag == req.headers["Etag"]:
+ # Didn't change
+ return False
+
+ return True
+
+
+def generic_check_cached_url(api, downloader, project_uuid, url, etags,
+ utcnow=datetime.datetime.utcnow,
+ varying_url_params="",
+ prefer_cached_downloads=False):
+
+ logger.info("Checking Keep for %s", url)
+
+ varying_params = set(arvados._internal.parse_seq(varying_url_params))
+
+ parsed = urllib.parse.urlparse(url)
+ query = [q for q in urllib.parse.parse_qsl(parsed.query)
+ if q[0] not in varying_params]
+
+ clean_url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params,
+ urllib.parse.urlencode(query, safe="/"), parsed.fragment))
+
+ r1 = api.collections().list(filters=[["properties", "exists", url]]).execute()
+
+ if clean_url == url:
+ items = r1["items"]
+ else:
+ r2 = api.collections().list(filters=[["properties", "exists", clean_url]]).execute()
+ items = r1["items"] + r2["items"]
+
+ now = utcnow()
+
+ for item in items:
+ properties = item["properties"]
+
+ if clean_url in properties:
+ cache_url = clean_url
+ elif url in properties:
+ cache_url = url
+ else:
+ raise Exception("Shouldn't happen, got an API result for %s that doesn't have the URL in properties" % item["uuid"])
+
+ if prefer_cached_downloads or _fresh_cache(cache_url, properties, now):
+ # HTTP caching rules say we should use the cache
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+ return CheckCacheResult(item["portable_data_hash"], next(iter(cr.keys())),
+ item["uuid"], clean_url, now)
+
+ if not _changed(cache_url, clean_url, properties, now, downloader):
+ # Etag didn't change, same content, just update headers
+ api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+ return CheckCacheResult(item["portable_data_hash"], next(iter(cr.keys())),
+ item["uuid"], clean_url, now)
+
+ for etagstr in ("Etag", "ETag"):
+ if etagstr in properties[cache_url] and len(properties[cache_url][etagstr]) > 2:
+ etags[properties[cache_url][etagstr]] = item
+
+ logger.debug("Found ETag values %s", etags)
+
+ return CheckCacheResult(None, None, None, clean_url, now)
+
+def etag_quote(etag):
+ # if it already has leading and trailing quotes, do nothing
+ if etag[0] == '"' and etag[-1] == '"':
+ return etag
+ else:
+ # Add quotes.
+ return '"' + etag + '"'
+
+def url_to_keep(api, downloader, project_uuid, url,
+ utcnow=datetime.datetime.utcnow, varying_url_params="",
+ prefer_cached_downloads=False):
+    """Download a file from an HTTP-like protocol and upload it to Keep, with HTTP headers as metadata.
+
+    Before downloading the URL, checks to see if the URL already
+    exists in Keep and applies the HTTP caching policy, along with the
+    varying_url_params and prefer_cached_downloads flags, to decide
+    whether to use the version in Keep or re-download it.
+    """
+
+ etags = {}
+ cache_result = generic_check_cached_url(api, downloader,
+ project_uuid, url, etags,
+ utcnow, varying_url_params,
+ prefer_cached_downloads)
+
+ if cache_result.portable_data_hash is not None:
+ return cache_result
+
+ clean_url = cache_result.clean_url
+ now = cache_result.now
+
+ properties = {}
+ headers = {}
+ if etags:
+ headers['If-None-Match'] = ', '.join([etag_quote(k) for k,v in etags.items()])
+ logger.debug("Sending GET request with headers %s", headers)
+
+ logger.info("Beginning download of %s", url)
+
+ req = downloader.download(url, headers)
+
+ c = downloader.collection
+
+ if req.status_code not in (200, 304):
+ raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
+
+ if downloader.target is not None:
+ downloader.target.close()
+
+ remember_headers(clean_url, properties, req.headers, now)
+
+ if req.status_code == 304 and "Etag" in req.headers and req.headers["Etag"] in etags:
+ item = etags[req.headers["Etag"]]
+ item["properties"].update(properties)
+ api.collections().update(uuid=item["uuid"], body={"collection":{"properties": item["properties"]}}).execute()
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+        return CheckCacheResult(item["portable_data_hash"], next(iter(cr.keys())),
+                                item["uuid"], clean_url, now)
+
+ logger.info("Download complete")
+
+ collectionname = "Downloaded from %s" % urllib.parse.quote(clean_url, safe='')
+
+ # max length - space to add a timestamp used by ensure_unique_name
+ max_name_len = 254 - 28
+
+ if len(collectionname) > max_name_len:
+ over = len(collectionname) - max_name_len
+ split = int(max_name_len/2)
+        collectionname = collectionname[0:split] + "…" + collectionname[split+over:]
+
+ c.save_new(name=collectionname, owner_uuid=project_uuid, ensure_unique_name=True)
+
+ api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
+
+ return CheckCacheResult(c.portable_data_hash(), downloader.name,
+ c.manifest_locator(), clean_url, now)
diff --git a/sdk/python/arvados/_normalize_stream.py b/sdk/python/arvados/_normalize_stream.py
deleted file mode 100644
index c72b82be1c..0000000000
--- a/sdk/python/arvados/_normalize_stream.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import absolute_import
-from . import config
-
-import re
-
-def escape(path):
- return re.sub(r'[\\:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
-
-def normalize_stream(stream_name, stream):
- """Take manifest stream and return a list of tokens in normalized format.
-
- :stream_name:
- The name of the stream.
-
- :stream:
- A dict mapping each filename to a list of `_range.LocatorAndRange` objects.
-
- """
-
- stream_name = escape(stream_name)
- stream_tokens = [stream_name]
- sortedfiles = list(stream.keys())
- sortedfiles.sort()
-
- blocks = {}
- streamoffset = 0
- # Go through each file and add each referenced block exactly once.
- for streamfile in sortedfiles:
- for segment in stream[streamfile]:
- if segment.locator not in blocks:
- stream_tokens.append(segment.locator)
- blocks[segment.locator] = streamoffset
- streamoffset += segment.block_size
-
- # Add the empty block if the stream is otherwise empty.
- if len(stream_tokens) == 1:
- stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
-
- for streamfile in sortedfiles:
- # Add in file segments
- current_span = None
- fout = escape(streamfile)
- for segment in stream[streamfile]:
- # Collapse adjacent segments
- streamoffset = blocks[segment.locator] + segment.segment_offset
- if current_span is None:
- current_span = [streamoffset, streamoffset + segment.segment_size]
- else:
- if streamoffset == current_span[1]:
- current_span[1] += segment.segment_size
- else:
- stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
- current_span = [streamoffset, streamoffset + segment.segment_size]
-
- if current_span is not None:
- stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
-
- if not stream[streamfile]:
- stream_tokens.append(u"0:0:{0}".format(fout))
-
- return stream_tokens
diff --git a/sdk/python/arvados/api.py b/sdk/python/arvados/api.py
index 8a17e42fcb..d8a21ea852 100644
--- a/sdk/python/arvados/api.py
+++ b/sdk/python/arvados/api.py
@@ -1,15 +1,35 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-"""Arvados API client
+"""Arvados REST API client
-The code in this module builds Arvados API client objects you can use to submit
-Arvados API requests. This includes extending the underlying HTTP client with
-niceties such as caching, X-Request-Id header for tracking, and more. The main
-client constructors are `api` and `api_from_config`.
+This module provides classes and functions to construct an Arvados REST API
+client. Most users will want to use one of these constructor functions, in
+order of preference:
+
+* `arvados.api.api` provides a high-level interface to construct a client from
+ either arguments or user configuration. You can call this module just like
+ a function as a shortcut for calling `arvados.api.api`.
+
+* `arvados.api.api_from_config` constructs a client from user configuration in
+ a dictionary.
+
+* `arvados.api.api_client` provides a lower-level interface to construct a
+  simpler client object that is not thread-safe.
+
+Other classes and functions in this module support creating and customizing
+the client for specialized use-cases.
+
+The methods on an Arvados REST API client are generated dynamically at
+runtime. The `arvados.api_resources` module documents those methods and
+return values for the current version of Arvados. It does not
+implement anything, so you don't need to import it, but it's a helpful
+reference to understand how to use the Arvados REST API client.
"""
import collections
+import errno
+import hashlib
import httplib2
import json
import logging
@@ -19,6 +39,7 @@ import re
import socket
import ssl
import sys
+import tempfile
import threading
import time
import types
@@ -37,9 +58,10 @@ from apiclient import discovery as apiclient_discovery
from apiclient import errors as apiclient_errors
from . import config
from . import errors
+from . import keep
from . import retry
from . import util
-from . import cache
+from ._internal import basedirs
from .logging import GoogleHTTPClientFilter, log_handler
_logger = logging.getLogger('arvados.api')
@@ -87,7 +109,7 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
self.max_request_size < len(kwargs['body'])):
raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
- headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
+ headers['Authorization'] = 'Bearer %s' % self.arvados_api_token
if (time.time() - self._last_request_time) > self._max_keepalive_idle:
# High probability of failure due to connection atrophy. Make
@@ -100,8 +122,8 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
self._last_request_time = time.time()
try:
response, body = self.orig_http_request(uri, method, headers=headers, **kwargs)
- except ssl.SSLCertVerificationError as e:
- raise ssl.SSLCertVerificationError(e.args[0], "Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e)) from None
+ except ssl.CertificateError as e:
+ raise ssl.CertificateError(e.args[0], "Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e)) from None
# googleapiclient only retries 403, 429, and 5xx status codes.
# If we got another 4xx status that we want to retry, convert it into
# 5xx so googleapiclient handles it the way we want.
@@ -155,29 +177,155 @@ def _new_http_error(cls, *args, **kwargs):
errors.ApiError, *args, **kwargs)
apiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)
-def http_cache(data_type: str) -> cache.SafeHTTPCache:
+class ThreadSafeHTTPCache:
+ """Thread-safe replacement for `httplib2.FileCache`
+
+ `arvados.api.http_cache` is the preferred way to construct this object.
+ Refer to that function's docstring for details.
+ """
+
+ def __init__(self, path=None, max_age=None):
+ self._dir = path
+ if max_age is not None:
+ try:
+ self._clean(threshold=time.time() - max_age)
+ except:
+ pass
+
+ def _clean(self, threshold=0):
+ for ent in os.listdir(self._dir):
+ fnm = os.path.join(self._dir, ent)
+ if os.path.isdir(fnm) or not fnm.endswith('.tmp'):
+ continue
+ stat = os.lstat(fnm)
+ if stat.st_mtime < threshold:
+ try:
+ os.unlink(fnm)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def __str__(self):
+ return self._dir
+
+ def _filename(self, url):
+ return os.path.join(self._dir, hashlib.md5(url.encode('utf-8')).hexdigest()+'.tmp')
+
+ def get(self, url):
+ filename = self._filename(url)
+ try:
+ with open(filename, 'rb') as f:
+ return f.read()
+ except (IOError, OSError):
+ return None
+
+ def set(self, url, content):
+ try:
+ fd, tempname = tempfile.mkstemp(dir=self._dir)
+ except:
+ return None
+ try:
+ try:
+ f = os.fdopen(fd, 'wb')
+ except:
+ os.close(fd)
+ raise
+ try:
+ f.write(content)
+ finally:
+ f.close()
+ os.rename(tempname, self._filename(url))
+ tempname = None
+ finally:
+ if tempname:
+ os.unlink(tempname)
+
+ def delete(self, url):
+ try:
+ os.unlink(self._filename(url))
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+
+
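(A sketch of the intended wiring; `http_cache` below is the preferred
constructor for this class.)

    import httplib2

    cache = http_cache('discovery')  # may be None if no cache dir is usable
    http = httplib2.Http(cache=cache)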
+class ThreadSafeAPIClient(object):
+ """Thread-safe wrapper for an Arvados API client
+
+ This class takes all the arguments necessary to build a lower-level
+ Arvados API client `googleapiclient.discovery.Resource`, then
+ transparently builds and wraps a unique object per thread. This works
+ around the fact that the client's underlying HTTP client object is not
+ thread-safe.
+
+ Arguments:
+
+ * apiconfig: Mapping[str, str] | None --- A mapping with entries for
+ `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
+ `ARVADOS_API_HOST_INSECURE`. If not provided, uses
+ `arvados.config.settings` to get these parameters from user
+ configuration. You can pass an empty mapping to build the client
+ solely from `api_params`.
+
+ * keep_params: Mapping[str, Any] --- Keyword arguments used to construct
+ an associated `arvados.keep.KeepClient`.
+
+ * api_params: Mapping[str, Any] --- Keyword arguments used to construct
+ each thread's API client. These have the same meaning as in the
+ `arvados.api.api` function.
+
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ `'v1'`.
+ """
+ def __init__(
+ self,
+ apiconfig: Optional[Mapping[str, str]]=None,
+ keep_params: Optional[Mapping[str, Any]]={},
+ api_params: Optional[Mapping[str, Any]]={},
+ version: Optional[str]=None,
+ ) -> None:
+ if apiconfig or apiconfig is None:
+ self._api_kwargs = api_kwargs_from_config(version, apiconfig, **api_params)
+ else:
+ self._api_kwargs = normalize_api_kwargs(version, **api_params)
+ self.api_token = self._api_kwargs['token']
+ self.request_id = self._api_kwargs.get('request_id')
+ self.local = threading.local()
+ self.keep = keep.KeepClient(api_client=self, **keep_params)
+
+ def localapi(self) -> 'googleapiclient.discovery.Resource':
+ try:
+ client = self.local.api
+ except AttributeError:
+ client = api_client(**self._api_kwargs)
+ client._http._request_id = lambda: self.request_id or util.new_request_id()
+ self.local.api = client
+ return client
+
+ def __getattr__(self, name: str) -> Any:
+ # Proxy nonexistent attributes to the thread-local API client.
+ return getattr(self.localapi(), name)
+
+
+def http_cache(data_type: str) -> Optional[ThreadSafeHTTPCache]:
"""Set up an HTTP file cache
- This function constructs and returns an `arvados.cache.SafeHTTPCache`
- backed by the filesystem under `~/.cache/arvados/`, or `None` if the
- directory cannot be set up. The return value can be passed to
+ This function constructs and returns an `arvados.api.ThreadSafeHTTPCache`
+    backed by the filesystem under a cache directory determined from the user's environment, or
+ `None` if the directory cannot be set up. The return value can be passed to
`httplib2.Http` as the `cache` argument.
Arguments:
- * data_type: str --- The name of the subdirectory under `~/.cache/arvados`
+    * data_type: str --- The name of the subdirectory under the base cache directory
where data is cached.
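+
+    A hedged sketch of intended use (httplib2 is already an SDK dependency):
+
+        import httplib2
+        http = httplib2.Http(cache=http_cache('discovery'))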
"""
try:
- homedir = pathlib.Path.home()
- except RuntimeError:
- return None
- path = pathlib.Path(homedir, '.cache', 'arvados', data_type)
- try:
- path.mkdir(parents=True, exist_ok=True)
- except OSError:
+ path = basedirs.BaseDirectories('CACHE').storage_path(data_type)
+ except (OSError, RuntimeError):
return None
- return cache.SafeHTTPCache(str(path), max_age=60*60*24*2)
+ else:
+ return ThreadSafeHTTPCache(str(path), max_age=60*60*24*2)
def api_client(
version: str,
@@ -211,8 +359,7 @@ def api_client(
Keyword-only arguments:
* cache: bool --- If true, loads the API discovery document from, or
- saves it to, a cache on disk (located at
- `~/.cache/arvados/discovery`).
+ saves it to, a cache on disk.
* http: httplib2.Http | None --- The HTTP client object the API client
object will use to make requests. If not provided, this function will
@@ -412,7 +559,7 @@ def api(
*,
discoveryServiceUrl: Optional[str]=None,
**kwargs: Any,
-) -> 'arvados.safeapi.ThreadSafeApiCache':
+) -> ThreadSafeAPIClient:
"""Dynamically build an Arvados API client
This function provides a high-level "do what I mean" interface to build an
@@ -421,7 +568,7 @@ def api(
like you would write in user configuration; or pass additional arguments
for lower-level control over the client.
- This function returns a `arvados.safeapi.ThreadSafeApiCache`, an
+ This function returns a `arvados.api.ThreadSafeAPIClient`, an
API-compatible wrapper around `googleapiclient.discovery.Resource`. If
you're handling concurrency yourself and/or your application is very
performance-sensitive, consider calling `api_client` directly.
@@ -460,22 +607,20 @@ def api(
else:
kwargs.update(api_kwargs_from_config(version))
version = kwargs.pop('version')
- # We do the import here to avoid a circular import at the top level.
- from .safeapi import ThreadSafeApiCache
- return ThreadSafeApiCache({}, {}, kwargs, version)
+ return ThreadSafeAPIClient({}, {}, kwargs, version)
def api_from_config(
version: Optional[str]=None,
apiconfig: Optional[Mapping[str, str]]=None,
**kwargs: Any
-) -> 'arvados.safeapi.ThreadSafeApiCache':
+) -> ThreadSafeAPIClient:
"""Build an Arvados API client from a configuration mapping
This function builds an Arvados API client from a mapping with user
configuration. It accepts that mapping as an argument, so you can use a
configuration that's different from what the user has set up.
- This function returns a `arvados.safeapi.ThreadSafeApiCache`, an
+ This function returns a `arvados.api.ThreadSafeAPIClient`, an
API-compatible wrapper around `googleapiclient.discovery.Resource`. If
you're handling concurrency yourself and/or your application is very
performance-sensitive, consider calling `api_client` directly.
@@ -496,49 +641,3 @@ def api_from_config(
docstring for more information about their meaning.
"""
return api(**api_kwargs_from_config(version, apiconfig, **kwargs))
-
-class OrderedJsonModel(apiclient.model.JsonModel):
- """Model class for JSON that preserves the contents' order
-
- .. WARNING:: Deprecated
- This model is redundant now that Python dictionaries preserve insertion
- ordering. Code that passes this model to API constructors can remove it.
-
- In Python versions before 3.6, API clients that cared about preserving the
- order of fields in API server responses could use this model to do so.
- Typical usage looked like:
-
- from arvados.api import OrderedJsonModel
- client = arvados.api('v1', ..., model=OrderedJsonModel())
- """
- @util._deprecated(preferred="the default model and rely on Python's built-in dictionary ordering")
- def __init__(self, data_wrapper=False):
- return super().__init__(data_wrapper)
-
-
-RETRY_DELAY_INITIAL = 0
-"""
-.. WARNING:: Deprecated
- This constant was used by retry code in previous versions of the Arvados SDK.
- Changing the value has no effect anymore.
- Prefer passing `num_retries` to an API client constructor instead.
- Refer to the constructor docstrings for details.
-"""
-
-RETRY_DELAY_BACKOFF = 0
-"""
-.. WARNING:: Deprecated
- This constant was used by retry code in previous versions of the Arvados SDK.
- Changing the value has no effect anymore.
- Prefer passing `num_retries` to an API client constructor instead.
- Refer to the constructor docstrings for details.
-"""
-
-RETRY_COUNT = 0
-"""
-.. WARNING:: Deprecated
- This constant was used by retry code in previous versions of the Arvados SDK.
- Changing the value has no effect anymore.
- Prefer passing `num_retries` to an API client constructor instead.
- Refer to the constructor docstrings for details.
-"""
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
index e0e972b5c1..ce18c9f6f2 100644
--- a/sdk/python/arvados/arvfile.py
+++ b/sdk/python/arvados/arvfile.py
@@ -2,13 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from __future__ import division
-from future import standard_library
-from future.utils import listitems, listvalues
-standard_library.install_aliases()
-from builtins import range
-from builtins import object
import bz2
import collections
import copy
@@ -25,14 +18,21 @@ import uuid
import zlib
from . import config
+from ._internal import streams
from .errors import KeepWriteError, AssertionError, ArgumentError
from .keep import KeepLocator
-from ._normalize_stream import normalize_stream
-from ._ranges import locators_and_ranges, replace_range, Range, LocatorAndRange
from .retry import retry_method
+ADD = "add"
+"""Argument value for `Collection` methods to represent an added item"""
+DEL = "del"
+"""Argument value for `Collection` methods to represent a removed item"""
MOD = "mod"
+"""Argument value for `Collection` methods to represent a modified item"""
+TOK = "tok"
+"""Argument value for `Collection` methods to represent an item with token differences"""
WRITE = "write"
+"""Argument value for `Collection` methods to represent that a file was written to"""
_logger = logging.getLogger('arvados.arvfile')
@@ -205,64 +205,6 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
raise IOError(errno.ENOSYS, "Not implemented")
-class StreamFileReader(ArvadosFileReaderBase):
- class _NameAttribute(str):
- # The Python file API provides a plain .name attribute.
- # Older SDK provided a name() method.
- # This class provides both, for maximum compatibility.
- def __call__(self):
- return self
-
- def __init__(self, stream, segments, name):
- super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
- self._stream = stream
- self.segments = segments
-
- def stream_name(self):
- return self._stream.name()
-
- def size(self):
- n = self.segments[-1]
- return n.range_start + n.range_size
-
- @_FileLikeObjectBase._before_close
- @retry_method
- def read(self, size, num_retries=None):
- """Read up to 'size' bytes from the stream, starting at the current file position"""
- if size == 0:
- return b''
-
- data = b''
- available_chunks = locators_and_ranges(self.segments, self._filepos, size)
- if available_chunks:
- lr = available_chunks[0]
- data = self._stream.readfrom(lr.locator+lr.segment_offset,
- lr.segment_size,
- num_retries=num_retries)
-
- self._filepos += len(data)
- return data
-
- @_FileLikeObjectBase._before_close
- @retry_method
- def readfrom(self, start, size, num_retries=None):
- """Read up to 'size' bytes from the stream, starting at 'start'"""
- if size == 0:
- return b''
-
- data = []
- for lr in locators_and_ranges(self.segments, start, size):
- data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
- num_retries=num_retries))
- return b''.join(data)
-
- def as_manifest(self):
- segs = []
- for r in self.segments:
- segs.extend(self._stream.locators_and_ranges(r.locator, r.range_size))
- return " ".join(normalize_stream(".", {self.name: segs})) + "\n"
-
-
def synchronized(orig_func):
@functools.wraps(orig_func)
def synchronized_wrapper(self, *args, **kwargs):
@@ -620,7 +562,7 @@ class _BlockManager(object):
# A WRITABLE block with its owner.closed() implies that its
# size is <= KEEP_BLOCK_SIZE/2.
try:
- small_blocks = [b for b in listvalues(self._bufferblocks)
+ small_blocks = [b for b in self._bufferblocks.values()
if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
except AttributeError:
# Writable blocks without owner shouldn't exist.
@@ -763,7 +705,7 @@ class _BlockManager(object):
self.repack_small_blocks(force=True, sync=True)
with self.lock:
- items = listitems(self._bufferblocks)
+ items = list(self._bufferblocks.items())
for k,v in items:
if v.state() != _BufferBlock.COMMITTED and v.owner:
@@ -907,6 +849,8 @@ class ArvadosFile(object):
def replace_contents(self, other):
"""Replace segments of this file with segments from another `ArvadosFile` object."""
+ eventtype = TOK if self == other else MOD
+
map_loc = {}
self._segments = []
for other_segment in other.segments():
@@ -920,9 +864,10 @@ class ArvadosFile(object):
map_loc[other_segment.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid
new_loc = map_loc[other_segment.locator]
- self._segments.append(Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
+ self._segments.append(streams.Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
self.set_committed(False)
+ self.parent.notify(eventtype, self.parent, self.name, (self, self))
def __eq__(self, other):
if other is self:
@@ -1026,7 +971,7 @@ class ArvadosFile(object):
                # segment is past the truncate size, all done
break
elif size < range_end:
- nr = Range(r.locator, r.range_start, size - r.range_start, 0)
+ nr = streams.Range(r.locator, r.range_start, size - r.range_start, 0)
nr.segment_offset = r.segment_offset
new_segs.append(nr)
break
@@ -1039,28 +984,37 @@ class ArvadosFile(object):
padding = self.parent._my_block_manager().get_padding_block()
diff = size - self.size()
while diff > config.KEEP_BLOCK_SIZE:
- self._segments.append(Range(padding.blockid, self.size(), config.KEEP_BLOCK_SIZE, 0))
+ self._segments.append(streams.Range(padding.blockid, self.size(), config.KEEP_BLOCK_SIZE, 0))
diff -= config.KEEP_BLOCK_SIZE
if diff > 0:
- self._segments.append(Range(padding.blockid, self.size(), diff, 0))
+ self._segments.append(streams.Range(padding.blockid, self.size(), diff, 0))
self.set_committed(False)
else:
# size == self.size()
pass
- def readfrom(self, offset, size, num_retries, exact=False):
+ def readfrom(self, offset, size, num_retries, exact=False, return_memoryview=False):
"""Read up to `size` bytes from the file starting at `offset`.
- :exact:
- If False (default), return less data than requested if the read
- crosses a block boundary and the next block isn't cached. If True,
- only return less data than requested when hitting EOF.
+ Arguments:
+
+ * exact: bool --- If False (default), return less data than
+ requested if the read crosses a block boundary and the next
+ block isn't cached. If True, only return less data than
+ requested when hitting EOF.
+
+ * return_memoryview: bool --- If False (default) return a
+ `bytes` object, which may entail making a copy in some
+ situations. If True, return a `memoryview` object which may
+ avoid making a copy, but may be incompatible with code
+ expecting a `bytes` object.
+
"""
with self.lock:
if size == 0 or offset >= self.size():
- return b''
- readsegs = locators_and_ranges(self._segments, offset, size)
+ return memoryview(b'') if return_memoryview else b''
+ readsegs = streams.locators_and_ranges(self._segments, offset, size)
prefetch = None
prefetch_lookahead = self.parent._my_block_manager().prefetch_lookahead
@@ -1076,10 +1030,12 @@ class ArvadosFile(object):
# every 16 MiB).
self._read_counter = (self._read_counter+1) % 128
if self._read_counter == 1:
- prefetch = locators_and_ranges(self._segments,
- offset + size,
- config.KEEP_BLOCK_SIZE * prefetch_lookahead,
- limit=(1+prefetch_lookahead))
+ prefetch = streams.locators_and_ranges(
+ self._segments,
+ offset + size,
+ config.KEEP_BLOCK_SIZE * prefetch_lookahead,
+ limit=(1+prefetch_lookahead),
+ )
locs = set()
data = []
@@ -1099,9 +1055,10 @@ class ArvadosFile(object):
locs.add(lr.locator)
if len(data) == 1:
- return data[0]
+ return data[0] if return_memoryview else data[0].tobytes()
else:
- return b''.join(data)
+ return memoryview(b''.join(data)) if return_memoryview else b''.join(data)
+
@must_be_writable
@synchronized
@@ -1141,11 +1098,14 @@ class ArvadosFile(object):
self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
self._current_bblock.append(data)
-
- replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
-
+ streams.replace_range(
+ self._segments,
+ offset,
+ len(data),
+ self._current_bblock.blockid,
+ self._current_bblock.write_pointer - len(data),
+ )
self.parent.notify(WRITE, self.parent, self.name, (self, self))
-
return len(data)
@synchronized
@@ -1197,9 +1157,9 @@ class ArvadosFile(object):
def _add_segment(self, blocks, pos, size):
"""Internal implementation of add_segment."""
self.set_committed(False)
- for lr in locators_and_ranges(blocks, pos, size):
- last = self._segments[-1] if self._segments else Range(0, 0, 0, 0)
- r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
+ for lr in streams.locators_and_ranges(blocks, pos, size):
+ last = self._segments[-1] if self._segments else streams.Range(0, 0, 0, 0)
+ r = streams.Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
self._segments.append(r)
@synchronized
@@ -1224,9 +1184,13 @@ class ArvadosFile(object):
loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
if portable_locators:
loc = KeepLocator(loc).stripped()
- filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
- segment.segment_offset, segment.range_size))
- buf += ' '.join(normalize_stream(stream_name, {self.name: filestream}))
+ filestream.append(streams.LocatorAndRange(
+ loc,
+ KeepLocator(loc).size,
+ segment.segment_offset,
+ segment.range_size,
+ ))
+ buf += ' '.join(streams.normalize_stream(stream_name, {self.name: filestream}))
buf += "\n"
return buf
@@ -1266,33 +1230,49 @@ class ArvadosFileReader(ArvadosFileReaderBase):
@_FileLikeObjectBase._before_close
@retry_method
- def read(self, size=None, num_retries=None):
+ def read(self, size=-1, num_retries=None, return_memoryview=False):
"""Read up to `size` bytes from the file and return the result.
- Starts at the current file position. If `size` is None, read the
- entire remainder of the file.
+ Starts at the current file position. If `size` is negative or None,
+ read the entire remainder of the file.
+
+    Returns an empty result if the file pointer is at the end of the file.
+
+ Returns a `bytes` object, unless `return_memoryview` is True,
+ in which case it returns a memory view, which may avoid an
+ unnecessary data copy in some situations.
+
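+        A hedged example of the memoryview path, which can avoid a copy
+        when the caller only needs a read-only buffer:
+
+            chunk = reader.read(65536, return_memoryview=True)
+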
"""
- if size is None:
+        if size is None or size < 0:
data = []
- rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
+            # Specify exact=False, return_memoryview=True here so that we
+            # only copy data once into the final buffer.
+ rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries, exact=False, return_memoryview=True)
while rd:
data.append(rd)
self._filepos += len(rd)
- rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
- return b''.join(data)
+ rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries, exact=False, return_memoryview=True)
+ return memoryview(b''.join(data)) if return_memoryview else b''.join(data)
else:
- data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True)
+ data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True, return_memoryview=return_memoryview)
self._filepos += len(data)
return data
@_FileLikeObjectBase._before_close
@retry_method
- def readfrom(self, offset, size, num_retries=None):
+ def readfrom(self, offset, size, num_retries=None, return_memoryview=False):
"""Read up to `size` bytes from the stream, starting at the specified file offset.
This method does not change the file position.
+
+ Returns a `bytes` object, unless `return_memoryview` is True,
+ in which case it returns a memory view, which may avoid an
+ unnecessary data copy in some situations.
+
"""
- return self.arvadosfile.readfrom(offset, size, num_retries)
+ return self.arvadosfile.readfrom(offset, size, num_retries, exact=True, return_memoryview=return_memoryview)
def flush(self):
pass
diff --git a/sdk/python/arvados/cache.py b/sdk/python/arvados/cache.py
index 85f2b89ea2..97cdb5af20 100644
--- a/sdk/python/arvados/cache.py
+++ b/sdk/python/arvados/cache.py
@@ -1,76 +1,13 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""arvados.cache - Shim compatibility module
-from builtins import object
-import errno
-import hashlib
-import os
-import tempfile
-import time
+This module used to define `arvados.cache.SafeHTTPCache`. Now it exists only
+to provide backwards-compatible imports. New code should import
+`arvados.api.ThreadSafeHTTPCache` instead.
-class SafeHTTPCache(object):
- """Thread-safe replacement for httplib2.FileCache"""
+@private
+"""
- def __init__(self, path=None, max_age=None):
- self._dir = path
- if max_age is not None:
- try:
- self._clean(threshold=time.time() - max_age)
- except:
- pass
-
- def _clean(self, threshold=0):
- for ent in os.listdir(self._dir):
- fnm = os.path.join(self._dir, ent)
- if os.path.isdir(fnm) or not fnm.endswith('.tmp'):
- continue
- stat = os.lstat(fnm)
- if stat.st_mtime < threshold:
- try:
- os.unlink(fnm)
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
-
- def __str__(self):
- return self._dir
-
- def _filename(self, url):
- return os.path.join(self._dir, hashlib.md5(url.encode('utf-8')).hexdigest()+'.tmp')
-
- def get(self, url):
- filename = self._filename(url)
- try:
- with open(filename, 'rb') as f:
- return f.read()
- except (IOError, OSError):
- return None
-
- def set(self, url, content):
- try:
- fd, tempname = tempfile.mkstemp(dir=self._dir)
- except:
- return None
- try:
- try:
- f = os.fdopen(fd, 'wb')
- except:
- os.close(fd)
- raise
- try:
- f.write(content)
- finally:
- f.close()
- os.rename(tempname, self._filename(url))
- tempname = None
- finally:
- if tempname:
- os.unlink(tempname)
-
- def delete(self, url):
- try:
- os.unlink(self._filename(url))
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
+from .api import ThreadSafeHTTPCache as SafeHTTPCache
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
index 9e6bd06071..9ef111446a 100644
--- a/sdk/python/arvados/collection.py
+++ b/sdk/python/arvados/collection.py
@@ -12,11 +12,6 @@ cookbook for [an introduction to using the Collection class][cookbook].
[cookbook]: https://doc.arvados.org/sdk/python/cookbook.html#working-with-collections
"""
-from __future__ import absolute_import
-from future.utils import listitems, listvalues, viewkeys
-from builtins import str
-from past.builtins import basestring
-from builtins import object
import ciso8601
import datetime
import errno
@@ -33,12 +28,10 @@ import time
from collections import deque
from stat import *
-from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, WrappableFile, _BlockManager, synchronized, must_be_writable, NoopLock
+from ._internal import streams
+from .api import ThreadSafeAPIClient
+from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, WrappableFile, _BlockManager, synchronized, must_be_writable, NoopLock, ADD, DEL, MOD, TOK, WRITE
from .keep import KeepLocator, KeepClient
-from .stream import StreamReader
-from ._normalize_stream import normalize_stream, escape
-from ._ranges import Range, LocatorAndRange
-from .safeapi import ThreadSafeApiCache
import arvados.config as config
import arvados.errors as errors
import arvados.util
@@ -65,14 +58,7 @@ else:
_logger = logging.getLogger('arvados.collection')
-ADD = "add"
-"""Argument value for `Collection` methods to represent an added item"""
-DEL = "del"
-"""Argument value for `Collection` methods to represent a removed item"""
-MOD = "mod"
-"""Argument value for `Collection` methods to represent a modified item"""
-TOK = "tok"
-"""Argument value for `Collection` methods to represent an item with token differences"""
+
FILE = "file"
"""`create_type` value for `Collection.find_or_create`"""
COLLECTION = "collection"
@@ -341,7 +327,7 @@ class RichCollectionBase(CollectionBase):
self,
path: str,
mode: str="r",
- encoding: Optional[str]=None,
+ encoding: Optional[str]=None
) -> IO:
"""Open a file-like object within the collection
@@ -361,6 +347,7 @@ class RichCollectionBase(CollectionBase):
* encoding: str | None --- The text encoding of the file. Only used
when the file is opened in text mode. The default is
platform-dependent.
+
"""
if not re.search(r'^[rwa][bt]?\+?$', mode):
raise errors.ArgumentError("Invalid mode {!r}".format(mode))
@@ -419,7 +406,7 @@ class RichCollectionBase(CollectionBase):
if value == self._committed:
return
if value:
- for k,v in listitems(self._items):
+ for k,v in self._items.items():
v.set_committed(True)
self._committed = True
else:
@@ -434,7 +421,7 @@ class RichCollectionBase(CollectionBase):
This method does not recurse. It only iterates the contents of this
collection's corresponding stream.
"""
- return iter(viewkeys(self._items))
+ return iter(self._items)
@synchronized
def __getitem__(self, k: str) -> CollectionItem:
@@ -492,7 +479,7 @@ class RichCollectionBase(CollectionBase):
`arvados.arvfile.ArvadosFile` for every file, directly within this
collection's stream. This method does not recurse.
"""
- return listvalues(self._items)
+ return list(self._items.values())
@synchronized
def items(self) -> List[Tuple[str, CollectionItem]]:
@@ -502,7 +489,7 @@ class RichCollectionBase(CollectionBase):
`arvados.arvfile.ArvadosFile` for every file, directly within this
collection's stream. This method does not recurse.
"""
- return listitems(self._items)
+ return list(self._items.items())
def exists(self, path: str) -> bool:
"""Indicate whether this collection includes an item at `path`
@@ -548,7 +535,7 @@ class RichCollectionBase(CollectionBase):
item.remove(pathcomponents[1], recursive=recursive)
def _clonefrom(self, source):
- for k,v in listitems(source):
+ for k,v in source.items():
self._items[k] = v.clone(self, k)
def clone(self):
@@ -612,7 +599,7 @@ class RichCollectionBase(CollectionBase):
source_collection = self
# Find the object
- if isinstance(source, basestring):
+ if isinstance(source, str):
source_obj = source_collection.find(source)
if source_obj is None:
raise IOError(errno.ENOENT, "File not found", source)
@@ -803,11 +790,15 @@ class RichCollectionBase(CollectionBase):
loc = arvfile.parent._my_block_manager().get_bufferblock(loc).locator()
if strip:
loc = KeepLocator(loc).stripped()
- filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
- segment.segment_offset, segment.range_size))
+ filestream.append(streams.LocatorAndRange(
+ loc,
+ KeepLocator(loc).size,
+ segment.segment_offset,
+ segment.range_size,
+ ))
stream[filename] = filestream
if stream:
- buf.append(" ".join(normalize_stream(stream_name, stream)) + "\n")
+ buf.append(" ".join(streams.normalize_stream(stream_name, stream)) + "\n")
for dirname in [s for s in sorted_keys if isinstance(self[s], RichCollectionBase)]:
buf.append(self[dirname].manifest_text(
stream_name=os.path.join(stream_name, dirname),
@@ -924,9 +915,12 @@ class RichCollectionBase(CollectionBase):
# Overwrite path with new item; this can happen if
# path was a file and is now a collection or vice versa
self.copy(final, path, overwrite=True)
- else:
- # Local is missing (presumably deleted) or local doesn't
- # match the "start" value, so save change to conflict file
+ elif event_type == MOD:
+                # Local doesn't match the "start" value, or local is
+                # missing (presumably deleted), so save the change to a
+                # conflict file. Don't do this for TOK events, which mean
+                # the file content didn't change and only its tokens were
+                # updated.
self.copy(final, conflictpath)
elif event_type == DEL:
if local == initial:
@@ -994,8 +988,13 @@ class RichCollectionBase(CollectionBase):
was modified.
* item: arvados.arvfile.ArvadosFile |
- arvados.collection.Subcollection --- The new contents at `name`
- within `collection`.
+ arvados.collection.Subcollection --- For ADD events, the new
+ contents at `name` within `collection`; for DEL events, the
+      item that was removed. For MOD and TOK events, a 2-tuple of
+      the previous item and the new item (the two may be the same
+      object, if it was modified in place, or different objects, if
+      it was replaced).
+
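+        A hedged callback sketch (the signature follows this method's
+        arguments; registration uses `subscribe` on this class):
+
+            def on_change(event, collection, name, item):
+                if event in (MOD, TOK):
+                    old_item, new_item = item
+
+            collection.subscribe(on_change)
+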
"""
if self._callback:
self._callback(event, collection, name, item)
@@ -1024,7 +1023,7 @@ class RichCollectionBase(CollectionBase):
@synchronized
def flush(self) -> None:
"""Upload any pending data to Keep"""
- for e in listvalues(self):
+ for e in self.values():
e.flush()
@@ -1065,7 +1064,7 @@ class Collection(RichCollectionBase):
settings from `apiconfig` (see below). If your client instantiates
many Collection objects, you can help limit memory utilization by
calling `arvados.api.api` to construct an
- `arvados.safeapi.ThreadSafeApiCache`, and use that as the `api_client`
+ `arvados.api.ThreadSafeAPIClient`, and use that as the `api_client`
for every Collection.
* keep_client: arvados.keep.KeepClient | None --- The Keep client
@@ -1117,8 +1116,8 @@ class Collection(RichCollectionBase):
self._api_client = api_client
self._keep_client = keep_client
- # Use the keep client from ThreadSafeApiCache
- if self._keep_client is None and isinstance(self._api_client, ThreadSafeApiCache):
+ # Use the keep client from ThreadSafeAPIClient
+ if self._keep_client is None and isinstance(self._api_client, ThreadSafeAPIClient):
self._keep_client = self._api_client.keep
self._block_manager = block_manager
@@ -1136,7 +1135,7 @@ class Collection(RichCollectionBase):
self._manifest_text = None
self._portable_data_hash = None
self._api_response = None
- self._past_versions = set()
+ self._token_refresh_timestamp = 0
self.lock = threading.RLock()
self.events = None
@@ -1202,20 +1201,6 @@ class Collection(RichCollectionBase):
def writable(self) -> bool:
return True
- @synchronized
- def known_past_version(
- self,
- modified_at_and_portable_data_hash: Tuple[Optional[str], Optional[str]]
- ) -> bool:
- """Indicate whether an API record for this collection has been seen before
-
- As this collection object loads records from the API server, it records
- their `modified_at` and `portable_data_hash` fields. This method accepts
- a 2-tuple with values for those fields, and returns `True` if the
- combination was previously loaded.
- """
- return modified_at_and_portable_data_hash in self._past_versions
-
@synchronized
@retry_method
def update(
@@ -1247,28 +1232,66 @@ class Collection(RichCollectionBase):
the collection's API record from the API server. If not specified,
uses the `num_retries` provided when this instance was constructed.
"""
+
+ token_refresh_period = 60*60
+ time_since_last_token_refresh = (time.time() - self._token_refresh_timestamp)
+ upstream_response = None
+
if other is None:
if self._manifest_locator is None:
raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
- response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)
- if (self.known_past_version((response.get("modified_at"), response.get("portable_data_hash"))) and
- response.get("portable_data_hash") != self.portable_data_hash()):
- # The record on the server is different from our current one, but we've seen it before,
- # so ignore it because it's already been merged.
- # However, if it's the same as our current record, proceed with the update, because we want to update
- # our tokens.
+
+ if re.match(arvados.util.portable_data_hash_pattern, self._manifest_locator) and time_since_last_token_refresh < token_refresh_period:
return
- else:
- self._remember_api_response(response)
- other = CollectionReader(response["manifest_text"])
- baseline = CollectionReader(self._manifest_text)
+
+ upstream_response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)
+ other = CollectionReader(upstream_response["manifest_text"])
+
+ if self.committed():
+ # 1st case, no local changes, content is the same
+ if self.portable_data_hash() == other.portable_data_hash() and time_since_last_token_refresh < token_refresh_period:
+ # No difference in content. Remember the API record
+ # (metadata such as name or properties may have changed)
+ # but don't update the token refresh timestamp.
+ if upstream_response is not None:
+ self._remember_api_response(upstream_response)
+ return
+
+ # 2nd case, no local changes, but either upstream changed
+ # or we want to refresh tokens.
+
+ self.apply(self.diff(other))
+ if upstream_response is not None:
+ self._remember_api_response(upstream_response)
+ self._update_token_timestamp()
+ self.set_committed(True)
+ return
+
+ # 3rd case, upstream changed, but we also have uncommitted
+ # changes that we want to incorporate so they don't get lost.
+
+ # _manifest_text stores the text from last time we received a
+ # record from the API server. This is the state of the
+ # collection before our uncommitted changes.
+ baseline = Collection(self._manifest_text)
+
+ # Get the set of changes between our baseline and the other
+ # collection and apply them to self.
+ #
+ # If a file was modified in both 'self' and 'other', the
+ # 'apply' method keeps the contents of 'self' and creates a
+ # conflict file with the contents of 'other'.
self.apply(baseline.diff(other))
- self._manifest_text = self.manifest_text()
+
+        # Remember the new baseline record; future updates will diff local
+        # changes against the record we just merged.
+ if upstream_response is not None:
+ self._remember_api_response(upstream_response)
+
@synchronized
def _my_api(self):
if self._api_client is None:
- self._api_client = ThreadSafeApiCache(self._config, version='v1')
+ self._api_client = ThreadSafeAPIClient(self._config, version='v1')
if self._keep_client is None:
self._keep_client = self._api_client.keep
return self._api_client
@@ -1297,7 +1320,11 @@ class Collection(RichCollectionBase):
def _remember_api_response(self, response):
self._api_response = response
- self._past_versions.add((response.get("modified_at"), response.get("portable_data_hash")))
+ self._manifest_text = self._api_response['manifest_text']
+ self._portable_data_hash = self._api_response['portable_data_hash']
+
+ def _update_token_timestamp(self):
+ self._token_refresh_timestamp = time.time()
def _populate_from_api_server(self):
# As in KeepClient itself, we must wait until the last
@@ -1310,8 +1337,7 @@ class Collection(RichCollectionBase):
self._remember_api_response(self._my_api().collections().get(
uuid=self._manifest_locator).execute(
num_retries=self.num_retries))
- self._manifest_text = self._api_response['manifest_text']
- self._portable_data_hash = self._api_response['portable_data_hash']
+
# If not overriden via kwargs, we should try to load the
# replication_desired and storage_classes_desired from the API server
if self.replication_desired is None:
@@ -1536,8 +1562,6 @@ class Collection(RichCollectionBase):
uuid=self._manifest_locator,
body=body
).execute(num_retries=num_retries))
- self._manifest_text = self._api_response["manifest_text"]
- self._portable_data_hash = self._api_response["portable_data_hash"]
self.set_committed(True)
elif body:
self._remember_api_response(self._my_api().collections().update(
@@ -1656,12 +1680,7 @@ class Collection(RichCollectionBase):
body["preserve_version"] = preserve_version
self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
- text = self._api_response["manifest_text"]
-
self._manifest_locator = self._api_response["uuid"]
- self._portable_data_hash = self._api_response["portable_data_hash"]
-
- self._manifest_text = text
self.set_committed(True)
-        return text
+        return self._manifest_text
@@ -1709,7 +1728,7 @@ class Collection(RichCollectionBase):
block_locator = self._block_re.match(tok)
if block_locator:
blocksize = int(block_locator.group(1))
- blocks.append(Range(tok, streamoffset, blocksize, 0))
+ blocks.append(streams.Range(tok, streamoffset, blocksize, 0))
streamoffset += blocksize
else:
state = SEGMENTS
@@ -1745,6 +1764,7 @@ class Collection(RichCollectionBase):
stream_name = None
state = STREAM_NAME
+ self._update_token_timestamp()
self.set_committed(True)
@synchronized
@@ -1818,7 +1838,7 @@ class Subcollection(RichCollectionBase):
"""Encode empty directories by using an \056-named (".") empty file"""
if len(self._items) == 0:
return "%s %s 0:0:\\056\n" % (
- escape(stream_name), config.EMPTY_BLOCK_LOCATOR)
+ streams.escape(stream_name), config.EMPTY_BLOCK_LOCATOR)
return super(Subcollection, self)._get_manifest_text(stream_name,
strip, normalize,
only_committed)
@@ -1845,468 +1865,3 @@ class CollectionReader(Collection):
def writable(self) -> bool:
return self._in_init
-
- def _populate_streams(orig_func):
- @functools.wraps(orig_func)
- def populate_streams_wrapper(self, *args, **kwargs):
- # Defer populating self._streams until needed since it creates a copy of the manifest.
- if self._streams is None:
- if self._manifest_text:
- self._streams = [sline.split()
- for sline in self._manifest_text.split("\n")
- if sline]
- else:
- self._streams = []
- return orig_func(self, *args, **kwargs)
- return populate_streams_wrapper
-
- @arvados.util._deprecated('3.0', 'Collection iteration')
- @_populate_streams
- def normalize(self):
- """Normalize the streams returned by `all_streams`"""
- streams = {}
- for s in self.all_streams():
- for f in s.all_files():
- streamname, filename = split(s.name() + "/" + f.name())
- if streamname not in streams:
- streams[streamname] = {}
- if filename not in streams[streamname]:
- streams[streamname][filename] = []
- for r in f.segments:
- streams[streamname][filename].extend(s.locators_and_ranges(r.locator, r.range_size))
-
- self._streams = [normalize_stream(s, streams[s])
- for s in sorted(streams)]
-
- @arvados.util._deprecated('3.0', 'Collection iteration')
- @_populate_streams
- def all_streams(self):
- return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
- for s in self._streams]
-
- @arvados.util._deprecated('3.0', 'Collection iteration')
- @_populate_streams
- def all_files(self):
- for s in self.all_streams():
- for f in s.all_files():
- yield f
-
-
-class CollectionWriter(CollectionBase):
- """Create a new collection from scratch
-
- .. WARNING:: Deprecated
- This class is deprecated. Prefer `arvados.collection.Collection`
- instead.
- """
-
- @arvados.util._deprecated('3.0', 'arvados.collection.Collection')
- def __init__(self, api_client=None, num_retries=0, replication=None):
- """Instantiate a CollectionWriter.
-
- CollectionWriter lets you build a new Arvados Collection from scratch.
- Write files to it. The CollectionWriter will upload data to Keep as
- appropriate, and provide you with the Collection manifest text when
- you're finished.
-
- Arguments:
- * api_client: The API client to use to look up Collections. If not
- provided, CollectionReader will build one from available Arvados
- configuration.
- * num_retries: The default number of times to retry failed
- service requests. Default 0. You may change this value
- after instantiation, but note those changes may not
- propagate to related objects like the Keep client.
- * replication: The number of copies of each block to store.
- If this argument is None or not supplied, replication is
- the server-provided default if available, otherwise 2.
- """
- self._api_client = api_client
- self.num_retries = num_retries
- self.replication = (2 if replication is None else replication)
- self._keep_client = None
- self._data_buffer = []
- self._data_buffer_len = 0
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = '.'
- self._current_file_name = None
- self._current_file_pos = 0
- self._finished_streams = []
- self._close_file = None
- self._queued_file = None
- self._queued_dirents = deque()
- self._queued_trees = deque()
- self._last_open = None
-
- def __exit__(self, exc_type, exc_value, traceback):
- if exc_type is None:
- self.finish()
-
- def do_queued_work(self):
- # The work queue consists of three pieces:
- # * _queued_file: The file object we're currently writing to the
- # Collection.
- # * _queued_dirents: Entries under the current directory
- # (_queued_trees[0]) that we want to write or recurse through.
- # This may contain files from subdirectories if
- # max_manifest_depth == 0 for this directory.
- # * _queued_trees: Directories that should be written as separate
- # streams to the Collection.
- # This function handles the smallest piece of work currently queued
- # (current file, then current directory, then next directory) until
- # no work remains. The _work_THING methods each do a unit of work on
- # THING. _queue_THING methods add a THING to the work queue.
- while True:
- if self._queued_file:
- self._work_file()
- elif self._queued_dirents:
- self._work_dirents()
- elif self._queued_trees:
- self._work_trees()
- else:
- break
-
- def _work_file(self):
- while True:
- buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
- if not buf:
- break
- self.write(buf)
- self.finish_current_file()
- if self._close_file:
- self._queued_file.close()
- self._close_file = None
- self._queued_file = None
-
- def _work_dirents(self):
- path, stream_name, max_manifest_depth = self._queued_trees[0]
- if stream_name != self.current_stream_name():
- self.start_new_stream(stream_name)
- while self._queued_dirents:
- dirent = self._queued_dirents.popleft()
- target = os.path.join(path, dirent)
- if os.path.isdir(target):
- self._queue_tree(target,
- os.path.join(stream_name, dirent),
- max_manifest_depth - 1)
- else:
- self._queue_file(target, dirent)
- break
- if not self._queued_dirents:
- self._queued_trees.popleft()
-
- def _work_trees(self):
- path, stream_name, max_manifest_depth = self._queued_trees[0]
- d = arvados.util.listdir_recursive(
- path, max_depth = (None if max_manifest_depth == 0 else 0))
- if d:
- self._queue_dirents(stream_name, d)
- else:
- self._queued_trees.popleft()
-
- def _queue_file(self, source, filename=None):
- assert (self._queued_file is None), "tried to queue more than one file"
- if not hasattr(source, 'read'):
- source = open(source, 'rb')
- self._close_file = True
- else:
- self._close_file = False
- if filename is None:
- filename = os.path.basename(source.name)
- self.start_new_file(filename)
- self._queued_file = source
-
- def _queue_dirents(self, stream_name, dirents):
- assert (not self._queued_dirents), "tried to queue more than one tree"
- self._queued_dirents = deque(sorted(dirents))
-
- def _queue_tree(self, path, stream_name, max_manifest_depth):
- self._queued_trees.append((path, stream_name, max_manifest_depth))
-
- def write_file(self, source, filename=None):
- self._queue_file(source, filename)
- self.do_queued_work()
-
- def write_directory_tree(self,
- path, stream_name='.', max_manifest_depth=-1):
- self._queue_tree(path, stream_name, max_manifest_depth)
- self.do_queued_work()
-
- def write(self, newdata):
- if isinstance(newdata, bytes):
- pass
- elif isinstance(newdata, str):
- newdata = newdata.encode()
- elif hasattr(newdata, '__iter__'):
- for s in newdata:
- self.write(s)
- return
- self._data_buffer.append(newdata)
- self._data_buffer_len += len(newdata)
- self._current_stream_length += len(newdata)
- while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
- self.flush_data()
-
- def open(self, streampath, filename=None):
- """open(streampath[, filename]) -> file-like object
-
- Pass in the path of a file to write to the Collection, either as a
- single string or as two separate stream name and file name arguments.
- This method returns a file-like object you can write to add it to the
- Collection.
-
- You may only have one file object from the Collection open at a time,
- so be sure to close the object when you're done. Using the object in
- a with statement makes that easy:
-
- with cwriter.open('./doc/page1.txt') as outfile:
- outfile.write(page1_data)
- with cwriter.open('./doc/page2.txt') as outfile:
- outfile.write(page2_data)
- """
- if filename is None:
- streampath, filename = split(streampath)
- if self._last_open and not self._last_open.closed:
- raise errors.AssertionError(
- u"can't open '{}' when '{}' is still open".format(
- filename, self._last_open.name))
- if streampath != self.current_stream_name():
- self.start_new_stream(streampath)
- self.set_current_file_name(filename)
- self._last_open = _WriterFile(self, filename)
- return self._last_open
-
- def flush_data(self):
- data_buffer = b''.join(self._data_buffer)
- if data_buffer:
- self._current_stream_locators.append(
- self._my_keep().put(
- data_buffer[0:config.KEEP_BLOCK_SIZE],
- copies=self.replication))
- self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
- self._data_buffer_len = len(self._data_buffer[0])
-
- def start_new_file(self, newfilename=None):
- self.finish_current_file()
- self.set_current_file_name(newfilename)
-
- def set_current_file_name(self, newfilename):
- if re.search(r'[\t\n]', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain whitespace: %s" %
- newfilename)
- elif re.search(r'\x00', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain NUL characters: %s" %
- newfilename)
- self._current_file_name = newfilename
-
- def current_file_name(self):
- return self._current_file_name
-
- def finish_current_file(self):
- if self._current_file_name is None:
- if self._current_file_pos == self._current_stream_length:
- return
- raise errors.AssertionError(
- "Cannot finish an unnamed file " +
- "(%d bytes at offset %d in '%s' stream)" %
- (self._current_stream_length - self._current_file_pos,
- self._current_file_pos,
- self._current_stream_name))
- self._current_stream_files.append([
- self._current_file_pos,
- self._current_stream_length - self._current_file_pos,
- self._current_file_name])
- self._current_file_pos = self._current_stream_length
- self._current_file_name = None
-
- def start_new_stream(self, newstreamname='.'):
- self.finish_current_stream()
- self.set_current_stream_name(newstreamname)
-
- def set_current_stream_name(self, newstreamname):
- if re.search(r'[\t\n]', newstreamname):
- raise errors.AssertionError(
- "Manifest stream names cannot contain whitespace: '%s'" %
- (newstreamname))
- self._current_stream_name = '.' if newstreamname=='' else newstreamname
-
- def current_stream_name(self):
- return self._current_stream_name
-
- def finish_current_stream(self):
- self.finish_current_file()
- self.flush_data()
- if not self._current_stream_files:
- pass
- elif self._current_stream_name is None:
- raise errors.AssertionError(
- "Cannot finish an unnamed stream (%d bytes in %d files)" %
- (self._current_stream_length, len(self._current_stream_files)))
- else:
- if not self._current_stream_locators:
- self._current_stream_locators.append(config.EMPTY_BLOCK_LOCATOR)
- self._finished_streams.append([self._current_stream_name,
- self._current_stream_locators,
- self._current_stream_files])
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = None
- self._current_file_pos = 0
- self._current_file_name = None
-
- def finish(self):
- """Store the manifest in Keep and return its locator.
-
- This is useful for storing manifest fragments (task outputs)
- temporarily in Keep during a Crunch job.
-
- In other cases you should make a collection instead, by
- sending manifest_text() to the API server's "create
- collection" endpoint.
- """
- return self._my_keep().put(self.manifest_text().encode(),
- copies=self.replication)
-
- def portable_data_hash(self):
- stripped = self.stripped_manifest().encode()
- return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
-
- def manifest_text(self):
- self.finish_current_stream()
- manifest = ''
-
- for stream in self._finished_streams:
- if not re.search(r'^\.(/.*)?$', stream[0]):
- manifest += './'
- manifest += stream[0].replace(' ', '\\040')
- manifest += ' ' + ' '.join(stream[1])
- manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
- manifest += "\n"
-
- return manifest
-
- def data_locators(self):
- ret = []
- for name, locators, files in self._finished_streams:
- ret += locators
- return ret
-
- def save_new(self, name=None):
- return self._api_client.collections().create(
- ensure_unique_name=True,
- body={
- 'name': name,
- 'manifest_text': self.manifest_text(),
- }).execute(num_retries=self.num_retries)
-
-
-class ResumableCollectionWriter(CollectionWriter):
- """CollectionWriter that can serialize internal state to disk
-
- .. WARNING:: Deprecated
- This class is deprecated. Prefer `arvados.collection.Collection`
- instead.
- """
-
- STATE_PROPS = ['_current_stream_files', '_current_stream_length',
- '_current_stream_locators', '_current_stream_name',
- '_current_file_name', '_current_file_pos', '_close_file',
- '_data_buffer', '_dependencies', '_finished_streams',
- '_queued_dirents', '_queued_trees']
-
- @arvados.util._deprecated('3.0', 'arvados.collection.Collection')
- def __init__(self, api_client=None, **kwargs):
- self._dependencies = {}
- super(ResumableCollectionWriter, self).__init__(api_client, **kwargs)
-
- @classmethod
- def from_state(cls, state, *init_args, **init_kwargs):
- # Try to build a new writer from scratch with the given state.
- # If the state is not suitable to resume (because files have changed,
- # been deleted, aren't predictable, etc.), raise a
- # StaleWriterStateError. Otherwise, return the initialized writer.
- # The caller is responsible for calling writer.do_queued_work()
- # appropriately after it's returned.
- writer = cls(*init_args, **init_kwargs)
- for attr_name in cls.STATE_PROPS:
- attr_value = state[attr_name]
- attr_class = getattr(writer, attr_name).__class__
- # Coerce the value into the same type as the initial value, if
- # needed.
- if attr_class not in (type(None), attr_value.__class__):
- attr_value = attr_class(attr_value)
- setattr(writer, attr_name, attr_value)
- # Check dependencies before we try to resume anything.
- if any(KeepLocator(ls).permission_expired()
- for ls in writer._current_stream_locators):
- raise errors.StaleWriterStateError(
- "locators include expired permission hint")
- writer.check_dependencies()
- if state['_current_file'] is not None:
- path, pos = state['_current_file']
- try:
- writer._queued_file = open(path, 'rb')
- writer._queued_file.seek(pos)
- except IOError as error:
- raise errors.StaleWriterStateError(
- u"failed to reopen active file {}: {}".format(path, error))
- return writer
-
- def check_dependencies(self):
- for path, orig_stat in listitems(self._dependencies):
- if not S_ISREG(orig_stat[ST_MODE]):
- raise errors.StaleWriterStateError(u"{} not file".format(path))
- try:
- now_stat = tuple(os.stat(path))
- except OSError as error:
- raise errors.StaleWriterStateError(
- u"failed to stat {}: {}".format(path, error))
- if ((not S_ISREG(now_stat[ST_MODE])) or
- (orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
- (orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
- raise errors.StaleWriterStateError(u"{} changed".format(path))
-
- def dump_state(self, copy_func=lambda x: x):
- state = {attr: copy_func(getattr(self, attr))
- for attr in self.STATE_PROPS}
- if self._queued_file is None:
- state['_current_file'] = None
- else:
- state['_current_file'] = (os.path.realpath(self._queued_file.name),
- self._queued_file.tell())
- return state
-
- def _queue_file(self, source, filename=None):
- try:
- src_path = os.path.realpath(source)
- except Exception:
- raise errors.AssertionError(u"{} not a file path".format(source))
- try:
- path_stat = os.stat(src_path)
- except OSError as stat_error:
- path_stat = None
- super(ResumableCollectionWriter, self)._queue_file(source, filename)
- fd_stat = os.fstat(self._queued_file.fileno())
- if not S_ISREG(fd_stat.st_mode):
- # We won't be able to resume from this cache anyway, so don't
- # worry about further checks.
- self._dependencies[source] = tuple(fd_stat)
- elif path_stat is None:
- raise errors.AssertionError(
- u"could not stat {}: {}".format(source, stat_error))
- elif path_stat.st_ino != fd_stat.st_ino:
- raise errors.AssertionError(
- u"{} changed between open and stat calls".format(source))
- else:
- self._dependencies[src_path] = tuple(fd_stat)
-
- def write(self, data):
- if self._queued_file is None:
- raise errors.AssertionError(
- "resumable writer can't accept unsourced data")
- return super(ResumableCollectionWriter, self).write(data)
diff --git a/sdk/python/arvados/commands/__init__.py b/sdk/python/arvados/commands/__init__.py
index e69de29bb2..2ec1a43dff 100644
--- a/sdk/python/arvados/commands/__init__.py
+++ b/sdk/python/arvados/commands/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Arvados CLI commands
+
+This module implements the CLI tools that are shipped with the Arvados Python
+SDK. Nothing in this module is intended to be part of the public-facing
+SDK API. Classes and functions in this module may be changed or removed at any
+time.
+
+@private
+"""
diff --git a/sdk/python/arvados/commands/_util.py b/sdk/python/arvados/commands/_util.py
index 6c792b2e0d..aff9689971 100644
--- a/sdk/python/arvados/commands/_util.py
+++ b/sdk/python/arvados/commands/_util.py
@@ -3,14 +3,20 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
+import dataclasses
import errno
import json
import logging
+import operator
import os
import re
import signal
import sys
+import typing as t
+
+from .. import _internal
+
FILTER_STR_RE = re.compile(r'''
^\(
\ *(\w+)
@@ -19,16 +25,41 @@ FILTER_STR_RE = re.compile(r'''
\ *\)$
''', re.ASCII | re.VERBOSE)
-def _pos_int(s):
- num = int(s)
- if num < 0:
- raise ValueError("can't accept negative value: %s" % (num,))
- return num
+T = t.TypeVar('T')
+
+@dataclasses.dataclass(unsafe_hash=True)
+class RangedValue(t.Generic[T]):
+ """Validate that an argument string is within a valid range of values"""
+ parse_func: t.Callable[[str], T]
+ valid_range: t.Container[T]
+
+ def __call__(self, s: str) -> T:
+ value = self.parse_func(s)
+ if value in self.valid_range:
+ return value
+ else:
+ raise ValueError(f"{value!r} is not a valid value")
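+
+    # A hedged example: RangedValue(int, range(0, sys.maxsize)) parses '10'
+    # to 10, and rejects '-1' with ValueError because -1 is out of range.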
+
+
+@dataclasses.dataclass(unsafe_hash=True)
+class UniqueSplit(t.Generic[T]):
+ """Parse a string into a list of unique values"""
+ split: t.Callable[[str], t.Iterable[str]]=operator.methodcaller('split', ',')
+ clean: t.Callable[[str], str]=operator.methodcaller('strip')
+ check: t.Callable[[str], bool]=bool
+
+    def __call__(self, s: str) -> t.List[str]:
+ return list(_internal.uniq(_internal.parse_seq(s, self.split, self.clean, self.check)))
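+
+    # A hedged example, assuming _internal.parse_seq and _internal.uniq
+    # filter out empty values and de-duplicate while preserving order:
+    # UniqueSplit()('a, b, ,a') -> ['a', 'b']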
+
retry_opt = argparse.ArgumentParser(add_help=False)
-retry_opt.add_argument('--retries', type=_pos_int, default=10, help="""
-Maximum number of times to retry server requests that encounter temporary
-failures (e.g., server down). Default 10.""")
+retry_opt.add_argument(
+ '--retries',
+ type=RangedValue(int, range(0, sys.maxsize)),
+ default=10,
+ help="""Maximum number of times to retry server requests that encounter
+temporary failures (e.g., server down). Default %(default)r.
+""")
def _ignore_error(error):
return None
@@ -36,28 +67,6 @@ def _ignore_error(error):
def _raise_error(error):
raise error
-def make_home_conf_dir(path, mode=None, errors='ignore'):
- # Make the directory path under the user's home directory, making parent
- # directories as needed.
- # If the directory is newly created, and a mode is specified, chmod it
- # with those permissions.
- # If there's an error, return None if errors is 'ignore', else raise an
- # exception.
- error_handler = _ignore_error if (errors == 'ignore') else _raise_error
- tilde_path = os.path.join('~', path)
- abs_path = os.path.expanduser(tilde_path)
- if abs_path == tilde_path:
- return error_handler(ValueError("no home directory available"))
- try:
- os.makedirs(abs_path)
- except OSError as error:
- if error.errno != errno.EEXIST:
- return error_handler(error)
- else:
- if mode is not None:
- os.chmod(abs_path, mode)
- return abs_path
-
CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
def exit_signal_handler(sigcode, frame):
diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
index 7f5245db86..9914cc3ef6 100755
--- a/sdk/python/arvados/commands/arv_copy.py
+++ b/sdk/python/arvados/commands/arv_copy.py
@@ -13,17 +13,11 @@
# --no-recursive is given, arv-copy copies only the single record
# identified by object-uuid.
#
-# The user must have files $HOME/.config/arvados/{src}.conf and
-# $HOME/.config/arvados/{dst}.conf with valid login credentials for
-# instances src and dst. If either of these files is not found,
+# The user must have configuration files {src}.conf and
+# {dst}.conf in a standard configuration directory with valid login credentials
+# for instances src and dst. If either of these files is not found,
# arv-copy will issue an error.
-from __future__ import division
-from future import standard_library
-from future.utils import listvalues
-standard_library.install_aliases()
-from past.builtins import basestring
-from builtins import object
import argparse
import contextlib
import getpass
@@ -39,6 +33,10 @@ import io
import json
import queue
import threading
+import errno
+
+import httplib2.error
+import googleapiclient
import arvados
import arvados.config
@@ -46,15 +44,20 @@ import arvados.keep
import arvados.util
import arvados.commands._util as arv_cmd
import arvados.commands.keepdocker
-import arvados.http_to_keep
-import ruamel.yaml as yaml
+from arvados.logging import log_handler
+from arvados._internal import basedirs, http_to_keep, s3_to_keep, to_keep_util
from arvados._version import __version__
COMMIT_HASH_RE = re.compile(r'^[0-9a-f]{1,40}$')
+arvlogger = logging.getLogger('arvados')
+keeplogger = logging.getLogger('arvados.keep')
logger = logging.getLogger('arvados.arv-copy')
+# Set this up so connection errors get logged.
+googleapi_logger = logging.getLogger('googleapiclient.http')
+
# local_repo_dir records which git repositories from the Arvados source
# instance have been checked out locally during this run, and to which
# directories.
@@ -93,10 +96,22 @@ def main():
help='Perform copy even if the object appears to exist at the remote destination.')
copy_opts.add_argument(
'--src', dest='source_arvados',
- help='The cluster id of the source Arvados instance. May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf. If not provided, will be inferred from the UUID of the object being copied.')
+ help="""
+Client configuration location for the source Arvados cluster.
+May be either a configuration file path, or a plain identifier like `foo`
+to search for a configuration file `foo.conf` under a systemd or XDG configuration directory.
+If not provided, will search for a configuration file named after the cluster ID of the source object UUID.
+""",
+ )
copy_opts.add_argument(
'--dst', dest='destination_arvados',
- help='The name of the destination Arvados instance (required). May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf. If not provided, will use ARVADOS_API_HOST from environment.')
+ help="""
+Client configuration location for the destination Arvados cluster.
+May be either a configuration file path, or a plain identifier like `foo`
+to search for a configuration file `foo.conf` under a systemd or XDG configuration directory.
+If not provided, will use the default client configuration from the environment or `settings.conf`.
+""",
+ )
copy_opts.add_argument(
'--recursive', dest='recursive', action='store_true',
help='Recursively copy any dependencies for this object, and subprojects. (default)')
@@ -107,7 +122,18 @@ def main():
'--project-uuid', dest='project_uuid',
help='The UUID of the project at the destination to which the collection or workflow should be copied.')
copy_opts.add_argument(
- '--storage-classes', dest='storage_classes',
+ '--replication',
+ type=arv_cmd.RangedValue(int, range(1, sys.maxsize)),
+ metavar='N',
+ help="""
+Number of replicas per storage class for the copied collections at the destination.
+If not provided (or provided with an invalid value),
+use the destination's default replication-level setting (if found),
+or the fallback value 2.
+""")
+ copy_opts.add_argument(
+ '--storage-classes',
+ type=arv_cmd.UniqueSplit(),
help='Comma separated list of storage classes to be used when saving data to the destination Arvados instance.')
copy_opts.add_argument("--varying-url-params", type=str, default="",
help="A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.")
@@ -126,21 +152,38 @@ def main():
parents=[copy_opts, arv_cmd.retry_opt])
args = parser.parse_args()
- if args.storage_classes:
- args.storage_classes = [x for x in args.storage_classes.strip().replace(' ', '').split(',') if x]
-
if args.verbose:
- logger.setLevel(logging.DEBUG)
+ arvlogger.setLevel(logging.DEBUG)
else:
- logger.setLevel(logging.INFO)
+ arvlogger.setLevel(logging.INFO)
+ keeplogger.setLevel(logging.WARNING)
if not args.source_arvados and arvados.util.uuid_pattern.match(args.object_uuid):
args.source_arvados = args.object_uuid[:5]
+ if not args.destination_arvados and args.project_uuid:
+ args.destination_arvados = args.project_uuid[:5]
+
+ # Make sure errors trying to connect to clusters get logged.
+ googleapi_logger.setLevel(logging.WARN)
+ googleapi_logger.addHandler(log_handler)
+
# Create API clients for the source and destination instances
src_arv = api_for_instance(args.source_arvados, args.retries)
dst_arv = api_for_instance(args.destination_arvados, args.retries)
+ # Once we've successfully contacted the clusters, we probably
+ # don't want to see logging about retries (unless the user asked
+ # for verbose output).
+ if not args.verbose:
+ googleapi_logger.setLevel(logging.ERROR)
+
+ if src_arv.config()["ClusterID"] == dst_arv.config()["ClusterID"]:
+ logger.info("Copying within cluster %s", src_arv.config()["ClusterID"])
+ else:
+ logger.info("Source cluster is %s", src_arv.config()["ClusterID"])
+ logger.info("Destination cluster is %s", dst_arv.config()["ClusterID"])
+
if not args.project_uuid:
args.project_uuid = dst_arv.users().current().execute(num_retries=args.retries)["uuid"]
@@ -159,8 +202,8 @@ def main():
elif t == 'Group':
set_src_owner_uuid(src_arv.groups(), args.object_uuid, args)
result = copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, args)
- elif t == 'httpURL':
- result = copy_from_http(args.object_uuid, src_arv, dst_arv, args)
+ elif t == 'httpURL' or t == 's3URL':
+ result = copy_from_url(args.object_uuid, src_arv, dst_arv, args)
else:
abort("cannot copy object {} of type {}".format(args.object_uuid, t))
except Exception as e:
@@ -168,7 +211,7 @@ def main():
exit(1)
# Clean up any outstanding temp git repositories.
- for d in listvalues(local_repo_dir):
+ for d in local_repo_dir.values():
shutil.rmtree(d, ignore_errors=True)
if not result:
@@ -204,41 +247,68 @@ def set_src_owner_uuid(resource, uuid, args):
# (either local or absolute) to a file with Arvados configuration
# settings.
#
-# Otherwise, it is presumed to be the name of a file in
-# $HOME/.config/arvados/instance_name.conf
+# Otherwise, it is presumed to be an instance name, and the standard
+# configuration directories are searched for "{instance_name}.conf".
#
def api_for_instance(instance_name, num_retries):
- if not instance_name:
- # Use environment
- return arvados.api('v1')
-
- if '/' in instance_name:
- config_file = instance_name
- else:
- config_file = os.path.join(os.environ['HOME'], '.config', 'arvados', "{}.conf".format(instance_name))
+ msg = []
+ if instance_name:
+ if '/' in instance_name:
+ config_file = instance_name
+ else:
+ dirs = basedirs.BaseDirectories('CONFIG')
+ config_file = next(dirs.search(f'{instance_name}.conf'), '')
+ try:
+ cfg = arvados.config.load(config_file)
+
+ if 'ARVADOS_API_HOST' in cfg and 'ARVADOS_API_TOKEN' in cfg:
+ api_is_insecure = (
+ cfg.get('ARVADOS_API_HOST_INSECURE', '').lower() in set(
+ ['1', 't', 'true', 'y', 'yes']))
+ return arvados.api('v1',
+ host=cfg['ARVADOS_API_HOST'],
+ token=cfg['ARVADOS_API_TOKEN'],
+ insecure=api_is_insecure,
+ num_retries=num_retries,
+ )
+ else:
+ msg.append('missing ARVADOS_API_HOST or ARVADOS_API_TOKEN for {} in config file {}'.format(instance_name, config_file))
+ except OSError as e:
+ if e.errno in (errno.EHOSTUNREACH, errno.ECONNREFUSED, errno.ECONNRESET, errno.ENETUNREACH):
+ verb = 'connect to instance from'
+ elif config_file:
+ verb = 'open'
+ else:
+ verb = 'find'
+ searchlist = ":".join(str(p) for p in dirs.search_paths())
+ config_file = f'{instance_name}.conf in path {searchlist}'
+ msg.append(("Could not {} config file {}: {}").format(
+ verb, config_file, e.strerror))
+ except (httplib2.error.HttpLib2Error, googleapiclient.errors.Error) as e:
+ msg.append("Failed to connect to instance {} at {}, error was {}".format(instance_name, cfg['ARVADOS_API_HOST'], e))
+
+ default_api = None
+ default_instance = None
try:
- cfg = arvados.config.load(config_file)
- except (IOError, OSError) as e:
- abort(("Could not open config file {}: {}\n" +
- "You must make sure that your configuration tokens\n" +
- "for Arvados instance {} are in {} and that this\n" +
- "file is readable.").format(
- config_file, e, instance_name, config_file))
-
- if 'ARVADOS_API_HOST' in cfg and 'ARVADOS_API_TOKEN' in cfg:
- api_is_insecure = (
- cfg.get('ARVADOS_API_HOST_INSECURE', '').lower() in set(
- ['1', 't', 'true', 'y', 'yes']))
- client = arvados.api('v1',
- host=cfg['ARVADOS_API_HOST'],
- token=cfg['ARVADOS_API_TOKEN'],
- insecure=api_is_insecure,
- num_retries=num_retries,
- )
- else:
- abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
- return client
+ default_api = arvados.api('v1', num_retries=num_retries)
+ default_instance = default_api.config()["ClusterID"]
+ except ValueError:
+ pass
+ except (httplib2.error.HttpLib2Error, googleapiclient.errors.Error, OSError) as e:
+ msg.append("Failed to connect to default instance, error was {}".format(e))
+
+ if default_api is not None and (not instance_name or instance_name == default_instance):
+ # Use default settings
+ return default_api
+
+ if instance_name and default_instance and instance_name != default_instance:
+ msg.append("Default credentials are for {} but need to connect to {}".format(default_instance, instance_name))
+
+ for m in msg:
+ logger.error(m)
+
+ abort('Unable to find usable ARVADOS_API_HOST and ARVADOS_API_TOKEN')
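The search logic above leans on the internal BaseDirectories helper. A compact illustration of the lookup it performs, mirroring the code in api_for_instance (BaseDirectories lives in the private arvados._internal.basedirs module, so treat this as a sketch rather than supported API):

    from arvados._internal import basedirs

    dirs = basedirs.BaseDirectories('CONFIG')
    # search() yields candidate paths in search order; '' when nothing matches.
    config_file = next(dirs.search('foo.conf'), '')
    if not config_file:
        searched = ':'.join(str(p) for p in dirs.search_paths())
        print(f'foo.conf not found in {searched}')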
# Check if git is available
def check_git_availability():
@@ -258,10 +328,10 @@ def filter_iter(arg):
Pass in a filter field that can either be a string or list.
This will iterate elements as if the field had been written as a list.
"""
- if isinstance(arg, basestring):
- return iter((arg,))
+ if isinstance(arg, str):
+ yield arg
else:
- return iter(arg)
+ yield from arg
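A quick illustration of the generator semantics after this rewrite: a bare string is treated as a one-element filter, anything else is iterated through.

    assert list(filter_iter('collection')) == ['collection']
    assert list(filter_iter(['a', 'b'])) == ['a', 'b']

The body now runs lazily rather than returning an iterator eagerly, but callers that simply consume the result as an iterable see no difference.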
def migrate_repository_filter(repo_filter, src_repository, dst_repository):
"""Update a single repository filter in-place for the destination.
@@ -331,8 +401,12 @@ def copy_workflow(wf_uuid, src, dst, args):
"ARVADOS_API_TOKEN": src.api_token,
"PATH": os.environ["PATH"]}
try:
- result = subprocess.run(["arvados-cwl-runner", "--quiet", "--print-keep-deps", "arvwf:"+wf_uuid],
- capture_output=True, env=env)
+ result = subprocess.run(
+ ["arvados-cwl-runner", "--quiet", "--print-keep-deps", "arvwf:"+wf_uuid],
+ env=env,
+ stdout=subprocess.PIPE,
+ universal_newlines=True,
+ )
except FileNotFoundError:
no_arv_copy = True
else:
@@ -409,7 +483,7 @@ def copy_collections(obj, src, dst, args):
collections_copied[src_id] = dst_col['uuid']
return collections_copied[src_id]
- if isinstance(obj, basestring):
+ if isinstance(obj, str):
# Copy any collections identified in this string to dst, replacing
# them with the dst uuids as necessary.
obj = arvados.util.portable_data_hash_pattern.sub(copy_collection_fn, obj)
@@ -572,6 +646,14 @@ def copy_collection(obj_uuid, src, dst, args):
).execute(num_retries=args.retries)['manifest_text']
return create_collection_from(c, src, dst, args)
+ if args.replication is None:
+ # Obtain default or fallback collection replication setting on the
+ # destination
+ try:
+ args.replication = int(dst.config()["Collections"]["DefaultReplication"])
+ except (KeyError, TypeError, ValueError):
+ args.replication = 2
+
# Fetch the collection's manifest.
manifest = c['manifest_text']
logger.debug("Copying collection %s with manifest: <%s>", obj_uuid, manifest)
@@ -631,7 +713,7 @@ def copy_collection(obj_uuid, src, dst, args):
logger.debug("Getting block %s", word)
data = src_keep.get(word)
put_queue.put((word, data))
- except e:
+ except Exception as e:
logger.error("Error getting block %s: %s", word, e)
transfer_error.append(e)
try:
@@ -663,13 +745,13 @@ def copy_collection(obj_uuid, src, dst, args):
try:
logger.debug("Putting block %s (%s bytes)", blockhash, loc.size)
- dst_locator = dst_keep.put(data, classes=(args.storage_classes or []))
+ dst_locator = dst_keep.put(data, copies=args.replication, classes=(args.storage_classes or []))
with lock:
dst_locators[blockhash] = dst_locator
bytes_written += loc.size
if progress_writer:
progress_writer.report(obj_uuid, bytes_written, bytes_expected)
- except e:
+ except Exception as e:
logger.error("Error putting block %s (%s bytes): %s", blockhash, loc.size, e)
try:
# Drain the 'get' queue so we end early
@@ -736,58 +818,6 @@ def copy_collection(obj_uuid, src, dst, args):
c['manifest_text'] = dst_manifest.getvalue()
return create_collection_from(c, src, dst, args)
-def select_git_url(api, repo_name, retries, allow_insecure_http, allow_insecure_http_opt):
- r = api.repositories().list(
- filters=[['name', '=', repo_name]]).execute(num_retries=retries)
- if r['items_available'] != 1:
- raise Exception('cannot identify repo {}; {} repos found'
- .format(repo_name, r['items_available']))
-
- https_url = [c for c in r['items'][0]["clone_urls"] if c.startswith("https:")]
- http_url = [c for c in r['items'][0]["clone_urls"] if c.startswith("http:")]
- other_url = [c for c in r['items'][0]["clone_urls"] if not c.startswith("http")]
-
- priority = https_url + other_url + http_url
-
- for url in priority:
- if url.startswith("http"):
- u = urllib.parse.urlsplit(url)
- baseurl = urllib.parse.urlunsplit((u.scheme, u.netloc, "", "", ""))
- git_config = ["-c", "credential.%s/.username=none" % baseurl,
- "-c", "credential.%s/.helper=!cred(){ cat >/dev/null; if [ \"$1\" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred" % baseurl]
- else:
- git_config = []
-
- try:
- logger.debug("trying %s", url)
- subprocess.run(
- ['git', *git_config, 'ls-remote', url],
- check=True,
- env={
- 'ARVADOS_API_TOKEN': api.api_token,
- 'GIT_ASKPASS': '/bin/false',
- 'HOME': os.environ['HOME'],
- },
- stdout=subprocess.DEVNULL,
- )
- except subprocess.CalledProcessError:
- pass
- else:
- git_url = url
- break
- else:
- raise Exception('Cannot access git repository, tried {}'
- .format(priority))
-
- if git_url.startswith("http:"):
- if allow_insecure_http:
- logger.warning("Using insecure git url %s but will allow this because %s", git_url, allow_insecure_http_opt)
- else:
- raise Exception("Refusing to use insecure git url %s, use %s if you really want this." % (git_url, allow_insecure_http_opt))
-
- return (git_url, git_config)
-
-
def copy_docker_image(docker_image, docker_image_tag, src, dst, args):
"""Copy the docker image identified by docker_image and
docker_image_tag from src to dst. Create appropriate
@@ -894,6 +924,9 @@ def uuid_type(api, object_uuid):
if object_uuid.startswith("http:") or object_uuid.startswith("https:"):
return 'httpURL'
+ if object_uuid.startswith("s3:"):
+ return 's3URL'
+
p = object_uuid.split('-')
if len(p) == 3:
type_prefix = p[1]
@@ -904,21 +937,34 @@ def uuid_type(api, object_uuid):
return None
-def copy_from_http(url, src, dst, args):
+def copy_from_url(url, src, dst, args):
project_uuid = args.project_uuid
- varying_url_params = args.varying_url_params
+    # args.varying_url_params is forwarded as-is; the HTTP cache helpers parse it.
prefer_cached_downloads = args.prefer_cached_downloads
- cached = arvados.http_to_keep.check_cached_url(src, project_uuid, url, {},
- varying_url_params=varying_url_params,
- prefer_cached_downloads=prefer_cached_downloads)
+ cached = to_keep_util.CheckCacheResult(None, None, None, None, None)
+
+ if url.startswith("http:") or url.startswith("https:"):
+ cached = http_to_keep.check_cached_url(src, project_uuid, url, {},
+ varying_url_params=args.varying_url_params,
+ prefer_cached_downloads=prefer_cached_downloads)
+ elif url.startswith("s3:"):
+ import boto3.session
+ botosession = boto3.session.Session()
+ cached = s3_to_keep.check_cached_url(src, botosession, project_uuid, url, {},
+ prefer_cached_downloads=prefer_cached_downloads)
+
if cached[2] is not None:
return copy_collection(cached[2], src, dst, args)
- cached = arvados.http_to_keep.http_to_keep(dst, project_uuid, url,
- varying_url_params=varying_url_params,
- prefer_cached_downloads=prefer_cached_downloads)
+ if url.startswith("http:") or url.startswith("https:"):
+ cached = http_to_keep.http_to_keep(dst, project_uuid, url,
+ varying_url_params=args.varying_url_params,
+ prefer_cached_downloads=prefer_cached_downloads)
+ elif url.startswith("s3:"):
+ cached = s3_to_keep.s3_to_keep(dst, botosession, project_uuid, url,
+ prefer_cached_downloads=prefer_cached_downloads)
if cached is not None:
return {"uuid": cached[2]}
diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
index 6823ee1bea..1abc9caad3 100644
--- a/sdk/python/arvados/commands/keepdocker.py
+++ b/sdk/python/arvados/commands/keepdocker.py
@@ -18,6 +18,7 @@ import tempfile
import ciso8601
from operator import itemgetter
+from pathlib import Path
from stat import *
import arvados
@@ -25,7 +26,12 @@ import arvados.config
import arvados.util
import arvados.commands._util as arv_cmd
import arvados.commands.put as arv_put
+
+from arvados._internal import basedirs
from arvados._version import __version__
+from typing import (
+ Callable,
+)
logger = logging.getLogger('arvados.keepdocker')
logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
@@ -181,9 +187,12 @@ def save_image(image_hash, image_file):
except STAT_CACHE_ERRORS:
pass # We won't resume from this cache. No big deal.
-def get_cache_dir():
- return arv_cmd.make_home_conf_dir(
- os.path.join('.cache', 'arvados', 'docker'), 0o700)
+def get_cache_dir(
+ mkparent: Callable[[], Path]=basedirs.BaseDirectories('CACHE').storage_path,
+) -> str:
+ path = mkparent() / 'docker'
+ path.mkdir(mode=0o700, exist_ok=True)
+ return str(path)
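Making mkparent an injectable callable means tests can point the Docker image cache at a throwaway directory without touching environment variables. A minimal sketch of such a test (the test name is illustrative):

    import pathlib
    import tempfile

    def test_get_cache_dir_uses_injected_parent():
        tmp = pathlib.Path(tempfile.mkdtemp())
        # get_cache_dir creates and returns <parent>/docker with mode 0o700.
        assert get_cache_dir(mkparent=lambda: tmp) == str(tmp / 'docker')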
def prep_image_file(filename):
# Return a file object ready to save a Docker image,
diff --git a/sdk/python/arvados/commands/ls.py b/sdk/python/arvados/commands/ls.py
index ac038f5040..d67f5cc453 100644
--- a/sdk/python/arvados/commands/ls.py
+++ b/sdk/python/arvados/commands/ls.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import division
-
import argparse
import collections
import logging
diff --git a/sdk/python/arvados/commands/migrate19.py b/sdk/python/arvados/commands/migrate19.py
deleted file mode 100644
index 2fef419ee8..0000000000
--- a/sdk/python/arvados/commands/migrate19.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import print_function
-from __future__ import division
-import argparse
-import time
-import sys
-import logging
-import shutil
-import tempfile
-import os
-import subprocess
-import re
-
-import arvados
-import arvados.commands.keepdocker
-from arvados._version import __version__
-from arvados.collection import CollectionReader
-from .. import util
-
-logger = logging.getLogger('arvados.migrate-docker19')
-logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
- else logging.INFO)
-
-_migration_link_class = 'docker_image_migration'
-_migration_link_name = 'migrate_1.9_1.10'
-
-class MigrationFailed(Exception):
- pass
-
-@util._deprecated('3.0')
-def main(arguments=None):
- """Docker image format migration tool for Arvados.
-
- This converts Docker images stored in Arvados from image format v1
- (Docker <= 1.9) to image format v2 (Docker >= 1.10).
-
- Requires Docker running on the local host.
-
- Usage:
-
- 1) Run arvados/docker/migrate-docker19/build.sh to create
- arvados/migrate-docker19 Docker image.
-
- 2) Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
-
- 3) Run arv-migrate-docker19 from the Arvados Python SDK on the host (not in a container).
-
- This will query Arvados for v1 format Docker images. For each image that
- does not already have a corresponding v2 format image (as indicated by a
- docker_image_migration tag) it will perform the following process:
-
- i) download the image from Arvados
- ii) load it into Docker
- iii) update the Docker version, which updates the image
- iv) save the v2 format image and upload to Arvados
- v) create a migration link
-
- """
-
- migrate19_parser = argparse.ArgumentParser()
- migrate19_parser.add_argument(
- '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
- help='Print version and exit.')
- migrate19_parser.add_argument(
- '--verbose', action="store_true", help="Print stdout/stderr even on success")
- migrate19_parser.add_argument(
- '--force', action="store_true", help="Try to migrate even if there isn't enough space")
-
- migrate19_parser.add_argument(
- '--storage-driver', type=str, default="overlay",
- help="Docker storage driver, e.g. aufs, overlay, vfs")
-
- exgroup = migrate19_parser.add_mutually_exclusive_group()
- exgroup.add_argument(
- '--dry-run', action='store_true', help="Print number of pending migrations.")
- exgroup.add_argument(
- '--print-unmigrated', action='store_true',
- default=False, help="Print list of images needing migration.")
-
- migrate19_parser.add_argument('--tempdir', help="Set temporary directory")
-
- migrate19_parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
- default=None, help="List of images to be migrated")
-
- args = migrate19_parser.parse_args(arguments)
-
- if args.tempdir:
- tempfile.tempdir = args.tempdir
-
- if args.verbose:
- logger.setLevel(logging.DEBUG)
-
- only_migrate = None
- if args.infile:
- only_migrate = set()
- for l in args.infile:
- only_migrate.add(l.strip())
-
- api_client = arvados.api()
-
- user = api_client.users().current().execute()
- if not user['is_admin']:
- raise Exception("This command requires an admin token")
- sys_uuid = user['uuid'][:12] + '000000000000000'
-
- images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3)
-
- is_new = lambda img: img['dockerhash'].startswith('sha256:')
-
- count_new = 0
- old_images = []
- for uuid, img in images:
- if img["dockerhash"].startswith("sha256:"):
- continue
- key = (img["repo"], img["tag"], img["timestamp"])
- old_images.append(img)
-
- migration_links = arvados.util.list_all(api_client.links().list, filters=[
- ['link_class', '=', _migration_link_class],
- ['name', '=', _migration_link_name],
- ])
-
- already_migrated = set()
- for m in migration_links:
- already_migrated.add(m["tail_uuid"])
-
- items = arvados.util.list_all(api_client.collections().list,
- filters=[["uuid", "in", [img["collection"] for img in old_images]]],
- select=["uuid", "portable_data_hash", "manifest_text", "owner_uuid"])
- uuid_to_collection = {i["uuid"]: i for i in items}
-
- need_migrate = {}
- totalbytes = 0
- biggest = 0
- biggest_pdh = None
- for img in old_images:
- i = uuid_to_collection[img["collection"]]
- pdh = i["portable_data_hash"]
- if pdh not in already_migrated and pdh not in need_migrate and (only_migrate is None or pdh in only_migrate):
- need_migrate[pdh] = img
- with CollectionReader(i["manifest_text"]) as c:
- size = list(c.values())[0].size()
- if size > biggest:
- biggest = size
- biggest_pdh = pdh
- totalbytes += size
-
-
- if args.storage_driver == "vfs":
- will_need = (biggest*20)
- else:
- will_need = (biggest*2.5)
-
- if args.print_unmigrated:
- only_migrate = set()
- for pdh in need_migrate:
- print(pdh)
- return
-
- logger.info("Already migrated %i images", len(already_migrated))
- logger.info("Need to migrate %i images", len(need_migrate))
- logger.info("Using tempdir %s", tempfile.gettempdir())
- logger.info("Biggest image %s is about %i MiB", biggest_pdh, biggest>>20)
- logger.info("Total data to migrate about %i MiB", totalbytes>>20)
-
- df_out = subprocess.check_output(["df", "-B1", tempfile.gettempdir()])
- ln = df_out.splitlines()[1]
- filesystem, blocks, used, available, use_pct, mounted = re.match(r"^([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+)", ln).groups(1)
- if int(available) <= will_need:
- logger.warn("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, int(will_need)>>20)
- if not args.force:
- exit(1)
- else:
- logger.warn("--force provided, will migrate anyway")
-
- if args.dry_run:
- return
-
- success = []
- failures = []
- count = 1
- for old_image in list(need_migrate.values()):
- if uuid_to_collection[old_image["collection"]]["portable_data_hash"] in already_migrated:
- continue
-
- oldcol = CollectionReader(uuid_to_collection[old_image["collection"]]["manifest_text"])
- tarfile = list(oldcol.keys())[0]
-
- logger.info("[%i/%i] Migrating %s:%s (%s) (%i MiB)", count, len(need_migrate), old_image["repo"],
- old_image["tag"], old_image["collection"], list(oldcol.values())[0].size()>>20)
- count += 1
- start = time.time()
-
- varlibdocker = tempfile.mkdtemp()
- dockercache = tempfile.mkdtemp()
- try:
- with tempfile.NamedTemporaryFile() as envfile:
- envfile.write("ARVADOS_API_HOST=%s\n" % (arvados.config.get("ARVADOS_API_HOST")))
- envfile.write("ARVADOS_API_TOKEN=%s\n" % (arvados.config.get("ARVADOS_API_TOKEN")))
- if arvados.config.get("ARVADOS_API_HOST_INSECURE"):
- envfile.write("ARVADOS_API_HOST_INSECURE=%s\n" % (arvados.config.get("ARVADOS_API_HOST_INSECURE")))
- envfile.flush()
-
- dockercmd = ["docker", "run",
- "--privileged",
- "--rm",
- "--env-file", envfile.name,
- "--volume", "%s:/var/lib/docker" % varlibdocker,
- "--volume", "%s:/root/.cache/arvados/docker" % dockercache,
- "arvados/migrate-docker19:1.0",
- "/root/migrate.sh",
- "%s/%s" % (old_image["collection"], tarfile),
- tarfile[0:40],
- old_image["repo"],
- old_image["tag"],
- uuid_to_collection[old_image["collection"]]["owner_uuid"],
- args.storage_driver]
-
- proc = subprocess.Popen(dockercmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = proc.communicate()
-
- initial_space = re.search(r"Initial available space is (\d+)", out)
- imgload_space = re.search(r"Available space after image load is (\d+)", out)
- imgupgrade_space = re.search(r"Available space after image upgrade is (\d+)", out)
- keepdocker_space = re.search(r"Available space after arv-keepdocker is (\d+)", out)
- cleanup_space = re.search(r"Available space after cleanup is (\d+)", out)
-
- if initial_space:
- isp = int(initial_space.group(1))
- logger.info("Available space initially: %i MiB", (isp)/(2**20))
- if imgload_space:
- sp = int(imgload_space.group(1))
- logger.debug("Used after load: %i MiB", (isp-sp)/(2**20))
- if imgupgrade_space:
- sp = int(imgupgrade_space.group(1))
- logger.debug("Used after upgrade: %i MiB", (isp-sp)/(2**20))
- if keepdocker_space:
- sp = int(keepdocker_space.group(1))
- logger.info("Used after upload: %i MiB", (isp-sp)/(2**20))
-
- if cleanup_space:
- sp = int(cleanup_space.group(1))
- logger.debug("Available after cleanup: %i MiB", (sp)/(2**20))
-
- if proc.returncode != 0:
- logger.error("Failed with return code %i", proc.returncode)
- logger.error("--- Stdout ---\n%s", out)
- logger.error("--- Stderr ---\n%s", err)
- raise MigrationFailed()
-
- if args.verbose:
- logger.info("--- Stdout ---\n%s", out)
- logger.info("--- Stderr ---\n%s", err)
-
- migrated = re.search(r"Migrated uuid is ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15})", out)
- if migrated:
- newcol = CollectionReader(migrated.group(1))
-
- api_client.links().create(body={"link": {
- 'owner_uuid': sys_uuid,
- 'link_class': _migration_link_class,
- 'name': _migration_link_name,
- 'tail_uuid': oldcol.portable_data_hash(),
- 'head_uuid': newcol.portable_data_hash()
- }}).execute(num_retries=3)
-
- logger.info("Migrated '%s' (%s) to '%s' (%s) in %is",
- oldcol.portable_data_hash(), old_image["collection"],
- newcol.portable_data_hash(), migrated.group(1),
- time.time() - start)
- already_migrated.add(oldcol.portable_data_hash())
- success.append(old_image["collection"])
- else:
- logger.error("Error migrating '%s'", old_image["collection"])
- failures.append(old_image["collection"])
- except Exception as e:
- logger.error("Failed to migrate %s in %is", old_image["collection"], time.time() - start,
- exc_info=(not isinstance(e, MigrationFailed)))
- failures.append(old_image["collection"])
- finally:
- shutil.rmtree(varlibdocker)
- shutil.rmtree(dockercache)
-
- logger.info("Successfully migrated %i images", len(success))
- if failures:
- logger.error("Failed to migrate %i images", len(failures))
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 0e732eafde..65e0fcce1d 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -2,10 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import division
-from future.utils import listitems, listvalues
-from builtins import str
-from builtins import object
import argparse
import arvados
import arvados.collection
@@ -30,12 +26,15 @@ import threading
import time
import traceback
-from apiclient import errors as apiclient_errors
-from arvados._version import __version__
-from arvados.util import keep_locator_pattern
+from pathlib import Path
+import arvados.util
import arvados.commands._util as arv_cmd
+from apiclient import errors as apiclient_errors
+from arvados._internal import basedirs
+from arvados._version import __version__
+
api_client = None
upload_opts = argparse.ArgumentParser(add_help=False)
@@ -141,7 +140,10 @@ physical storage devices (e.g., disks) should have a copy of each data
block. Default is to use the server-provided default (if any) or 2.
""")
-upload_opts.add_argument('--storage-classes', help="""
+upload_opts.add_argument(
+ '--storage-classes',
+ type=arv_cmd.UniqueSplit(),
+ help="""
Specify comma separated list of storage classes to be used when saving data to Keep.
""")
@@ -355,7 +357,7 @@ class ArvPutLogFormatter(logging.Formatter):
class ResumeCache(object):
- CACHE_DIR = '.cache/arvados/arv-put'
+ CACHE_DIR = 'arv-put'
def __init__(self, file_spec):
self.cache_file = open(file_spec, 'a+')
@@ -372,9 +374,14 @@ class ResumeCache(object):
md5.update(b'-1')
elif args.filename:
md5.update(args.filename.encode())
- return os.path.join(
- arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'),
- md5.hexdigest())
+ cache_path = Path(cls.CACHE_DIR)
+ if len(cache_path.parts) == 1:
+ cache_path = basedirs.BaseDirectories('CACHE').storage_path(cache_path)
+ else:
+ # Note this is a noop if cache_path is absolute, which is what we want.
+ cache_path = Path.home() / cache_path
+ cache_path.mkdir(parents=True, exist_ok=True, mode=0o700)
+ return str(cache_path / md5.hexdigest())
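The CACHE_DIR resolution rule shared by ResumeCache and ArvPutUploadJob: a bare name goes under the standard cache directory, a relative multi-component path is anchored at $HOME (preserving the old layout), and an absolute path is used as-is. The rule in isolation, as a sketch with the directories passed in explicitly:

    from pathlib import Path

    def resolve_cache_dir(cache_dir, standard_cache, home):
        path = Path(cache_dir)
        if len(path.parts) == 1:
            return standard_cache / path
        # Joining with an absolute path discards `home`, which is the
        # intended no-op noted in the comments above.
        return home / path

    assert resolve_cache_dir('arv-put', Path('/c'), Path('/h')) == Path('/c/arv-put')
    assert resolve_cache_dir('.cache/arvados/arv-put', Path('/c'), Path('/h')) \
        == Path('/h/.cache/arvados/arv-put')
    assert resolve_cache_dir('/tmp/x', Path('/c'), Path('/h')) == Path('/tmp/x')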
def _lock_file(self, fileobj):
try:
@@ -437,7 +444,7 @@ class ResumeCache(object):
class ArvPutUploadJob(object):
- CACHE_DIR = '.cache/arvados/arv-put'
+ CACHE_DIR = 'arv-put'
EMPTY_STATE = {
'manifest' : None, # Last saved manifest checkpoint
'files' : {} # Previous run file list: {path : {size, mtime}}
@@ -696,7 +703,7 @@ class ArvPutUploadJob(object):
Recursively get the total size of the collection
"""
size = 0
- for item in listvalues(collection):
+ for item in collection.values():
if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
size += self._collection_size(item)
else:
@@ -863,11 +870,14 @@ class ArvPutUploadJob(object):
md5.update(b'\0'.join([p.encode() for p in realpaths]))
if self.filename:
md5.update(self.filename.encode())
- cache_filename = md5.hexdigest()
- cache_filepath = os.path.join(
- arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
- cache_filename)
- return cache_filepath
+ cache_path = Path(self.CACHE_DIR)
+ if len(cache_path.parts) == 1:
+ cache_path = basedirs.BaseDirectories('CACHE').storage_path(cache_path)
+ else:
+ # Note this is a noop if cache_path is absolute, which is what we want.
+ cache_path = Path.home() / cache_path
+ cache_path.mkdir(parents=True, exist_ok=True, mode=0o700)
+ return str(cache_path / md5.hexdigest())
def _setup_state(self, update_collection):
"""
@@ -946,7 +956,7 @@ class ArvPutUploadJob(object):
oldest_exp = None
oldest_loc = None
block_found = False
- for m in keep_locator_pattern.finditer(self._state['manifest']):
+ for m in arvados.util.keep_locator_pattern.finditer(self._state['manifest']):
loc = m.group(0)
try:
exp = datetime.datetime.utcfromtimestamp(int(loc.split('@')[1], 16))
@@ -978,7 +988,7 @@ class ArvPutUploadJob(object):
def collection_file_paths(self, col, path_prefix='.'):
"""Return a list of file paths by recursively go through the entire collection `col`"""
file_paths = []
- for name, item in listitems(col):
+ for name, item in col.items():
if isinstance(item, arvados.arvfile.ArvadosFile):
file_paths.append(os.path.join(path_prefix, name))
elif isinstance(item, arvados.collection.Subcollection):
@@ -1058,7 +1068,7 @@ class ArvPutUploadJob(object):
locators.append(loc)
return locators
elif isinstance(item, arvados.collection.Collection):
- l = [self._datablocks_on_item(x) for x in listvalues(item)]
+ l = [self._datablocks_on_item(x) for x in item.values()]
# Fast list flattener method taken from:
# http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
return [loc for sublist in l for loc in sublist]
@@ -1213,11 +1223,6 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
else:
reporter = None
- # Split storage-classes argument
- storage_classes = None
- if args.storage_classes:
- storage_classes = args.storage_classes.strip().replace(' ', '').split(',')
-
# Setup exclude regex from all the --exclude arguments provided
name_patterns = []
exclude_paths = []
@@ -1276,7 +1281,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
owner_uuid = project_uuid,
ensure_unique_name = True,
update_collection = args.update_collection,
- storage_classes=storage_classes,
+ storage_classes=args.storage_classes,
logger=logger,
dry_run=args.dry_run,
follow_links=args.follow_links,
diff --git a/sdk/python/arvados/commands/run.py b/sdk/python/arvados/commands/run.py
index 0fe05da22b..474111d882 100644
--- a/sdk/python/arvados/commands/run.py
+++ b/sdk/python/arvados/commands/run.py
@@ -15,11 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import print_function
-from __future__ import absolute_import
-from builtins import range
-from past.builtins import basestring
-from builtins import object
import arvados
import arvados.commands.ws as ws
import argparse
diff --git a/sdk/python/arvados/commands/ws.py b/sdk/python/arvados/commands/ws.py
index 04a90cf20b..3508682399 100644
--- a/sdk/python/arvados/commands/ws.py
+++ b/sdk/python/arvados/commands/ws.py
@@ -2,16 +2,16 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-import sys
-import logging
import argparse
-import arvados
import json
+import logging
+import signal
+import sys
+
+import arvados
from arvados.events import subscribe
from arvados._version import __version__
from . import _util as arv_cmd
-import signal
def main(arguments=None):
logger = logging.getLogger('arvados.arv-ws')
diff --git a/sdk/python/arvados/config.py b/sdk/python/arvados/config.py
index 6f3bd02790..950901d506 100644
--- a/sdk/python/arvados/config.py
+++ b/sdk/python/arvados/config.py
@@ -10,19 +10,40 @@
import os
import re
+from typing import (
+ Callable,
+ Iterable,
+ Union,
+)
+
+from . import util
+from ._internal import basedirs
+
_settings = None
-if os.environ.get('HOME') is not None:
- default_config_file = os.environ['HOME'] + '/.config/arvados/settings.conf'
-else:
- default_config_file = ''
+default_config_file = ''
+"""
+.. WARNING:: Deprecated
+ Default configuration initialization now searches for the "default"
+ configuration in several places. This value no longer has any effect.
+"""
KEEP_BLOCK_SIZE = 2**26
EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
-def initialize(config_file=default_config_file):
+def initialize(
+ config_file: Union[
+ str,
+ os.PathLike,
+ Callable[[str], Iterable[os.PathLike]],
+ ]=basedirs.BaseDirectories('CONFIG').search,
+) -> None:
global _settings
_settings = {}
+ if callable(config_file):
+ search_paths = iter(config_file('settings.conf'))
+ config_file = next(search_paths, '')
+
# load the specified config file if available
try:
_settings = load(config_file)
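With this signature, initialize() accepts an explicit path or a callable that maps a filename to candidate paths; the default performs the standard settings.conf search. A hedged usage sketch (the explicit path is illustrative only):

    import arvados.config

    # Default: search the standard configuration directories for settings.conf.
    arvados.config.initialize()

    # Explicit file, as before.
    arvados.config.initialize('/etc/arvados/settings.conf')

    # Custom search: any callable returning an iterable of candidate paths.
    arvados.config.initialize(lambda name: iter([]))  # no candidates; settings stay empty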
diff --git a/sdk/python/arvados/crunch.py b/sdk/python/arvados/crunch.py
deleted file mode 100644
index 6dd144c43b..0000000000
--- a/sdk/python/arvados/crunch.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from builtins import object
-import json
-import os
-from . import util
-
-class TaskOutputDir(object):
- """Keep-backed directory for staging outputs of Crunch tasks.
-
- Example, in a crunch task whose output is a file called "out.txt"
- containing "42":
-
- import arvados
- import arvados.crunch
- import os
-
- out = arvados.crunch.TaskOutputDir()
- with open(os.path.join(out.path, 'out.txt'), 'w') as f:
- f.write('42')
- arvados.current_task().set_output(out.manifest_text())
- """
- @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
- def __init__(self):
- self.path = os.environ['TASK_KEEPMOUNT_TMP']
-
- def __str__(self):
- return self.path
-
- def manifest_text(self):
- snapshot = os.path.join(self.path, '.arvados#collection')
- return json.load(open(snapshot))['manifest_text']
diff --git a/sdk/python/arvados/events.py b/sdk/python/arvados/events.py
index 88a916e659..624a1b62f8 100644
--- a/sdk/python/arvados/events.py
+++ b/sdk/python/arvados/events.py
@@ -264,7 +264,7 @@ class EventClient(threading.Thread):
This method runs in a separate thread to receive and process events
from the server.
"""
- self.setName(f'ArvadosWebsockets-{self.ident}')
+ self.name = f'ArvadosWebsockets-{self.ident}'
while self._client_ok and not self.is_closed.is_set():
try:
with self._subscribe_lock:
@@ -300,7 +300,7 @@ class PollClient(threading.Thread):
* api: arvados.api_resources.ArvadosAPIClient --- The Arvados API
client used to query logs. It will be used in a separate thread,
- so if it is not an instance of `arvados.safeapi.ThreadSafeApiCache`
+ so if it is not an instance of `arvados.api.ThreadSafeAPIClient`
it should not be reused after the thread is started.
* filters: arvados.events.Filter | None --- One event filter to
@@ -525,7 +525,7 @@ def subscribe(
* api: arvados.api_resources.ArvadosAPIClient --- The Arvados API
client used to query logs. It may be used in a separate thread,
- so if it is not an instance of `arvados.safeapi.ThreadSafeApiCache`
+ so if it is not an instance of `arvados.api.ThreadSafeAPIClient`
it should not be reused after this method returns.
* filters: arvados.events.Filter | None --- One event filter to
diff --git a/sdk/python/arvados/http_to_keep.py b/sdk/python/arvados/http_to_keep.py
deleted file mode 100644
index f247afeaff..0000000000
--- a/sdk/python/arvados/http_to_keep.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import calendar
-import dataclasses
-import datetime
-import email.utils
-import logging
-import re
-import time
-import typing
-import urllib.parse
-
-import pycurl
-
-import arvados
-import arvados.collection
-from arvados._pycurlhelper import PyCurlHelper
-
-logger = logging.getLogger('arvados.http_import')
-
-def _my_formatdate(dt):
- return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
- localtime=False, usegmt=True)
-
-def _my_parsedate(text):
- parsed = email.utils.parsedate_tz(text)
- if parsed:
- if parsed[9]:
- # Adjust to UTC
- return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
- else:
- # TZ is zero or missing, assume UTC.
- return datetime.datetime(*parsed[:6])
- else:
- return datetime.datetime(1970, 1, 1)
-
-def _fresh_cache(url, properties, now):
- pr = properties[url]
- expires = None
-
- logger.debug("Checking cache freshness for %s using %s", url, pr)
-
- if "Cache-Control" in pr:
- if re.match(r"immutable", pr["Cache-Control"]):
- return True
-
- g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
- if g:
- expires = _my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
-
- if expires is None and "Expires" in pr:
- expires = _my_parsedate(pr["Expires"])
-
- if expires is None:
- # Use a default cache time of 24 hours if upstream didn't set
- # any cache headers, to reduce redundant downloads.
- expires = _my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
-
- if not expires:
- return False
-
- return (now < expires)
-
-def _remember_headers(url, properties, headers, now):
- properties.setdefault(url, {})
- for h in ("Cache-Control", "Etag", "Expires", "Date", "Content-Length"):
- if h in headers:
- properties[url][h] = headers[h]
- if "Date" not in headers:
- properties[url]["Date"] = _my_formatdate(now)
-
-@dataclasses.dataclass
-class _Response:
- status_code: int
- headers: typing.Mapping[str, str]
-
-
-class _Downloader(PyCurlHelper):
- # Wait up to 60 seconds for connection
- # How long it can be in "low bandwidth" state before it gives up
- # Low bandwidth threshold is 32 KiB/s
- DOWNLOADER_TIMEOUT = (60, 300, 32768)
-
- def __init__(self, apiclient):
- super(_Downloader, self).__init__(title_case_headers=True)
- self.curl = pycurl.Curl()
- self.curl.setopt(pycurl.NOSIGNAL, 1)
- self.curl.setopt(pycurl.OPENSOCKETFUNCTION,
- lambda *args, **kwargs: self._socket_open(*args, **kwargs))
- self.target = None
- self.apiclient = apiclient
-
- def head(self, url):
- get_headers = {'Accept': 'application/octet-stream'}
- self._headers = {}
-
- self.curl.setopt(pycurl.URL, url.encode('utf-8'))
- self.curl.setopt(pycurl.HTTPHEADER, [
- '{}: {}'.format(k,v) for k,v in get_headers.items()])
-
- self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
- self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
- self.curl.setopt(pycurl.NOBODY, True)
- self.curl.setopt(pycurl.FOLLOWLOCATION, True)
-
- self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, True)
-
- try:
- self.curl.perform()
- except Exception as e:
- raise arvados.errors.HttpError(0, str(e))
- finally:
- if self._socket:
- self._socket.close()
- self._socket = None
-
- return _Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
-
- def download(self, url, headers):
- self.count = 0
- self.start = time.time()
- self.checkpoint = self.start
- self._headers = {}
- self._first_chunk = True
- self.collection = None
- self.parsedurl = urllib.parse.urlparse(url)
-
- get_headers = {'Accept': 'application/octet-stream'}
- get_headers.update(headers)
-
- self.curl.setopt(pycurl.URL, url.encode('utf-8'))
- self.curl.setopt(pycurl.HTTPHEADER, [
- '{}: {}'.format(k,v) for k,v in get_headers.items()])
-
- self.curl.setopt(pycurl.WRITEFUNCTION, self.body_write)
- self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
-
- self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
- self.curl.setopt(pycurl.HTTPGET, True)
- self.curl.setopt(pycurl.FOLLOWLOCATION, True)
-
- self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, False)
-
- try:
- self.curl.perform()
- except Exception as e:
- raise arvados.errors.HttpError(0, str(e))
- finally:
- if self._socket:
- self._socket.close()
- self._socket = None
-
- return _Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
-
- def headers_received(self):
- self.collection = arvados.collection.Collection(api_client=self.apiclient)
-
- if "Content-Length" in self._headers:
- self.contentlength = int(self._headers["Content-Length"])
- logger.info("File size is %s bytes", self.contentlength)
- else:
- self.contentlength = None
-
- if self._headers.get("Content-Disposition"):
- grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))',
- self._headers["Content-Disposition"])
- if grp.group(2):
- self.name = grp.group(2)
- else:
- self.name = grp.group(4)
- else:
- self.name = self.parsedurl.path.split("/")[-1]
-
- # Can't call curl.getinfo(pycurl.RESPONSE_CODE) until
- # perform() is done but we need to know the status before that
- # so we have to parse the status line ourselves.
- mt = re.match(r'^HTTP\/(\d(\.\d)?) ([1-5]\d\d) ([^\r\n\x00-\x08\x0b\x0c\x0e-\x1f\x7f]*)\r\n$', self._headers["x-status-line"])
- code = int(mt.group(3))
-
- if not self.name:
- logger.error("Cannot determine filename from URL or headers")
- return
-
- if code == 200:
- self.target = self.collection.open(self.name, "wb")
-
- def body_write(self, chunk):
- if self._first_chunk:
- self.headers_received()
- self._first_chunk = False
-
- self.count += len(chunk)
-
- if self.target is None:
- # "If this number is not equal to the size of the byte
- # string, this signifies an error and libcurl will abort
- # the request."
- return 0
-
- self.target.write(chunk)
- loopnow = time.time()
- if (loopnow - self.checkpoint) < 20:
- return
-
- bps = self.count / (loopnow - self.start)
- if self.contentlength is not None:
- logger.info("%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left",
- ((self.count * 100) / self.contentlength),
- (bps / (1024.0*1024.0)),
- ((self.contentlength-self.count) // bps))
- else:
- logger.info("%d downloaded, %6.2f MiB/s", count, (bps / (1024.0*1024.0)))
- self.checkpoint = loopnow
-
-
-def _changed(url, clean_url, properties, now, curldownloader):
- req = curldownloader.head(url)
-
- if req.status_code != 200:
- # Sometimes endpoints are misconfigured and will deny HEAD but
- # allow GET so instead of failing here, we'll try GET If-None-Match
- return True
-
- # previous version of this code used "ETag", now we are
- # normalizing to "Etag", check for both.
- etag = properties[url].get("Etag") or properties[url].get("ETag")
-
- if url in properties:
- del properties[url]
- _remember_headers(clean_url, properties, req.headers, now)
-
- if "Etag" in req.headers and etag == req.headers["Etag"]:
- # Didn't change
- return False
-
- return True
-
-def _etag_quote(etag):
- # if it already has leading and trailing quotes, do nothing
- if etag[0] == '"' and etag[-1] == '"':
- return etag
- else:
- # Add quotes.
- return '"' + etag + '"'
-
-
-def check_cached_url(api, project_uuid, url, etags,
- utcnow=datetime.datetime.utcnow,
- varying_url_params="",
- prefer_cached_downloads=False):
-
- logger.info("Checking Keep for %s", url)
-
- varying_params = [s.strip() for s in varying_url_params.split(",")]
-
- parsed = urllib.parse.urlparse(url)
- query = [q for q in urllib.parse.parse_qsl(parsed.query)
- if q[0] not in varying_params]
-
- clean_url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params,
- urllib.parse.urlencode(query, safe="/"), parsed.fragment))
-
- r1 = api.collections().list(filters=[["properties", "exists", url]]).execute()
-
- if clean_url == url:
- items = r1["items"]
- else:
- r2 = api.collections().list(filters=[["properties", "exists", clean_url]]).execute()
- items = r1["items"] + r2["items"]
-
- now = utcnow()
-
- curldownloader = _Downloader(api)
-
- for item in items:
- properties = item["properties"]
-
- if clean_url in properties:
- cache_url = clean_url
- elif url in properties:
- cache_url = url
- else:
- raise Exception("Shouldn't happen, got an API result for %s that doesn't have the URL in properties" % item["uuid"])
-
- if prefer_cached_downloads or _fresh_cache(cache_url, properties, now):
- # HTTP caching rules say we should use the cache
- cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
- return (item["portable_data_hash"], next(iter(cr.keys())), item["uuid"], clean_url, now)
-
- if not _changed(cache_url, clean_url, properties, now, curldownloader):
- # Etag didn't change, same content, just update headers
- api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
- cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
- return (item["portable_data_hash"], next(iter(cr.keys())), item["uuid"], clean_url, now)
-
- for etagstr in ("Etag", "ETag"):
- if etagstr in properties[cache_url] and len(properties[cache_url][etagstr]) > 2:
- etags[properties[cache_url][etagstr]] = item
-
- logger.debug("Found ETag values %s", etags)
-
- return (None, None, None, clean_url, now)
-
-
-def http_to_keep(api, project_uuid, url,
- utcnow=datetime.datetime.utcnow, varying_url_params="",
- prefer_cached_downloads=False):
- """Download a file over HTTP and upload it to keep, with HTTP headers as metadata.
-
- Before downloading the URL, checks to see if the URL already
- exists in Keep and applies HTTP caching policy, the
- varying_url_params and prefer_cached_downloads flags in order to
- decide whether to use the version in Keep or re-download it.
- """
-
- etags = {}
- cache_result = check_cached_url(api, project_uuid, url, etags,
- utcnow, varying_url_params,
- prefer_cached_downloads)
-
- if cache_result[0] is not None:
- return cache_result
-
- clean_url = cache_result[3]
- now = cache_result[4]
-
- properties = {}
- headers = {}
- if etags:
- headers['If-None-Match'] = ', '.join([_etag_quote(k) for k,v in etags.items()])
- logger.debug("Sending GET request with headers %s", headers)
-
- logger.info("Beginning download of %s", url)
-
- curldownloader = _Downloader(api)
-
- req = curldownloader.download(url, headers)
-
- c = curldownloader.collection
-
- if req.status_code not in (200, 304):
- raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
-
- if curldownloader.target is not None:
- curldownloader.target.close()
-
- _remember_headers(clean_url, properties, req.headers, now)
-
- if req.status_code == 304 and "Etag" in req.headers and req.headers["Etag"] in etags:
- item = etags[req.headers["Etag"]]
- item["properties"].update(properties)
- api.collections().update(uuid=item["uuid"], body={"collection":{"properties": item["properties"]}}).execute()
- cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
- return (item["portable_data_hash"], list(cr.keys())[0], item["uuid"], clean_url, now)
-
- logger.info("Download complete")
-
- collectionname = "Downloaded from %s" % urllib.parse.quote(clean_url, safe='')
-
- # max length - space to add a timestamp used by ensure_unique_name
- max_name_len = 254 - 28
-
- if len(collectionname) > max_name_len:
- over = len(collectionname) - max_name_len
- split = int(max_name_len/2)
- collectionname = collectionname[0:split] + "…" + collectionname[split+over:]
-
- c.save_new(name=collectionname, owner_uuid=project_uuid, ensure_unique_name=True)
-
- api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
-
- return (c.portable_data_hash(), curldownloader.name, c.manifest_locator(), clean_url, now)
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
index d1be6b931e..4e98135923 100644
--- a/sdk/python/arvados/keep.py
+++ b/sdk/python/arvados/keep.py
@@ -2,16 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from __future__ import division
import copy
-from future import standard_library
-from future.utils import native_str
-standard_library.install_aliases()
-from builtins import next
-from builtins import str
-from builtins import range
-from builtins import object
import collections
import datetime
import hashlib
@@ -28,28 +19,24 @@ import ssl
import sys
import threading
import resource
-from . import timer
import urllib.parse
import traceback
import weakref
-if sys.version_info >= (3, 0):
- from io import BytesIO
-else:
- from cStringIO import StringIO as BytesIO
+from io import BytesIO
import arvados
import arvados.config as config
import arvados.errors
import arvados.retry as retry
import arvados.util
-import arvados.diskcache
-from arvados._pycurlhelper import PyCurlHelper
+
+from ._internal import basedirs, diskcache, Timer, parse_seq
+from ._internal.pycurl import PyCurlHelper
_logger = logging.getLogger('arvados.keep')
global_client_object = None
-
# Monkey patch TCP constants when not available (apple). Values sourced from:
# http://www.opensource.apple.com/source/xnu/xnu-2422.115.4/bsd/netinet/tcp.h
if sys.platform == 'darwin':
@@ -60,7 +47,6 @@ if sys.platform == 'darwin':
if not hasattr(socket, 'TCP_KEEPCNT'):
socket.TCP_KEEPCNT = 0x102
-
class KeepLocator(object):
EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')
@@ -85,7 +71,7 @@ class KeepLocator(object):
def __str__(self):
return '+'.join(
- native_str(s)
+ str(s)
for s in [self.md5sum, self.size,
self.permission_hint()] + self.hints
if s is not None)
@@ -145,40 +131,6 @@ class KeepLocator(object):
return self.perm_expiry <= as_of_dt
-class Keep(object):
- """Simple interface to a global KeepClient object.
-
- THIS CLASS IS DEPRECATED. Please instantiate your own KeepClient with your
- own API client. The global KeepClient will build an API client from the
- current Arvados configuration, which may not match the one you built.
- """
- _last_key = None
-
- @classmethod
- def global_client_object(cls):
- global global_client_object
- # Previously, KeepClient would change its behavior at runtime based
- # on these configuration settings. We simulate that behavior here
- # by checking the values and returning a new KeepClient if any of
- # them have changed.
- key = (config.get('ARVADOS_API_HOST'),
- config.get('ARVADOS_API_TOKEN'),
- config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
- config.get('ARVADOS_KEEP_PROXY'),
- os.environ.get('KEEP_LOCAL_STORE'))
- if (global_client_object is None) or (cls._last_key != key):
- global_client_object = KeepClient()
- cls._last_key = key
- return global_client_object
-
- @staticmethod
- def get(locator, **kwargs):
- return Keep.global_client_object().get(locator, **kwargs)
-
- @staticmethod
- def put(data, **kwargs):
- return Keep.global_client_object().put(data, **kwargs)
-
class KeepBlockCache(object):
def __init__(self, cache_max=0, max_slots=0, disk_cache=False, disk_cache_dir=None):
self.cache_max = cache_max
@@ -190,8 +142,7 @@ class KeepBlockCache(object):
self._cache_updating = threading.Condition(self._cache_lock)
if self._disk_cache and self._disk_cache_dir is None:
- self._disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
- os.makedirs(self._disk_cache_dir, mode=0o700, exist_ok=True)
+ self._disk_cache_dir = str(basedirs.BaseDirectories('CACHE').storage_path('keep'))
if self._max_slots == 0:
if self._disk_cache:
@@ -219,7 +170,7 @@ class KeepBlockCache(object):
if self._disk_cache:
fs = os.statvfs(self._disk_cache_dir)
# Calculation of available space incorporates existing cache usage
- existing_usage = arvados.diskcache.DiskCacheSlot.cache_usage(self._disk_cache_dir)
+ existing_usage = diskcache.DiskCacheSlot.cache_usage(self._disk_cache_dir)
avail = (fs.f_bavail * fs.f_bsize + existing_usage) / 4
maxdisk = int((fs.f_blocks * fs.f_bsize) * 0.10)
# pick smallest of:
@@ -235,12 +186,12 @@ class KeepBlockCache(object):
self.cache_total = 0
if self._disk_cache:
- self._cache = arvados.diskcache.DiskCacheSlot.init_cache(self._disk_cache_dir, self._max_slots)
+ self._cache = diskcache.DiskCacheSlot.init_cache(self._disk_cache_dir, self._max_slots)
for slot in self._cache.values():
self.cache_total += slot.size()
self.cap_cache()
- class CacheSlot(object):
+ class _CacheSlot:
__slots__ = ("locator", "ready", "content")
def __init__(self, locator):
@@ -305,7 +256,7 @@ class KeepBlockCache(object):
return n
if self._disk_cache:
# see if it exists on disk
- n = arvados.diskcache.DiskCacheSlot.get_from_disk(locator, self._disk_cache_dir)
+ n = diskcache.DiskCacheSlot.get_from_disk(locator, self._disk_cache_dir)
if n is not None:
self._cache[n.locator] = n
self.cache_total += n.size()
@@ -335,9 +286,9 @@ class KeepBlockCache(object):
self._resize_cache(self.cache_max, self._max_slots-1)
if self._disk_cache:
- n = arvados.diskcache.DiskCacheSlot(locator, self._disk_cache_dir)
+ n = diskcache.DiskCacheSlot(locator, self._disk_cache_dir)
else:
- n = KeepBlockCache.CacheSlot(locator)
+ n = KeepBlockCache._CacheSlot(locator)
self._cache[n.locator] = n
return n, True
@@ -381,7 +332,12 @@ class KeepBlockCache(object):
self.cap_cache()
-class Counter(object):
+ def clear(self):
+ with self._cache_lock:
+ self._cache.clear()
+ self.cache_total = 0
+
+class _Counter:
def __init__(self, v=0):
self._lk = threading.Lock()
self._val = v
@@ -399,10 +355,10 @@ class KeepClient(object):
DEFAULT_TIMEOUT = PyCurlHelper.DEFAULT_TIMEOUT
DEFAULT_PROXY_TIMEOUT = PyCurlHelper.DEFAULT_PROXY_TIMEOUT
- class KeepService(PyCurlHelper):
+ class _KeepService(PyCurlHelper):
"""Make requests to a single Keep service, and track results.
- A KeepService is intended to last long enough to perform one
+ A _KeepService is intended to last long enough to perform one
transaction (GET or PUT) against one Keep service. This can
involve calling either get() or put() multiple times in order
to retry after transient failures. However, calling both get()
@@ -422,7 +378,7 @@ class KeepClient(object):
download_counter=None,
headers={},
insecure=False):
- super(KeepClient.KeepService, self).__init__()
+ super().__init__()
self.root = root
self._user_agent_pool = user_agent_pool
self._result = {'error': None}
@@ -467,7 +423,7 @@ class KeepClient(object):
curl = self._get_user_agent()
ok = None
try:
- with timer.Timer() as t:
+ with Timer() as t:
self._headers = {}
response_body = BytesIO()
curl.setopt(pycurl.NOSIGNAL, 1)
@@ -562,7 +518,7 @@ class KeepClient(object):
curl = self._get_user_agent()
ok = None
try:
- with timer.Timer() as t:
+ with Timer() as t:
self._headers = {}
body_reader = BytesIO(body)
response_body = BytesIO()
@@ -632,7 +588,7 @@ class KeepClient(object):
return True
- class KeepWriterQueue(queue.Queue):
+ class _KeepWriterQueue(queue.Queue):
def __init__(self, copies, classes=[]):
queue.Queue.__init__(self) # Old-style superclass
self.wanted_copies = copies
@@ -717,7 +673,7 @@ class KeepClient(object):
self.pending_tries_notification.wait()
- class KeepWriterThreadPool(object):
+ class _KeepWriterThreadPool:
def __init__(self, data, data_hash, copies, max_service_replicas, timeout=None, classes=[]):
self.total_task_nr = 0
if (not max_service_replicas) or (max_service_replicas >= copies):
@@ -726,10 +682,10 @@ class KeepClient(object):
num_threads = int(math.ceil(1.0*copies/max_service_replicas))
_logger.debug("Pool max threads is %d", num_threads)
self.workers = []
- self.queue = KeepClient.KeepWriterQueue(copies, classes)
+ self.queue = KeepClient._KeepWriterQueue(copies, classes)
# Create workers
for _ in range(num_threads):
- w = KeepClient.KeepWriterThread(self.queue, data, data_hash, timeout)
+ w = KeepClient._KeepWriterThread(self.queue, data, data_hash, timeout)
self.workers.append(w)
def add_task(self, ks, service_root):
@@ -750,11 +706,18 @@ class KeepClient(object):
return self.queue.response
- class KeepWriterThread(threading.Thread):
- class TaskFailed(RuntimeError): pass
+ class _KeepWriterThread(threading.Thread):
+ class TaskFailed(RuntimeError):
+ """Exception for failed Keep writes
+
+ TODO: Move this class to the module top level and document it
+
+ @private
+ """
+
def __init__(self, queue, data, data_hash, timeout=None):
- super(KeepClient.KeepWriterThread, self).__init__()
+ super().__init__()
self.timeout = timeout
self.queue = queue
self.data = data
@@ -771,7 +734,7 @@ class KeepClient(object):
locator, copies, classes = self.do_task(service, service_root)
except Exception as e:
if not isinstance(e, self.TaskFailed):
- _logger.exception("Exception in KeepWriterThread")
+ _logger.exception("Exception in _KeepWriterThread")
self.queue.write_fail(service)
else:
self.queue.write_success(locator, copies, classes)
@@ -798,7 +761,7 @@ class KeepClient(object):
result.get('body'))
raise self.TaskFailed()
- _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
+ _logger.debug("_KeepWriterThread %s succeeded %s+%i %s",
str(threading.current_thread()),
self.data_hash,
len(self.data),
@@ -808,13 +771,13 @@ class KeepClient(object):
except (KeyError, ValueError):
replicas_stored = 1
- classes_confirmed = {}
+ classes_confirmed = collections.defaultdict(int)
try:
scch = result['headers']['x-keep-storage-classes-confirmed']
- for confirmation in scch.replace(' ', '').split(','):
- if '=' in confirmation:
- stored_class, stored_copies = confirmation.split('=')[:2]
- classes_confirmed[stored_class] = int(stored_copies)
+ for confirmation in parse_seq(scch):
+ stored_class, _, stored_copies = confirmation.partition('=')
+ if stored_copies:
+ classes_confirmed[stored_class] += int(stored_copies)
except (KeyError, ValueError):
# Storage classes confirmed header missing or corrupt
classes_confirmed = None
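
The rewritten confirmation parsing above tolerates repeated class names and entries without counts. A minimal sketch of the same logic, assuming `parse_seq` simply splits a comma-separated header value into stripped tokens:

```python
import collections

def parse_confirmed_classes(header_value):
    # Assumes parse_seq() is equivalent to splitting on commas and
    # stripping whitespace around each token.
    classes_confirmed = collections.defaultdict(int)
    for confirmation in (tok.strip() for tok in header_value.split(',')):
        stored_class, _, stored_copies = confirmation.partition('=')
        if stored_copies:
            classes_confirmed[stored_class] += int(stored_copies)
    return classes_confirmed

print(dict(parse_confirmed_classes('default=2, archive=1')))
# -> {'default': 2, 'archive': 1}
```
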
@@ -906,12 +869,12 @@ class KeepClient(object):
self.timeout = timeout
self.proxy_timeout = proxy_timeout
self._user_agent_pool = queue.LifoQueue()
- self.upload_counter = Counter()
- self.download_counter = Counter()
- self.put_counter = Counter()
- self.get_counter = Counter()
- self.hits_counter = Counter()
- self.misses_counter = Counter()
+ self.upload_counter = _Counter()
+ self.download_counter = _Counter()
+ self.put_counter = _Counter()
+ self.get_counter = _Counter()
+ self.hits_counter = _Counter()
+ self.misses_counter = _Counter()
self._storage_classes_unsupported_warning = False
self._default_classes = []
if num_prefetch_threads is not None:
@@ -978,7 +941,7 @@ class KeepClient(object):
"""
# TODO(twp): the timeout should be a property of a
- # KeepService, not a KeepClient. See #4488.
+ # _KeepService, not a KeepClient. See #4488.
t = self.proxy_timeout if self.using_proxy else self.timeout
if len(t) == 2:
return (t[0] * (1 << attempt_number), t[1])
@@ -1078,14 +1041,14 @@ class KeepClient(object):
def map_new_services(self, roots_map, locator, force_rebuild, need_writable, headers):
# roots_map is a dictionary, mapping Keep service root strings
- # to KeepService objects. Poll for Keep services, and add any
+ # to _KeepService objects. Poll for Keep services, and add any
# new ones to roots_map. Return the current list of local
# root strings.
- headers.setdefault('Authorization', "OAuth2 %s" % (self.api_token,))
+ headers.setdefault('Authorization', "Bearer %s" % (self.api_token,))
local_roots = self.weighted_service_roots(locator, force_rebuild, need_writable)
for root in local_roots:
if root not in roots_map:
- roots_map[root] = self.KeepService(
+ roots_map[root] = self._KeepService(
root, self._user_agent_pool,
upload_counter=self.upload_counter,
download_counter=self.download_counter,
@@ -1218,9 +1181,9 @@ class KeepClient(object):
len(hint) == 29 and
self._gateway_services.get(hint[2:])
)])
- # Map root URLs to their KeepService objects.
+ # Map root URLs to their _KeepService objects.
roots_map = {
- root: self.KeepService(root, self._user_agent_pool,
+ root: self._KeepService(root, self._user_agent_pool,
upload_counter=self.upload_counter,
download_counter=self.download_counter,
headers=headers,
@@ -1248,7 +1211,7 @@ class KeepClient(object):
loop.save_result(error)
continue
- # Query KeepService objects that haven't returned
+ # Query _KeepService objects that haven't returned
# permanent failure, in our specified shuffle order.
services_to_try = [roots_map[root]
for root in sorted_roots
@@ -1344,12 +1307,14 @@ class KeepClient(object):
pending_classes = []
if done_classes is not None:
pending_classes = list(set(classes) - set(done_classes))
- writer_pool = KeepClient.KeepWriterThreadPool(data=data,
- data_hash=data_hash,
- copies=copies - done_copies,
- max_service_replicas=self.max_replicas_per_service,
- timeout=self.current_timeout(num_retries - tries_left),
- classes=pending_classes)
+ writer_pool = KeepClient._KeepWriterThreadPool(
+ data=data,
+ data_hash=data_hash,
+ copies=copies - done_copies,
+ max_service_replicas=self.max_replicas_per_service,
+ timeout=self.current_timeout(num_retries - tries_left),
+ classes=pending_classes,
+ )
for service_root, ks in [(root, roots_map[root])
for root in sorted_roots]:
if ks.finished():
diff --git a/sdk/python/arvados/safeapi.py b/sdk/python/arvados/safeapi.py
index 56b92e8f08..874fb7d13c 100644
--- a/sdk/python/arvados/safeapi.py
+++ b/sdk/python/arvados/safeapi.py
@@ -1,81 +1,13 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-"""Thread-safe wrapper for an Arvados API client
+"""arvados.safeapi - Shim compatibility module
-This module provides `ThreadSafeApiCache`, a thread-safe, API-compatible
-Arvados API client.
-"""
-
-import sys
-import threading
-
-from typing import (
- Any,
- Mapping,
- Optional,
-)
-
-from . import config
-from . import keep
-from . import util
-
-api = sys.modules['arvados.api']
-
-class ThreadSafeApiCache(object):
- """Thread-safe wrapper for an Arvados API client
-
- This class takes all the arguments necessary to build a lower-level
- Arvados API client `googleapiclient.discovery.Resource`, then
- transparently builds and wraps a unique object per thread. This works
- around the fact that the client's underlying HTTP client object is not
- thread-safe.
+This module used to define `arvados.safeapi.ThreadSafeApiCache`. Now it only
+exists to provide backwards-compatible imports. New code should use
+`arvados.api.ThreadSafeAPIClient` instead.
- Arguments:
-
- * apiconfig: Mapping[str, str] | None --- A mapping with entries for
- `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
- `ARVADOS_API_HOST_INSECURE`. If not provided, uses
- `arvados.config.settings` to get these parameters from user
- configuration. You can pass an empty mapping to build the client
- solely from `api_params`.
-
- * keep_params: Mapping[str, Any] --- Keyword arguments used to construct
- an associated `arvados.keep.KeepClient`.
-
- * api_params: Mapping[str, Any] --- Keyword arguments used to construct
- each thread's API client. These have the same meaning as in the
- `arvados.api.api` function.
-
- * version: str | None --- A string naming the version of the Arvados API
- to use. If not specified, the code will log a warning and fall back to
- `'v1'`.
- """
- def __init__(
- self,
- apiconfig: Optional[Mapping[str, str]]=None,
- keep_params: Optional[Mapping[str, Any]]={},
- api_params: Optional[Mapping[str, Any]]={},
- version: Optional[str]=None,
- ) -> None:
- if apiconfig or apiconfig is None:
- self._api_kwargs = api.api_kwargs_from_config(version, apiconfig, **api_params)
- else:
- self._api_kwargs = api.normalize_api_kwargs(version, **api_params)
- self.api_token = self._api_kwargs['token']
- self.request_id = self._api_kwargs.get('request_id')
- self.local = threading.local()
- self.keep = keep.KeepClient(api_client=self, **keep_params)
-
- def localapi(self) -> 'googleapiclient.discovery.Resource':
- try:
- client = self.local.api
- except AttributeError:
- client = api.api_client(**self._api_kwargs)
- client._http._request_id = lambda: self.request_id or util.new_request_id()
- self.local.api = client
- return client
+@private
+"""
- def __getattr__(self, name: str) -> Any:
- # Proxy nonexistent attributes to the thread-local API client.
- return getattr(self.localapi(), name)
+from .api import ThreadSafeAPIClient as ThreadSafeApiCache
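
After this change the old import path is a pure alias. A quick sketch of what existing callers can rely on, assuming `ThreadSafeAPIClient` is exported from `arvados.api` as the import above indicates:

```python
# Both names refer to the same class object after this change.
from arvados.api import ThreadSafeAPIClient
from arvados.safeapi import ThreadSafeApiCache

assert ThreadSafeApiCache is ThreadSafeAPIClient
```
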
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
deleted file mode 100644
index 37cd5d7db8..0000000000
--- a/sdk/python/arvados/stream.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import print_function
-from __future__ import absolute_import
-from future.utils import listvalues
-from builtins import object
-import collections
-import hashlib
-import os
-import re
-import threading
-import functools
-import copy
-
-from ._ranges import locators_and_ranges, Range
-from .arvfile import StreamFileReader
-from arvados.retry import retry_method
-from arvados.keep import *
-from . import config
-from . import errors
-from . import util
-from ._normalize_stream import normalize_stream
-
-class StreamReader(object):
- @util._deprecated('3.0', 'arvados.collection.Collecttion')
- def __init__(self, tokens, keep=None, debug=False, _empty=False,
- num_retries=10):
- self._stream_name = None
- self._data_locators = []
- self._files = collections.OrderedDict()
- self._keep = keep
- self.num_retries = num_retries
-
- streamoffset = 0
-
- # parse stream
- for tok in tokens:
- if debug: print('tok', tok)
- if self._stream_name is None:
- self._stream_name = tok.replace('\\040', ' ')
- continue
-
- s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
- if s:
- blocksize = int(s.group(1))
- self._data_locators.append(Range(tok, streamoffset, blocksize, 0))
- streamoffset += blocksize
- continue
-
- s = re.search(r'^(\d+):(\d+):(\S+)', tok)
- if s:
- pos = int(s.group(1))
- size = int(s.group(2))
- name = s.group(3).replace('\\040', ' ')
- if name not in self._files:
- self._files[name] = StreamFileReader(self, [Range(pos, 0, size, 0)], name)
- else:
- filereader = self._files[name]
- filereader.segments.append(Range(pos, filereader.size(), size))
- continue
-
- raise errors.SyntaxError("Invalid manifest format")
-
- def name(self):
- return self._stream_name
-
- def files(self):
- return self._files
-
- def all_files(self):
- return listvalues(self._files)
-
- def size(self):
- n = self._data_locators[-1]
- return n.range_start + n.range_size
-
- def locators_and_ranges(self, range_start, range_size):
- return locators_and_ranges(self._data_locators, range_start, range_size)
-
- @retry_method
- def _keepget(self, locator, num_retries=None):
- return self._keep.get(locator, num_retries=num_retries)
-
- @retry_method
- def readfrom(self, start, size, num_retries=None):
- """Read up to 'size' bytes from the stream, starting at 'start'"""
- if size == 0:
- return b''
- if self._keep is None:
- self._keep = KeepClient(num_retries=self.num_retries)
- data = []
- for lr in locators_and_ranges(self._data_locators, start, size):
- data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
- return b''.join(data)
-
- def manifest_text(self, strip=False):
- manifest_text = [self.name().replace(' ', '\\040')]
- if strip:
- for d in self._data_locators:
- m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
- manifest_text.append(m.group(0))
- else:
- manifest_text.extend([d.locator for d in self._data_locators])
- manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
- for seg in f.segments])
- for f in listvalues(self._files)])
- return ' '.join(manifest_text) + '\n'
diff --git a/sdk/python/arvados/timer.py b/sdk/python/arvados/timer.py
deleted file mode 100644
index 97bc38add0..0000000000
--- a/sdk/python/arvados/timer.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import print_function
-from builtins import object
-import time
-
-class Timer(object):
- def __init__(self, verbose=False):
- self.verbose = verbose
-
- def __enter__(self):
- self.start = time.time()
- return self
-
- def __exit__(self, *args):
- self.end = time.time()
- self.secs = self.end - self.start
- self.msecs = self.secs * 1000 # millisecs
- if self.verbose:
- print('elapsed time: %f ms' % self.msecs)
diff --git a/sdk/python/arvados/util.py b/sdk/python/arvados/util.py
index 050c67f68d..c234fa5e10 100644
--- a/sdk/python/arvados/util.py
+++ b/sdk/python/arvados/util.py
@@ -9,23 +9,24 @@ of Arvados resource types, or extend the Arvados API client (see `arvados.api`).
import errno
import fcntl
-import functools
import hashlib
import httplib2
+import operator
import os
import random
import re
import subprocess
import sys
-import warnings
import arvados.errors
from typing import (
Any,
Callable,
+ Container,
Dict,
Iterator,
+ List,
TypeVar,
Union,
)
@@ -66,65 +67,6 @@ link_uuid_pattern = re.compile(r'[a-z0-9]{5}-o0j2j-[a-z0-9]{15}')
"""Regular expression to match any Arvados link UUID"""
user_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')
"""Regular expression to match any Arvados user UUID"""
-job_uuid_pattern = re.compile(r'[a-z0-9]{5}-8i9sb-[a-z0-9]{15}')
-"""Regular expression to match any Arvados job UUID
-
-.. WARNING:: Deprecated
- Arvados job resources are deprecated and will be removed in a future
- release. Prefer the containers API instead.
-"""
-
-def _deprecated(version=None, preferred=None):
- """Mark a callable as deprecated in the SDK
-
- This will wrap the callable to emit as a DeprecationWarning
- and add a deprecation notice to its docstring.
-
- If the following arguments are given, they'll be included in the
- notices:
-
- * preferred: str | None --- The name of an alternative that users should
- use instead.
-
- * version: str | None --- The version of Arvados when the callable is
- scheduled to be removed.
- """
- if version is None:
- version = ''
- else:
- version = f' and scheduled to be removed in Arvados {version}'
- if preferred is None:
- preferred = ''
- else:
- preferred = f' Prefer {preferred} instead.'
- def deprecated_decorator(func):
- fullname = f'{func.__module__}.{func.__qualname__}'
- parent, _, name = fullname.rpartition('.')
- if name == '__init__':
- fullname = parent
- warning_msg = f'{fullname} is deprecated{version}.{preferred}'
- @functools.wraps(func)
- def deprecated_wrapper(*args, **kwargs):
- warnings.warn(warning_msg, DeprecationWarning, 2)
- return func(*args, **kwargs)
- # Get func's docstring without any trailing newline or empty lines.
- func_doc = re.sub(r'\n\s*$', '', func.__doc__ or '')
- match = re.search(r'\n([ \t]+)\S', func_doc)
- indent = '' if match is None else match.group(1)
- warning_doc = f'\n\n{indent}.. WARNING:: Deprecated\n{indent} {warning_msg}'
- # Make the deprecation notice the second "paragraph" of the
- # docstring if possible. Otherwise append it.
- docstring, count = re.subn(
- rf'\n[ \t]*\n{indent}',
- f'{warning_doc}\n\n{indent}',
- func_doc,
- count=1,
- )
- if not count:
- docstring = f'{func_doc.lstrip()}{warning_doc}'
- deprecated_wrapper.__doc__ = docstring
- return deprecated_wrapper
- return deprecated_decorator
def is_hex(s: str, *length_args: int) -> bool:
"""Indicate whether a string is a hexadecimal number
@@ -156,11 +98,13 @@ def is_hex(s: str, *length_args: int) -> bool:
good_len = True
return bool(good_len and HEX_RE.match(s))
+
def keyset_list_all(
fn: Callable[..., 'arvados.api_resources.ArvadosAPIRequest'],
order_key: str="created_at",
num_retries: int=0,
ascending: bool=True,
+ key_fields: Container[str]=('uuid',),
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
"""Iterate all Arvados resources from an API list call
@@ -191,29 +135,41 @@ def keyset_list_all(
all fields will be sorted in `'asc'` (ascending) order. Otherwise, all
fields will be sorted in `'desc'` (descending) order.
+ * key_fields: Container[str] --- One or two fields that constitute
+ a unique key for returned items. Normally this should be the
+ default value `('uuid',)`, unless `fn` returns
+ computed_permissions records, in which case it should be
+ `('user_uuid', 'target_uuid')`. If two fields are given, one of
+ them must be equal to `order_key`.
+
Additional keyword arguments will be passed directly to `fn` for each API
call. Note that this function sets `count`, `limit`, and `order` as part of
its work.
+
"""
+ tiebreak_keys = set(key_fields) - {order_key}
+ if len(tiebreak_keys) == 0:
+ tiebreak_key = 'uuid'
+ elif len(tiebreak_keys) == 1:
+ tiebreak_key = tiebreak_keys.pop()
+ else:
+ raise arvados.errors.ArgumentError(
+ "key_fields can have at most one entry that is not order_key")
+
pagesize = 1000
kwargs["limit"] = pagesize
kwargs["count"] = 'none'
asc = "asc" if ascending else "desc"
- kwargs["order"] = ["%s %s" % (order_key, asc), "uuid %s" % asc]
+ kwargs["order"] = [f"{order_key} {asc}", f"{tiebreak_key} {asc}"]
other_filters = kwargs.get("filters", [])
- try:
- select = set(kwargs['select'])
- except KeyError:
- pass
- else:
- select.add(order_key)
- select.add('uuid')
- kwargs['select'] = list(select)
+ if 'select' in kwargs:
+ kwargs['select'] = list({*kwargs['select'], *key_fields, order_key})
nextpage = []
tot = 0
expect_full_page = True
+ key_getter = operator.itemgetter(*key_fields)
seen_prevpage = set()
seen_thispage = set()
lastitem = None
@@ -238,9 +194,10 @@ def keyset_list_all(
# In cases where there's more than one record with the
# same order key, the result could include records we
# already saw in the last page. Skip them.
- if i["uuid"] in seen_prevpage:
+ seen_key = key_getter(i)
+ if seen_key in seen_prevpage:
continue
- seen_thispage.add(i["uuid"])
+ seen_thispage.add(seen_key)
yield i
firstitem = items["items"][0]
@@ -248,8 +205,8 @@ def keyset_list_all(
if firstitem[order_key] == lastitem[order_key]:
# Got a page where every item has the same order key.
- # Switch to using uuid for paging.
- nextpage = [[order_key, "=", lastitem[order_key]], ["uuid", ">" if ascending else "<", lastitem["uuid"]]]
+ # Switch to using tiebreak key for paging.
+ nextpage = [[order_key, "=", lastitem[order_key]], [tiebreak_key, ">" if ascending else "<", lastitem[tiebreak_key]]]
prev_page_all_same_order_key = True
else:
# Start from the last order key seen, but skip the last
@@ -258,9 +215,53 @@ def keyset_list_all(
# still likely we'll end up retrieving duplicate rows.
# That's handled by tracking the "seen" rows for each page
# so they can be skipped if they show up on the next page.
- nextpage = [[order_key, ">=" if ascending else "<=", lastitem[order_key]], ["uuid", "!=", lastitem["uuid"]]]
+ nextpage = [[order_key, ">=" if ascending else "<=", lastitem[order_key]]]
+ if tiebreak_key == "uuid":
+ nextpage += [[tiebreak_key, "!=", lastitem[tiebreak_key]]]
prev_page_all_same_order_key = False
+
+def iter_computed_permissions(
+ fn: Callable[..., 'arvados.api_resources.ArvadosAPIRequest'],
+ order_key: str='user_uuid',
+ num_retries: int=0,
+ ascending: bool=True,
+ key_fields: Container[str]=('user_uuid', 'target_uuid'),
+ **kwargs: Any,
+) -> Iterator[Dict[str, Any]]:
+ """Iterate all `computed_permission` resources
+
+ This method is the same as `keyset_list_all`, except that its
+ default arguments are suitable for the computed_permissions API.
+
+ Arguments:
+
+ * fn: Callable[..., arvados.api_resources.ArvadosAPIRequest] ---
+ see `keyset_list_all`. Typically this is
+ `arvados.api_resources.ComputedPermissions.list`. Given an
+ Arvados API client named `arv`, a typical call is
+ `iter_computed_permissions(arv.computed_permissions().list)`.
+
+ * order_key: str --- see `keyset_list_all`. Default
+ `'user_uuid'`.
+
+ * num_retries: int --- see `keyset_list_all`.
+
+ * ascending: bool --- see `keyset_list_all`.
+
+ * key_fields: Container[str] --- see `keyset_list_all`. Default
+ `('user_uuid', 'target_uuid')`.
+
+ """
+ return keyset_list_all(
+ fn=fn,
+ order_key=order_key,
+ num_retries=num_retries,
+ ascending=ascending,
+ key_fields=key_fields,
+ **kwargs)
+
+
def ca_certs_path(fallback: T=httplib2.CA_CERTS) -> Union[str, T]:
"""Return the path of the best available source of CA certificates
@@ -299,6 +300,7 @@ def ca_certs_path(fallback: T=httplib2.CA_CERTS) -> Union[str, T]:
return ca_certs_path
return fallback
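
`keyset_list_all` pages on the pair (`order_key`, tiebreak key), so each returned row needs a unique key; `computed_permission` rows have no `uuid`, which is why `iter_computed_permissions` pages on (`user_uuid`, `target_uuid`) instead. A hedged usage sketch against a live cluster:

```python
import arvados
import arvados.util

arv = arvados.api('v1')

# Pages on (created_at, uuid) under the hood.
for coll in arvados.util.keyset_list_all(arv.collections().list):
    print(coll['uuid'])

# computed_permission rows have no uuid, so this wrapper pages on
# (user_uuid, target_uuid) instead.
for perm in arvados.util.iter_computed_permissions(
        arv.computed_permissions().list):
    print(perm['user_uuid'], perm['target_uuid'])
```
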
+
def new_request_id() -> str:
"""Return a random request ID
@@ -317,6 +319,7 @@ def new_request_id() -> str:
n = n // 36
return rid
+
def get_config_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
"""Return an Arvados cluster's configuration, with caching
@@ -336,6 +339,7 @@ def get_config_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
svc._cached_config = svc.configs().get().execute()
return svc._cached_config
+
def get_vocabulary_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
"""Return an Arvados cluster's vocabulary, with caching
@@ -359,6 +363,7 @@ def get_vocabulary_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
svc._cached_vocabulary = svc.vocabularies().get().execute()
return svc._cached_vocabulary
+
def trim_name(collectionname: str) -> str:
"""Limit the length of a name to fit within Arvados API limits
@@ -382,354 +387,22 @@ def trim_name(collectionname: str) -> str:
return collectionname
-@_deprecated('3.0', 'arvados.util.keyset_list_all')
-def list_all(fn, num_retries=0, **kwargs):
- # Default limit to (effectively) api server's MAX_LIMIT
- kwargs.setdefault('limit', sys.maxsize)
- items = []
- offset = 0
- items_available = sys.maxsize
- while len(items) < items_available:
- c = fn(offset=offset, **kwargs).execute(num_retries=num_retries)
- items += c['items']
- items_available = c['items_available']
- offset = c['offset'] + len(c['items'])
- return items
-
-@_deprecated('3.0')
-def clear_tmpdir(path=None):
- """
- Ensure the given directory (or TASK_TMPDIR if none given)
- exists and is empty.
- """
- from arvados import current_task
- if path is None:
- path = current_task().tmpdir
- if os.path.exists(path):
- p = subprocess.Popen(['rm', '-rf', path])
- stdout, stderr = p.communicate(None)
- if p.returncode != 0:
- raise Exception('rm -rf %s: %s' % (path, stderr))
- os.mkdir(path)
-
-@_deprecated('3.0', 'subprocess.run')
-def run_command(execargs, **kwargs):
- kwargs.setdefault('stdin', subprocess.PIPE)
- kwargs.setdefault('stdout', subprocess.PIPE)
- kwargs.setdefault('stderr', sys.stderr)
- kwargs.setdefault('close_fds', True)
- kwargs.setdefault('shell', False)
- p = subprocess.Popen(execargs, **kwargs)
- stdoutdata, stderrdata = p.communicate(None)
- if p.returncode != 0:
- raise arvados.errors.CommandFailedError(
- "run_command %s exit %d:\n%s" %
- (execargs, p.returncode, stderrdata))
- return stdoutdata, stderrdata
-
-@_deprecated('3.0')
-def git_checkout(url, version, path):
- from arvados import current_job
- if not re.search('^/', path):
- path = os.path.join(current_job().tmpdir, path)
- if not os.path.exists(path):
- run_command(["git", "clone", url, path],
- cwd=os.path.dirname(path))
- run_command(["git", "checkout", version],
- cwd=path)
- return path
-
-@_deprecated('3.0')
-def tar_extractor(path, decompress_flag):
- return subprocess.Popen(["tar",
- "-C", path,
- ("-x%sf" % decompress_flag),
- "-"],
- stdout=None,
- stdin=subprocess.PIPE, stderr=sys.stderr,
- shell=False, close_fds=True)
-
-@_deprecated('3.0', 'arvados.collection.Collection.open and the tarfile module')
-def tarball_extract(tarball, path):
- """Retrieve a tarball from Keep and extract it to a local
- directory. Return the absolute path where the tarball was
- extracted. If the top level of the tarball contained just one
- file or directory, return the absolute path of that single
- item.
-
- tarball -- collection locator
- path -- where to extract the tarball: absolute, or relative to job tmp
- """
- from arvados import current_job
- from arvados.collection import CollectionReader
- if not re.search('^/', path):
- path = os.path.join(current_job().tmpdir, path)
- lockfile = open(path + '.lock', 'w')
- fcntl.flock(lockfile, fcntl.LOCK_EX)
- try:
- os.stat(path)
- except OSError:
- os.mkdir(path)
- already_have_it = False
- try:
- if os.readlink(os.path.join(path, '.locator')) == tarball:
- already_have_it = True
- except OSError:
- pass
- if not already_have_it:
-
- # emulate "rm -f" (i.e., if the file does not exist, we win)
- try:
- os.unlink(os.path.join(path, '.locator'))
- except OSError:
- if os.path.exists(os.path.join(path, '.locator')):
- os.unlink(os.path.join(path, '.locator'))
-
- for f in CollectionReader(tarball).all_files():
- f_name = f.name()
- if f_name.endswith(('.tbz', '.tar.bz2')):
- p = tar_extractor(path, 'j')
- elif f_name.endswith(('.tgz', '.tar.gz')):
- p = tar_extractor(path, 'z')
- elif f_name.endswith('.tar'):
- p = tar_extractor(path, '')
- else:
- raise arvados.errors.AssertionError(
- "tarball_extract cannot handle filename %s" % f.name())
- while True:
- buf = f.read(2**20)
- if len(buf) == 0:
- break
- p.stdin.write(buf)
- p.stdin.close()
- p.wait()
- if p.returncode != 0:
- lockfile.close()
- raise arvados.errors.CommandFailedError(
- "tar exited %d" % p.returncode)
- os.symlink(tarball, os.path.join(path, '.locator'))
- tld_extracts = [f for f in os.listdir(path) if f != '.locator']
- lockfile.close()
- if len(tld_extracts) == 1:
- return os.path.join(path, tld_extracts[0])
- return path
-
-@_deprecated('3.0', 'arvados.collection.Collection.open and the zipfile module')
-def zipball_extract(zipball, path):
- """Retrieve a zip archive from Keep and extract it to a local
- directory. Return the absolute path where the archive was
- extracted. If the top level of the archive contained just one
- file or directory, return the absolute path of that single
- item.
-
- zipball -- collection locator
- path -- where to extract the archive: absolute, or relative to job tmp
- """
- from arvados import current_job
- from arvados.collection import CollectionReader
- if not re.search('^/', path):
- path = os.path.join(current_job().tmpdir, path)
- lockfile = open(path + '.lock', 'w')
- fcntl.flock(lockfile, fcntl.LOCK_EX)
- try:
- os.stat(path)
- except OSError:
- os.mkdir(path)
- already_have_it = False
- try:
- if os.readlink(os.path.join(path, '.locator')) == zipball:
- already_have_it = True
- except OSError:
- pass
- if not already_have_it:
-
- # emulate "rm -f" (i.e., if the file does not exist, we win)
- try:
- os.unlink(os.path.join(path, '.locator'))
- except OSError:
- if os.path.exists(os.path.join(path, '.locator')):
- os.unlink(os.path.join(path, '.locator'))
-
- for f in CollectionReader(zipball).all_files():
- if not f.name().endswith('.zip'):
- raise arvados.errors.NotImplementedError(
- "zipball_extract cannot handle filename %s" % f.name())
- zip_filename = os.path.join(path, os.path.basename(f.name()))
- zip_file = open(zip_filename, 'wb')
- while True:
- buf = f.read(2**20)
- if len(buf) == 0:
- break
- zip_file.write(buf)
- zip_file.close()
-
- p = subprocess.Popen(["unzip",
- "-q", "-o",
- "-d", path,
- zip_filename],
- stdout=None,
- stdin=None, stderr=sys.stderr,
- shell=False, close_fds=True)
- p.wait()
- if p.returncode != 0:
- lockfile.close()
- raise arvados.errors.CommandFailedError(
- "unzip exited %d" % p.returncode)
- os.unlink(zip_filename)
- os.symlink(zipball, os.path.join(path, '.locator'))
- tld_extracts = [f for f in os.listdir(path) if f != '.locator']
- lockfile.close()
- if len(tld_extracts) == 1:
- return os.path.join(path, tld_extracts[0])
- return path
-
-@_deprecated('3.0', 'arvados.collection.Collection')
-def collection_extract(collection, path, files=[], decompress=True):
- """Retrieve a collection from Keep and extract it to a local
- directory. Return the absolute path where the collection was
- extracted.
-
- collection -- collection locator
- path -- where to extract: absolute, or relative to job tmp
- """
- from arvados import current_job
- from arvados.collection import CollectionReader
- matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
- if matches:
- collection_hash = matches.group(1)
- else:
- collection_hash = hashlib.md5(collection).hexdigest()
- if not re.search('^/', path):
- path = os.path.join(current_job().tmpdir, path)
- lockfile = open(path + '.lock', 'w')
- fcntl.flock(lockfile, fcntl.LOCK_EX)
- try:
- os.stat(path)
- except OSError:
- os.mkdir(path)
- already_have_it = False
- try:
- if os.readlink(os.path.join(path, '.locator')) == collection_hash:
- already_have_it = True
- except OSError:
- pass
-
- # emulate "rm -f" (i.e., if the file does not exist, we win)
- try:
- os.unlink(os.path.join(path, '.locator'))
- except OSError:
- if os.path.exists(os.path.join(path, '.locator')):
- os.unlink(os.path.join(path, '.locator'))
-
- files_got = []
- for s in CollectionReader(collection).all_streams():
- stream_name = s.name()
- for f in s.all_files():
- if (files == [] or
- ((f.name() not in files_got) and
- (f.name() in files or
- (decompress and f.decompressed_name() in files)))):
- outname = f.decompressed_name() if decompress else f.name()
- files_got += [outname]
- if os.path.exists(os.path.join(path, stream_name, outname)):
- continue
- mkdir_dash_p(os.path.dirname(os.path.join(path, stream_name, outname)))
- outfile = open(os.path.join(path, stream_name, outname), 'wb')
- for buf in (f.readall_decompressed() if decompress
- else f.readall()):
- outfile.write(buf)
- outfile.close()
- if len(files_got) < len(files):
- raise arvados.errors.AssertionError(
- "Wanted files %s but only got %s from %s" %
- (files, files_got,
- [z.name() for z in CollectionReader(collection).all_files()]))
- os.symlink(collection_hash, os.path.join(path, '.locator'))
-
- lockfile.close()
- return path
-
-@_deprecated('3.0', 'pathlib.Path().mkdir(parents=True, exist_ok=True)')
-def mkdir_dash_p(path):
- if not os.path.isdir(path):
- try:
- os.makedirs(path)
- except OSError as e:
- if e.errno == errno.EEXIST and os.path.isdir(path):
- # It is not an error if someone else creates the
- # directory between our exists() and makedirs() calls.
- pass
- else:
- raise
-@_deprecated('3.0', 'arvados.collection.Collection')
-def stream_extract(stream, path, files=[], decompress=True):
- """Retrieve a stream from Keep and extract it to a local
- directory. Return the absolute path where the stream was
- extracted.
+def iter_storage_classes(
+ config: Dict[str, Any],
+ check: Callable[[Dict[str, Any]], bool]=operator.methodcaller('get', 'Default'),
+ fallback: str="default",
+) -> Iterator[str]:
+ """Read storage classes from the API client config
- stream -- StreamReader object
- path -- where to extract: absolute, or relative to job tmp
- """
- from arvados import current_job
- if not re.search('^/', path):
- path = os.path.join(current_job().tmpdir, path)
- lockfile = open(path + '.lock', 'w')
- fcntl.flock(lockfile, fcntl.LOCK_EX)
- try:
- os.stat(path)
- except OSError:
- os.mkdir(path)
-
- files_got = []
- for f in stream.all_files():
- if (files == [] or
- ((f.name() not in files_got) and
- (f.name() in files or
- (decompress and f.decompressed_name() in files)))):
- outname = f.decompressed_name() if decompress else f.name()
- files_got += [outname]
- if os.path.exists(os.path.join(path, outname)):
- os.unlink(os.path.join(path, outname))
- mkdir_dash_p(os.path.dirname(os.path.join(path, outname)))
- outfile = open(os.path.join(path, outname), 'wb')
- for buf in (f.readall_decompressed() if decompress
- else f.readall()):
- outfile.write(buf)
- outfile.close()
- if len(files_got) < len(files):
- raise arvados.errors.AssertionError(
- "Wanted files %s but only got %s from %s" %
- (files, files_got, [z.name() for z in stream.all_files()]))
- lockfile.close()
- return path
-
-@_deprecated('3.0', 'os.walk')
-def listdir_recursive(dirname, base=None, max_depth=None):
- """listdir_recursive(dirname, base, max_depth)
-
- Return a list of file and directory names found under dirname.
-
- If base is not None, prepend "{base}/" to each returned name.
-
- If max_depth is None, descend into directories and return only the
- names of files found in the directory tree.
-
- If max_depth is a non-negative integer, stop descending into
- directories at the given depth, and at that point return directory
- names instead.
-
- If max_depth==0 (and base is None) this is equivalent to
- sorted(os.listdir(dirname)).
+ This function yields the name of each storage class in `config` that
+ passes `check`. If no classes match but `fallback` is given, the
+ fallback name is yielded instead.
"""
- allfiles = []
- for ent in sorted(os.listdir(dirname)):
- ent_path = os.path.join(dirname, ent)
- ent_base = os.path.join(base, ent) if base else ent
- if os.path.isdir(ent_path) and max_depth != 0:
- allfiles += listdir_recursive(
- ent_path, base=ent_base,
- max_depth=(max_depth-1 if max_depth else None))
- else:
- allfiles += [ent_base]
- return allfiles
+ any_found = False
+ for key, value in config.get("StorageClasses", {}).items():
+ if check(value):
+ any_found = True
+ yield key
+ if fallback and not any_found:
+ yield fallback
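
The default `check`, `operator.methodcaller('get', 'Default')`, keeps exactly the classes whose config sets `Default: true`. A self-contained sketch using a hypothetical config fragment:

```python
from arvados.util import iter_storage_classes

# Hypothetical fragment shaped like the cluster config the SDK reads.
config = {
    'StorageClasses': {
        'default': {'Default': True},
        'archive': {'Default': False},
    },
}

print(list(iter_storage_classes(config)))   # -> ['default']
print(list(iter_storage_classes({})))       # -> ['default'] (fallback)
print(list(iter_storage_classes(
    config, check=lambda cls: not cls.get('Default'))))  # -> ['archive']
```
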
diff --git a/sdk/python/arvados_version.py b/sdk/python/arvados_version.py
index 794b6afe42..cafc7391b4 100644
--- a/sdk/python/arvados_version.py
+++ b/sdk/python/arvados_version.py
@@ -26,6 +26,7 @@ PACKAGE_DEPENDENCY_MAP = {
'arvados-user-activity': ['arvados-python-client'],
'arvados_fuse': ['arvados-python-client'],
'crunchstat_summary': ['arvados-python-client'],
+ 'arvados_cluster_activity': ['arvados-python-client'],
}
PACKAGE_MODULE_MAP = {
'arvados-cwl-runner': 'arvados_cwl',
@@ -34,6 +35,7 @@ PACKAGE_MODULE_MAP = {
'arvados-user-activity': 'arvados_user_activity',
'arvados_fuse': 'arvados_fuse',
'crunchstat_summary': 'crunchstat_summary',
+ 'arvados_cluster_activity': 'arvados_cluster_activity',
}
PACKAGE_SRCPATH_MAP = {
'arvados-cwl-runner': Path('sdk', 'cwl'),
@@ -42,6 +44,7 @@ PACKAGE_SRCPATH_MAP = {
'arvados-user-activity': Path('tools', 'user-activity'),
'arvados_fuse': Path('services', 'fuse'),
'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+ 'arvados_cluster_activity': Path('tools', 'cluster-activity'),
}
ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -72,14 +75,6 @@ if REPO_PATH is None:
if (SETUP_DIR / mod_name).is_dir()
)
-def short_tests_only(arglist=sys.argv):
- try:
- arglist.remove('--short-tests-only')
- except ValueError:
- return False
- else:
- return True
-
def git_log_output(path, *args):
return subprocess.check_output(
['git', '-C', str(REPO_PATH),
@@ -120,7 +115,7 @@ def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
return read_version(setup_dir, module)
else:
version = git_version_at_commit()
- version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ # Strip a "development-" prefix explicitly: str.lstrip() removes a
+ # character set, not a prefix, and could mangle other version strings.
+ if version.startswith("development-"):
+     version = version[len("development-"):]
save_version(setup_dir, module, version)
return version
diff --git a/sdk/python/discovery2pydoc.py b/sdk/python/discovery2pydoc.py
index 70a51371ac..df837eaac0 100755
--- a/sdk/python/discovery2pydoc.py
+++ b/sdk/python/discovery2pydoc.py
@@ -32,11 +32,33 @@ import urllib.request
from typing import (
Any,
Callable,
+ Iterator,
Mapping,
Optional,
Sequence,
)
+RESOURCE_SCHEMA_MAP = {
+ # Special cases for iter_resource_schemas that can't be generated
+ # automatically. Note these schemas may not actually be defined.
+ 'sys': 'Sys',
+ 'vocabularies': 'Vocabulary',
+}
+
+def iter_resource_schemas(name: str) -> Iterator[str]:
+ try:
+ schema_name = RESOURCE_SCHEMA_MAP[name]
+ except KeyError:
+ # Remove trailing 's'
+ schema_name = name[:-1]
+ schema_name = re.sub(
+ r'(^|_)(\w)',
+ lambda match: match.group(2).capitalize(),
+ schema_name,
+ )
+ yield schema_name
+ yield f'{schema_name}List'
+
LOWERCASE = operator.methodcaller('lower')
NAME_KEY = operator.attrgetter('name')
STDSTREAM_PATH = pathlib.Path('-')
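
`iter_resource_schemas` derives schema names from plural snake_case resource names: drop the trailing 's', then capitalize the first letter and any letter following an underscore. A standalone sketch of that transform:

```python
import re

def resource_to_schema(name):
    # Mirrors iter_resource_schemas (ignoring the special-case map).
    return re.sub(
        r'(^|_)(\w)',
        lambda match: match.group(2).capitalize(),
        name[:-1],
    )

print(resource_to_schema('collections'))                # -> Collection
print(resource_to_schema('api_client_authorizations'))  # -> ApiClientAuthorization
```
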
@@ -52,27 +74,29 @@ _DEPRECATED_NOTICE = '''
.. WARNING:: Deprecated
This resource is deprecated in the Arvados API.
'''
-_DEPRECATED_RESOURCES = frozenset([
- 'Humans',
- 'JobTasks',
- 'Jobs',
- 'KeepDisks',
- 'Nodes',
- 'PipelineInstances',
- 'PipelineTemplates',
- 'Specimens'
- 'Traits',
-])
-_DEPRECATED_SCHEMAS = frozenset([
- *(name[:-1] for name in _DEPRECATED_RESOURCES),
- *(f'{name[:-1]}List' for name in _DEPRECATED_RESOURCES),
-])
+# _DEPRECATED_RESOURCES contains string keys of resources in the discovery
+# document that are currently deprecated.
+_DEPRECATED_RESOURCES = frozenset()
+_DEPRECATED_SCHEMAS = frozenset(
+ schema_name
+ for resource_name in _DEPRECATED_RESOURCES
+ for schema_name in iter_resource_schemas(resource_name)
+)
-_LIST_PYDOC = '''
+_LIST_UTIL_METHODS = {
+ 'ComputedPermissionList': 'arvados.util.iter_computed_permissions',
+ 'ComputedPermissions': 'arvados.util.iter_computed_permissions',
+}
+_LIST_METHOD_PYDOC = '''
+This method returns a single page of `{cls_name}` objects that match your search
+criteria. If you just want to iterate all matching objects, consider
+using `{list_util_func}`.
+'''
+_LIST_SCHEMA_PYDOC = '''
This is the dictionary object returned when you call `{cls_name}s.list`.
If you just want to iterate all objects that match your search criteria,
-consider using `arvados.util.keyset_list_all`.
+consider using `{list_util_func}`.
If you work with this raw object, the keys of the dictionary are documented
below, along with their types. The `items` key maps to a list of matching
`{cls_name}` objects.
@@ -106,11 +130,7 @@ import googleapiclient.discovery
import googleapiclient.http
import httplib2
import sys
-from typing import Any, Dict, Generic, List, Optional, TypeVar
-if sys.version_info < (3, 8):
- from typing_extensions import TypedDict
-else:
- from typing import TypedDict
+from typing import Any, Dict, Generic, List, Literal, Optional, TypedDict, TypeVar
# ST represents an API response type
ST = TypeVar('ST', bound=TypedDict)
@@ -190,53 +210,74 @@ class Parameter(inspect.Parameter):
self._spec = spec
if keyword.iskeyword(name):
name += '_'
+ annotation = get_type_annotation(self._spec['type'])
+ if self.is_required():
+ default = inspect.Parameter.empty
+ else:
+ default = self.default_value()
+ if default is None:
+ annotation = f'Optional[{annotation}]'
super().__init__(
name,
inspect.Parameter.KEYWORD_ONLY,
- annotation=get_type_annotation(self._spec['type']),
- # In normal Python the presence of a default tells you whether or
- # not an argument is required. In the API the `required` flag tells
- # us that, and defaults are specified inconsistently. Don't show
- # defaults in the signature: it adds noise and makes things more
- # confusing for the reader about what's required and what's
- # optional. The docstring can explain in better detail, including
- # the default value.
- default=inspect.Parameter.empty,
+ annotation=annotation,
+ default=default,
)
+ @classmethod
+ def from_request(cls, spec: Mapping[str, Any]) -> 'Parameter':
+ try:
+ # Unpack the single key and value out of properties
+ (key, val_spec), = spec['properties'].items()
+ except (KeyError, ValueError):
+ # ValueError if there was not exactly one property
+ raise NotImplementedError(
+ "only exactly one request parameter is currently supported",
+ ) from None
+ val_type = get_type_annotation(val_spec['$ref'])
+ return cls('body', {
+ 'description': f"""A dictionary with a single item `{key!r}`.
+Its value is a `{val_type}` dictionary defining the attributes to set.""",
+ 'required': spec['required'],
+ 'type': f'Dict[Literal[{key!r}], {val_type}]',
+ })
+
def default_value(self) -> object:
try:
src_value: str = self._spec['default']
except KeyError:
return None
- if src_value == 'true':
- return True
- elif src_value == 'false':
- return False
- elif src_value.isdigit():
- return int(src_value)
- else:
+ try:
+ return json.loads(src_value)
+ except ValueError:
return src_value
def is_required(self) -> bool:
return self._spec['required']
def doc(self) -> str:
- default_value = self.default_value()
- if default_value is None:
+ if self.default is None or self.default is inspect.Parameter.empty:
default_doc = ''
else:
- default_doc = f"Default {default_value!r}."
- description = self._spec['description']
- doc_parts = [f'{self.api_name}: {self.annotation}']
- if description or default_doc:
- doc_parts.append('---')
- if description:
- doc_parts.append(description)
- if default_doc:
- doc_parts.append(default_doc)
+ default_doc = f"Default `{self.default!r}`."
+ description = self._spec['description'].rstrip()
+ # Does the description contain multiple paragraphs of real text
+ # (excluding, e.g., hyperlink targets)?
+ if re.search(r'\n\s*\n\s*[\w*]', description):
+ # Yes: append the default doc as a separate paragraph.
+ description += f'\n\n{default_doc}'
+ else:
+ # No: append the default doc to the first (and only) paragraph.
+ description = re.sub(
+ r'(\n\s*\n|\s*$)',
+ rf' {default_doc}\1',
+ description,
+ count=1,
+ )
+ # Align all lines with the list bullet we're formatting it in.
+ description = re.sub(r'\n(\S)', r'\n \1', description)
return f'''
-* {' '.join(doc_parts)}
+* {self.api_name}: {self.annotation} --- {description}
'''
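
Two behaviors above are worth spelling out. `Parameter.default_value` now decodes defaults with `json.loads`, falling back to the raw string, so booleans, numbers, and lists all come through with their natural types. And `Parameter.from_request` renders a request body as, e.g., `Dict[Literal['collection'], Collection]` for a schema with a single `collection` property (the key name here is illustrative). A sketch of the decoding fallback:

```python
import json

def decode_default(src_value):
    # Same decode-or-fall-back logic as Parameter.default_value above.
    try:
        return json.loads(src_value)
    except ValueError:
        return src_value

print(decode_default('true'))             # -> True
print(decode_default('100'))              # -> 100
print(decode_default('["uuid"]'))         # -> ['uuid']
print(decode_default('created_at desc'))  # -> 'created_at desc' (not JSON)
```
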
@@ -245,15 +286,16 @@ class Method:
self,
name: str,
spec: Mapping[str, Any],
+ cls_name: Optional[str]=None,
annotate: Callable[[Annotation], Annotation]=str,
) -> None:
self.name = name
self._spec = spec
+ self.cls_name = cls_name
self._annotate = annotate
self._required_params = []
self._optional_params = []
- for param_name, param_spec in spec['parameters'].items():
- param = Parameter(param_name, param_spec)
+ for param in self._iter_parameters():
if param.is_required():
param_list = self._required_params
else:
@@ -262,6 +304,16 @@ class Method:
self._required_params.sort(key=NAME_KEY)
self._optional_params.sort(key=NAME_KEY)
+ def _iter_parameters(self) -> Iterator[Parameter]:
+ try:
+ body = self._spec['request']
+ except KeyError:
+ pass
+ else:
+ yield Parameter.from_request(body)
+ for name, spec in self._spec['parameters'].items():
+ yield Parameter(name, spec)
+
def signature(self) -> inspect.Signature:
parameters = [
inspect.Parameter('self', inspect.Parameter.POSITIONAL_OR_KEYWORD),
@@ -279,6 +331,15 @@ class Method:
doc_lines = self._spec['description'].splitlines(keepends=True)[doc_slice]
if not doc_lines[-1].endswith('\n'):
doc_lines.append('\n')
+ try:
+ returns_list = self._spec['response']['$ref'].endswith('List')
+ except KeyError:
+ returns_list = False
+ if returns_list and self.cls_name is not None:
+ doc_lines.append(_LIST_METHOD_PYDOC.format(
+ cls_name=self.cls_name[:-1],
+ list_util_func=_LIST_UTIL_METHODS.get(self.cls_name, 'arvados.util.keyset_list_all'),
+ ))
if self._required_params:
doc_lines.append("\nRequired parameters:\n")
doc_lines.extend(param.doc() for param in self._required_params)
@@ -296,12 +357,12 @@ def document_schema(name: str, spec: Mapping[str, Any]) -> str:
if name in _DEPRECATED_SCHEMAS:
description += _DEPRECATED_NOTICE
if name.endswith('List'):
- desc_fmt = _LIST_PYDOC
- cls_name = name[:-4]
+ description += _LIST_SCHEMA_PYDOC.format(
+ cls_name=name[:-4],
+ list_util_func=_LIST_UTIL_METHODS.get(name, 'arvados.util.keyset_list_all'),
+ )
else:
- desc_fmt = _SCHEMA_PYDOC
- cls_name = name
- description += desc_fmt.format(cls_name=cls_name)
+ description += _SCHEMA_PYDOC.format(cls_name=name)
lines = [
f"class {name}(TypedDict, total=False):",
to_docstring(description, 4),
@@ -324,7 +385,7 @@ def document_schema(name: str, spec: Mapping[str, Any]) -> str:
field_doc: str = field_spec.get('description', '')
if field_spec['type'] == 'datetime':
- field_doc += "\n\nString in ISO 8601 datetime format. Pass it to `ciso8601.parse_datetime` to build a `datetime.datetime`."
+ field_doc += " Pass this to `ciso8601.parse_datetime` to build a `datetime.datetime`."
if field_doc:
lines.append(to_docstring(field_doc, 4))
lines.append('\n')
@@ -336,13 +397,13 @@ def document_resource(name: str, spec: Mapping[str, Any]) -> str:
if class_name in _DEPRECATED_RESOURCES:
docstring += _DEPRECATED_NOTICE
methods = [
- Method(key, meth_spec, 'ArvadosAPIRequest[{}]'.format)
+ Method(key, meth_spec, class_name, 'ArvadosAPIRequest[{}]'.format)
for key, meth_spec in spec['methods'].items()
if key not in _ALIASED_METHODS
]
return f'''class {class_name}:
{to_docstring(docstring, 4)}
-{''.join(method.doc(slice(1)) for method in sorted(methods, key=NAME_KEY))}
+{''.join(method.doc() for method in sorted(methods, key=NAME_KEY))}
'''
def parse_arguments(arglist: Optional[Sequence[str]]) -> argparse.Namespace:
@@ -394,19 +455,25 @@ def main(arglist: Optional[Sequence[str]]=None) -> int:
print(
to_docstring(_MODULE_PYDOC, indent=0),
_MODULE_PRELUDE,
+ _REQUEST_CLASS,
sep='\n', file=args.out_file,
)
- schemas = sorted(discovery_document['schemas'].items())
- for name, schema_spec in schemas:
- print(document_schema(name, schema_spec), file=args.out_file)
-
+ schemas = dict(discovery_document['schemas'])
resources = sorted(discovery_document['resources'].items())
for name, resource_spec in resources:
+ for schema_name in iter_resource_schemas(name):
+ try:
+ schema_spec = schemas.pop(schema_name)
+ except KeyError:
+ pass
+ else:
+ print(document_schema(schema_name, schema_spec), file=args.out_file)
print(document_resource(name, resource_spec), file=args.out_file)
+ for name, schema_spec in sorted(schemas.items()):
+ print(document_schema(name, schema_spec), file=args.out_file)
print(
- _REQUEST_CLASS,
'''class ArvadosAPIClient(googleapiclient.discovery.Resource):''',
sep='\n', file=args.out_file,
)
@@ -422,7 +489,7 @@ def main(arglist: Optional[Sequence[str]]=None) -> int:
'$ref': class_name,
},
}
- print(Method(name, method_spec).doc(), file=args.out_file)
+ print(Method(name, method_spec).doc(), end='', file=args.out_file)
args.out_file.close()
return os.EX_OK
diff --git a/sdk/python/fpm-info.sh b/sdk/python/fpm-info.sh
index 7a89cf03a0..9a068f878d 100644
--- a/sdk/python/fpm-info.sh
+++ b/sdk/python/fpm-info.sh
@@ -4,6 +4,6 @@
case "$TARGET" in
debian* | ubuntu*)
- fpm_depends+=(libcurl3-gnutls)
+ fpm_depends+=(libcurl4)
;;
esac
diff --git a/sdk/python/bin/arv-migrate-docker19 b/sdk/python/pytest.ini
old mode 100755
new mode 100644
similarity index 70%
rename from sdk/python/bin/arv-migrate-docker19
rename to sdk/python/pytest.ini
index 6aee15254a..9b1bbfd50a
--- a/sdk/python/bin/arv-migrate-docker19
+++ b/sdk/python/pytest.ini
@@ -3,5 +3,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from arvados.commands.migrate19 import main
-main()
+[pytest]
+testpaths =
+ tests
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
index e13e51609a..e2e26e4cca 100644
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import os
import sys
import re
@@ -14,7 +13,6 @@ from setuptools.command import build_py
import arvados_version
version = arvados_version.get_version()
-short_tests_only = arvados_version.short_tests_only()
README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
class BuildPython(build_py.build_py):
@@ -99,7 +97,6 @@ setup(name='arvados-python-client',
'bin/arv-get',
'bin/arv-keepdocker',
'bin/arv-ls',
- 'bin/arv-migrate-docker19',
'bin/arv-federation-migrate',
'bin/arv-normalize',
'bin/arv-put',
@@ -111,23 +108,19 @@ setup(name='arvados-python-client',
install_requires=[
*arvados_version.iter_dependencies(version),
'ciso8601 >=2.0.0',
- 'future',
- 'google-api-core <2.11.0', # 2.11.0rc1 is incompatible with google-auth<2
'google-api-python-client >=2.1.0',
- 'google-auth <2',
- 'httplib2 >=0.9.2, <0.20.2',
- 'protobuf <4.0.0dev',
- 'pycurl >=7.19.5.1, <7.45.0',
- 'pyparsing <3',
- 'ruamel.yaml >=0.15.54, <0.17.22',
+ 'google-auth',
+ 'httplib2 >=0.9.2',
+ 'pycurl >=7.19.5.1',
'setuptools >=40.3.0',
'websockets >=11.0',
+ 'boto3',
],
python_requires="~=3.8",
classifiers=[
'Programming Language :: Python :: 3',
],
test_suite='tests',
- tests_require=['pbr<1.7.0', 'mock>=1.0,<4', 'PyYAML', 'parameterized'],
+ tests_require=['PyYAML', 'parameterized'],
zip_safe=False
)
diff --git a/sdk/python/tests/arvados_testutil.py b/sdk/python/tests/arvados_testutil.py
index 35e85d1195..e6a334c611 100644
--- a/sdk/python/tests/arvados_testutil.py
+++ b/sdk/python/tests/arvados_testutil.py
@@ -2,11 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from builtins import range
-from builtins import object
import arvados
import contextlib
import errno
@@ -14,7 +9,6 @@ import hashlib
import http.client
import httplib2
import io
-import mock
import os
import pycurl
import queue
@@ -23,11 +17,8 @@ import sys
import tempfile
import unittest
-if sys.version_info >= (3, 0):
- from io import StringIO, BytesIO
-else:
- from cStringIO import StringIO
- BytesIO = StringIO
+from io import StringIO, BytesIO
+from unittest import mock
# Use this hostname when you want to make sure the traffic will be
# instantly refused. 100::/64 is a dedicated black hole.
@@ -86,13 +77,8 @@ def redirected_streams(stdout=None, stderr=None):
class VersionChecker(object):
def assertVersionOutput(self, out, err):
- if sys.version_info >= (3, 0):
- self.assertEqual(err.getvalue(), '')
- v = out.getvalue()
- else:
- # Python 2 writes version info on stderr.
- self.assertEqual(out.getvalue(), '')
- v = err.getvalue()
+ self.assertEqual(err.getvalue(), '')
+ v = out.getvalue()
self.assertRegex(v, r"[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+)?$\n")
@@ -148,6 +134,7 @@ class FakeCurl(object):
return self._resp_code
raise Exception
+
def mock_keep_responses(body, *codes, **headers):
"""Patch pycurl to return fake responses and raise exceptions.
@@ -173,21 +160,6 @@ def mock_keep_responses(body, *codes, **headers):
return mock.patch('pycurl.Curl', cm)
-class MockStreamReader(object):
- def __init__(self, name='.', *data):
- self._name = name
- self._data = b''.join([
- b if isinstance(b, bytes) else b.encode()
- for b in data])
- self._data_locators = [str_keep_locator(d) for d in data]
- self.num_retries = 0
-
- def name(self):
- return self._name
-
- def readfrom(self, start, size, num_retries=None):
- return self._data[start:start + size]
-
class ApiClientMock(object):
def api_client_mock(self):
api_mock = mock.MagicMock(name='api_client_mock')
@@ -271,16 +243,6 @@ class ArvadosBaseTestCase(unittest.TestCase):
testfile.flush()
return testfile
-if sys.version_info < (3, 0):
- # There is no assert[Not]Regex that works in both Python 2 and 3,
- # so we backport Python 3 style to Python 2.
- def assertRegex(self, *args, **kwargs):
- return self.assertRegexpMatches(*args, **kwargs)
- def assertNotRegex(self, *args, **kwargs):
- return self.assertNotRegexpMatches(*args, **kwargs)
- unittest.TestCase.assertRegex = assertRegex
- unittest.TestCase.assertNotRegex = assertNotRegex
-
def binary_compare(a, b):
if len(a) != len(b):
return False
@@ -289,14 +251,6 @@ def binary_compare(a, b):
return False
return True
-def make_block_cache(disk_cache):
- if disk_cache:
- disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
- shutil.rmtree(disk_cache_dir, ignore_errors=True)
- block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache)
- return block_cache
-
-
class DiskCacheBase:
def make_block_cache(self, disk_cache):
self.disk_cache_dir = tempfile.mkdtemp() if disk_cache else None
diff --git a/sdk/python/tests/keepstub.py b/sdk/python/tests/keepstub.py
index 6be8d8b646..a4deb5384b 100644
--- a/sdk/python/tests/keepstub.py
+++ b/sdk/python/tests/keepstub.py
@@ -2,10 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
import http.server
import hashlib
import os
@@ -20,9 +16,7 @@ from . import arvados_testutil as tutil
_debug = os.environ.get('ARVADOS_DEBUG', None)
-
class StubKeepServers(tutil.ApiClientMock):
-
def setUp(self):
super(StubKeepServers, self).setUp()
sock = socket.socket()
diff --git a/sdk/python/tests/manifest_examples.py b/sdk/python/tests/manifest_examples.py
index 050d69093c..6f448c0e58 100644
--- a/sdk/python/tests/manifest_examples.py
+++ b/sdk/python/tests/manifest_examples.py
@@ -2,10 +2,8 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import range
-from builtins import object
import arvados
+
from . import arvados_testutil as tutil
class ManifestExamples(object):
@@ -14,13 +12,13 @@ class ManifestExamples(object):
blocks_per_file=1,
files_per_stream=1,
streams=1):
- datablip = 'x' * bytes_per_block
+ datablip = b'x' * bytes_per_block
data_loc = tutil.str_keep_locator(datablip)
with tutil.mock_keep_responses(data_loc, 200):
- coll = arvados.CollectionWriter()
+ coll = arvados.collection.Collection()
for si in range(0, streams):
for fi in range(0, files_per_stream):
- with coll.open("stream{}/file{}.txt".format(si, fi)) as f:
+ with coll.open("stream{}/file{}.txt".format(si, fi), 'wb') as f:
for bi in range(0, blocks_per_file):
f.write(datablip)
return coll.manifest_text()
diff --git a/sdk/python/tests/nginx.conf b/sdk/python/tests/nginx.conf
index 446b95ca42..d935f87f89 100644
--- a/sdk/python/tests/nginx.conf
+++ b/sdk/python/tests/nginx.conf
@@ -27,7 +27,7 @@ http {
}
server {
listen {{LISTENHOST}}:{{CONTROLLERSSLPORT}} ssl;
- server_name controller ~.*;
+ server_name controller ~\.containers\. ~.*;
ssl_certificate "{{SSLCERT}}";
ssl_certificate_key "{{SSLKEY}}";
client_max_body_size 0;
@@ -46,22 +46,6 @@ http {
proxy_http_version 1.1;
}
}
- upstream arv-git-http {
- server {{UPSTREAMHOST}}:{{GITPORT}};
- }
- server {
- listen {{LISTENHOST}}:{{GITSSLPORT}} ssl;
- server_name arv-git-http git.*;
- ssl_certificate "{{SSLCERT}}";
- ssl_certificate_key "{{SSLKEY}}";
- location / {
- proxy_pass http://arv-git-http;
- proxy_set_header Host $http_host;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto https;
- proxy_redirect off;
- }
- }
upstream keepproxy {
server {{UPSTREAMHOST}}:{{KEEPPROXYPORT}};
}
diff --git a/sdk/python/tests/performance/test_a_sample.py b/sdk/python/tests/performance/test_a_sample.py
index 65015dc872..9e54b1f5d2 100644
--- a/sdk/python/tests/performance/test_a_sample.py
+++ b/sdk/python/tests/performance/test_a_sample.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import absolute_import
-from builtins import range
import unittest
from .performance_profiler import profiled
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index 787837b723..7d279c8486 100644
--- a/sdk/python/tests/run_test_server.py
+++ b/sdk/python/tests/run_test_server.py
@@ -193,11 +193,20 @@ def find_available_port():
_already_used_port[port] = True
return port
-def _wait_until_port_listens(port, timeout=10, warn=True):
+def _wait_until_port_listens(port, *, timeout=300, pid=None,
+ listener_must_be_pid=True):
"""Wait for a process to start listening on the given port.
If nothing listens on the port within the specified timeout (given
- in seconds), print a warning on stderr before returning.
+ in seconds), raise an exception.
+
+ If the `pid` argument is given and `listener_must_be_pid` is True,
+ wait for that specific process to listen on the port, not just any
+ process.
+
+ If the `pid` argument is given, give up early if that process
+ exits, and terminate it before raising on timeout.
+
"""
try:
subprocess.check_output(['which', 'netstat'])
@@ -207,17 +216,36 @@ def _wait_until_port_listens(port, timeout=10, warn=True):
file=sys.stderr)
time.sleep(0.5)
return
+ if pid and listener_must_be_pid:
+ matchpid = str(pid)
+ else:
+ matchpid = r'\d+'
deadline = time.time() + timeout
+ logged = False
+ slept = 0
while time.time() < deadline:
- if re.search(r'\ntcp.*:'+str(port)+' .* LISTEN *\n', subprocess.check_output(['netstat', '-Wln']).decode()):
+ if re.search(r'\ntcp.*:'+str(port)+' .* LISTEN +'+matchpid+'/',
+ subprocess.check_output(
+ ['netstat', '-Wlnp'],
+ stderr=subprocess.DEVNULL,
+ ).decode()):
return True
+ if pid and not os.path.exists('/proc/{}/stat'.format(pid)):
+ raise Exception("process {} does not exist"
+ " -- giving up on port {}".format(
+ pid or '', port))
+ if slept > 5 and not logged:
+ print("waiting for port {}...".format(port), file=sys.stderr)
+ logged = True
time.sleep(0.1)
- if warn:
- print(
- "WARNING: Nothing is listening on port {} (waited {} seconds).".
- format(port, timeout),
- file=sys.stderr)
- return False
+ slept += 1
+ if pid:
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except ProcessLookupError:
+ pass
+ raise Exception("process {} never listened on port {}".format(
+ pid or '', port))
def _logfilename(label):
"""Set up a labelled log file, and return a path to write logs to.
@@ -247,7 +275,7 @@ def _logfilename(label):
# us.
cat = subprocess.Popen(
stdbuf+['cat', fifo],
- stdin=open('/dev/null'),
+ stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE)
_detachedSubprocesses.append(cat)
tee = subprocess.Popen(
@@ -262,6 +290,13 @@ def _logfilename(label):
_detachedSubprocesses.append(sed)
return fifo
+def _service_environ():
+ """Return an environment mapping suitable for running an arvados
+ service process."""
+ env = dict(os.environ)
+ env['ARVADOS_USE_KEEP_ACCESSIBLE_API'] = 'true'
+ return env
+
def run(leave_running_atexit=False):
"""Ensure an API server is running, and ARVADOS_API_* env vars have
admin credentials for it.
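`_service_environ` gives every test service the same starting environment; because it copies `os.environ`, per-service additions never leak back into the parent process. A small usage sketch (the extra variable is illustrative):

    import os
    import subprocess

    def service_environ():
        # Same pattern as _service_environ above: copy, then set the flag.
        env = dict(os.environ)
        env['ARVADOS_USE_KEEP_ACCESSIBLE_API'] = 'true'
        return env

    env = service_environ()
    env['RAILS_ENV'] = 'test'           # per-service addition, as in run()
    subprocess.run(['env'], env=env)    # child sees the flag; parent is untouched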
@@ -328,13 +363,6 @@ def run(leave_running_atexit=False):
if not os.path.exists('tmp/logs'):
os.makedirs('tmp/logs')
- # Install the git repository fixtures.
- gitdir = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git')
- gittarball = os.path.join(SERVICES_SRC_DIR, 'api', 'test', 'test.git.tar')
- if not os.path.isdir(gitdir):
- os.makedirs(gitdir)
- subprocess.check_output(['tar', '-xC', gitdir, '-f', gittarball])
-
# Customizing the passenger config template is the only documented
# way to override the default passenger_stat_throttle_rate (10 s).
# In the testing environment, we want restart.txt to take effect
@@ -344,12 +372,12 @@ def run(leave_running_atexit=False):
template = f.read()
newtemplate = re.sub(r'http \{', 'http {\n passenger_stat_throttle_rate 0;', template)
if newtemplate == template:
- raise "template edit failed"
+ raise Exception("template edit failed")
with open('tmp/passenger-nginx.conf.erb', 'w') as f:
f.write(newtemplate)
port = internal_port_from_config("RailsAPI")
- env = os.environ.copy()
+ env = _service_environ()
env['RAILS_ENV'] = 'test'
env['ARVADOS_RAILS_LOG_TO_STDOUT'] = '1'
env.pop('ARVADOS_WEBSOCKETS', None)
@@ -370,7 +398,10 @@ def run(leave_running_atexit=False):
'--ssl',
'--ssl-certificate', 'tmp/self-signed.pem',
'--ssl-certificate-key', 'tmp/self-signed.key'],
- env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+ env=env,
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf)
_detachedSubprocesses.append(railsapi)
if not leave_running_atexit:
@@ -379,9 +410,9 @@ def run(leave_running_atexit=False):
my_api_host = "127.0.0.1:"+str(port)
os.environ['ARVADOS_API_HOST'] = my_api_host
- # Make sure the server has written its pid file and started
- # listening on its TCP port
- _wait_until_port_listens(port)
+ # Make sure the server is listening on its TCP port.
+ _wait_until_port_listens(port, pid=railsapi.pid, listener_must_be_pid=False)
+ # Make sure the server has written its pid file.
find_server_pid(pid_file)
reset()
@@ -403,7 +434,7 @@ def reset():
httpclient.request(
'https://{}/database/reset'.format(existing_api_host),
'POST',
- headers={'Authorization': 'OAuth2 {}'.format(token), 'Connection':'close'})
+ headers={'Authorization': 'Bearer {}'.format(token), 'Connection':'close'})
os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
os.environ['ARVADOS_API_TOKEN'] = token
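The reset request now sends the standard RFC 6750 `Bearer` authorization scheme instead of the legacy `OAuth2` scheme. A sketch of the same call with httplib2 (host and token are placeholders):

    import httplib2

    httpclient = httplib2.Http(disable_ssl_certificate_validation=True)
    response, body = httpclient.request(
        'https://zzzzz.example.com/database/reset',      # placeholder host
        'POST',
        headers={
            'Authorization': 'Bearer xxxxxxxxxxxxxxxx',  # placeholder token
            'Connection': 'close',
        },
    )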
@@ -448,11 +479,15 @@ def run_controller():
port = internal_port_from_config("Controller")
controller = subprocess.Popen(
["arvados-server", "controller"],
- stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ env=_service_environ(),
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf,
+ close_fds=True)
_detachedSubprocesses.append(controller)
with open(_pidfile('controller'), 'w') as f:
f.write(str(controller.pid))
- _wait_until_port_listens(port)
+ _wait_until_port_listens(port, pid=controller.pid)
return port
def stop_controller():
@@ -468,11 +503,15 @@ def run_ws():
logf = open(_logfilename('ws'), WRITE_MODE)
ws = subprocess.Popen(
["arvados-server", "ws"],
- stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ env=_service_environ(),
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf,
+ close_fds=True)
_detachedSubprocesses.append(ws)
with open(_pidfile('ws'), 'w') as f:
f.write(str(ws.pid))
- _wait_until_port_listens(port)
+ _wait_until_port_listens(port, pid=ws.pid)
return port
def stop_ws():
@@ -487,29 +526,35 @@ def _start_keep(n, blob_signing=False):
os.mkdir(datadir)
port = internal_port_from_config("Keepstore", idx=n)
- # Currently, if there are multiple InternalURLs for a single host,
- # the only way to tell a keepstore process which one it's supposed
- # to listen on is to supply a redacted version of the config, with
- # the other InternalURLs removed.
+ # Make a copy of the config file with BlobSigning set to the
+ # requested value.
conf = os.path.join(TEST_TMPDIR, "keep%d.yaml"%n)
confdata = get_config()
- confdata['Clusters']['zzzzz']['Services']['Keepstore']['InternalURLs'] = {"http://127.0.0.1:%d"%port: {}}
confdata['Clusters']['zzzzz']['Collections']['BlobSigning'] = blob_signing
with open(conf, 'w') as f:
yaml.safe_dump(confdata, f)
keep_cmd = ["arvados-server", "keepstore", "-config", conf]
- with open(_logfilename('keep{}'.format(n)), WRITE_MODE) as logf:
- with open('/dev/null') as _stdin:
- child = subprocess.Popen(
- keep_cmd, stdin=_stdin, stdout=logf, stderr=logf, close_fds=True)
- _detachedSubprocesses.append(child)
+ # Tell keepstore which of the InternalURLs it's supposed to listen
+ # on.
+ env = _service_environ()
+ env['ARVADOS_SERVICE_INTERNAL_URL'] = "http://127.0.0.1:%d"%port
- print('child.pid is %d'%child.pid, file=sys.stderr)
+ with open(_logfilename('keep{}'.format(n)), WRITE_MODE) as logf:
+ child = subprocess.Popen(
+ keep_cmd,
+ env=env,
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf,
+ close_fds=True)
+ _detachedSubprocesses.append(child)
+
+ print('keep{}.pid is {}'.format(n, child.pid), file=sys.stderr)
with open(_pidfile('keep{}'.format(n)), 'w') as f:
f.write(str(child.pid))
- _wait_until_port_listens(port)
+ _wait_until_port_listens(port, pid=child.pid)
return port
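Rather than writing out a redacted config with the other InternalURLs removed, the harness now tells keepstore which URL is its own via `ARVADOS_SERVICE_INTERNAL_URL`. A hypothetical sketch of the selection logic a service could apply (the real implementation lives in the Go services; all names here are illustrative):

    import os

    def pick_internal_url(internal_urls, environ=os.environ):
        """Choose which configured InternalURL this process should bind.

        `internal_urls` mirrors a Services.*.InternalURLs config map,
        e.g. {"http://127.0.0.1:25107": {}, "http://127.0.0.1:25108": {}}.
        """
        want = environ.get('ARVADOS_SERVICE_INTERNAL_URL')
        if want:
            if want not in internal_urls:
                raise ValueError('%s not found in InternalURLs' % want)
            return want
        if len(internal_urls) == 1:
            return next(iter(internal_urls))
        raise ValueError('ambiguous InternalURLs; set ARVADOS_SERVICE_INTERNAL_URL')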
@@ -524,8 +569,6 @@ def run_keep(num_servers=2, **kwargs):
for d in api.keep_services().list(filters=[['service_type','=','disk']]).execute()['items']:
api.keep_services().delete(uuid=d['uuid']).execute()
- for d in api.keep_disks().list().execute()['items']:
- api.keep_disks().delete(uuid=d['uuid']).execute()
for d in range(0, num_servers):
port = _start_keep(d, **kwargs)
@@ -536,9 +579,6 @@ def run_keep(num_servers=2, **kwargs):
'service_type': 'disk',
'service_ssl_flag': False,
}}).execute()
- api.keep_disks().create(body={
- 'keep_disk': {'keep_service_uuid': svc['uuid'] }
- }).execute()
# If keepproxy and/or keep-web is running, send SIGHUP to make
# them discover the new keepstore services.
@@ -565,16 +605,21 @@ def run_keep_proxy():
stop_keep_proxy()
port = internal_port_from_config("Keepproxy")
- env = os.environ.copy()
+ env = _service_environ()
env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
logf = open(_logfilename('keepproxy'), WRITE_MODE)
kp = subprocess.Popen(
- ['arvados-server', 'keepproxy'], env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ ['arvados-server', 'keepproxy'],
+ env=env,
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf,
+ close_fds=True)
_detachedSubprocesses.append(kp)
with open(_pidfile('keepproxy'), 'w') as f:
f.write(str(kp.pid))
- _wait_until_port_listens(port)
+ _wait_until_port_listens(port, pid=kp.pid)
print("Using API %s token %s" % (os.environ['ARVADOS_API_HOST'], auth_token('admin')), file=sys.stdout)
api = arvados.api(
@@ -592,49 +637,29 @@ def run_keep_proxy():
'service_ssl_flag': False,
}}).execute()
os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(port)
- _wait_until_port_listens(port)
def stop_keep_proxy():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
return
kill_server_pid(_pidfile('keepproxy'))
-def run_arv_git_httpd():
- if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
- return
- stop_arv_git_httpd()
-
- gitport = internal_port_from_config("GitHTTP")
- env = os.environ.copy()
- env.pop('ARVADOS_API_TOKEN', None)
- logf = open(_logfilename('githttpd'), WRITE_MODE)
- agh = subprocess.Popen(['arvados-server', 'git-httpd'],
- env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
- _detachedSubprocesses.append(agh)
- with open(_pidfile('githttpd'), 'w') as f:
- f.write(str(agh.pid))
- _wait_until_port_listens(gitport)
-
-def stop_arv_git_httpd():
- if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
- return
- kill_server_pid(_pidfile('githttpd'))
-
def run_keep_web():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
return
stop_keep_web()
keepwebport = internal_port_from_config("WebDAV")
- env = os.environ.copy()
logf = open(_logfilename('keep-web'), WRITE_MODE)
keepweb = subprocess.Popen(
['arvados-server', 'keep-web'],
- env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+ env=_service_environ(),
+ stdin=subprocess.DEVNULL,
+ stdout=logf,
+ stderr=logf)
_detachedSubprocesses.append(keepweb)
with open(_pidfile('keep-web'), 'w') as f:
f.write(str(keepweb.pid))
- _wait_until_port_listens(keepwebport)
+ _wait_until_port_listens(keepwebport, pid=keepweb.pid)
def stop_keep_web():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
@@ -656,8 +681,6 @@ def run_nginx():
nginxconf['KEEPWEBSSLPORT'] = external_port_from_config("WebDAV")
nginxconf['KEEPPROXYPORT'] = internal_port_from_config("Keepproxy")
nginxconf['KEEPPROXYSSLPORT'] = external_port_from_config("Keepproxy")
- nginxconf['GITPORT'] = internal_port_from_config("GitHTTP")
- nginxconf['GITSSLPORT'] = external_port_from_config("GitHTTP")
nginxconf['HEALTHPORT'] = internal_port_from_config("Health")
nginxconf['HEALTHSSLPORT'] = external_port_from_config("Health")
nginxconf['WSPORT'] = internal_port_from_config("Websocket")
@@ -685,11 +708,13 @@ def run_nginx():
nginx = subprocess.Popen(
['nginx',
- '-g', 'error_log stderr info; pid '+_pidfile('nginx')+';',
+ '-g', 'error_log stderr notice; pid '+_pidfile('nginx')+';',
'-c', conffile],
- env=env, stdin=open('/dev/null'), stdout=sys.stderr)
+ env=env,
+ stdin=subprocess.DEVNULL,
+ stdout=sys.stderr)
_detachedSubprocesses.append(nginx)
- _wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'])
+ _wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'], pid=nginx.pid)
def setup_config():
rails_api_port = find_available_port()
@@ -700,8 +725,6 @@ def setup_config():
workbench1_external_port = find_available_port()
workbench2_port = find_available_port()
workbench2_external_port = find_available_port()
- git_httpd_port = find_available_port()
- git_httpd_external_port = find_available_port()
health_httpd_port = find_available_port()
health_httpd_external_port = find_available_port()
keepproxy_port = find_available_port()
@@ -722,6 +745,7 @@ def setup_config():
pgconnection = {
"client_encoding": "utf8",
"host": "localhost",
+ "port": "5432",
"dbname": "arvados_test",
"user": "arvados",
"password": "insecure_arvados_test",
@@ -755,12 +779,6 @@ def setup_config():
"http://%s:%s"%(localhost, workbench2_port): {},
},
},
- "GitHTTP": {
- "ExternalURL": "https://%s:%s" % (localhost, git_httpd_external_port),
- "InternalURLs": {
- "http://%s:%s"%(localhost, git_httpd_port): {}
- },
- },
"Health": {
"ExternalURL": "https://%s:%s" % (localhost, health_httpd_external_port),
"InternalURLs": {
@@ -790,6 +808,12 @@ def setup_config():
"http://%s:%s"%(localhost, keep_web_port): {},
},
},
+ "ContainerWebServices": {
+ "ExternalURL": "https://*.containers.zzzzz.example.com",
+ "InternalURLs": {
+ "http://%s:%s"%(localhost, controller_port): {},
+ },
+ },
}
config = {
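The new `ContainerWebServices` entry carries a wildcard ExternalURL, so each container can be reached at its own hostname under `.containers.` (which is why the nginx template earlier gained the `~\.containers\.` server_name pattern). A hedged sketch of deriving a per-container URL from the wildcard (the helper name is hypothetical):

    def container_web_url(wildcard_external_url, container_uuid):
        # "https://*.containers.zzzzz.example.com" ->
        # "https://zzzzz-dz642-000000000000000.containers.zzzzz.example.com"
        return wildcard_external_url.replace('*', container_uuid, 1)

    print(container_web_url('https://*.containers.zzzzz.example.com',
                            'zzzzz-dz642-000000000000000'))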
@@ -811,6 +835,24 @@ def setup_config():
}
}
},
+ "LDAP": {
+ "Enable": False,
+ # Hostname used by lib/controller/localdb/login_docker_test
+ # Other settings are the defaults for the
+ # bitnami/openldap Docker image it uses
+ "URL": "ldap://arvados-test-openldap:1389/",
+ "StartTLS": False,
+ "SearchBase": "dc=example,dc=org",
+ "SearchBindUser": "cn=admin,dc=example,dc=org",
+ "SearchBindPassword": "adminpassword",
+ },
+ "PAM": {
+ "Enable": False,
+ # Without this specific DefaultEmailDomain, inserted users
+ # would prevent subsequent database/reset from working (see
+ # database_controller.rb).
+ "DefaultEmailDomain": "example.com",
+ },
},
"SystemLogs": {
"LogLevel": ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
@@ -831,19 +873,10 @@ def setup_config():
"BlobSigningKey": "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc",
"TrustAllContent": False,
"ForwardSlashNameSubstitution": "/",
- "TrashSweepInterval": "-1s",
- },
- "Git": {
- "Repositories": os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git', 'test'),
+ "TrashSweepInterval": "-1s", # disable, otherwise test cases can't acquire dblock
},
"Containers": {
- "JobsAPI": {
- "GitInternalDir": os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'internal.git'),
- },
"LocalKeepBlobBuffersPerVCPU": 0,
- "Logging": {
- "SweepInterval": 0, # disable, otherwise test cases can't acquire dblock
- },
"SupportedDockerImageFormats": {"v1": {}},
"ShellAccess": {
"Admin": True,
@@ -971,22 +1004,20 @@ if __name__ == "__main__":
'start_keep', 'stop_keep',
'start_keep_proxy', 'stop_keep_proxy',
'start_keep-web', 'stop_keep-web',
- 'start_githttpd', 'stop_githttpd',
'start_nginx', 'stop_nginx', 'setup_config',
]
parser = argparse.ArgumentParser()
- parser.add_argument('action', type=str, help="one of {}".format(actions))
+ parser.add_argument(
+ 'action',
+ metavar='ACTION',
+ choices=actions,
+ help="one of %(choices)s",
+ )
parser.add_argument('--auth', type=str, metavar='FIXTURE_NAME', help='Print authorization info for given api_client_authorizations fixture')
parser.add_argument('--num-keep-servers', metavar='int', type=int, default=2, help="Number of keep servers desired")
parser.add_argument('--keep-blob-signing', action="store_true", help="Enable blob signing for keepstore servers")
args = parser.parse_args()
-
- if args.action not in actions:
- print("Unrecognized action '{}'. Actions are: {}.".
- format(args.action, actions),
- file=sys.stderr)
- sys.exit(1)
# Create a new process group so our child processes don't exit on
# ^C in run-tests.sh interactive mode.
os.setpgid(0, 0)
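With `choices=actions`, argparse rejects unknown actions during `parse_args()` and lists the valid set through the `%(choices)s` expansion, which is what lets the hand-rolled validation below be deleted. A self-contained sketch:

    import argparse

    actions = ['start', 'stop', 'start_keep', 'stop_keep']  # abbreviated list
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'action',
        metavar='ACTION',
        choices=actions,
        help='one of %(choices)s',   # argparse substitutes the choices list
    )
    args = parser.parse_args(['start'])   # OK
    # parser.parse_args(['bogus'])        # exits 2: "invalid choice: 'bogus'"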
@@ -999,6 +1030,7 @@ if __name__ == "__main__":
print("export ARVADOS_API_TOKEN={}".format(shlex.quote(token)))
print("export ARVADOS_API_HOST={}".format(shlex.quote(host)))
print("export ARVADOS_API_HOST_INSECURE=true")
+ print("export ARVADOS_USE_KEEP_ACCESSIBLE_API=true")
else:
print(host)
elif args.action == 'stop':
@@ -1019,10 +1051,6 @@ if __name__ == "__main__":
run_keep_proxy()
elif args.action == 'stop_keep_proxy':
stop_keep_proxy()
- elif args.action == 'start_githttpd':
- run_arv_git_httpd()
- elif args.action == 'stop_githttpd':
- stop_arv_git_httpd()
elif args.action == 'start_keep-web':
run_keep_web()
elif args.action == 'stop_keep-web':
diff --git a/sdk/python/tests/slow_test.py b/sdk/python/tests/slow_test.py
deleted file mode 100644
index ae46f4e1bf..0000000000
--- a/sdk/python/tests/slow_test.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import __main__
-import os
-import unittest
-
-slow_test = lambda _: unittest.skipIf(
- __main__.short_tests_only,
- "running --short tests only")
diff --git a/sdk/python/tests/test_api.py b/sdk/python/tests/test_api.py
index 0f85e5520c..760bc7675f 100644
--- a/sdk/python/tests/test_api.py
+++ b/sdk/python/tests/test_api.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import str
-from builtins import range
import arvados
import collections
import contextlib
@@ -20,19 +17,21 @@ import sys
import unittest
import urllib.parse as urlparse
-import mock
+from unittest import mock
from . import run_test_server
from apiclient import errors as apiclient_errors
from apiclient import http as apiclient_http
from arvados.api import (
+ ThreadSafeAPIClient,
api_client,
normalize_api_kwargs,
api_kwargs_from_config,
- OrderedJsonModel,
_googleapiclient_log_lock,
)
from .arvados_testutil import fake_httplib2_response, mock_api_responses, queue_with
+
+import googleapiclient
import httplib2.error
if not mimetypes.inited:
@@ -70,8 +69,8 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
self.assertIsNot(*clients)
def test_empty_list(self):
- answer = arvados.api('v1').humans().list(
- filters=[['uuid', '=', None]]).execute()
+ answer = arvados.api('v1').collections().list(
+ filters=[['uuid', '=', 'abcdef']]).execute()
self.assertEqual(answer['items_available'], len(answer['items']))
def test_nonempty_list(self):
@@ -81,11 +80,11 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
def test_timestamp_inequality_filter(self):
api = arvados.api('v1')
- new_item = api.specimens().create(body={}).execute()
+ new_item = api.collections().create(body={}).execute()
for operator, should_include in [
['<', False], ['>', False],
['<=', True], ['>=', True], ['=', True]]:
- response = api.specimens().list(filters=[
+ response = api.collections().list(filters=[
['created_at', operator, new_item['created_at']],
# Also filter by uuid to ensure (if it matches) it's on page 0
['uuid', '=', new_item['uuid']]]).execute()
@@ -100,13 +99,13 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
def test_exceptions_include_errors(self):
mock_responses = {
- 'arvados.humans.get': self.api_error_response(
+ 'arvados.collections.get': self.api_error_response(
422, "Bad UUID format", "Bad output format"),
}
req_builder = apiclient_http.RequestMockBuilder(mock_responses)
api = arvados.api('v1', requestBuilder=req_builder)
with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
- api.humans().get(uuid='xyz-xyz-abcdef').execute()
+ api.collections().get(uuid='xyz-xyz-abcdef').execute()
err_s = str(err_ctx.exception)
for msg in ["Bad UUID format", "Bad output format"]:
self.assertIn(msg, err_s)
@@ -126,14 +125,14 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
def test_exceptions_without_errors_have_basic_info(self):
mock_responses = {
- 'arvados.humans.delete': (
+ 'arvados.collections.delete': (
fake_httplib2_response(500, **self.ERROR_HEADERS),
b"")
}
req_builder = apiclient_http.RequestMockBuilder(mock_responses)
api = arvados.api('v1', requestBuilder=req_builder)
with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
- api.humans().delete(uuid='xyz-xyz-abcdef').execute()
+ api.collections().delete(uuid='xyz-xyz-abcdef').execute()
self.assertIn("500", str(err_ctx.exception))
def test_request_too_large(self):
@@ -204,21 +203,6 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
self.assertEqual(response.status, code)
self.assertEqual(response.get('status'), str(code))
- def test_ordered_json_model(self):
- mock_responses = {
- 'arvados.humans.get': (
- None,
- json.dumps(collections.OrderedDict(
- (c, int(c, 16)) for c in string.hexdigits
- )).encode(),
- ),
- }
- req_builder = apiclient_http.RequestMockBuilder(mock_responses)
- api = arvados.api('v1',
- requestBuilder=req_builder, model=OrderedJsonModel())
- result = api.humans().get(uuid='test').execute()
- self.assertEqual(string.hexdigits, ''.join(list(result.keys())))
-
def test_api_is_threadsafe(self):
api_kwargs = {
'host': os.environ['ARVADOS_API_HOST'],
@@ -542,5 +526,61 @@ class PreCloseSocketTestCase(unittest.TestCase):
self.assertEqual(c.close.call_count, expect)
+class ThreadSafeAPIClientTestCase(run_test_server.TestCaseWithServers):
+ MAIN_SERVER = {}
+
+ def test_constructor(self):
+ env_mapping = {
+ key: value
+ for key, value in os.environ.items()
+ if key.startswith('ARVADOS_API_')
+ }
+ extra_params = {
+ 'timeout': 299,
+ }
+ base_params = {
+ key[12:].lower(): value
+ for key, value in env_mapping.items()
+ }
+ try:
+ base_params['insecure'] = base_params.pop('host_insecure')
+ except KeyError:
+ pass
+ expected_keep_params = {}
+ for config, params, subtest in [
+ (None, {}, "default arguments"),
+ (None, extra_params, "extra params"),
+ (env_mapping, {}, "explicit config"),
+ (env_mapping, extra_params, "explicit config and params"),
+ ({}, base_params, "params only"),
+ ]:
+ with self.subTest(f"test constructor with {subtest}"):
+ expected_timeout = params.get('timeout', 300)
+ expected_params = dict(params)
+ keep_params = dict(expected_keep_params)
+ client = ThreadSafeAPIClient(config, keep_params, params, 'v1')
+ self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
+ self.assertEqual(client._http.timeout, expected_timeout)
+ self.assertEqual(params, expected_params,
+ "api_params was modified in-place")
+ self.assertEqual(keep_params, expected_keep_params,
+ "keep_params was modified in-place")
+
+ def test_constructor_no_args(self):
+ client = ThreadSafeAPIClient()
+ self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
+ self.assertTrue(client.insecure)
+
+ def test_constructor_bad_version(self):
+ with self.assertRaises(googleapiclient.errors.UnknownApiNameOrVersion):
+ ThreadSafeAPIClient(version='BadTestVersion')
+
+ def test_pre_v3_0_name(self):
+ from arvados.safeapi import ThreadSafeApiCache
+ self.assertIs(ThreadSafeApiCache, ThreadSafeAPIClient)
+
+
if __name__ == '__main__':
unittest.main()
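Read together, these tests pin down the constructor: an optional config mapping (defaulting to ARVADOS_API_* environment settings), Keep client params, API client params, and an API version, with a `localapi()` accessor on the result. A usage sketch under those assumptions (host and token are placeholders):

    from arvados.api import ThreadSafeAPIClient

    # Environment-driven, as in test_constructor_no_args.
    client = ThreadSafeAPIClient()
    items = client.localapi().collections().list(limit=1).execute()['items']

    # Explicit configuration, mirroring test_constructor's positional form.
    client = ThreadSafeAPIClient(
        {'ARVADOS_API_HOST': 'zzzzz.example.com',   # placeholder
         'ARVADOS_API_TOKEN': 'xxxxxxxxxxxxxxxx',   # placeholder
         'ARVADOS_API_HOST_INSECURE': 'true'},
        {},                   # keep_params
        {'timeout': 299},     # api_params
        'v1',
    )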
diff --git a/sdk/python/tests/test_arv_copy.py b/sdk/python/tests/test_arv_copy.py
index b853b33043..862670f63f 100644
--- a/sdk/python/tests/test_arv_copy.py
+++ b/sdk/python/tests/test_arv_copy.py
@@ -2,16 +2,20 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
+import itertools
import os
import sys
import tempfile
import unittest
import shutil
import arvados.api
+import arvados.util
from arvados.collection import Collection, CollectionReader
+import pytest
+
import arvados.commands.arv_copy as arv_copy
+from arvados._internal import basedirs
from . import arvados_testutil as tutil
from . import run_test_server
@@ -88,3 +92,80 @@ class ArvCopyVersionTestCase(run_test_server.TestCaseWithServers, tutil.VersionC
finally:
os.environ['HOME'] = home_was
shutil.rmtree(tmphome)
+
+
+class TestApiForInstance:
+ _token_counter = itertools.count(1)
+
+ class ApiObject:
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+
+ def config(self):
+ return {"ClusterID": "zzzzz"}
+
+ @staticmethod
+ def api_config(version, **kwargs):
+ assert version == 'v1'
+ return TestApiForInstance.ApiObject(**kwargs)
+
+ @pytest.fixture
+ def patch_api(self, monkeypatch):
+ monkeypatch.setattr(arvados, 'api', self.api_config)
+
+ @pytest.fixture
+ def config_file(self, tmp_path):
+ count = next(self._token_counter)
+ path = tmp_path / f'config{count}.conf'
+ with path.open('w') as config_file:
+ print(
+ "ARVADOS_API_HOST=localhost",
+ f"ARVADOS_API_TOKEN={self.expected_token(path)}",
+ sep="\n", file=config_file,
+ )
+ return path
+
+ @pytest.fixture
+ def patch_search(self, tmp_path, monkeypatch):
+ def search(self, name):
+ path = tmp_path / name
+ if path.exists():
+ yield path
+ monkeypatch.setattr(basedirs.BaseDirectories, 'search', search)
+
+ def expected_token(self, path):
+ return f"v2/zzzzz-gj3su-{path.stem:>015s}/{path.stem:>050s}"
+
+ def test_from_environ(self, patch_api):
+ actual = arv_copy.api_for_instance('', 0)
+ assert actual.kwargs == {"num_retries": 0}
+
+ def test_instance_matches_environ(self, patch_api):
+ actual = arv_copy.api_for_instance('zzzzz', 0)
+ assert actual.kwargs == {"num_retries": 0}
+
+ def test_relative_path(self, patch_api, config_file, monkeypatch):
+ monkeypatch.chdir(config_file.parent)
+ actual = arv_copy.api_for_instance(f'./{config_file.name}', 0)
+ assert actual.kwargs['host'] == 'localhost'
+ assert actual.kwargs['token'] == self.expected_token(config_file)
+
+ def test_absolute_path(self, patch_api, config_file):
+ actual = arv_copy.api_for_instance(str(config_file), 0)
+ assert actual.kwargs['host'] == 'localhost'
+ assert actual.kwargs['token'] == self.expected_token(config_file)
+
+ def test_search_path(self, patch_api, patch_search, config_file):
+ actual = arv_copy.api_for_instance(config_file.stem, 0)
+ assert actual.kwargs['host'] == 'localhost'
+ assert actual.kwargs['token'] == self.expected_token(config_file)
+
+ def test_search_failed(self, patch_api, patch_search):
+ with pytest.raises(SystemExit) as exc_info:
+ arv_copy.api_for_instance('NotFound', 0)
+ assert exc_info.value.code > 0
+
+ def test_path_unreadable(self, patch_api, tmp_path):
+ with pytest.raises(SystemExit) as exc_info:
+ arv_copy.api_for_instance(str(tmp_path / 'nonexistent.conf'), 0)
+ assert exc_info.value.code > 0
diff --git a/sdk/python/tests/test_arv_get.py b/sdk/python/tests/test_arv_get.py
index d12739f6f6..aefcbd7b22 100644
--- a/sdk/python/tests/test_arv_get.py
+++ b/sdk/python/tests/test_arv_get.py
@@ -2,16 +2,15 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from future.utils import listitems
import io
import logging
-import mock
import os
import re
import shutil
import tempfile
+from unittest import mock
+
import arvados
import arvados.collection as collection
import arvados.commands.get as arv_get
@@ -51,7 +50,7 @@ class ArvadosGetTestCase(run_test_server.TestCaseWithServers,
}):
api = arvados.api()
c = collection.Collection(api_client=api)
- for path, data in listitems(contents):
+ for path, data in contents.items():
with c.open(path, 'wb') as f:
f.write(data)
c.save_new()
diff --git a/sdk/python/tests/test_arv_keepdocker.py b/sdk/python/tests/test_arv_keepdocker.py
index 9aebc03504..33c050ef86 100644
--- a/sdk/python/tests/test_arv_keepdocker.py
+++ b/sdk/python/tests/test_arv_keepdocker.py
@@ -8,15 +8,17 @@ import collections.abc
import copy
import hashlib
import logging
-import mock
import os
import subprocess
import sys
import tempfile
import unittest
+
from pathlib import Path
+from unittest import mock
import parameterized
+import pytest
import arvados.commands.keepdocker as arv_keepdocker
from . import arvados_testutil as tutil
@@ -40,7 +42,7 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
with tutil.redirected_streams(stdout=out, stderr=out), \
self.assertRaises(SystemExit):
self.run_arv_keepdocker(['-x=unknown'], sys.stderr)
- self.assertRegex(out.getvalue(), 'unrecognized arguments')
+ self.assertRegex(out.getvalue(), r'unrecognized arguments')
def test_version_argument(self):
with tutil.redirected_streams(
@@ -93,16 +95,16 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
self.assertEqual(out.getvalue(), '')
if expect_ok:
self.assertNotRegex(
- err.getvalue(), "refusing to store",
+ err.getvalue(), r"refusing to store",
msg=repr((supported, img_id)))
else:
self.assertRegex(
- err.getvalue(), "refusing to store",
+ err.getvalue(), r"refusing to store",
msg=repr((supported, img_id)))
if not supported:
self.assertRegex(
err.getvalue(),
- "server does not specify supported image formats",
+ r"server does not specify supported image formats",
msg=repr((supported, img_id)))
fakeDD = arvados.api('v1')._rootDesc
@@ -121,13 +123,13 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
api()._rootDesc = fakeDD
self.run_arv_keepdocker(
['--force', '--force-image-format', 'testimage'], err)
- self.assertRegex(err.getvalue(), "forcing incompatible image")
+ self.assertRegex(err.getvalue(), r"forcing incompatible image")
def test_tag_given_twice(self):
with tutil.redirected_streams(stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
with self.assertRaises(SystemExit):
self.run_arv_keepdocker(['myrepo:mytag', 'extratag'], sys.stderr)
- self.assertRegex(err.getvalue(), "cannot add tag argument 'extratag'")
+ self.assertRegex(err.getvalue(), r"cannot add tag argument 'extratag'")
def test_image_given_as_repo_colon_tag(self):
with self.assertRaises(StopTest), \
@@ -179,7 +181,7 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
out = tutil.StringIO()
with self.assertRaises(SystemExit):
self.run_arv_keepdocker([], sys.stderr, stdout=out)
- self.assertRegex(out.getvalue(), '\nregistry.example:1234/repo +latest ')
+ self.assertRegex(out.getvalue(), r'\nregistry.example:1234/repo +latest ')
finally:
api.links().delete(uuid=taglink['uuid']).execute()
@@ -254,3 +256,12 @@ class ImageMetadataTestCase(unittest.TestCase):
def test_image_config(self):
self.assertIsInstance(self.config, collections.abc.Mapping)
self.assertEqual(self.config.get('created'), '2023-05-02T16:49:27Z')
+
+
+def test_get_cache_dir(tmp_path):
+ actual = arv_keepdocker.get_cache_dir(lambda: tmp_path)
+ assert isinstance(actual, str)
+ actual = Path(actual)
+ assert actual.is_dir()
+ assert actual.name == 'docker'
+ assert actual.parent == tmp_path
diff --git a/sdk/python/tests/test_arv_ls.py b/sdk/python/tests/test_arv_ls.py
index 635c6254ad..c99f21c8bf 100644
--- a/sdk/python/tests/test_arv_ls.py
+++ b/sdk/python/tests/test_arv_ls.py
@@ -2,15 +2,13 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import str
-from builtins import range
import os
import random
import sys
-import mock
import tempfile
+from unittest import mock
+
import arvados.errors as arv_error
import arvados.commands.ls as arv_ls
from . import run_test_server
@@ -89,9 +87,8 @@ class ArvLsTestCase(run_test_server.TestCaseWithServers, tutil.VersionChecker):
self.assertEqual(1, error_mock.call_count)
def test_version_argument(self):
- if sys.version_info >= (3, 0):
- import warnings
- warnings.simplefilter("ignore")
+ import warnings
+ warnings.simplefilter("ignore")
with redirected_streams(stdout=StringIO, stderr=StringIO) as (out, err):
with self.assertRaises(SystemExit):
self.run_ls(['--version'], None)
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
index afdf2238a7..d854fd7062 100644
--- a/sdk/python/tests/test_arv_put.py
+++ b/sdk/python/tests/test_arv_put.py
@@ -4,19 +4,12 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from builtins import range
-from functools import partial
import apiclient
import ciso8601
+import copy
import datetime
import json
import logging
-import mock
import multiprocessing
import os
import pwd
@@ -32,10 +25,17 @@ import time
import unittest
import uuid
+import pytest
+from functools import partial
+from pathlib import Path
+from unittest import mock
+
import arvados
import arvados.commands.put as arv_put
-from . import arvados_testutil as tutil
+import arvados.util
+from arvados._internal import basedirs
+from . import arvados_testutil as tutil
from .arvados_testutil import ArvadosBaseTestCase, fake_httplib2_response
from . import run_test_server
@@ -249,6 +249,76 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
arv_put.ResumeCache, path)
+class TestArvadosPutResumeCacheDir:
+ @pytest.fixture
+ def args(self, tmp_path):
+ return arv_put.parse_arguments([str(tmp_path)])
+
+ @pytest.mark.parametrize('cache_dir', [None, 'test-put'])
+ def test_cache_subdir(self, tmp_path, monkeypatch, cache_dir, args):
+ if cache_dir is None:
+ cache_dir = arv_put.ResumeCache.CACHE_DIR
+ else:
+ monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', cache_dir)
+ monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', tmp_path.__truediv__)
+ actual = arv_put.ResumeCache.make_path(args)
+ assert isinstance(actual, str)
+ assert Path(actual).parent == (tmp_path / cache_dir)
+
+ def test_cache_relative_dir(self, tmp_path, monkeypatch, args):
+ expected = Path('rel', 'dir')
+ monkeypatch.setattr(Path, 'home', lambda: tmp_path)
+ monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', str(expected))
+ actual = arv_put.ResumeCache.make_path(args)
+ assert isinstance(actual, str)
+ parent = Path(actual).parent
+ assert parent == (tmp_path / expected)
+ assert parent.is_dir()
+
+ def test_cache_absolute_dir(self, tmp_path, monkeypatch, args):
+ expected = tmp_path / 'arv-put'
+ monkeypatch.setattr(Path, 'home', lambda: tmp_path / 'home')
+ monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', str(expected))
+ actual = arv_put.ResumeCache.make_path(args)
+ assert isinstance(actual, str)
+ parent = Path(actual).parent
+ assert parent == expected
+ assert parent.is_dir()
+
+
+class TestArvadosPutUploadJobCacheDir:
+ @pytest.mark.parametrize('cache_dir', [None, 'test-put'])
+ def test_cache_subdir(self, tmp_path, monkeypatch, cache_dir):
+ def storage_path(self, subdir='.', mode=0o700):
+ path = tmp_path / subdir
+ path.mkdir(mode=mode)
+ return path
+ if cache_dir is None:
+ cache_dir = arv_put.ArvPutUploadJob.CACHE_DIR
+ else:
+ monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', cache_dir)
+ monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', storage_path)
+ job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)
+ job.destroy_cache()
+ assert Path(job._cache_filename).parent == (tmp_path / cache_dir)
+
+ def test_cache_relative_dir(self, tmp_path, monkeypatch):
+ expected = Path('rel', 'dir')
+ monkeypatch.setattr(Path, 'home', lambda: tmp_path)
+ monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', str(expected))
+ job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)
+ job.destroy_cache()
+ assert Path(job._cache_filename).parent == (tmp_path / expected)
+
+ def test_cache_absolute_dir(self, tmp_path, monkeypatch):
+ expected = tmp_path / 'arv-put'
+ monkeypatch.setattr(Path, 'home', lambda: tmp_path / 'home')
+ monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', str(expected))
+ job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)
+ job.destroy_cache()
+ assert Path(job._cache_filename).parent == expected
+
+
class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
ArvadosBaseTestCase):
@@ -573,7 +643,7 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
class CachedManifestValidationTest(ArvadosBaseTestCase):
class MockedPut(arv_put.ArvPutUploadJob):
def __init__(self, cached_manifest=None):
- self._state = arv_put.ArvPutUploadJob.EMPTY_STATE
+ self._state = copy.deepcopy(arv_put.ArvPutUploadJob.EMPTY_STATE)
self._state['manifest'] = cached_manifest
self._api_client = mock.MagicMock()
self.logger = mock.MagicMock()
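The `copy.deepcopy` here is the actual fix: `EMPTY_STATE` is a class-level dict, so assigning into a direct reference would mutate state shared by every subsequent MockedPut. A minimal illustration of the hazard:

    import copy

    class MockedJob:
        EMPTY_STATE = {'manifest': None}

        def __init__(self, cached_manifest=None):
            # Without deepcopy, this line would alias the shared class dict:
            self._state = copy.deepcopy(MockedJob.EMPTY_STATE)
            self._state['manifest'] = cached_manifest

    a = MockedJob('abc+123')
    b = MockedJob()
    assert b._state['manifest'] is None                 # holds only with deepcopy
    assert MockedJob.EMPTY_STATE == {'manifest': None}  # class state untouched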
@@ -1372,6 +1442,11 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
self.assertEqual(len(collection['storage_classes_desired']), 1)
self.assertEqual(collection['storage_classes_desired'][0], 'default')
+ def test_put_collection_with_duplicate_and_malformed_storage_classes_specified(self):
+ collection = self.run_and_find_collection("", ['--storage-classes', ' foo, bar ,baz,, bar, foo, , ,'])
+ self.assertEqual(len(collection['storage_classes_desired']), 3)
+ self.assertEqual(collection['storage_classes_desired'], ['foo', 'bar', 'baz'])
+
def test_exclude_filename_pattern(self):
tmpdir = self.make_tmpdir()
tmpsubdir = os.path.join(tmpdir, 'subdir')
diff --git a/sdk/python/tests/test_arv_ws.py b/sdk/python/tests/test_arv_ws.py
index 521c46ee34..4e67db2184 100644
--- a/sdk/python/tests/test_arv_ws.py
+++ b/sdk/python/tests/test_arv_ws.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import os
import sys
import tempfile
diff --git a/sdk/python/tests/test_arvfile.py b/sdk/python/tests/test_arvfile.py
index 600f17baad..0a0ba23757 100644
--- a/sdk/python/tests/test_arvfile.py
+++ b/sdk/python/tests/test_arvfile.py
@@ -2,25 +2,22 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import hex
-from builtins import str
-from builtins import range
-from builtins import object
import datetime
-import mock
import os
-import unittest
import time
+import unittest
+
+from unittest import mock
import arvados
-from arvados._ranges import Range
-from arvados.keep import KeepLocator
-from arvados.collection import Collection
+
+from arvados._internal.streams import Range
from arvados.arvfile import ArvadosFile, ArvadosFileReader
+from arvados.collection import Collection
+from arvados.keep import KeepLocator
from . import arvados_testutil as tutil
-from .test_stream import StreamFileReaderTestCase, StreamRetryTestMixin
+from .test_stream import StreamFileReaderTestMixin, StreamRetryTestMixin
class ArvadosFileWriterTestCase(unittest.TestCase):
class MockKeep(object):
@@ -624,7 +621,7 @@ class ArvadosFileWriterTestCase(unittest.TestCase):
self.assertEqual(b"01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
-class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
+class ArvadosFileReaderTestCase(unittest.TestCase, StreamFileReaderTestMixin):
class MockParent(object):
class MockBlockMgr(object):
def __init__(self, blocks, nocache):
@@ -653,6 +650,11 @@ class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
return ArvadosFileReaderTestCase.MockParent.MockBlockMgr(self.blocks, self.nocache)
+ def make_file_reader(self, name='emptyfile', data='', nocache=False):
+ loc = tutil.str_keep_locator(data)
+ af = ArvadosFile(ArvadosFileReaderTestCase.MockParent({loc: data}, nocache=nocache), name, stream=[Range(loc, 0, len(data))], segments=[Range(0, len(data), len(data))])
+ return ArvadosFileReader(af, mode='rb')
+
def make_count_reader(self, nocache=False):
stream = []
n = 0
@@ -662,7 +664,21 @@ class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
blocks[loc] = d
stream.append(Range(loc, n, len(d)))
n += len(d)
- af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache), "count.txt", stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])
+ af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache=nocache), "count.txt", stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])
+ return ArvadosFileReader(af, mode="rb")
+
+ def make_newlines_reader(self, nocache=False):
+ stream = []
+ segments = []
+ n = 0
+ blocks = {}
+ for d in [b'one\ntwo\n\nth', b'ree\nfour\n\n']:
+ loc = tutil.str_keep_locator(d)
+ blocks[loc] = d
+ stream.append(Range(loc, n, len(d)))
+ segments.append(Range(n, len(d), n+len(d)))
+ n += len(d)
+ af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache=nocache), "count.txt", stream=stream, segments=segments)
return ArvadosFileReader(af, mode="rb")
def test_read_block_crossing_behavior(self):
@@ -671,16 +687,7 @@ class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
sfile = self.make_count_reader(nocache=True)
self.assertEqual(b'12345678', sfile.read(8))
- def test_successive_reads(self):
- # Override StreamFileReaderTestCase.test_successive_reads
- sfile = self.make_count_reader(nocache=True)
- self.assertEqual(b'1234', sfile.read(4))
- self.assertEqual(b'5678', sfile.read(4))
- self.assertEqual(b'9', sfile.read(4))
- self.assertEqual(b'', sfile.read(4))
-
def test_tell_after_block_read(self):
- # Override StreamFileReaderTestCase.test_tell_after_block_read
sfile = self.make_count_reader(nocache=True)
self.assertEqual(b'12345678', sfile.read(8))
self.assertEqual(8, sfile.tell())
diff --git a/sdk/python/tests/test_basedirs.py b/sdk/python/tests/test_basedirs.py
new file mode 100644
index 0000000000..ec75ef4b36
--- /dev/null
+++ b/sdk/python/tests/test_basedirs.py
@@ -0,0 +1,196 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import stat
+
+import pytest
+
+from pathlib import Path
+
+from arvados._internal import basedirs
+
+class TestBaseDirectories:
+ SELF_PATH = Path(__file__)
+
+ @pytest.fixture
+ def dir_spec(self, tmp_path):
+ return basedirs.BaseDirectorySpec(
+ 'TEST_DIRECTORY',
+ 'XDG_TEST_HOME',
+ Path('.test'),
+ 'XDG_TEST_DIRS',
+ f"{tmp_path / '.test1'}:{tmp_path / '.test2'}",
+ )
+
+ @pytest.fixture
+ def env(self, tmp_path):
+ return {'HOME': str(tmp_path)}
+
+ @pytest.fixture
+ def umask(self):
+ orig_umask = os.umask(0o002)
+ try:
+ yield
+ finally:
+ os.umask(orig_umask)
+
+ def test_search_systemd_dirs(self, dir_spec, env, tmp_path):
+ env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'
+ dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')
+ actual = list(dirs.search(self.SELF_PATH.name))
+ assert actual == [self.SELF_PATH]
+
+ def test_search_xdg_home(self, dir_spec, env, tmp_path):
+ env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)
+ dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')
+ actual = list(dirs.search(self.SELF_PATH.name))
+ assert actual == [self.SELF_PATH]
+
+ def test_search_xdg_dirs(self, dir_spec, env, tmp_path):
+ env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'
+ dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')
+ actual = list(dirs.search(self.SELF_PATH.name))
+ assert actual == [self.SELF_PATH]
+
+ def test_search_all_dirs(self, dir_spec, env, tmp_path):
+ env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'
+ env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)
+ env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'
+ dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')
+ actual = list(dirs.search(self.SELF_PATH.name))
+ assert actual == [self.SELF_PATH, self.SELF_PATH, self.SELF_PATH]
+
+ def test_search_paths(self, dir_spec, env, tmp_path):
+ env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'
+ env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)
+ env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'
+ dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')
+ actual = list(dirs.search_paths())
+ assert actual == [tmp_path, self.SELF_PATH.parent, self.SELF_PATH.parent, tmp_path / 'tests', self.SELF_PATH.parent.parent / 'tests']
+
+ def test_search_default_home(self, dir_spec, env, tmp_path):
+ expected = tmp_path / dir_spec.xdg_home_default / 'default_home'
+ expected.parent.mkdir()
+ expected.touch()
+ dirs = basedirs.BaseDirectories(dir_spec, env, '.')
+ actual = list(dirs.search(expected.name))
+ assert actual == [expected]
+
+ def test_search_default_dirs(self, dir_spec, env, tmp_path):
+ _, _, default_dir = dir_spec.xdg_dirs_default.rpartition(':')
+ expected = Path(default_dir, 'default_dirs')
+ expected.parent.mkdir()
+ expected.touch()
+ dirs = basedirs.BaseDirectories(dir_spec, env, '.')
+ actual = list(dirs.search(expected.name))
+ assert actual == [expected]
+
+ def test_search_no_default_dirs(self, dir_spec, env, tmp_path):
+ dir_spec.xdg_dirs_key = None
+ dir_spec.xdg_dirs_default = None
+ for subdir in ['.test1', '.test2', dir_spec.xdg_home_default]:
+ expected = tmp_path / subdir / 'no_dirs'
+ expected.parent.mkdir()
+ expected.touch()
+ dirs = basedirs.BaseDirectories(dir_spec, env, '.')
+ actual = list(dirs.search(expected.name))
+ assert actual == [expected]
+
+ def test_ignore_relative_directories(self, dir_spec, env, tmp_path):
+ test_path = Path(*self.SELF_PATH.parts[-2:])
+ assert test_path.exists(), "test setup problem: need an existing file in a subdirectory of ."
+ parent_path = str(test_path.parent)
+ env['TEST_DIRECTORY'] = '.'
+ env['XDG_TEST_HOME'] = parent_path
+ env['XDG_TEST_DIRS'] = parent_path
+ dirs = basedirs.BaseDirectories(dir_spec, env, parent_path)
+ assert not list(dirs.search(test_path.name))
+
+ def test_search_warns_nondefault_home(self, dir_spec, env, tmp_path, caplog):
+ search_path = tmp_path / dir_spec.xdg_home_default / 'Search' / 'SearchConfig'
+ search_path.parent.mkdir(parents=True)
+ search_path.touch()
+ env[dir_spec.xdg_home_key] = str(tmp_path / '.nonexistent')
+ dirs = basedirs.BaseDirectories(dir_spec, env, search_path.parent.name)
+ results = list(dirs.search(search_path.name))
+ expect_msg = "{} was not found under your configured ${} ({}), but does exist at the default location ({})".format(
+ Path(*search_path.parts[-2:]),
+ dir_spec.xdg_home_key,
+ env[dir_spec.xdg_home_key],
+ Path(*search_path.parts[:-2]),
+ )
+ assert caplog.messages
+ assert any(msg.startswith(expect_msg) for msg in caplog.messages)
+ assert not results
+
+ def test_storage_path_systemd(self, dir_spec, env, tmp_path):
+ expected = tmp_path / 'rwsystemd'
+ expected.mkdir(0o700)
+ env['TEST_DIRECTORY'] = str(expected)
+ dirs = basedirs.BaseDirectories(dir_spec, env)
+ assert dirs.storage_path() == expected
+
+ def test_storage_path_systemd_mixed_modes(self, dir_spec, env, tmp_path):
+ rodir = tmp_path / 'rodir'
+ rodir.mkdir(0o500)
+ expected = tmp_path / 'rwdir'
+ expected.mkdir(0o700)
+ env['TEST_DIRECTORY'] = f'{rodir}:{expected}'
+ dirs = basedirs.BaseDirectories(dir_spec, env)
+ assert dirs.storage_path() == expected
+
+ def test_storage_path_xdg_home(self, dir_spec, env, tmp_path):
+ expected = tmp_path / '.xdghome' / 'arvados'
+ env['XDG_TEST_HOME'] = str(expected.parent)
+ dirs = basedirs.BaseDirectories(dir_spec, env)
+ assert dirs.storage_path() == expected
+ exp_mode = stat.S_IFDIR | stat.S_IWUSR
+ assert (expected.stat().st_mode & exp_mode) == exp_mode
+
+ def test_storage_path_default(self, dir_spec, env, tmp_path):
+ expected = tmp_path / dir_spec.xdg_home_default / 'arvados'
+ dirs = basedirs.BaseDirectories(dir_spec, env)
+ assert dirs.storage_path() == expected
+ exp_mode = stat.S_IFDIR | stat.S_IWUSR
+ assert (expected.stat().st_mode & exp_mode) == exp_mode
+
+ @pytest.mark.parametrize('subdir,mode', [
+ ('str/dir', 0o750),
+ (Path('sub', 'path'), 0o770),
+ ])
+ def test_storage_path_subdir(self, dir_spec, env, umask, tmp_path, subdir, mode):
+ expected = tmp_path / dir_spec.xdg_home_default / 'arvados' / subdir
+ dirs = basedirs.BaseDirectories(dir_spec, env)
+ actual = dirs.storage_path(subdir, mode)
+ assert actual == expected
+ expect_mode = mode | stat.S_IFDIR
+ actual_mode = actual.stat().st_mode
+ assert (actual_mode & expect_mode) == expect_mode
+ assert not (actual_mode & stat.S_IRWXO)
+
+ def test_empty_xdg_home(self, dir_spec, env, tmp_path):
+ env['XDG_TEST_HOME'] = ''
+ expected = tmp_path / dir_spec.xdg_home_default / 'emptyhome'
+ dirs = basedirs.BaseDirectories(dir_spec, env, expected.name)
+ assert dirs.storage_path() == expected
+
+ def test_empty_xdg_dirs(self, dir_spec, env, tmp_path):
+ env['XDG_TEST_DIRS'] = ''
+ _, _, default_dir = dir_spec.xdg_dirs_default.rpartition(':')
+ expected = Path(default_dir, 'empty_dirs')
+ expected.parent.mkdir()
+ expected.touch()
+ dirs = basedirs.BaseDirectories(dir_spec, env, '.')
+ actual = list(dirs.search(expected.name))
+ assert actual == [expected]
+
+ def test_spec_key_lookup(self):
+ dirs = basedirs.BaseDirectories('CACHE')
+ assert dirs._spec.systemd_key == 'CACHE_DIRECTORY'
+ assert dirs._spec.xdg_dirs_key is None
+
+ def test_spec_enum_lookup(self):
+ dirs = basedirs.BaseDirectories(basedirs.BaseDirectorySpecs.CONFIG)
+ assert dirs._spec.systemd_key == 'CONFIGURATION_DIRECTORY'
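Taken together, the tests above document the search order: systemd-style directory variables first, then the XDG home variable (or its default under $HOME), then the colon-separated XDG dirs list, with relative entries ignored. A hedged re-implementation of just that ordering (illustrative names; the real logic lives in arvados._internal.basedirs):

    from pathlib import Path

    def search_paths(env, systemd_key, xdg_home_key, xdg_home_default,
                     xdg_dirs_key, xdg_dirs_default, subdir):
        # 1. systemd-style variable: colon-separated, used as-is.
        for p in env.get(systemd_key, '').split(':'):
            if p:
                yield Path(p)
        # 2. XDG home variable, falling back to the default under $HOME.
        home = env.get(xdg_home_key) or ''
        base = Path(home) if home else Path(env['HOME'], xdg_home_default)
        yield base / subdir
        # 3. XDG dirs variable, falling back to its default list.
        dirs = env.get(xdg_dirs_key) or xdg_dirs_default or ''
        for p in dirs.split(':'):
            if p:
                yield Path(p) / subdir

    paths = [p for p in search_paths(
        {'HOME': '/home/u'}, 'TEST_DIRECTORY', 'XDG_TEST_HOME', '.test',
        'XDG_TEST_DIRS', '/etc/xdg1:/etc/xdg2', 'tests',
    ) if p.is_absolute()]     # relative entries are ignored, per the tests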
diff --git a/sdk/python/tests/test_benchmark_collections.py b/sdk/python/tests/test_benchmark_collections.py
index fc062e791c..0014e94af2 100644
--- a/sdk/python/tests/test_benchmark_collections.py
+++ b/sdk/python/tests/test_benchmark_collections.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import arvados
import sys
diff --git a/sdk/python/tests/test_cmd_util.py b/sdk/python/tests/test_cmd_util.py
index ffd45aa4b7..96c3e00a2d 100644
--- a/sdk/python/tests/test_cmd_util.py
+++ b/sdk/python/tests/test_cmd_util.py
@@ -12,6 +12,7 @@ import unittest
from pathlib import Path
+import pytest
from parameterized import parameterized
import arvados.commands._util as cmd_util
@@ -192,3 +193,42 @@ class JSONArgumentValidationTestCase(unittest.TestCase):
with self.assertRaises(ValueError) as exc_check:
parser(json_value)
self.assertEqual(exc_check.exception.args, (json_value,))
+
+
+class TestRangedValue:
+ @pytest.fixture(scope='class')
+ def cmpint(self):
+ return cmd_util.RangedValue(int, range(-1, 2))
+
+ @pytest.mark.parametrize('s', ['-1', '0', '1'])
+ def test_valid_values(self, cmpint, s):
+ assert cmpint(s) == int(s)
+
+ @pytest.mark.parametrize('s', ['foo', '-2', '2', '0.2', '', ' '])
+ def test_invalid_values(self, cmpint, s):
+ with pytest.raises(ValueError):
+ cmpint(s)
+
+
+class TestUniqueSplit:
+ @pytest.fixture(scope='class')
+ def argtype(self):
+ return cmd_util.UniqueSplit()
+
+ @pytest.mark.parametrize('arg', [
+ 'foo',
+ 'foo,bar',
+ 'foo, bar, baz',
+ 'foo , bar , baz , quux',
+ ])
+ def test_basic_parse(self, arg, argtype):
+ expected = ['foo', 'bar', 'baz', 'quux'][:arg.count(',') + 1]
+ assert argtype(arg) == expected
+
+ @pytest.mark.parametrize('arg', [
+ 'foo, foo, bar',
+ 'foo, bar, foo',
+ 'foo, bar, bar',
+ ])
+ def test_uniqueness(self, arg, argtype):
+ assert argtype(arg) == ['foo', 'bar']
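These tests specify the two argument types' contracts: RangedValue converts a string and rejects values outside a container, and UniqueSplit splits on commas, strips whitespace, drops empties, and de-duplicates while preserving order (the behavior the arv-put --storage-classes test above relies on). Hedged minimal implementations consistent with the tests:

    class RangedValue:
        """Convert with `value_type`, then require membership in `valid_range`."""
        def __init__(self, value_type, valid_range):
            self.value_type = value_type
            self.valid_range = valid_range

        def __call__(self, s):
            value = self.value_type(s)
            if value not in self.valid_range:
                raise ValueError(s)
            return value

    class UniqueSplit:
        """Split on `sep`, strip items, drop blanks, keep first occurrences."""
        def __init__(self, sep=','):
            self.sep = sep

        def __call__(self, s):
            seen = {}
            for item in s.split(self.sep):
                item = item.strip()
                if item:
                    seen.setdefault(item, None)
            return list(seen)

    assert RangedValue(int, range(-1, 2))('0') == 0
    assert UniqueSplit()(' foo, bar ,baz,, bar, foo, , ,') == ['foo', 'bar', 'baz']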
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
index 9e753506b3..5939daf13e 100644
--- a/sdk/python/tests/test_collections.py
+++ b/sdk/python/tests/test_collections.py
@@ -2,33 +2,28 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-
-from builtins import object
-import arvados
+import ciso8601
import copy
-import mock
+import datetime
import os
import random
import re
+import shutil
import sys
-import datetime
-import ciso8601
+import tempfile
import time
import unittest
+
+import arvados
+import arvados.keep
import parameterized
-from . import run_test_server
-from arvados._ranges import Range, LocatorAndRange
+from arvados._internal.streams import Range, LocatorAndRange, locators_and_ranges
from arvados.collection import Collection, CollectionReader
-from . import arvados_testutil as tutil
-from .arvados_testutil import make_block_cache
-
-class TestResumableWriter(arvados.ResumableCollectionWriter):
- KEEP_BLOCK_SIZE = 1024 # PUT to Keep every 1K.
- def current_state(self):
- return self.dump_state(copy.deepcopy)
+from . import arvados_testutil as tutil
+from . import run_test_server
+from unittest import mock
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
@@ -41,28 +36,34 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
super(ArvadosCollectionsTest, cls).setUpClass()
# need admin privileges to make collections with unsigned blocks
run_test_server.authorize_with('admin')
+ if cls.disk_cache:
+ cls._disk_cache_dir = tempfile.mkdtemp(prefix='CollectionsTest-')
+ else:
+ cls._disk_cache_dir = None
+ block_cache = arvados.keep.KeepBlockCache(
+ disk_cache=cls.disk_cache,
+ disk_cache_dir=cls._disk_cache_dir,
+ )
cls.api_client = arvados.api('v1')
cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
local_store=cls.local_store,
- block_cache=make_block_cache(cls.disk_cache))
+ block_cache=block_cache)
+
+ @classmethod
+ def tearDownClass(cls):
+ if cls._disk_cache_dir:
+ shutil.rmtree(cls._disk_cache_dir)
def write_foo_bar_baz(self):
- cw = arvados.CollectionWriter(self.api_client)
- self.assertEqual(cw.current_stream_name(), '.',
- 'current_stream_name() should be "." now')
- cw.set_current_file_name('foo.txt')
- cw.write(b'foo')
- self.assertEqual(cw.current_file_name(), 'foo.txt',
- 'current_file_name() should be foo.txt now')
- cw.start_new_file('bar.txt')
- cw.write(b'bar')
- cw.start_new_stream('baz')
- cw.write(b'baz')
- cw.set_current_file_name('baz.txt')
- self.assertEqual(cw.manifest_text(),
- ". 3858f62230ac3c915f300c664312c63f+6 0:3:foo.txt 3:3:bar.txt\n" +
- "./baz 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n",
- "wrong manifest: got {}".format(cw.manifest_text()))
+ with arvados.collection.Collection(api_client=self.api_client).open('zzz', 'wb') as f:
+ f.write(b'foobar')
+ f.flush()
+ f.write(b'baz')
+ cw = arvados.collection.Collection(
+ api_client=self.api_client,
+ manifest_locator_or_text=
+ ". 3858f62230ac3c915f300c664312c63f+6 0:3:foo.txt 3:3:bar.txt\n" +
+ "./baz 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n")
cw.save_new()
return cw.portable_data_hash()
@@ -79,101 +80,34 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
'23ca013983d6239e98931cc779e68426+114',
'wrong locator hash: ' + self.write_foo_bar_baz())
- def test_local_collection_reader(self):
- foobarbaz = self.write_foo_bar_baz()
- cr = arvados.CollectionReader(
- foobarbaz + '+Xzizzle', self.api_client)
- got = []
- for s in cr.all_streams():
- for f in s.all_files():
- got += [[f.size(), f.stream_name(), f.name(), f.read(2**26)]]
- expected = [[3, '.', 'foo.txt', b'foo'],
- [3, '.', 'bar.txt', b'bar'],
- [3, './baz', 'baz.txt', b'baz']]
- self.assertEqual(got,
- expected)
- stream0 = cr.all_streams()[0]
- self.assertEqual(stream0.readfrom(0, 0),
- b'',
- 'reading zero bytes should have returned empty string')
- self.assertEqual(stream0.readfrom(0, 2**26),
- b'foobar',
- 'reading entire stream failed')
- self.assertEqual(stream0.readfrom(2**26, 0),
- b'',
- 'reading zero bytes should have returned empty string')
- self.assertEqual(3, len(cr))
- self.assertTrue(cr)
-
- def _test_subset(self, collection, expected):
- cr = arvados.CollectionReader(collection, self.api_client)
- for s in cr.all_streams():
- for ex in expected:
- if ex[0] == s:
- f = s.files()[ex[2]]
- got = [f.size(), f.stream_name(), f.name(), "".join(f.readall(2**26))]
- self.assertEqual(got,
- ex,
- 'all_files|as_manifest did not preserve manifest contents: got %s expected %s' % (got, ex))
-
- def test_collection_manifest_subset(self):
- foobarbaz = self.write_foo_bar_baz()
- self._test_subset(foobarbaz,
- [[3, '.', 'bar.txt', b'bar'],
- [3, '.', 'foo.txt', b'foo'],
- [3, './baz', 'baz.txt', b'baz']])
- self._test_subset((". %s %s 0:3:foo.txt 3:3:bar.txt\n" %
- (self.keep_client.put(b"foo"),
- self.keep_client.put(b"bar"))),
- [[3, '.', 'bar.txt', b'bar'],
- [3, '.', 'foo.txt', b'foo']])
- self._test_subset((". %s %s 0:2:fo.txt 2:4:obar.txt\n" %
- (self.keep_client.put(b"foo"),
- self.keep_client.put(b"bar"))),
- [[2, '.', 'fo.txt', b'fo'],
- [4, '.', 'obar.txt', b'obar']])
- self._test_subset((". %s %s 0:2:fo.txt 2:0:zero.txt 2:2:ob.txt 4:2:ar.txt\n" %
- (self.keep_client.put(b"foo"),
- self.keep_client.put(b"bar"))),
- [[2, '.', 'ar.txt', b'ar'],
- [2, '.', 'fo.txt', b'fo'],
- [2, '.', 'ob.txt', b'ob'],
- [0, '.', 'zero.txt', b'']])
-
def test_collection_empty_file(self):
- cw = arvados.CollectionWriter(self.api_client)
- cw.start_new_file('zero.txt')
- cw.write(b'')
+ cw = arvados.collection.Collection(api_client=self.api_client)
+ with cw.open('zero.txt', 'wb') as f:
+ pass
self.assertEqual(cw.manifest_text(), ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:zero.txt\n")
self.check_manifest_file_sizes(cw.manifest_text(), [0])
- cw = arvados.CollectionWriter(self.api_client)
- cw.start_new_file('zero.txt')
- cw.write(b'')
- cw.start_new_file('one.txt')
- cw.write(b'1')
- cw.start_new_stream('foo')
- cw.start_new_file('zero.txt')
- cw.write(b'')
- self.check_manifest_file_sizes(cw.manifest_text(), [0,1,0])
-
- def test_no_implicit_normalize(self):
- cw = arvados.CollectionWriter(self.api_client)
- cw.start_new_file('b')
- cw.write(b'b')
- cw.start_new_file('a')
- cw.write(b'')
- self.check_manifest_file_sizes(cw.manifest_text(), [1,0])
- self.check_manifest_file_sizes(
- arvados.CollectionReader(
- cw.manifest_text()).manifest_text(normalize=True),
- [0,1])
+
+ cw = arvados.collection.Collection(api_client=self.api_client)
+ with cw.open('zero.txt', 'wb') as f:
+ pass
+ with cw.open('one.txt', 'wb') as f:
+ f.write(b'1')
+ with cw.open('foo/zero.txt', 'wb') as f:
+ pass
+ # sorted, that's: [./one.txt, ./zero.txt, foo/zero.txt]
+ self.check_manifest_file_sizes(cw.manifest_text(), [1,0,0])
def check_manifest_file_sizes(self, manifest_text, expect_sizes):
- cr = arvados.CollectionReader(manifest_text, self.api_client)
got_sizes = []
- for f in cr.all_files():
- got_sizes += [f.size()]
+ def walk(subdir):
+ for fnm in subdir:
+ if isinstance(subdir[fnm], arvados.arvfile.ArvadosFile):
+ got_sizes.append(subdir[fnm].size())
+ else:
+ walk(subdir[fnm])
+ cr = arvados.CollectionReader(manifest_text, self.api_client)
+ walk(cr)
self.assertEqual(got_sizes, expect_sizes, "got wrong file sizes %s, expected %s" % (got_sizes, expect_sizes))
def test_normalized_collection(self):
@@ -235,30 +169,30 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
Range('e', 40, 10),
Range('f', 50, 10)]
- self.assertEqual(arvados.locators_and_ranges(blocks2, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 62, 2), [])
- self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
-
- self.assertEqual(arvados.locators_and_ranges(blocks2, 0, 2), [LocatorAndRange('a', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 60, 2), [])
- self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
-
- self.assertEqual(arvados.locators_and_ranges(blocks2, 9, 2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])
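+ # locators_and_ranges(blocks, offset, size) returns the block segments covering [offset, offset+size).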
+ self.assertEqual(locators_and_ranges(blocks2, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 62, 2), [])
+ self.assertEqual(locators_and_ranges(blocks2, -2, 2), [])
+
+ self.assertEqual(locators_and_ranges(blocks2, 0, 2), [LocatorAndRange('a', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks2, 60, 2), [])
+ self.assertEqual(locators_and_ranges(blocks2, -2, 2), [])
+
+ self.assertEqual(locators_and_ranges(blocks2, 9, 2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])
blocks3 = [Range('a', 0, 10),
@@ -269,56 +203,56 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
Range('f', 50, 10),
Range('g', 60, 10)]
- self.assertEqual(arvados.locators_and_ranges(blocks3, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+ self.assertEqual(locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])
blocks = [Range('a', 0, 10),
Range('b', 10, 15),
Range('c', 25, 5)]
- self.assertEqual(arvados.locators_and_ranges(blocks, 1, 0), [])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),
- LocatorAndRange('b', 15, 0, 1)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),
- LocatorAndRange('b', 15, 0, 2)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),
- LocatorAndRange('b', 15, 0, 15)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),
- LocatorAndRange('b', 15, 0, 15),
- LocatorAndRange('c', 5, 0, 5)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),
- LocatorAndRange('b', 15, 0, 15),
- LocatorAndRange('c', 5, 0, 5)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),
- LocatorAndRange('b', 15, 0, 15),
- LocatorAndRange('c', 5, 0, 5)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),
- LocatorAndRange('b', 15, 0, 15)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),
- LocatorAndRange('b', 15, 0, 15),
- LocatorAndRange('c', 5, 0, 3)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),
- LocatorAndRange('b', 15, 0, 14)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])
- self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),
- LocatorAndRange('c', 5, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks, 1, 0), [])
+ self.assertEqual(locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])
+ self.assertEqual(locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])
+ self.assertEqual(locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])
+
+ self.assertEqual(locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 1)])
+ self.assertEqual(locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),
+ LocatorAndRange('b', 15, 0, 2)])
+ self.assertEqual(locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15)])
+
+ self.assertEqual(locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+ self.assertEqual(locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+ self.assertEqual(locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+
+ self.assertEqual(locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])
+
+ self.assertEqual(locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),
+ LocatorAndRange('b', 15, 0, 15)])
+
+ self.assertEqual(locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 3)])
+
+ self.assertEqual(locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])
+
+ self.assertEqual(locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),
+ LocatorAndRange('b', 15, 0, 14)])
+ self.assertEqual(locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])
+ self.assertEqual(locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),
+ LocatorAndRange('c', 5, 0, 1)])
class MockKeep(object):
def __init__(self, content, num_retries=0):
@@ -328,32 +262,6 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
def get(self, locator, num_retries=0, prefetch=False):
return self.content[locator]
- def test_stream_reader(self):
- keepblocks = {
- 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10': b'abcdefghij',
- 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15': b'klmnopqrstuvwxy',
- 'cccccccccccccccccccccccccccccccc+5': b'z0123',
- }
- mk = self.MockKeep(keepblocks)
-
- sr = arvados.StreamReader([".", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15", "cccccccccccccccccccccccccccccccc+5", "0:30:foo"], mk)
-
- content = b'abcdefghijklmnopqrstuvwxyz0123456789'
-
- self.assertEqual(sr.readfrom(0, 30), content[0:30])
- self.assertEqual(sr.readfrom(2, 30), content[2:30])
-
- self.assertEqual(sr.readfrom(2, 8), content[2:10])
- self.assertEqual(sr.readfrom(0, 10), content[0:10])
-
- self.assertEqual(sr.readfrom(0, 5), content[0:5])
- self.assertEqual(sr.readfrom(5, 5), content[5:10])
- self.assertEqual(sr.readfrom(10, 5), content[10:15])
- self.assertEqual(sr.readfrom(15, 5), content[15:20])
- self.assertEqual(sr.readfrom(20, 5), content[20:25])
- self.assertEqual(sr.readfrom(25, 5), content[25:30])
- self.assertEqual(sr.readfrom(30, 5), b'')
-
def test_extract_file(self):
m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
. 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt
@@ -361,156 +269,19 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 47:80:md8sum.txt
. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt
"""
-
- m2 = arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True)
-
+ coll = arvados.CollectionReader(m1, self.api_client)
+ m2 = coll.manifest_text(normalize=True)
self.assertEqual(m2,
". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt 43:41:md6sum.txt 84:43:md7sum.txt 6:37:md8sum.txt 84:43:md8sum.txt 83:1:md9sum.txt 0:43:md9sum.txt 84:36:md9sum.txt\n")
- files = arvados.CollectionReader(
- m2, self.api_client).all_streams()[0].files()
-
- self.assertEqual(files['md5sum.txt'].as_manifest(),
+ self.assertEqual(coll['md5sum.txt'].manifest_text(),
". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n")
- self.assertEqual(files['md6sum.txt'].as_manifest(),
+ self.assertEqual(coll['md6sum.txt'].manifest_text(),
". 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt\n")
- self.assertEqual(files['md7sum.txt'].as_manifest(),
+ self.assertEqual(coll['md7sum.txt'].manifest_text(),
". 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt\n")
- self.assertEqual(files['md9sum.txt'].as_manifest(),
+ self.assertEqual(coll['md9sum.txt'].manifest_text(),
". 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt\n")
- def test_write_directory_tree(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- cwriter.write_directory_tree(self.build_directory_tree(
- ['basefile', 'subdir/subfile']))
- self.assertEqual(cwriter.manifest_text(),
- """. c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
-./subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
-
- def test_write_named_directory_tree(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- cwriter.write_directory_tree(self.build_directory_tree(
- ['basefile', 'subdir/subfile']), 'root')
- self.assertEqual(
- cwriter.manifest_text(),
- """./root c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
-./root/subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
-
- def test_write_directory_tree_in_one_stream(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- cwriter.write_directory_tree(self.build_directory_tree(
- ['basefile', 'subdir/subfile']), max_manifest_depth=0)
- self.assertEqual(cwriter.manifest_text(),
- """. 4ace875ffdc6824a04950f06858f4465+22 0:8:basefile 8:14:subdir/subfile\n""")
-
- def test_write_directory_tree_with_limited_recursion(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- cwriter.write_directory_tree(
- self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
- max_manifest_depth=1)
- self.assertEqual(cwriter.manifest_text(),
- """. bd19836ddb62c11c55ab251ccaca5645+2 0:2:f1
-./d1 50170217e5b04312024aa5cd42934494+13 0:8:d2/f3 8:5:f2\n""")
-
- def test_write_directory_tree_with_zero_recursion(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- content = 'd1/d2/f3d1/f2f1'
- blockhash = tutil.str_keep_locator(content)
- cwriter.write_directory_tree(
- self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
- max_manifest_depth=0)
- self.assertEqual(
- cwriter.manifest_text(),
- ". {} 0:8:d1/d2/f3 8:5:d1/f2 13:2:f1\n".format(blockhash))
-
- def test_write_one_file(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name)
- self.assertEqual(
- cwriter.manifest_text(),
- ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:{}\n".format(
- os.path.basename(testfile.name)))
-
- def test_write_named_file(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name, 'foo')
- self.assertEqual(cwriter.manifest_text(),
- ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:foo\n")
-
- def test_write_multiple_files(self):
- cwriter = arvados.CollectionWriter(self.api_client)
- for letter in 'ABC':
- with self.make_test_file(letter.encode()) as testfile:
- cwriter.write_file(testfile.name, letter)
- self.assertEqual(
- cwriter.manifest_text(),
- ". 902fbdd2b1df0c4f70b4a5d23525e932+3 0:1:A 1:1:B 2:1:C\n")
-
- def test_basic_resume(self):
- cwriter = TestResumableWriter()
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name, 'test')
- resumed = TestResumableWriter.from_state(cwriter.current_state())
- self.assertEqual(cwriter.manifest_text(), resumed.manifest_text(),
- "resumed CollectionWriter had different manifest")
-
- def test_resume_fails_when_missing_dependency(self):
- cwriter = TestResumableWriter()
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name, 'test')
- self.assertRaises(arvados.errors.StaleWriterStateError,
- TestResumableWriter.from_state,
- cwriter.current_state())
-
- def test_resume_fails_when_dependency_mtime_changed(self):
- cwriter = TestResumableWriter()
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name, 'test')
- os.utime(testfile.name, (0, 0))
- self.assertRaises(arvados.errors.StaleWriterStateError,
- TestResumableWriter.from_state,
- cwriter.current_state())
-
- def test_resume_fails_when_dependency_is_nonfile(self):
- cwriter = TestResumableWriter()
- cwriter.write_file('/dev/null', 'empty')
- self.assertRaises(arvados.errors.StaleWriterStateError,
- TestResumableWriter.from_state,
- cwriter.current_state())
-
- def test_resume_fails_when_dependency_size_changed(self):
- cwriter = TestResumableWriter()
- with self.make_test_file() as testfile:
- cwriter.write_file(testfile.name, 'test')
- orig_mtime = os.fstat(testfile.fileno()).st_mtime
- testfile.write(b'extra')
- testfile.flush()
- os.utime(testfile.name, (orig_mtime, orig_mtime))
- self.assertRaises(arvados.errors.StaleWriterStateError,
- TestResumableWriter.from_state,
- cwriter.current_state())
-
- def test_resume_fails_with_expired_locator(self):
- cwriter = TestResumableWriter()
- state = cwriter.current_state()
- # Add an expired locator to the state.
- state['_current_stream_locators'].append(''.join([
- 'a' * 32, '+1+A', 'b' * 40, '@', '10000000']))
- self.assertRaises(arvados.errors.StaleWriterStateError,
- TestResumableWriter.from_state, state)
-
- def test_arbitrary_objects_not_resumable(self):
- cwriter = TestResumableWriter()
- with open('/dev/null') as badfile:
- self.assertRaises(arvados.errors.AssertionError,
- cwriter.write_file, badfile)
-
- def test_arbitrary_writes_not_resumable(self):
- cwriter = TestResumableWriter()
- self.assertRaises(arvados.errors.AssertionError,
- cwriter.write, "badtext")
-
class CollectionTestMixin(tutil.ApiClientMock):
API_COLLECTIONS = run_test_server.fixture('collections')
@@ -580,8 +351,7 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,
num_retries=3)
with tutil.mock_keep_responses('foo', 500, 500, 200):
- self.assertEqual(b'foo',
- b''.join(f.read(9) for f in reader.all_files()))
+ self.assertEqual('foo', reader.open('foo', 'r').read())
def test_read_nonnormalized_manifest_with_collection_reader(self):
# client should be able to use CollectionReader on a manifest without normalizing it
@@ -597,12 +367,6 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
reader.stripped_manifest())
# Ensure stripped_manifest() didn't mutate our reader.
self.assertEqual(nonnormal, reader.manifest_text())
- # Ensure the files appear in the order given in the manifest.
- self.assertEqual(
- [[6, '.', 'foo.txt'],
- [0, '.', 'bar.txt']],
- [[f.size(), f.stream_name(), f.name()]
- for f in reader.all_streams()[0].all_files()])
def test_read_empty_collection(self):
client = self.api_client_mock(200)
@@ -651,140 +415,6 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
self.assertRaises(IOError, reader.open, 'nonexistent')
-@tutil.skip_sleep
-class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
- def mock_keep(self, body, *codes, **headers):
- headers.setdefault('x-keep-replicas-stored', 2)
- return tutil.mock_keep_responses(body, *codes, **headers)
-
- def foo_writer(self, **kwargs):
- kwargs.setdefault('api_client', self.api_client_mock())
- writer = arvados.CollectionWriter(**kwargs)
- writer.start_new_file('foo')
- writer.write(b'foo')
- return writer
-
- def test_write_whole_collection(self):
- writer = self.foo_writer()
- with self.mock_keep(self.DEFAULT_DATA_HASH, 200, 200):
- self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
-
- def test_write_no_default(self):
- writer = self.foo_writer()
- with self.mock_keep(None, 500):
- with self.assertRaises(arvados.errors.KeepWriteError):
- writer.finish()
-
- def test_write_insufficient_replicas_via_proxy(self):
- writer = self.foo_writer(replication=3)
- with self.mock_keep(None, 200, **{'x-keep-replicas-stored': 2}):
- with self.assertRaises(arvados.errors.KeepWriteError):
- writer.manifest_text()
-
- def test_write_insufficient_replicas_via_disks(self):
- client = mock.MagicMock(name='api_client')
- with self.mock_keep(
- None, 200, 200,
- **{'x-keep-replicas-stored': 1}) as keepmock:
- self.mock_keep_services(client, status=200, service_type='disk', count=2)
- writer = self.foo_writer(api_client=client, replication=3)
- with self.assertRaises(arvados.errors.KeepWriteError):
- writer.manifest_text()
-
- def test_write_three_replicas(self):
- client = mock.MagicMock(name='api_client')
- with self.mock_keep(
- "", 500, 500, 500, 200, 200, 200,
- **{'x-keep-replicas-stored': 1}) as keepmock:
- self.mock_keep_services(client, status=200, service_type='disk', count=6)
- writer = self.foo_writer(api_client=client, replication=3)
- writer.manifest_text()
- self.assertEqual(6, keepmock.call_count)
-
- def test_write_whole_collection_through_retries(self):
- writer = self.foo_writer(num_retries=2)
- with self.mock_keep(self.DEFAULT_DATA_HASH,
- 500, 500, 200, 500, 500, 200):
- self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
-
- def test_flush_data_retries(self):
- writer = self.foo_writer(num_retries=2)
- foo_hash = self.DEFAULT_MANIFEST.split()[1]
- with self.mock_keep(foo_hash, 500, 200):
- writer.flush_data()
- self.assertEqual(self.DEFAULT_MANIFEST, writer.manifest_text())
-
- def test_one_open(self):
- client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- with writer.open('out') as out_file:
- self.assertEqual('.', writer.current_stream_name())
- self.assertEqual('out', writer.current_file_name())
- out_file.write(b'test data')
- data_loc = tutil.str_keep_locator('test data')
- self.assertTrue(out_file.closed, "writer file not closed after context")
- self.assertRaises(ValueError, out_file.write, 'extra text')
- with self.mock_keep(data_loc, 200) as keep_mock:
- self.assertEqual(". {} 0:9:out\n".format(data_loc),
- writer.manifest_text())
-
- def test_open_writelines(self):
- client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- with writer.open('six') as out_file:
- out_file.writelines(['12', '34', '56'])
- data_loc = tutil.str_keep_locator('123456')
- with self.mock_keep(data_loc, 200) as keep_mock:
- self.assertEqual(". {} 0:6:six\n".format(data_loc),
- writer.manifest_text())
-
- def test_open_flush(self):
- client = self.api_client_mock()
- data_loc1 = tutil.str_keep_locator('flush1')
- data_loc2 = tutil.str_keep_locator('flush2')
- with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
- writer = arvados.CollectionWriter(client)
- with writer.open('flush_test') as out_file:
- out_file.write(b'flush1')
- out_file.flush()
- out_file.write(b'flush2')
- self.assertEqual(". {} {} 0:12:flush_test\n".format(data_loc1,
- data_loc2),
- writer.manifest_text())
-
- def test_two_opens_same_stream(self):
- client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- with writer.open('.', '1') as out_file:
- out_file.write(b'1st')
- with writer.open('.', '2') as out_file:
- out_file.write(b'2nd')
- data_loc = tutil.str_keep_locator('1st2nd')
- with self.mock_keep(data_loc, 200) as keep_mock:
- self.assertEqual(". {} 0:3:1 3:3:2\n".format(data_loc),
- writer.manifest_text())
-
- def test_two_opens_two_streams(self):
- client = self.api_client_mock()
- data_loc1 = tutil.str_keep_locator('file')
- data_loc2 = tutil.str_keep_locator('indir')
- with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
- writer = arvados.CollectionWriter(client)
- with writer.open('file') as out_file:
- out_file.write(b'file')
- with writer.open('./dir', 'indir') as out_file:
- out_file.write(b'indir')
- expected = ". {} 0:4:file\n./dir {} 0:5:indir\n".format(
- data_loc1, data_loc2)
- self.assertEqual(expected, writer.manifest_text())
-
- def test_dup_open_fails(self):
- client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- file1 = writer.open('one')
- self.assertRaises(arvados.errors.AssertionError, writer.open, 'two')
-
-
class CollectionMethods(run_test_server.TestCaseWithServers):
def test_keys_values_items_support_indexing(self):
@@ -794,12 +424,7 @@ class CollectionMethods(run_test_server.TestCaseWithServers):
with c.open('bar', 'wb') as f:
f.write(b'bar')
self.assertEqual(2, len(c.keys()))
- if sys.version_info < (3, 0):
- # keys() supports indexing only for python2 callers.
- fn0 = c.keys()[0]
- fn1 = c.keys()[1]
- else:
- fn0, fn1 = c.keys()
+ fn0, fn1 = c.keys()
self.assertEqual(2, len(c.values()))
f0 = c.values()[0]
f1 = c.values()[1]
@@ -852,13 +477,8 @@ class TextModes(run_test_server.TestCaseWithServers):
def setUp(self):
arvados.config.KEEP_BLOCK_SIZE = 4
- if sys.version_info < (3, 0):
- import unicodedata
- self.sailboat = unicodedata.lookup('SAILBOAT')
- self.snowman = unicodedata.lookup('SNOWMAN')
- else:
- self.sailboat = '\N{SAILBOAT}'
- self.snowman = '\N{SNOWMAN}'
+ self.sailboat = '\N{SAILBOAT}'
+ self.snowman = '\N{SNOWMAN}'
def tearDown(self):
arvados.config.KEEP_BLOCK_SIZE = 2 ** 26
diff --git a/sdk/python/tests/test_computed_permissions.py b/sdk/python/tests/test_computed_permissions.py
new file mode 100644
index 0000000000..27cfa7797c
--- /dev/null
+++ b/sdk/python/tests/test_computed_permissions.py
@@ -0,0 +1,78 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.util
+from . import run_test_server
+from .test_util import KeysetTestHelper
+
+class ComputedPermissionTest(run_test_server.TestCaseWithServers):
+ def test_computed_permission(self):
+ run_test_server.authorize_with('admin')
+ api_client = arvados.api('v1')
+ active_user_uuid = run_test_server.fixture('users')['active']['uuid']
+ resp = api_client.computed_permissions().list(
+ filters=[['user_uuid', '=', active_user_uuid]],
+ ).execute()
+ assert len(resp['items']) > 0
+ for item in resp['items']:
+ assert item['user_uuid'] == active_user_uuid
+
+ def test_keyset_list_all(self):
+ run_test_server.authorize_with('admin')
+ api_client = arvados.api('v1')
+ seen = {}
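+ # Each (user_uuid, target_uuid) pair must appear exactly once across all pages.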
+ for item in arvados.util.keyset_list_all(api_client.computed_permissions().list, order_key='user_uuid', key_fields=('user_uuid', 'target_uuid')):
+ assert (item['user_uuid'], item['target_uuid']) not in seen
+ seen[(item['user_uuid'], item['target_uuid'])] = True
+
+ def test_iter_computed_permissions(self):
+ run_test_server.authorize_with('admin')
+ api_client = arvados.api('v1')
+ seen = {}
+ for item in arvados.util.iter_computed_permissions(api_client.computed_permissions().list):
+ assert item['perm_level']
+ assert (item['user_uuid'], item['target_uuid']) not in seen
+ seen[(item['user_uuid'], item['target_uuid'])] = True
+
+ def test_iter_computed_permissions_defaults(self):
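+ # KeysetTestHelper pairs each expected list() request with a canned response page.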
+ ks = KeysetTestHelper([[
+ {"limit": 1000, "count": "none", "order": ["user_uuid asc", "target_uuid asc"], "filters": []},
+ {"items": [{"user_uuid": "u", "target_uuid": "t"}]}
+ ], [
+ {"limit": 1000, "count": "none", "order": ["user_uuid asc", "target_uuid asc"], "filters": [['user_uuid', '=', 'u'], ['target_uuid', '>', 't']]},
+ {"items": []},
+ ], [
+ {"limit": 1000, "count": "none", "order": ["user_uuid asc", "target_uuid asc"], "filters": [['user_uuid', '>', 'u']]},
+ {"items": []},
+ ]])
+ ls = list(arvados.util.iter_computed_permissions(ks.fn))
+ assert ls == ks.expect[0][1]['items']
+
+ def test_iter_computed_permissions_order_key(self):
+ ks = KeysetTestHelper([[
+ {"limit": 1000, "count": "none", "order": ["target_uuid desc", "user_uuid desc"], "filters": []},
+ {"items": [{"user_uuid": "u", "target_uuid": "t"}]}
+ ], [
+ {"limit": 1000, "count": "none", "order": ["target_uuid desc", "user_uuid desc"], "filters": [['target_uuid', '=', 't'], ['user_uuid', '<', 'u']]},
+ {"items": []},
+ ], [
+ {"limit": 1000, "count": "none", "order": ["target_uuid desc", "user_uuid desc"], "filters": [['target_uuid', '<', 't']]},
+ {"items": []},
+ ]])
+ ls = list(arvados.util.iter_computed_permissions(ks.fn, order_key='target_uuid', ascending=False))
+ assert ls == ks.expect[0][1]['items']
+
+ def test_iter_computed_permissions_num_retries(self):
+ ks = KeysetTestHelper([[
+ {"limit": 1000, "count": "none", "order": ["user_uuid asc", "target_uuid asc"], "filters": []},
+ {"items": []}
+ ]], expect_num_retries=33)
+ assert list(arvados.util.iter_computed_permissions(ks.fn, num_retries=33)) == []
+
+ def test_iter_computed_permissions_invalid_key_fields(self):
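+ # key_fields may contain at most one field besides order_key.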
+ ks = KeysetTestHelper([])
+ with self.assertRaises(arvados.errors.ArgumentError) as exc:
+ _ = list(arvados.util.iter_computed_permissions(ks.fn, key_fields=['target_uuid', 'perm_level']))
+ assert exc.exception.args[0] == 'key_fields can have at most one entry that is not order_key'
diff --git a/sdk/python/tests/test_config.py b/sdk/python/tests/test_config.py
new file mode 100644
index 0000000000..4b5bca2e82
--- /dev/null
+++ b/sdk/python/tests/test_config.py
@@ -0,0 +1,58 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+from arvados import config as arv_config
+
+class TestInitialize:
+ @pytest.fixture(autouse=True)
+ def setup(self, monkeypatch):
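+ # Clear any cached settings so each test starts from a clean environment.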
+ arv_config._settings = None
+ monkeypatch.delenv('ARVADOS_API_HOST', raising=False)
+ monkeypatch.delenv('ARVADOS_API_TOKEN', raising=False)
+ try:
+ yield
+ finally:
+ arv_config._settings = None
+
+ @pytest.fixture
+ def tmp_settings(self, tmp_path):
+ path = tmp_path / 'settings.conf'
+ with path.open('w') as settings_file:
+ print("ARVADOS_API_HOST=localhost", file=settings_file)
+ print("ARVADOS_API_TOKEN=TestInitialize", file=settings_file)
+ return path
+
+ def test_static_path(self, tmp_settings):
+ arv_config.initialize(tmp_settings)
+ actual = arv_config.settings()
+ assert actual['ARVADOS_API_HOST'] == 'localhost'
+ assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'
+
+ def test_search_path(self, tmp_settings):
+ def search(filename):
+ assert filename == tmp_settings.name
+ yield tmp_settings
+ arv_config.initialize(search)
+ actual = arv_config.settings()
+ assert actual['ARVADOS_API_HOST'] == 'localhost'
+ assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'
+
+ def test_default_search(self, tmp_settings, monkeypatch):
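+ # Point both the systemd and XDG config search paths at the temp settings directory.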
+ monkeypatch.setenv('CONFIGURATION_DIRECTORY', str(tmp_settings.parent))
+ monkeypatch.setenv('XDG_CONFIG_HOME', str(tmp_settings.parent))
+ monkeypatch.delenv('XDG_CONFIG_DIRS', raising=False)
+ actual = arv_config.settings()
+ assert actual['ARVADOS_API_HOST'] == 'localhost'
+ assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'
+
+ def test_environ_override(self, monkeypatch):
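+ # Environment variables take precedence even when no settings file is loaded.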
+ monkeypatch.setenv('ARVADOS_API_TOKEN', 'test_environ_override')
+ arv_config.initialize('')
+ actual = arv_config.settings()
+ assert actual.get('ARVADOS_API_HOST') is None
+ assert actual['ARVADOS_API_TOKEN'] == 'test_environ_override'
diff --git a/sdk/python/tests/test_crunch.py b/sdk/python/tests/test_crunch.py
deleted file mode 100644
index 809e229b20..0000000000
--- a/sdk/python/tests/test_crunch.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import arvados.crunch
-import os
-import shutil
-import tempfile
-import unittest
-
-class TaskOutputDirTest(unittest.TestCase):
- def setUp(self):
- self.tmp = tempfile.mkdtemp()
- os.environ['TASK_KEEPMOUNT_TMP'] = self.tmp
-
- def tearDown(self):
- os.environ.pop('TASK_KEEPMOUNT_TMP')
- shutil.rmtree(self.tmp)
-
- def test_env_var(self):
- out = arvados.crunch.TaskOutputDir()
- self.assertEqual(out.path, self.tmp)
-
- with open(os.path.join(self.tmp, '.arvados#collection'), 'w') as f:
- f.write('{\n "manifest_text":"",\n "uuid":null\n}\n')
- self.assertEqual(out.manifest_text(), '')
-
- # Special file must be re-read on each call to manifest_text().
- with open(os.path.join(self.tmp, '.arvados#collection'), 'w') as f:
- f.write(r'{"manifest_text":". unparsed 0:3:foo\n","uuid":null}')
- self.assertEqual(out.manifest_text(), ". unparsed 0:3:foo\n")
diff --git a/sdk/python/tests/test_errors.py b/sdk/python/tests/test_errors.py
index 4ee68ba285..02f316bf79 100644
--- a/sdk/python/tests/test_errors.py
+++ b/sdk/python/tests/test_errors.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import traceback
import unittest
diff --git a/sdk/python/tests/test_events.py b/sdk/python/tests/test_events.py
index b4e6a0b1cd..c5f3310015 100644
--- a/sdk/python/tests/test_events.py
+++ b/sdk/python/tests/test_events.py
@@ -4,13 +4,14 @@
import json
import logging
-import mock
import queue
import sys
import threading
import time
import unittest
+from unittest import mock
+
import websockets.exceptions as ws_exc
import arvados
@@ -96,9 +97,9 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
# Create ancestor before subscribing.
# When listening with start_time in the past, this should also be retrieved.
# However, when start_time is omitted in subscribe, this should not be fetched.
- ancestor = arvados.api('v1').humans().create(body={}).execute()
+ ancestor = arvados.api('v1').collections().create(body={}).execute()
- filters = [['object_uuid', 'is_a', 'arvados#human']]
+ filters = [['object_uuid', 'is_a', 'arvados#collection']]
if start_time:
filters.append(['created_at', '>=', start_time])
@@ -117,11 +118,11 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
while not self.ws._skip_old_events:
self.assertLess(time.time(), deadline)
time.sleep(0.1)
- human = arvados.api('v1').humans().create(body={}).execute()
+ collection = arvados.api('v1').collections().create(body={}).execute()
want_uuids = []
if expected > 0:
- want_uuids.append(human['uuid'])
+ want_uuids.append(collection['uuid'])
if expected > 1:
want_uuids.append(ancestor['uuid'])
log_object_uuids = []
@@ -227,7 +228,7 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
- filters = [['object_uuid', 'is_a', 'arvados#human']]
+ filters = [['object_uuid', 'is_a', 'arvados#collection']]
filters.append(['created_at', '>=', self.localiso(self.TIME_PAST)])
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
@@ -238,10 +239,10 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
self.assertEqual(200, events.get(True, 5)['status'])
# create obj
- human = arvados.api('v1').humans().create(body={}).execute()
+ collection = arvados.api('v1').collections().create(body={}).execute()
# expect an event
- self.assertIn(human['uuid'], events.get(True, 5)['object_uuid'])
+ self.assertIn(collection['uuid'], events.get(True, 5)['object_uuid'])
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
@@ -252,7 +253,7 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
self.ws.close()
# create one more obj
- human2 = arvados.api('v1').humans().create(body={}).execute()
+ collection2 = arvados.api('v1').collections().create(body={}).execute()
# (un)expect the object creation event
if close_unexpected:
@@ -263,8 +264,8 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
log_object_uuids.append(event['object_uuid'])
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
- self.assertNotIn(human['uuid'], log_object_uuids)
- self.assertIn(human2['uuid'], log_object_uuids)
+ self.assertNotIn(collection['uuid'], log_object_uuids)
+ self.assertIn(collection2['uuid'], log_object_uuids)
else:
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
diff --git a/sdk/python/tests/test_cache.py b/sdk/python/tests/test_http_cache.py
similarity index 65%
rename from sdk/python/tests/test_cache.py
rename to sdk/python/tests/test_http_cache.py
index 259acd0a30..49feb0615c 100644
--- a/sdk/python/tests/test_cache.py
+++ b/sdk/python/tests/test_http_cache.py
@@ -2,13 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import absolute_import
-
-from builtins import str
-from builtins import range
import hashlib
-import mock
import os
import random
import shutil
@@ -17,22 +11,26 @@ import tempfile
import threading
import unittest
+import pytest
+from unittest import mock
+
import arvados
-import arvados.cache
-from . import run_test_server
+import arvados.api
+import arvados.util
+from arvados._internal import basedirs
+from . import run_test_server
def _random(n):
return bytearray(random.getrandbits(8) for _ in range(n))
-
class CacheTestThread(threading.Thread):
def __init__(self, dir):
super(CacheTestThread, self).__init__()
self._dir = dir
def run(self):
- c = arvados.cache.SafeHTTPCache(self._dir)
+ c = arvados.api.ThreadSafeHTTPCache(self._dir)
url = 'http://example.com/foo'
self.ok = True
for x in range(16):
@@ -50,6 +48,26 @@ class CacheTestThread(threading.Thread):
raise
+class TestAPIHTTPCache:
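+ # arvados.http_cache(data_type) should return a per-type path under the storage dir, or None when storage is unavailable.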
+ @pytest.mark.parametrize('data_type', ['discovery', 'keep'])
+ def test_good_storage(self, tmp_path, monkeypatch, data_type):
+ def storage_path(self, subdir='.', mode=0o700):
+ path = tmp_path / subdir
+ path.mkdir(mode=mode)
+ return path
+ monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', storage_path)
+ actual = arvados.http_cache(data_type)
+ assert str(actual) == str(tmp_path / data_type)
+
+ @pytest.mark.parametrize('error', [RuntimeError, FileExistsError, PermissionError])
+ def test_unwritable_storage(self, monkeypatch, error):
+ def fail(self, subdir='.', mode=0o700):
+ raise error()
+ monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', fail)
+ actual = arvados.http_cache('unwritable')
+ assert actual is None
+
+
class CacheTest(unittest.TestCase):
def setUp(self):
self._dir = tempfile.mkdtemp()
@@ -57,19 +75,8 @@ class CacheTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self._dir)
- def test_cache_create_error(self):
- _, filename = tempfile.mkstemp()
- home_was = os.environ['HOME']
- os.environ['HOME'] = filename
- try:
- c = arvados.http_cache('test')
- self.assertEqual(None, c)
- finally:
- os.environ['HOME'] = home_was
- os.unlink(filename)
-
def test_cache_crud(self):
- c = arvados.cache.SafeHTTPCache(self._dir, max_age=0)
+ c = arvados.api.ThreadSafeHTTPCache(self._dir, max_age=0)
url = 'https://example.com/foo?bar=baz'
data1 = _random(256)
data2 = _random(128)
@@ -98,6 +105,6 @@ class CacheIntegrationTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
def test_cache_used_by_default_client(self):
- with mock.patch('arvados.cache.SafeHTTPCache.get') as getter:
+ with mock.patch('arvados.api.ThreadSafeHTTPCache.get') as getter:
arvados.api('v1')._rootDesc.get('foobar')
getter.assert_called()
diff --git a/sdk/python/tests/test_http.py b/sdk/python/tests/test_http_to_keep.py
similarity index 94%
rename from sdk/python/tests/test_http.py
rename to sdk/python/tests/test_http_to_keep.py
index bce57eda61..b8d4679029 100644
--- a/sdk/python/tests/test_http.py
+++ b/sdk/python/tests/test_http_to_keep.py
@@ -2,28 +2,24 @@
#
# SPDX-License-Identifier: Apache-2.0
-from future import standard_library
-standard_library.install_aliases()
-
import copy
import io
import functools
import hashlib
import json
import logging
-import mock
import sys
import unittest
import datetime
+from unittest import mock
+
import arvados
import arvados.collection
import arvados.keep
import pycurl
-from arvados.http_to_keep import http_to_keep
-
-import ruamel.yaml as yaml
+from arvados._internal import http_to_keep
# Turns out there was already "FakeCurl" that serves the same purpose, but
# I wrote this before I knew that. Whoops.
@@ -96,7 +92,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 15)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
datetime.datetime(2018, 5, 15, 0, 0)))
@@ -147,7 +143,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 16)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
datetime.datetime(2018, 5, 16, 0, 0)))
@@ -188,7 +184,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 16)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
'http://example.com/file1.txt', datetime.datetime(2018, 5, 16, 0, 0)))
@@ -228,7 +224,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 17)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999997+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz4',
'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
@@ -285,7 +281,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 17)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
datetime.datetime(2018, 5, 17, 0, 0)))
@@ -324,7 +320,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 15)
- r = http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz3',
'http://example.com/download?fn=/file1.txt',
@@ -381,7 +377,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 17)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
datetime.datetime(2018, 5, 17, 0, 0)))
@@ -432,7 +428,7 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 17)
- r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow, prefer_cached_downloads=True)
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow, prefer_cached_downloads=True)
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
@@ -479,8 +475,8 @@ class TestHttpToKeep(unittest.TestCase):
utcnow = mock.MagicMock()
utcnow.return_value = datetime.datetime(2018, 5, 17)
- r = http_to_keep(api, None, "http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789",
- utcnow=utcnow, varying_url_params="KeyId,Signature,Expires")
+ r = http_to_keep.http_to_keep(api, None, "http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789",
+ utcnow=utcnow, varying_url_params="KeyId,Signature,Expires")
self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
diff --git a/sdk/python/tests/test_internal.py b/sdk/python/tests/test_internal.py
new file mode 100644
index 0000000000..d24bfbb775
--- /dev/null
+++ b/sdk/python/tests/test_internal.py
@@ -0,0 +1,76 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import re
+
+import pytest
+
+from arvados import _internal
+
+class TestDeprecated:
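+ # The deprecated() decorator should rewrite the docstring and emit a DeprecationWarning when the function is called.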
+ @staticmethod
+ @_internal.deprecated('TestVersion', 'arvados.noop')
+ def noop_func():
+ """Do nothing
+
+ This function returns None.
+ """
+
+ @pytest.mark.parametrize('pattern', [
+ r'^Do nothing$',
+ r'^ *.. WARNING:: Deprecated$',
+ r' removed in Arvados TestVersion\.',
+ r' Prefer arvados\.noop\b',
+ r'^ *This function returns None\.$',
+ ])
+ def test_docstring(self, pattern):
+ assert re.search(pattern, self.noop_func.__doc__, re.MULTILINE) is not None
+
+ def test_deprecation_warning(self):
+ with pytest.warns(DeprecationWarning) as check:
+ self.noop_func()
+ actual = str(check[0].message)
+ assert ' removed in Arvados TestVersion.' in actual
+ assert ' Prefer arvados.noop ' in actual
+
+
+class TestParseSeq:
+ @pytest.mark.parametrize('s', [
+ 'foo,bar',
+ 'foo, bar',
+ 'foo , bar',
+ ])
+ def test_default_split(self, s):
+ assert list(_internal.parse_seq(s)) == ['foo', 'bar']
+
+ @pytest.mark.parametrize('s', [
+ 'foo',
+ ',foo',
+ 'foo ,',
+ ' foo ',
+ ',foo,',
+ ', foo ,',
+ ])
+ def test_empty_filtering(self, s):
+ assert list(_internal.parse_seq(s)) == ['foo']
+
+ @pytest.mark.parametrize('s', [
+ '',
+ ' ',
+ ',',
+ ' , ',
+ ])
+ def test_empty_list(self, s):
+ assert list(_internal.parse_seq(s)) == []
+
+
+class TestUniq:
+ @pytest.mark.parametrize('arg', [
+ 'abcde',
+ 'aabbccddee',
+ 'abcdeabcde',
+ 'ababcbabcdcbabcdedcbae',
+ ])
+ def test_uniq(self, arg):
+ assert list(_internal.uniq(iter(arg))) == list('abcde')
diff --git a/sdk/python/tests/test_keep_client.py b/sdk/python/tests/test_keep_client.py
index 8c0f096b61..9a8057c749 100644
--- a/sdk/python/tests/test_keep_client.py
+++ b/sdk/python/tests/test_keep_client.py
@@ -2,36 +2,32 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from builtins import range
-from builtins import object
+import errno
import hashlib
-import mock
-from mock import patch
+import mmap
import os
-import errno
-import pycurl
import random
import re
import shutil
import socket
-import sys
import stat
+import sys
import tempfile
import time
import unittest
import urllib.parse
-import mmap
+
+from pathlib import Path
+from unittest import mock
+from unittest.mock import patch
import parameterized
+import pycurl
import arvados
import arvados.retry
import arvados.util
+
from . import arvados_testutil as tutil
from . import keepstub
from . import run_test_server
@@ -65,7 +61,7 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
foo_locator = self.keep_client.put('foo')
self.assertRegex(
foo_locator,
- '^acbd18db4cc2f85cedef654fccc4a4d8\+3',
+ r'^acbd18db4cc2f85cedef654fccc4a4d8\+3',
'wrong md5 hash from Keep.put("foo"): ' + foo_locator)
# 6 bytes because uploaded 2 copies
@@ -82,7 +78,7 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
blob_locator = self.keep_client.put(blob_str)
self.assertRegex(
blob_locator,
- '^7fc7c53b45e53926ba52821140fef396\+6',
+ r'^7fc7c53b45e53926ba52821140fef396\+6',
('wrong locator from Keep.put():' + blob_locator))
self.assertEqual(self.keep_client.get(blob_locator),
blob_str,
@@ -95,7 +91,7 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
blob_locator = self.keep_client.put(blob_data)
self.assertRegex(
blob_locator,
- '^84d90fc0d8175dd5dcfab04b999bc956\+67108864',
+ r'^84d90fc0d8175dd5dcfab04b999bc956\+67108864',
('wrong locator from Keep.put(): ' + blob_locator))
self.assertEqual(self.keep_client.get(blob_locator),
blob_data,
@@ -107,7 +103,7 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
blob_locator = self.keep_client.put(blob_data, copies=1)
self.assertRegex(
blob_locator,
- '^c902006bc98a3eb4a3663b65ab4a6fab\+8',
+ r'^c902006bc98a3eb4a3663b65ab4a6fab\+8',
('wrong locator from Keep.put(): ' + blob_locator))
self.assertEqual(self.keep_client.get(blob_locator),
blob_data,
@@ -117,22 +113,10 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
blob_locator = self.keep_client.put('', copies=1)
self.assertRegex(
blob_locator,
- '^d41d8cd98f00b204e9800998ecf8427e\+0',
+ r'^d41d8cd98f00b204e9800998ecf8427e\+0',
('wrong locator from Keep.put(""): ' + blob_locator))
- def test_unicode_must_be_ascii(self):
- # If unicode type, must only consist of valid ASCII
- foo_locator = self.keep_client.put(u'foo')
- self.assertRegex(
- foo_locator,
- '^acbd18db4cc2f85cedef654fccc4a4d8\+3',
- 'wrong md5 hash from Keep.put("foo"): ' + foo_locator)
-
- if sys.version_info < (3, 0):
- with self.assertRaises(UnicodeEncodeError):
- # Error if it is not ASCII
- self.keep_client.put(u'\xe2')
-
+ def test_KeepPutDataType(self):
with self.assertRaises(AttributeError):
# Must be bytes or have an encode() method
self.keep_client.put({})
@@ -141,7 +125,7 @@ class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
locator = self.keep_client.put('test_head')
self.assertRegex(
locator,
- '^b9a772c7049325feb7130fff1f8333e9\+9',
+ r'^b9a772c7049325feb7130fff1f8333e9\+9',
'wrong md5 hash from Keep.put for "test_head": ' + locator)
self.assertEqual(True, self.keep_client.head(locator))
self.assertEqual(self.keep_client.get(locator),
@@ -182,8 +166,9 @@ class KeepPermissionTestCase(run_test_server.TestCaseWithServers, DiskCacheBase)
# GET from a different user => bad request
run_test_server.authorize_with('spectator')
+ keep_client2 = arvados.KeepClient(block_cache=self.make_block_cache(self.disk_cache))
self.assertRaises(arvados.errors.KeepReadError,
- arvados.Keep.get,
+ keep_client2.get,
bar_locator)
# Unauthenticated GET for a signed locator => bad request
@@ -196,6 +181,7 @@ class KeepPermissionTestCase(run_test_server.TestCaseWithServers, DiskCacheBase)
keep_client.get,
unsigned_bar_locator)
+
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepProxyTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
disk_cache = False
@@ -221,7 +207,7 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
baz_locator = keep_client.put('baz')
self.assertRegex(
baz_locator,
- '^73feffa4b7f6bb68e44cf984c85f6e88\+3',
+ r'^73feffa4b7f6bb68e44cf984c85f6e88\+3',
'wrong md5 hash from Keep.put("baz"): ' + baz_locator)
self.assertEqual(keep_client.get(baz_locator),
b'baz',
@@ -246,6 +232,7 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
local_store='',
block_cache=self.make_block_cache(self.disk_cache))
+
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
disk_cache = False
@@ -582,6 +569,7 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCach
self.assertEqual(pdh, actual)
self.assertEqual(1, req_mock.call_count)
+
@tutil.skip_sleep
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
@@ -596,7 +584,7 @@ class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheB
def tearDown(self):
DiskCacheBase.tearDown(self)
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_get_request_cache(self, get_mock):
with tutil.mock_keep_responses(self.data, 200, 200):
self.keep_client.get(self.locator)
@@ -604,7 +592,7 @@ class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheB
# Request already cached, don't require more than one request
get_mock.assert_called_once()
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_head_request_cache(self, get_mock):
with tutil.mock_keep_responses(self.data, 200, 200):
self.keep_client.head(self.locator)
@@ -612,7 +600,7 @@ class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheB
# Don't cache HEAD requests so that they're not confused with GET reqs
self.assertEqual(2, get_mock.call_count)
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_head_and_then_get_return_different_responses(self, get_mock):
head_resp = None
get_resp = None
@@ -625,9 +613,6 @@ class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheB
self.assertNotEqual(head_resp, get_resp)
-
-
-
@tutil.skip_sleep
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
@@ -881,6 +866,7 @@ class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock, DiskC
def test_put_error_shows_probe_order(self):
self.check_64_zeros_error_order('put', arvados.errors.KeepWriteError)
+
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase, DiskCacheBase):
disk_cache = False
@@ -1028,6 +1014,7 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase, DiskCacheBa
with self.assertRaises(arvados.errors.KeepWriteError):
kc.put(self.DATA, copies=1, num_retries=0)
+
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
disk_cache = False
@@ -1130,6 +1117,7 @@ class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCach
self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
MockCurl.return_value.getopt(pycurl.URL).decode())
+
class KeepClientRetryTestMixin(object):
disk_cache = False
@@ -1250,6 +1238,7 @@ class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase, Di
(self.DEFAULT_EXPECT, 200)):
self.check_success(locator=self.HINTED_LOCATOR)
+
@tutil.skip_sleep
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):
@@ -1292,6 +1281,7 @@ class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase, D
(self.DEFAULT_EXPECT, 200)):
self.check_success(locator=self.HINTED_LOCATOR)
+
@tutil.skip_sleep
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):
@@ -1312,7 +1302,6 @@ class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase, Di
class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
-
class FakeKeepService(object):
def __init__(self, delay, will_succeed=False, will_raise=None, replicas=1):
self.delay = delay
@@ -1339,9 +1328,10 @@ class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
def finished(self):
return False
+
def setUp(self):
self.copies = 3
- self.pool = arvados.KeepClient.KeepWriterThreadPool(
+ self.pool = arvados.KeepClient._KeepWriterThreadPool(
data = 'foo',
data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',
max_service_replicas = self.copies,
@@ -1424,6 +1414,7 @@ class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock, DiskCac
self.keep_client.put('foo', num_retries=1, copies=2)
self.assertEqual(2, req_mock.call_count)
+
@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class KeepClientAPIErrorTest(unittest.TestCase, DiskCacheBase):
disk_cache = False
@@ -1470,8 +1461,15 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
def tearDown(self):
shutil.rmtree(self.disk_cache_dir)
+ @mock.patch('arvados._internal.basedirs.BaseDirectories.storage_path')
+ def test_default_disk_cache_dir(self, storage_path):
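+ # With no explicit directory, KeepBlockCache should derive one from BaseDirectories.storage_path('keep').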
+ expected = Path(self.disk_cache_dir)
+ storage_path.return_value = expected
+ cache = arvados.keep.KeepBlockCache(disk_cache=True)
+ storage_path.assert_called_with('keep')
+ self.assertEqual(cache._disk_cache_dir, str(expected))
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_disk_cache_read(self, get_mock):
# confirm it finds an existing cache block when the cache is
# initialized.
@@ -1489,8 +1487,7 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
get_mock.assert_not_called()
-
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_disk_cache_share(self, get_mock):
# confirm it finds a cache block written after the disk cache
# was initialized.
@@ -1508,7 +1505,6 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
get_mock.assert_not_called()
-
def test_disk_cache_write(self):
# confirm the cache block was created
@@ -1524,7 +1520,6 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "rb") as f:
self.assertTrue(tutil.binary_compare(f.read(), self.data))
-
def test_disk_cache_clean(self):
# confirm that a tmp file in the cache is cleaned up
@@ -1563,14 +1558,18 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC")))
self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC")))
-
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_disk_cache_cap(self, get_mock):
# confirm that the cache is kept to the desired limit
os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "wb") as f:
f.write(self.data)
+ # We want KeepBlockCache to consider this file older than the
+ # next file we write. Date it well in the past (a little over a
+ # day) to ensure that happens regardless of filesystem settings.
+ old_mtime = time.time() - 90000
+ os.utime(f.fileno(), (old_mtime, old_mtime))
os.makedirs(os.path.join(self.disk_cache_dir, "acb"))
with open(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock"), "wb") as f:
@@ -1586,8 +1585,7 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertFalse(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
-
- @mock.patch('arvados.KeepClient.KeepService.get')
+ @mock.patch('arvados.KeepClient._KeepService.get')
def test_disk_cache_share(self, get_mock):
# confirm that a second cache doesn't delete files that belong to the first cache.
@@ -1616,8 +1614,6 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
-
-
def test_disk_cache_error(self):
os.chmod(self.disk_cache_dir, stat.S_IRUSR)
@@ -1626,7 +1622,6 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
disk_cache_dir=self.disk_cache_dir)
-
def test_disk_cache_write_error(self):
block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
disk_cache_dir=self.disk_cache_dir)
@@ -1642,11 +1637,13 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
with tutil.mock_keep_responses(self.data, 200) as mock:
keep_client.get(self.locator)
-
def test_disk_cache_retry_write_error(self):
- block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
- disk_cache_dir=self.disk_cache_dir)
-
+ cache_max_before = 512 * 1024 * 1024
+ block_cache = arvados.keep.KeepBlockCache(
+ cache_max=cache_max_before,
+ disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir,
+ )
keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
called = False
@@ -1659,11 +1656,7 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
else:
return realmmap(*args, **kwargs)
- with patch('mmap.mmap') as mockmmap:
- mockmmap.side_effect = sideeffect_mmap
-
- cache_max_before = block_cache.cache_max
-
+ with patch('mmap.mmap', autospec=True, side_effect=sideeffect_mmap) as mockmmap:
with tutil.mock_keep_responses(self.data, 200) as mock:
self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
@@ -1673,8 +1666,7 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertTrue(tutil.binary_compare(f.read(), self.data))
# shrank the cache in response to ENOSPC
- self.assertTrue(cache_max_before > block_cache.cache_max)
-
+ self.assertGreater(cache_max_before, block_cache.cache_max)
def test_disk_cache_retry_write_error2(self):
block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
@@ -1692,9 +1684,7 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
else:
return realmmap(*args, **kwargs)
- with patch('mmap.mmap') as mockmmap:
- mockmmap.side_effect = sideeffect_mmap
-
+ with patch('mmap.mmap', autospec=True, side_effect=sideeffect_mmap) as mockmmap:
slots_before = block_cache._max_slots
with tutil.mock_keep_responses(self.data, 200) as mock:
@@ -1706,4 +1696,4 @@ class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertTrue(tutil.binary_compare(f.read(), self.data))
# shrank the cache in response to ENOMEM
- self.assertTrue(slots_before > block_cache._max_slots)
+ self.assertGreater(slots_before, block_cache._max_slots)
diff --git a/sdk/python/tests/test_keep_locator.py b/sdk/python/tests/test_keep_locator.py
index e47d64d337..bc93f403a0 100644
--- a/sdk/python/tests/test_keep_locator.py
+++ b/sdk/python/tests/test_keep_locator.py
@@ -2,10 +2,6 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import next
-from builtins import zip
-from builtins import str
-from builtins import range
import datetime
import itertools
import random
diff --git a/sdk/python/tests/test_retry.py b/sdk/python/tests/test_retry.py
index bcf784d130..c6e713244c 100644
--- a/sdk/python/tests/test_retry.py
+++ b/sdk/python/tests/test_retry.py
@@ -2,15 +2,13 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import zip
-from builtins import range
-from builtins import object
import itertools
import unittest
+from unittest import mock
+
import arvados.errors as arv_error
import arvados.retry as arv_retry
-import mock
class RetryLoopTestMixin(object):
@staticmethod
diff --git a/sdk/python/tests/test_retry_job_helpers.py b/sdk/python/tests/test_retry_job_helpers.py
index 9389b25c88..a5a6bb22c1 100644
--- a/sdk/python/tests/test_retry_job_helpers.py
+++ b/sdk/python/tests/test_retry_job_helpers.py
@@ -2,18 +2,17 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import object
-import mock
-import os
-import unittest
import hashlib
-from . import run_test_server
import json
-import arvados
-from . import arvados_testutil as tutil
+import os
+import unittest
+
from apiclient import http as apiclient_http
+from unittest import mock
+import arvados
+from . import run_test_server
+from . import arvados_testutil as tutil
@tutil.skip_sleep
class ApiClientRetryTestMixin(object):
@@ -57,22 +56,3 @@ class ApiClientRetryTestMixin(object):
def test_no_retry_after_immediate_success(self):
with tutil.mock_api_responses(self.api_client, '{}', [200, 400]):
self.run_method()
-
-
-class CurrentJobTestCase(ApiClientRetryTestMixin, unittest.TestCase):
-
- DEFAULT_EXCEPTION = arvados.errors.ApiError
-
- def setUp(self):
- super(CurrentJobTestCase, self).setUp()
- os.environ['JOB_UUID'] = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
- os.environ['JOB_WORK'] = '.'
-
- def tearDown(self):
- del os.environ['JOB_UUID']
- del os.environ['JOB_WORK']
- arvados._current_job = None
- super(CurrentJobTestCase, self).tearDown()
-
- def run_method(self):
- arvados.current_job()
diff --git a/sdk/python/tests/test_s3_to_keep.py b/sdk/python/tests/test_s3_to_keep.py
new file mode 100644
index 0000000000..06a33e1758
--- /dev/null
+++ b/sdk/python/tests/test_s3_to_keep.py
@@ -0,0 +1,75 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import copy
+import io
+import functools
+import hashlib
+import json
+import logging
+import sys
+import unittest
+import datetime
+
+from unittest import mock
+
+import arvados
+import arvados.collection
+import arvados.keep
+
+from arvados._internal import s3_to_keep
+import boto3.s3.transfer
+
+class TestS3ToKeep(unittest.TestCase):
+
+ @mock.patch("arvados.collection.Collection")
+ def test_s3_get(self, collectionmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": []
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ collectionmock.return_value = cm
+
+ mockfile = mock.MagicMock()
+ cm.open.return_value = mockfile
+
+ mockboto = mock.MagicMock()
+ mockbotoclient = mock.MagicMock()
+ mockboto.client.return_value = mockbotoclient
+
+ mockbotoclient.head_object.return_value = {
+ 'ResponseMetadata': {
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {
+ "Content-Length": 123
+ }
+ }
+ }
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+ r = s3_to_keep.s3_to_keep(api, mockboto, None, "s3://examplebucket/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3', 's3://examplebucket/file1.txt',
+ datetime.datetime(2018, 5, 15, 0, 0)))
+
+ cm.open.assert_called_with("file1.txt", "wb")
+ cm.save_new.assert_called_with(name="Downloaded from s3%3A%2F%2Fexamplebucket%2Ffile1.txt",
+ owner_uuid=None, ensure_unique_name=True)
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'s3://examplebucket/file1.txt': {'Content-Length': 123, 'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+ ])
+
+ kall = mockbotoclient.download_fileobj.call_args
+ assert kall.kwargs['Bucket'] == 'examplebucket'
+ assert kall.kwargs['Key'] == 'file1.txt'
+ assert kall.kwargs['Fileobj'] is mockfile
diff --git a/sdk/python/tests/test_safeapi.py b/sdk/python/tests/test_safeapi.py
deleted file mode 100644
index a41219e9c5..0000000000
--- a/sdk/python/tests/test_safeapi.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import os
-import unittest
-
-import googleapiclient
-
-from arvados import safeapi
-
-from . import run_test_server
-
-class SafeApiTest(run_test_server.TestCaseWithServers):
- MAIN_SERVER = {}
-
- def test_constructor(self):
- env_mapping = {
- key: value
- for key, value in os.environ.items()
- if key.startswith('ARVADOS_API_')
- }
- extra_params = {
- 'timeout': 299,
- }
- base_params = {
- key[12:].lower(): value
- for key, value in env_mapping.items()
- }
- try:
- base_params['insecure'] = base_params.pop('host_insecure')
- except KeyError:
- pass
- expected_keep_params = {}
- for config, params, subtest in [
- (None, {}, "default arguments"),
- (None, extra_params, "extra params"),
- (env_mapping, {}, "explicit config"),
- (env_mapping, extra_params, "explicit config and params"),
- ({}, base_params, "params only"),
- ]:
- with self.subTest(f"test constructor with {subtest}"):
- expected_timeout = params.get('timeout', 300)
- expected_params = dict(params)
- keep_params = dict(expected_keep_params)
- client = safeapi.ThreadSafeApiCache(config, keep_params, params, 'v1')
- self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
- self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
- self.assertEqual(client._http.timeout, expected_timeout)
- self.assertEqual(params, expected_params,
- "api_params was modified in-place")
- self.assertEqual(keep_params, expected_keep_params,
- "keep_params was modified in-place")
-
- def test_constructor_no_args(self):
- client = safeapi.ThreadSafeApiCache()
- self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
- self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
- self.assertTrue(client.insecure)
-
- def test_constructor_bad_version(self):
- with self.assertRaises(googleapiclient.errors.UnknownApiNameOrVersion):
- safeapi.ThreadSafeApiCache(version='BadTestVersion')
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
deleted file mode 100644
index 41add57c0e..0000000000
--- a/sdk/python/tests/test_sdk.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import mock
-import os
-import unittest
-
-import arvados
-import arvados.collection
-
-class TestSDK(unittest.TestCase):
-
- @mock.patch('arvados.current_task')
- @mock.patch('arvados.current_job')
- def test_one_task_per_input_file_normalize(self, mock_job, mock_task):
- mock_api = mock.MagicMock()
-
- # This manifest will be reduced from three lines to one when it is
- # normalized.
- nonnormalized_manifest = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
-. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
-. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
-"""
- dummy_hash = 'ffffffffffffffffffffffffffffffff+0'
-
- mock_job.return_value = {
- 'uuid': 'none',
- 'script_parameters': {
- 'input': dummy_hash
- }
- }
- mock_task.return_value = {
- 'uuid': 'none',
- 'sequence': 0,
- }
- # mock the API client to return a collection with a nonnormalized manifest.
- mock_api.collections().get().execute.return_value = {
- 'uuid': 'zzzzz-4zz18-mockcollection0',
- 'portable_data_hash': dummy_hash,
- 'manifest_text': nonnormalized_manifest,
- }
-
- # Because one_task_per_input_file normalizes this collection,
- # it should now create only one job task and not three.
- arvados.job_setup.one_task_per_input_file(and_end_task=False, api_client=mock_api)
- mock_api.job_tasks().create().execute.assert_called_once_with()
diff --git a/sdk/python/tests/test_stream.py b/sdk/python/tests/test_stream.py
index 12a3340eab..c157614287 100644
--- a/sdk/python/tests/test_stream.py
+++ b/sdk/python/tests/test_stream.py
@@ -2,29 +2,21 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
-from builtins import object
import bz2
import gzip
import io
-import mock
import os
import unittest
import hashlib
+from unittest import mock
+
import arvados
-from arvados import StreamReader, StreamFileReader
-from arvados._ranges import Range
from . import arvados_testutil as tutil
from . import run_test_server
-class StreamFileReaderTestCase(unittest.TestCase):
- def make_count_reader(self):
- stream = tutil.MockStreamReader('.', '01234', '34567', '67890')
- return StreamFileReader(stream, [Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)],
- 'count.txt')
-
+class StreamFileReaderTestMixin(object):
def test_read_block_crossing_behavior(self):
# read() calls will be aligned on block boundaries - see #3663.
sfile = self.make_count_reader()
@@ -36,8 +28,8 @@ class StreamFileReaderTestCase(unittest.TestCase):
def test_successive_reads(self):
sfile = self.make_count_reader()
- for expect in [b'123', b'456', b'789', b'']:
- self.assertEqual(expect, sfile.read(10))
+ for expect in [b'1234', b'5678', b'9', b'']:
+ self.assertEqual(expect, sfile.read(4))
def test_readfrom_spans_blocks(self):
sfile = self.make_count_reader()
@@ -88,11 +80,6 @@ class StreamFileReaderTestCase(unittest.TestCase):
def test_size(self):
self.assertEqual(9, self.make_count_reader().size())
- def test_tell_after_block_read(self):
- sfile = self.make_count_reader()
- sfile.read(5)
- self.assertEqual(3, sfile.tell())
-
def test_tell_after_small_read(self):
sfile = self.make_count_reader()
sfile.read(1)
@@ -109,10 +96,6 @@ class StreamFileReaderTestCase(unittest.TestCase):
self.assertEqual(b'12', sfile.read(2))
self.assertTrue(sfile.closed, "reader is open after context")
- def make_newlines_reader(self):
- stream = tutil.MockStreamReader('.', 'one\ntwo\n\nth', 'ree\nfour\n\n')
- return StreamFileReader(stream, [Range(0, 0, 11), Range(11, 11, 10)], 'count.txt')
-
def check_lines(self, actual):
self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'],
actual)
@@ -143,19 +126,14 @@ class StreamFileReaderTestCase(unittest.TestCase):
def test_readlines_sizehint(self):
result = self.make_newlines_reader().readlines(8)
- self.assertEqual(['one\n', 'two\n'], result[:2])
- self.assertNotIn('three\n', result)
+ self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'], result)
def test_name_attribute(self):
- # Test both .name and .name() (for backward compatibility)
- stream = tutil.MockStreamReader()
- sfile = StreamFileReader(stream, [Range(0, 0, 0)], 'nametest')
+ sfile = self.make_file_reader(name='nametest')
self.assertEqual('nametest', sfile.name)
- self.assertEqual('nametest', sfile.name())
def check_decompressed_name(self, filename, expect):
- stream = tutil.MockStreamReader('.', '')
- reader = StreamFileReader(stream, [Range(0, 0, 0)], filename)
+ reader = self.make_file_reader(name=filename)
self.assertEqual(expect, reader.decompressed_name())
def test_decompressed_name_uncompressed_file(self):
@@ -170,9 +148,7 @@ class StreamFileReaderTestCase(unittest.TestCase):
def check_decompression(self, compress_ext, compress_func):
test_text = b'decompression\ntest\n'
test_data = compress_func(test_text)
- stream = tutil.MockStreamReader('.', test_data)
- reader = StreamFileReader(stream, [Range(0, 0, len(test_data))],
- 'test.' + compress_ext)
+ reader = self.make_file_reader(name='test.'+compress_ext, data=test_data)
self.assertEqual(test_text, b''.join(reader.readall_decompressed()))
@staticmethod
@@ -258,48 +234,5 @@ class StreamRetryTestMixin(object):
self.read_for_test(reader, 10, num_retries=1)
-class StreamReaderTestCase(unittest.TestCase, StreamRetryTestMixin):
- def reader_for(self, coll_name, **kwargs):
- return StreamReader(self.manifest_for(coll_name).split(),
- self.keep_client(), **kwargs)
-
- def read_for_test(self, reader, byte_count, **kwargs):
- return reader.readfrom(0, byte_count, **kwargs)
-
- def test_manifest_text_without_keep_client(self):
- mtext = self.manifest_for('multilevel_collection_1')
- for line in mtext.rstrip('\n').split('\n'):
- reader = StreamReader(line.split())
- self.assertEqual(line + '\n', reader.manifest_text())
-
-
-class StreamFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
- def reader_for(self, coll_name, **kwargs):
- return StreamReader(self.manifest_for(coll_name).split(),
- self.keep_client(), **kwargs).all_files()[0]
-
- def read_for_test(self, reader, byte_count, **kwargs):
- return reader.read(byte_count, **kwargs)
-
-
-class StreamFileReadFromTestCase(StreamFileReadTestCase):
- def read_for_test(self, reader, byte_count, **kwargs):
- return reader.readfrom(0, byte_count, **kwargs)
-
-
-class StreamFileReadAllTestCase(StreamFileReadTestCase):
- def read_for_test(self, reader, byte_count, **kwargs):
- return b''.join(reader.readall(**kwargs))
-
-
-class StreamFileReadAllDecompressedTestCase(StreamFileReadTestCase):
- def read_for_test(self, reader, byte_count, **kwargs):
- return b''.join(reader.readall_decompressed(**kwargs))
-
-
-class StreamFileReadlinesTestCase(StreamFileReadTestCase):
- def read_for_test(self, reader, byte_count, **kwargs):
- return ''.join(reader.readlines(**kwargs)).encode()
-
if __name__ == '__main__':
unittest.main()
diff --git a/sdk/python/tests/test_util.py b/sdk/python/tests/test_util.py
index 75d4a89e30..38388ef4eb 100644
--- a/sdk/python/tests/test_util.py
+++ b/sdk/python/tests/test_util.py
@@ -4,57 +4,28 @@
import itertools
import os
-import parameterized
import subprocess
import unittest
+import parameterized
+import pytest
from unittest import mock
import arvados
import arvados.util
-class MkdirDashPTest(unittest.TestCase):
- def setUp(self):
- try:
- os.path.mkdir('./tmp')
- except:
- pass
- def tearDown(self):
- try:
- os.unlink('./tmp/bar')
- os.rmdir('./tmp/foo')
- os.rmdir('./tmp')
- except:
- pass
- def runTest(self):
- arvados.util.mkdir_dash_p('./tmp/foo')
- with open('./tmp/bar', 'wb') as f:
- f.write(b'bar')
- self.assertRaises(OSError, arvados.util.mkdir_dash_p, './tmp/bar')
-
-
-class RunCommandTestCase(unittest.TestCase):
- def test_success(self):
- stdout, stderr = arvados.util.run_command(['echo', 'test'],
- stderr=subprocess.PIPE)
- self.assertEqual("test\n".encode(), stdout)
- self.assertEqual("".encode(), stderr)
-
- def test_failure(self):
- with self.assertRaises(arvados.errors.CommandFailedError):
- arvados.util.run_command(['false'])
-
class KeysetTestHelper:
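+    """Scripted stand-in for an API list method.
+
+    fn() asserts that each successive call's kwargs match the next
+    expected entry; execute() asserts num_retries and returns the
+    scripted response for that call.
+    """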
- def __init__(self, expect):
+ def __init__(self, expect, expect_num_retries=0):
self.n = 0
self.expect = expect
+ self.expect_num_retries = expect_num_retries
def fn(self, **kwargs):
- if self.expect[self.n][0] != kwargs:
- raise Exception("Didn't match %s != %s" % (self.expect[self.n][0], kwargs))
+ assert kwargs == self.expect[self.n][0]
return self
def execute(self, num_retries):
+ assert num_retries == self.expect_num_retries
self.n += 1
return self.expect[self.n-1][1]
@@ -64,6 +35,12 @@ _SELECT_FAKE_ITEM = {
'created_at': '2023-08-28T12:34:56.123456Z',
}
+_FAKE_COMPUTED_PERMISSIONS_ITEM = {
+ 'user_uuid': 'zzzzz-zyyyz-zzzzzyyyyywwwww',
+ 'target_uuid': 'zzzzz-ttttt-xxxxxyyyyyzzzzz',
+ 'perm_level': 'can_write',
+}
+
class KeysetListAllTestCase(unittest.TestCase):
def test_empty(self):
ks = KeysetTestHelper([[
@@ -185,20 +162,24 @@ class KeysetListAllTestCase(unittest.TestCase):
ls = list(arvados.util.keyset_list_all(ks.fn, ascending=False))
self.assertEqual(ls, [{"created_at": "2", "uuid": "2"}, {"created_at": "1", "uuid": "1"}])
- @parameterized.parameterized.expand(zip(
- itertools.cycle(_SELECT_FAKE_ITEM),
- itertools.chain.from_iterable(
- itertools.combinations(_SELECT_FAKE_ITEM, count)
- for count in range(len(_SELECT_FAKE_ITEM) + 1)
- ),
- ))
- def test_select(self, order_key, select):
+ @parameterized.parameterized.expand(
+ (fake_item, key_fields, order_key, select)
+ for (fake_item, key_fields) in [
+ (_SELECT_FAKE_ITEM, ('uuid',)),
+ (_FAKE_COMPUTED_PERMISSIONS_ITEM, ('user_uuid', 'target_uuid')),
+ ]
+ for order_key in fake_item
+ if order_key != 'perm_level'
+ for count in range(len(fake_item) + 1)
+ for select in itertools.combinations(fake_item, count)
+ )
+ def test_select(self, fake_item, key_fields, order_key, select):
# keyset_list_all must have both uuid and order_key to function.
# Test that it selects those fields along with user-specified ones.
- expect_select = {'uuid', order_key, *select}
+ expect_select = {*key_fields, order_key, *select}
item = {
key: value
- for key, value in _SELECT_FAKE_ITEM.items()
+ for key, value in fake_item.items()
if key in expect_select
}
list_func = mock.Mock()
@@ -210,9 +191,42 @@ class KeysetListAllTestCase(unittest.TestCase):
],
)
list_func.reset_mock()
- actual = list(arvados.util.keyset_list_all(list_func, order_key, select=list(select)))
+ actual = list(arvados.util.keyset_list_all(list_func, order_key, select=list(select), key_fields=key_fields))
self.assertEqual(actual, [item])
calls = list_func.call_args_list
self.assertTrue(len(calls) >= 2, "list_func() not called enough to exhaust items")
for args, kwargs in calls:
self.assertEqual(set(kwargs.get('select', ())), expect_select)
+
+
+class TestIterStorageClasses:
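+    """Behavioral sketch of arvados.util.iter_storage_classes.
+
+    These tests assume it yields the names of storage classes marked
+    Default (or of all classes passing a custom check), falling back
+    to a single fallback name when nothing matches.
+    """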
+ @pytest.fixture
+ def mixed_config(self):
+ return {'StorageClasses': {
+ 'foo': {'Default': False},
+ 'bar': {'Default': True},
+ 'baz': {'Default': True},
+ }}
+
+ @pytest.fixture
+ def nodef_config(self):
+ return {'StorageClasses': {
+ 'foo': {'Default': False},
+ 'bar': {'Default': False},
+ }}
+
+ def test_defaults(self, mixed_config):
+ assert list(arvados.util.iter_storage_classes(mixed_config)) == ['bar', 'baz']
+
+ def test_custom_check(self, mixed_config):
+ assert list(arvados.util.iter_storage_classes(mixed_config, bool)) == ['foo', 'bar', 'baz']
+
+ def test_default_fallback(self, nodef_config):
+ assert list(arvados.util.iter_storage_classes(nodef_config)) == ['default']
+
+ def test_custom_fallback(self, nodef_config):
+ assert list(arvados.util.iter_storage_classes(nodef_config, fallback='fb')) == ['fb']
+
+ def test_no_fallback(self, nodef_config):
+ assert list(arvados.util.iter_storage_classes(nodef_config, fallback='')) == []
+
diff --git a/sdk/python/tests/test_vocabulary.py b/sdk/python/tests/test_vocabulary.py
index aa2e739e20..2f5db3b9d9 100644
--- a/sdk/python/tests/test_vocabulary.py
+++ b/sdk/python/tests/test_vocabulary.py
@@ -4,7 +4,8 @@
import arvados
import unittest
-import mock
+
+from unittest import mock
from arvados import api, vocabulary
diff --git a/sdk/ruby-google-api-client/arvados-google-api-client.gemspec b/sdk/ruby-google-api-client/arvados-google-api-client.gemspec
index 123180ae1c..01fabbcc18 100644
--- a/sdk/ruby-google-api-client/arvados-google-api-client.gemspec
+++ b/sdk/ruby-google-api-client/arvados-google-api-client.gemspec
@@ -26,21 +26,28 @@ Gem::Specification.new do |s|
s.add_runtime_dependency 'addressable', '~> 2.3'
s.add_runtime_dependency 'signet', '~> 0.16.0'
- # faraday requires Ruby 3.0 starting with 2.9.0. If you install this gem
- # on Ruby 2.7, the dependency resolver asks you to resolve the conflict
- # manually. Instead of teaching all our tooling to do that, we prefer to
- # require the latest version that supports Ruby 2.7 here. This requirement
- # can be relaxed to '~> 2.0' when we drop support for Ruby 2.7.
+ # faraday stopped supporting Ruby 2.7 with its 2.9.0 release.
+ # Force a resolution that supports all our Rubies:
s.add_runtime_dependency 'faraday', '~> 2.8.0'
s.add_runtime_dependency 'faraday-multipart', '~> 1.0'
s.add_runtime_dependency 'faraday-gzip', '~> 2.0'
- s.add_runtime_dependency 'googleauth', '~> 1.0'
+ # googleauth stopped supporting Ruby 2.7 in 1.2.0 (due to a new dependency).
+ s.add_runtime_dependency 'googleauth', '~> 1.1.0'
s.add_runtime_dependency 'multi_json', '~> 1.10'
s.add_runtime_dependency 'autoparse', '~> 0.3'
s.add_runtime_dependency 'extlib', '~> 0.9'
s.add_runtime_dependency 'launchy', '~> 2.4'
s.add_runtime_dependency 'retriable', '~> 1.4'
- s.add_runtime_dependency 'activesupport', '>= 3.2', '< 8.0'
+ # Rails 7.1.3.x is the last version to support Ruby 2.7.0 in Ubuntu 20.04.
+ # Later 7.1.x releases require Ruby >= 2.7.3:
+ #
+ s.add_runtime_dependency 'activesupport', '~> 7.1.3.4'
+
+ # These are indirect dependencies of the above where we force a resolution
+ # that supports all our Rubies.
+ s.add_runtime_dependency 'google-cloud-env', '~> 2.1.0'
+ s.add_runtime_dependency 'public_suffix', '~> 5.0'
+ s.add_runtime_dependency 'securerandom', '~> 0.3.2'
s.add_development_dependency 'rake', '~> 10.0'
s.add_development_dependency 'yard', '~> 0.8'
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/version.rb b/sdk/ruby-google-api-client/lib/google/api_client/version.rb
index 3f78e4ae37..c13310ae05 100644
--- a/sdk/ruby-google-api-client/lib/google/api_client/version.rb
+++ b/sdk/ruby-google-api-client/lib/google/api_client/version.rb
@@ -19,7 +19,7 @@ module Google
MAJOR = 0
MINOR = 8
TINY = 7
- PATCH = 6
+ PATCH = 11
STRING = [MAJOR, MINOR, TINY, PATCH].compact.join('.')
end
end
diff --git a/sdk/ruby/README b/sdk/ruby/README
index f72a3d1f71..95dbb12c8d 100644
--- a/sdk/ruby/README
+++ b/sdk/ruby/README
@@ -21,8 +21,8 @@ ENV['ARVADOS_API_TOKEN'] = 'qwertyuiopasdfghjklzxcvbnm1234567890abcdefghijklmn'
require 'arvados'
arv = Arvados.new( { :suppress_ssl_warnings => false } )
-pt_list = arv.pipeline_template.list(where:{})
-puts pt_list[:items].first.inspect
+cr_list = arv.container_request.list(where:{})
+puts cr_list[:items].first.inspect
-pt = arv.pipeline_template.get(uuid:"9zb4a-p5p6p-fkkbrl98u3pk87m")
+cr = arv.container_request.get(uuid:"zzzzz-xvhdp-fkkbrl98u3pk87m")
-puts pt.inspect
+puts cr.inspect
diff --git a/sdk/ruby/arvados.gemspec b/sdk/ruby/arvados.gemspec
index ea5ff8c7c5..eda74c82b5 100644
--- a/sdk/ruby/arvados.gemspec
+++ b/sdk/ruby/arvados.gemspec
@@ -2,28 +2,31 @@
#
# SPDX-License-Identifier: Apache-2.0
-if not File.exist?('/usr/bin/git') then
- STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
- exit
-end
-
-git_dir = ENV["GIT_DIR"]
-git_work = ENV["GIT_WORK_TREE"]
begin
- ENV["GIT_DIR"] = File.expand_path "#{__dir__}/../../.git"
- ENV["GIT_WORK_TREE"] = File.expand_path "#{__dir__}/../.."
- git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H #{__dir__}`.chomp.split(":")
- if ENV["ARVADOS_BUILDING_VERSION"]
- version = ENV["ARVADOS_BUILDING_VERSION"]
- else
- version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
+ git_root = "#{__dir__}/../.."
+ git_timestamp, git_hash = IO.popen(
+ ["git", "-C", git_root,
+ "log", "-n1", "--first-parent", "--format=%ct:%H",
+ "--", "build/version-at-commit.sh", "sdk/ruby"],
+ ) do |git_log|
+ git_log.readline.chomp.split(":")
end
- version = version.sub("~dev", ".dev").sub("~rc", ".rc")
- git_timestamp = Time.at(git_timestamp.to_i).utc
-ensure
- ENV["GIT_DIR"] = git_dir
- ENV["GIT_WORK_TREE"] = git_work
+rescue Errno::ENOENT
+ $stderr.puts("failed to get version information: 'git' not found")
+ exit 69 # EX_UNAVAILABLE
+end
+
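+# IO.popen sets $? to the exit status of the child process once the
+# block returns, so we can check whether 'git log' itself succeeded.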
+if $? != 0
+ $stderr.puts("failed to get version information: 'git log' exited #{$?}")
+ exit 65 # EX_DATAERR
+end
+git_timestamp = Time.at(git_timestamp.to_i).utc
+version = ENV["ARVADOS_BUILDING_VERSION"] || IO.popen(
+ ["#{git_root}/build/version-at-commit.sh", git_hash],
+ ) do |ver_out|
+ ver_out.readline.chomp.encode("utf-8")
end
+version = version.sub("~dev", ".dev").sub("~rc", ".rc")
Gem::Specification.new do |s|
s.name = 'arvados'
@@ -38,7 +41,10 @@ Gem::Specification.new do |s|
"lib/arvados/collection.rb", "lib/arvados/keep.rb",
"README", "LICENSE-2.0.txt"]
s.required_ruby_version = '>= 2.7.0'
- s.add_dependency('activesupport', '>= 3')
+ # Rails 7.1.3.x is the last version to support Ruby 2.7.0 in Ubuntu 20.04.
+ # Later 7.1.x releases require Ruby >= 2.7.3:
+ #
+ s.add_dependency('activesupport', '~> 7.1.3.4')
s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
# arvados fork of google-api-client gem with old API and new
# compatibility fixes, built from ../ruby-google-api-client/
diff --git a/services/api/Gemfile b/services/api/Gemfile
index 9cc5f1b7bc..3f6634e42c 100644
--- a/services/api/Gemfile
+++ b/services/api/Gemfile
@@ -4,14 +4,21 @@
source 'https://rubygems.org'
-gem 'rails', '~> 7.0.0'
+# Rails 7.1.3.x is the last version to support Ruby 2.7.0 in Ubuntu 20.04.
+# Later 7.1.x releases require Ruby >= 2.7.3:
+#
+gem 'rails', '~> 7.1.3.4'
gem 'responders'
gem 'i18n'
gem 'sprockets-rails'
group :test, :development do
gem 'factory_bot_rails'
- gem 'ruby-prof'
+ # Enforce Ruby 2.7 compatibility for an indirect dependency.
+ # Can't use ~> because 6.4.6 changes the minimum Ruby (!).
+ gem 'factory_bot', '6.4.5'
+ # This version pin enforces Ruby 2.7 compatibility for a direct dependency.
+ gem 'ruby-prof', '~> 1.6.3'
  # Note: "require: false" here tells bundler not to automatically
# 'require' the packages during application startup. Installation is
# still mandatory.
@@ -32,7 +39,7 @@ gem 'jquery-rails'
gem 'acts_as_api'
-gem 'passenger'
+gem 'passenger', '~> 6.0.26'
# Locking to 5.10.3 to workaround issue in 5.11.1 (https://github.com/seattlerb/minitest/issues/730)
gem 'minitest', '5.10.3'
@@ -41,9 +48,7 @@ gem 'andand'
gem 'optimist'
-gem 'themes_for_rails', git: 'https://github.com/arvados/themes_for_rails'
-
-gem 'arvados', '~> 2.7.0.rc1'
+gem 'arvados', '~> 3.1.2'
gem 'httpclient'
gem 'lograge'
@@ -58,6 +63,11 @@ gem 'webrick'
gem 'mini_portile2', '~> 2.8', '>= 2.8.1'
+# Enforce Ruby 2.7 compatibility for indirect dependencies.
+gem 'nokogiri', '~> 1.15.7'
+gem 'net-imap', '~> 0.3.8'
+gem 'securerandom', '~> 0.3.2'
+
# Install any plugin gems
Dir.glob(File.join(File.dirname(__FILE__), 'lib', '**', "Gemfile")) do |f|
eval(IO.read(f), binding)
diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock
index 0fe91e0a18..5cec00f51e 100644
--- a/services/api/Gemfile.lock
+++ b/services/api/Gemfile.lock
@@ -1,93 +1,95 @@
-GIT
- remote: https://github.com/arvados/themes_for_rails
- revision: ddf6e592b3b6493ea0c2de7b5d3faa120ed35be0
- specs:
- themes_for_rails (0.5.1)
- rails (>= 3.0.0)
-
GEM
remote: https://rubygems.org/
specs:
- actioncable (7.0.8.1)
- actionpack (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ actioncable (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
nio4r (~> 2.0)
websocket-driver (>= 0.6.1)
- actionmailbox (7.0.8.1)
- actionpack (= 7.0.8.1)
- activejob (= 7.0.8.1)
- activerecord (= 7.0.8.1)
- activestorage (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ zeitwerk (~> 2.6)
+ actionmailbox (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ activejob (= 7.1.3.4)
+ activerecord (= 7.1.3.4)
+ activestorage (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
mail (>= 2.7.1)
net-imap
net-pop
net-smtp
- actionmailer (7.0.8.1)
- actionpack (= 7.0.8.1)
- actionview (= 7.0.8.1)
- activejob (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ actionmailer (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ actionview (= 7.1.3.4)
+ activejob (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
mail (~> 2.5, >= 2.5.4)
net-imap
net-pop
net-smtp
- rails-dom-testing (~> 2.0)
- actionpack (7.0.8.1)
- actionview (= 7.0.8.1)
- activesupport (= 7.0.8.1)
- rack (~> 2.0, >= 2.2.4)
+ rails-dom-testing (~> 2.2)
+ actionpack (7.1.3.4)
+ actionview (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
+ nokogiri (>= 1.8.5)
+ racc
+ rack (>= 2.2.4)
+ rack-session (>= 1.0.1)
rack-test (>= 0.6.3)
- rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.0, >= 1.2.0)
- actiontext (7.0.8.1)
- actionpack (= 7.0.8.1)
- activerecord (= 7.0.8.1)
- activestorage (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ rails-dom-testing (~> 2.2)
+ rails-html-sanitizer (~> 1.6)
+ actiontext (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ activerecord (= 7.1.3.4)
+ activestorage (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
globalid (>= 0.6.0)
nokogiri (>= 1.8.5)
- actionview (7.0.8.1)
- activesupport (= 7.0.8.1)
+ actionview (7.1.3.4)
+ activesupport (= 7.1.3.4)
builder (~> 3.1)
- erubi (~> 1.4)
- rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.1, >= 1.2.0)
- activejob (7.0.8.1)
- activesupport (= 7.0.8.1)
+ erubi (~> 1.11)
+ rails-dom-testing (~> 2.2)
+ rails-html-sanitizer (~> 1.6)
+ activejob (7.1.3.4)
+ activesupport (= 7.1.3.4)
globalid (>= 0.3.6)
- activemodel (7.0.8.1)
- activesupport (= 7.0.8.1)
- activerecord (7.0.8.1)
- activemodel (= 7.0.8.1)
- activesupport (= 7.0.8.1)
- activestorage (7.0.8.1)
- actionpack (= 7.0.8.1)
- activejob (= 7.0.8.1)
- activerecord (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ activemodel (7.1.3.4)
+ activesupport (= 7.1.3.4)
+ activerecord (7.1.3.4)
+ activemodel (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
+ timeout (>= 0.4.0)
+ activestorage (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ activejob (= 7.1.3.4)
+ activerecord (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
marcel (~> 1.0)
- mini_mime (>= 1.1.0)
- activesupport (7.0.8.1)
+ activesupport (7.1.3.4)
+ base64
+ bigdecimal
concurrent-ruby (~> 1.0, >= 1.0.2)
+ connection_pool (>= 2.2.5)
+ drb
i18n (>= 1.6, < 2)
minitest (>= 5.1)
+ mutex_m
tzinfo (~> 2.0)
acts_as_api (1.0.1)
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
- addressable (2.8.6)
- public_suffix (>= 2.0.2, < 6.0)
+ addressable (2.8.7)
+ public_suffix (>= 2.0.2, < 7.0)
andand (1.3.3)
- arvados (2.7.0.rc2)
- activesupport (>= 3)
+ arvados (3.1.2)
+ activesupport (~> 7.1.3.4)
andand (~> 1.3, >= 1.3.3)
arvados-google-api-client (>= 0.8.7.5, < 0.8.8)
i18n
json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
- arvados-google-api-client (0.8.7.6)
+ arvados-google-api-client (0.8.7.9)
activesupport (>= 3.2, < 8.0)
addressable (~> 2.3)
autoparse (~> 0.3)
@@ -95,9 +97,11 @@ GEM
faraday (~> 2.8.0)
faraday-gzip (~> 2.0)
faraday-multipart (~> 1.0)
- googleauth (~> 1.0)
+ google-cloud-env (~> 2.1.0)
+ googleauth (~> 1.1.0)
launchy (~> 2.4)
multi_json (~> 1.10)
+ public_suffix (~> 5.0)
retriable (~> 1.4)
signet (~> 0.16.0)
autoparse (0.3.3)
@@ -105,18 +109,21 @@ GEM
extlib (>= 0.9.15)
multi_json (>= 1.0.0)
base64 (0.2.0)
- builder (3.2.4)
+ bigdecimal (3.1.9)
+ builder (3.3.0)
byebug (11.1.3)
- concurrent-ruby (1.2.3)
+ concurrent-ruby (1.3.4)
+ connection_pool (2.5.0)
crass (1.0.6)
- date (3.3.4)
- docile (1.4.0)
- erubi (1.12.0)
+ date (3.4.1)
+ docile (1.4.1)
+ drb (2.2.1)
+ erubi (1.13.1)
extlib (0.9.16)
- factory_bot (6.2.1)
+ factory_bot (6.4.5)
activesupport (>= 5.0.0)
- factory_bot_rails (6.2.0)
- factory_bot (~> 6.2.0)
+ factory_bot_rails (6.4.3)
+ factory_bot (~> 6.4)
railties (>= 5.0.0)
faraday (2.8.1)
base64
@@ -125,42 +132,47 @@ GEM
faraday-gzip (2.0.1)
faraday (>= 1.0)
zlib (~> 3.0)
- faraday-multipart (1.0.4)
- multipart-post (~> 2)
+ faraday-multipart (1.1.0)
+ multipart-post (~> 2.0)
faraday-net_http (3.0.2)
ffi (1.15.5)
globalid (1.2.1)
activesupport (>= 6.1)
google-cloud-env (2.1.1)
faraday (>= 1.0, < 3.a)
- googleauth (1.9.2)
- faraday (>= 1.0, < 3.a)
- google-cloud-env (~> 2.1)
+ googleauth (1.1.3)
+ faraday (>= 0.17.3, < 3.a)
jwt (>= 1.4, < 3.0)
+ memoist (~> 0.16)
multi_json (~> 1.11)
os (>= 0.9, < 2.0)
signet (>= 0.16, < 2.a)
httpclient (2.8.3)
- i18n (1.14.4)
+ i18n (1.14.6)
concurrent-ruby (~> 1.0)
+ io-console (0.8.0)
+ irb (1.15.1)
+ pp (>= 0.6.0)
+ rdoc (>= 4.0.0)
+ reline (>= 0.4.2)
jquery-rails (4.6.0)
rails-dom-testing (>= 1, < 3)
railties (>= 4.2.0)
thor (>= 0.14, < 2.0)
- json (2.6.3)
+ json (2.10.2)
jwt (1.5.6)
launchy (2.5.2)
addressable (~> 2.8)
- listen (3.8.0)
+ listen (3.9.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
- lograge (0.13.0)
+ lograge (0.14.0)
actionpack (>= 4)
activesupport (>= 4)
railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.22.0)
+ loofah (2.23.1)
crass (~> 1.0.2)
nokogiri (>= 1.12.0)
mail (2.8.1)
@@ -169,54 +181,70 @@ GEM
net-pop
net-smtp
marcel (1.0.4)
- method_source (1.0.0)
+ memoist (0.16.2)
mini_mime (1.1.5)
- mini_portile2 (2.8.5)
+ mini_portile2 (2.8.9)
minitest (5.10.3)
- mocha (2.1.0)
+ mocha (2.7.1)
ruby2_keywords (>= 0.0.5)
multi_json (1.15.0)
- multipart-post (2.4.0)
- net-imap (0.3.7)
+ multipart-post (2.4.1)
+ mutex_m (0.3.0)
+ net-imap (0.3.9)
date
net-protocol
net-pop (0.1.2)
net-protocol
net-protocol (0.2.2)
timeout
- net-smtp (0.5.0)
+ net-smtp (0.5.1)
net-protocol
- nio4r (2.7.1)
- nokogiri (1.15.6)
+ nio4r (2.7.4)
+ nokogiri (1.15.7)
mini_portile2 (~> 2.8.2)
racc (~> 1.4)
- oj (3.16.1)
- optimist (3.1.0)
+ oj (3.16.9)
+ bigdecimal (>= 3.0)
+ ostruct (>= 0.2)
+ optimist (3.2.0)
os (1.1.4)
- passenger (6.0.18)
- rack
- rake (>= 0.8.1)
- pg (1.5.4)
- power_assert (2.0.3)
- public_suffix (5.0.4)
- racc (1.7.3)
- rack (2.2.9)
- rack-test (2.1.0)
+ ostruct (0.6.1)
+ passenger (6.0.26)
+ rack (>= 1.6.13)
+ rackup (>= 2.0.0)
+ rake (>= 12.3.3)
+ pg (1.5.9)
+ power_assert (2.0.5)
+ pp (0.6.2)
+ prettyprint
+ prettyprint (0.2.0)
+ psych (5.2.3)
+ date
+ stringio
+ public_suffix (5.1.1)
+ racc (1.8.1)
+ rack (3.1.16)
+ rack-session (2.1.0)
+ base64 (>= 0.1.0)
+ rack (>= 3.0.0)
+ rack-test (2.2.0)
rack (>= 1.3)
- rails (7.0.8.1)
- actioncable (= 7.0.8.1)
- actionmailbox (= 7.0.8.1)
- actionmailer (= 7.0.8.1)
- actionpack (= 7.0.8.1)
- actiontext (= 7.0.8.1)
- actionview (= 7.0.8.1)
- activejob (= 7.0.8.1)
- activemodel (= 7.0.8.1)
- activerecord (= 7.0.8.1)
- activestorage (= 7.0.8.1)
- activesupport (= 7.0.8.1)
+ rackup (2.2.1)
+ rack (>= 3)
+ rails (7.1.3.4)
+ actioncable (= 7.1.3.4)
+ actionmailbox (= 7.1.3.4)
+ actionmailer (= 7.1.3.4)
+ actionpack (= 7.1.3.4)
+ actiontext (= 7.1.3.4)
+ actionview (= 7.1.3.4)
+ activejob (= 7.1.3.4)
+ activemodel (= 7.1.3.4)
+ activerecord (= 7.1.3.4)
+ activestorage (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
bundler (>= 1.15.0)
- railties (= 7.0.8.1)
+ railties (= 7.1.3.4)
rails-controller-testing (1.0.5)
actionpack (>= 5.0.1.rc1)
actionview (>= 5.0.1.rc1)
@@ -225,31 +253,37 @@ GEM
activesupport (>= 5.0.0)
minitest
nokogiri (>= 1.6)
- rails-html-sanitizer (1.6.0)
+ rails-html-sanitizer (1.6.2)
loofah (~> 2.21)
- nokogiri (~> 1.14)
+ nokogiri (>= 1.15.7, != 1.16.7, != 1.16.6, != 1.16.5, != 1.16.4, != 1.16.3, != 1.16.2, != 1.16.1, != 1.16.0.rc1, != 1.16.0)
rails-observers (0.1.5)
activemodel (>= 4.0)
rails-perftest (0.0.7)
- railties (7.0.8.1)
- actionpack (= 7.0.8.1)
- activesupport (= 7.0.8.1)
- method_source
+ railties (7.1.3.4)
+ actionpack (= 7.1.3.4)
+ activesupport (= 7.1.3.4)
+ irb
+ rackup (>= 1.0.0)
rake (>= 12.2)
- thor (~> 1.0)
- zeitwerk (~> 2.5)
+ thor (~> 1.0, >= 1.2.2)
+ zeitwerk (~> 2.6)
rake (13.2.1)
rb-fsevent (0.11.2)
- rb-inotify (0.10.1)
+ rb-inotify (0.11.1)
ffi (~> 1.0)
- request_store (1.5.1)
+ rdoc (6.12.0)
+ psych (>= 4.0.0)
+ reline (0.6.0)
+ io-console (~> 0.5)
+ request_store (1.7.0)
rack (>= 1.4)
- responders (3.1.0)
+ responders (3.1.1)
actionpack (>= 5.2)
railties (>= 5.2)
retriable (1.4.1)
ruby-prof (1.6.3)
ruby2_keywords (0.0.5)
+ securerandom (0.3.2)
signet (0.16.1)
addressable (~> 2.8)
faraday (>= 0.17.5, < 3.0)
@@ -259,29 +293,31 @@ GEM
docile (~> 1.1)
simplecov-html (~> 0.11)
simplecov_json_formatter (~> 0.1)
- simplecov-html (0.12.3)
- simplecov-rcov (0.3.1)
+ simplecov-html (0.13.1)
+ simplecov-rcov (0.3.7)
simplecov (>= 0.4.1)
simplecov_json_formatter (0.1.4)
sprockets (4.2.1)
concurrent-ruby (~> 1.0)
rack (>= 2.2.4, < 4)
- sprockets-rails (3.4.2)
- actionpack (>= 5.2)
- activesupport (>= 5.2)
+ sprockets-rails (3.5.2)
+ actionpack (>= 6.1)
+ activesupport (>= 6.1)
sprockets (>= 3.0.0)
- test-unit (3.6.1)
+ stringio (3.1.5)
+ test-unit (3.6.7)
power_assert
- thor (1.3.1)
- timeout (0.4.1)
+ thor (1.3.2)
+ timeout (0.4.3)
tzinfo (2.0.6)
concurrent-ruby (~> 1.0)
- webrick (1.8.1)
- websocket-driver (0.7.6)
+ webrick (1.9.1)
+ websocket-driver (0.7.7)
+ base64
websocket-extensions (>= 0.1.0)
websocket-extensions (0.1.5)
- zeitwerk (2.6.13)
- zlib (3.1.0)
+ zeitwerk (2.6.18)
+ zlib (3.2.1)
PLATFORMS
ruby
@@ -289,8 +325,9 @@ PLATFORMS
DEPENDENCIES
acts_as_api
andand
- arvados (~> 2.7.0.rc1)
+ arvados (~> 3.1.2)
byebug
+ factory_bot (= 6.4.5)
factory_bot_rails
httpclient
i18n
@@ -302,22 +339,24 @@ DEPENDENCIES
minitest (= 5.10.3)
mocha
multi_json
+ net-imap (~> 0.3.8)
+ nokogiri (~> 1.15.7)
oj
optimist
- passenger
+ passenger (~> 6.0.26)
pg (~> 1.0)
- rails (~> 7.0.0)
+ rails (~> 7.1.3.4)
rails-controller-testing
rails-observers
rails-perftest
responders
- ruby-prof
+ ruby-prof (~> 1.6.3)
+ securerandom (~> 0.3.2)
simplecov
simplecov-rcov
sprockets-rails
test-unit
- themes_for_rails!
webrick
BUNDLED WITH
- 2.4.19
+ 2.4.22
diff --git a/services/api/Passengerfile.json b/services/api/Passengerfile.json
new file mode 100644
index 0000000000..1039e01907
--- /dev/null
+++ b/services/api/Passengerfile.json
@@ -0,0 +1,7 @@
+{
+ "auto": true,
+ "envvars": {
+ "RUBYOPT": "--disable-did_you_mean --disable-error_highlight --disable-syntax_suggest"
+ },
+ "preload_bundler": true
+}
diff --git a/services/api/Rakefile b/services/api/Rakefile
index 70ceb653e6..37592790db 100644
--- a/services/api/Rakefile
+++ b/services/api/Rakefile
@@ -33,7 +33,7 @@ namespace :test do
end
namespace :db do
- namespace :structure do
+ namespace :schema do
task :dump do
require 'tempfile'
origfnm = File.expand_path('../db/structure.sql', __FILE__)
@@ -41,7 +41,7 @@ namespace :db do
copyright_done = false
started = false
begin
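+      # Open read/write ('w+', below) so the trailing-newline cleanup
+      # at the end can seek back over what has been written.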
- tmpfile = File.new tmpfnm, 'w'
+ tmpfile = File.new tmpfnm, 'w+'
origfile = File.new origfnm
origfile.each_line do |line|
if !copyright_done
@@ -69,6 +69,16 @@ namespace :db do
tmpfile.write line
end
origfile.close
+
+ # Remove trailing blank lines by stripping all trailing \n and
+ # then adding one back.
+ tmpfile.seek(-1, :END)
+ while tmpfile.read == "\n"
+ tmpfile.truncate(tmpfile.tell - 1)
+ tmpfile.seek(-1, :END)
+ end
+ tmpfile.write "\n"
+
tmpfile.close
File.rename tmpfnm, origfnm
tmpfnm = false
diff --git a/services/api/app/assets/stylesheets/api_client_authorizations.css.scss b/services/api/app/assets/stylesheets/api_client_authorizations.css.scss
deleted file mode 100644
index ec87eb255f..0000000000
--- a/services/api/app/assets/stylesheets/api_client_authorizations.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the ApiClientAuthorizations controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/api_clients.css.scss b/services/api/app/assets/stylesheets/api_clients.css.scss
deleted file mode 100644
index 61d7e53aa6..0000000000
--- a/services/api/app/assets/stylesheets/api_clients.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the ApiClients controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/authorized_keys.css.scss b/services/api/app/assets/stylesheets/authorized_keys.css.scss
deleted file mode 100644
index 9eeaa89f3f..0000000000
--- a/services/api/app/assets/stylesheets/authorized_keys.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the AuthorizedKeys controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/collections.css.scss b/services/api/app/assets/stylesheets/collections.css.scss
deleted file mode 100644
index 7510f173b9..0000000000
--- a/services/api/app/assets/stylesheets/collections.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Collections controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commit_ancestors.css.scss b/services/api/app/assets/stylesheets/commit_ancestors.css.scss
deleted file mode 100644
index 5004f86911..0000000000
--- a/services/api/app/assets/stylesheets/commit_ancestors.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the commit_ancestors controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commits.css.scss b/services/api/app/assets/stylesheets/commits.css.scss
deleted file mode 100644
index 6b4df4d74f..0000000000
--- a/services/api/app/assets/stylesheets/commits.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the commits controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/groups.css.scss b/services/api/app/assets/stylesheets/groups.css.scss
deleted file mode 100644
index 905e72add9..0000000000
--- a/services/api/app/assets/stylesheets/groups.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Groups controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/humans.css.scss b/services/api/app/assets/stylesheets/humans.css.scss
deleted file mode 100644
index 29668c2737..0000000000
--- a/services/api/app/assets/stylesheets/humans.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Humans controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/job_tasks.css.scss b/services/api/app/assets/stylesheets/job_tasks.css.scss
deleted file mode 100644
index 0d4d2607bb..0000000000
--- a/services/api/app/assets/stylesheets/job_tasks.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the JobTasks controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/jobs.css.scss b/services/api/app/assets/stylesheets/jobs.css.scss
deleted file mode 100644
index 53b6ca7fbe..0000000000
--- a/services/api/app/assets/stylesheets/jobs.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Jobs controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/keep_disks.css.scss b/services/api/app/assets/stylesheets/keep_disks.css.scss
deleted file mode 100644
index 1996f11635..0000000000
--- a/services/api/app/assets/stylesheets/keep_disks.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the KeepDisks controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/links.css.scss b/services/api/app/assets/stylesheets/links.css.scss
deleted file mode 100644
index c2e90adf09..0000000000
--- a/services/api/app/assets/stylesheets/links.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the links controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/logs.css.scss b/services/api/app/assets/stylesheets/logs.css.scss
deleted file mode 100644
index c8b22f9f5f..0000000000
--- a/services/api/app/assets/stylesheets/logs.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Logs controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/nodes.css b/services/api/app/assets/stylesheets/nodes.css
deleted file mode 100644
index d1ce011576..0000000000
--- a/services/api/app/assets/stylesheets/nodes.css
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 */
-
-/*
- Place all the styles related to the matching controller here.
- They will automatically be included in application.css.
-*/
-.node-status {
- /* unknown status - might be bad */
- background: #ff8888;
-}
-.node-status-running .node-status {
- background: #88ff88;
-}
-.node-status-missing .node-status {
- background: #ff8888;
-}
-.node-status-terminated .node-status {
- background: #ffffff;
-}
-
-.node-slurm-state {
- /* unknown status - might be bad */
- background: #ff8888;
-}
-.node-status-missing .node-slurm-state {
- background: #ffffff;
-}
-.node-status-terminated .node-slurm-state {
- background: #ffffff;
-}
-.node-status-running .node-slurm-state-alloc {
- background: #88ff88;
-}
-.node-status-running .node-slurm-state-idle {
- background: #ffbbbb;
-}
-.node-status-running .node-slurm-state-down {
- background: #ff8888;
-}
diff --git a/services/api/app/assets/stylesheets/nodes.css.scss b/services/api/app/assets/stylesheets/nodes.css.scss
deleted file mode 100644
index a7b08612d7..0000000000
--- a/services/api/app/assets/stylesheets/nodes.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Nodes controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_instances.css.scss b/services/api/app/assets/stylesheets/pipeline_instances.css.scss
deleted file mode 100644
index 7292a9aa08..0000000000
--- a/services/api/app/assets/stylesheets/pipeline_instances.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the PipelineInstances controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_templates.css.scss b/services/api/app/assets/stylesheets/pipeline_templates.css.scss
deleted file mode 100644
index 40c0cefbea..0000000000
--- a/services/api/app/assets/stylesheets/pipeline_templates.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the PipelineTemplates controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/repositories.css.scss b/services/api/app/assets/stylesheets/repositories.css.scss
deleted file mode 100644
index 1dd9a16603..0000000000
--- a/services/api/app/assets/stylesheets/repositories.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Repositories controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/specimens.css.scss b/services/api/app/assets/stylesheets/specimens.css.scss
deleted file mode 100644
index 60d630c8ab..0000000000
--- a/services/api/app/assets/stylesheets/specimens.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Specimens controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/traits.css.scss b/services/api/app/assets/stylesheets/traits.css.scss
deleted file mode 100644
index 7d2f7133e1..0000000000
--- a/services/api/app/assets/stylesheets/traits.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the Traits controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/virtual_machines.css.scss b/services/api/app/assets/stylesheets/virtual_machines.css.scss
deleted file mode 100644
index 4a94d45111..0000000000
--- a/services/api/app/assets/stylesheets/virtual_machines.css.scss
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Place all the styles related to the VirtualMachines controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index b1e2a4008f..a9afcb8d18 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -23,7 +23,6 @@ end
require 'load_param'
class ApplicationController < ActionController::Base
- include ThemesForRails::ActionController
include CurrentApiClient
include LoadParam
include DbCurrentTime
@@ -167,7 +166,17 @@ class ApplicationController < ActionController::Base
else
errors = [e.inspect]
end
- status = e.respond_to?(:http_status) ? e.http_status : 422
+
+ case e
+ when ActiveRecord::Deadlocked,
+ ActiveRecord::ConnectionNotEstablished,
+ ActiveRecord::LockWaitTimeout,
+ ActiveRecord::QueryAborted
+ status = 500
+ else
+ status = e.respond_to?(:http_status) ? e.http_status : 422
+ end
+
send_error(*errors, status: status)
end
@@ -220,8 +229,8 @@ class ApplicationController < ActionController::Base
def find_objects_for_index
@objects ||= model_class.readable_by(*@read_users, {
- :include_trash => (bool_param(:include_trash) || 'untrash' == action_name),
- :include_old_versions => bool_param(:include_old_versions)
+ :include_trash => ((self.class._index_requires_parameters[:include_trash] && bool_param(:include_trash)) || 'untrash' == action_name),
+ :include_old_versions => self.class._index_requires_parameters[:include_old_versions] && bool_param(:include_old_versions)
})
apply_where_limit_order_params
end
@@ -262,7 +271,7 @@ class ApplicationController < ActionController::Base
value.length == 2 and
value[0] == 'contains' then
ilikes = []
- model_class.searchable_columns('ilike').each do |column|
+ model_class.any_searchable_columns('ilike').each do |column|
# Including owner_uuid in an "any column" search will
# probably just return a lot of false positives.
next if column == 'owner_uuid'
@@ -272,6 +281,13 @@ class ApplicationController < ActionController::Base
if ilikes.any?
conditions[0] << ' and (' + ilikes.join(' or ') + ')'
end
+ else
+ equals = []
+ model_class.any_searchable_columns('=').each do |column|
+ equals << "#{ar_table_name}.#{column} = ?"
+ conditions << value
+ end
+ conditions[0] << ' and (' + equals.join(' or ') + ')'
end
elsif attr.to_s.match(/^[a-z][_a-z0-9]+$/) and
model_class.columns.collect(&:name).index(attr.to_s)
@@ -341,10 +357,14 @@ class ApplicationController < ActionController::Base
limit_columns &= model_class.columns_for_attributes(select_for_klass @select, model_class) if @select
return if limit_columns.empty?
model_class.transaction do
+ # This query does not use `pg_column_size()` because the returned value
+ # can be smaller than the apparent length thanks to compression.
+ # `octet_length(::text)` better reflects how expensive it will be for
+ # Rails to process the data.
limit_query = @objects.
except(:select, :distinct).
select("(%s) as read_length" %
- limit_columns.map { |s| "octet_length(#{model_class.table_name}.#{s})" }.join(" + "))
+ limit_columns.map { |s| "coalesce(octet_length(#{model_class.table_name}.#{s}::text),0)" }.join(" + "))
new_limit = 0
read_total = 0
limit_query.each do |record|
@@ -408,7 +428,8 @@ class ApplicationController < ActionController::Base
@read_auths += ApiClientAuthorization
.includes(:user)
.where('api_token IN (?) AND
- (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)',
+ (least(expires_at, refreshes_at) IS NULL
+ OR least(expires_at, refreshes_at) > CURRENT_TIMESTAMP)',
secrets)
.to_a
end
@@ -494,7 +515,11 @@ class ApplicationController < ActionController::Base
if params[:id] and params[:id].match(/\D/)
params[:uuid] = params.delete :id
end
- @where = { uuid: params[:uuid] }
+ @where = {}
+ # Some APIs (at least groups/contents) take an optional uuid argument.
+    # They go through this method to handle it when present, but we cannot
+ # assume it is always set.
+ @where[:uuid] = params[:uuid] if params[:uuid]
@offset = 0
@limit = 1
@orders = []
@@ -662,19 +687,19 @@ class ApplicationController < ActionController::Base
{
select: {
type: 'array',
- description: "Attributes of the new object to return in the response.",
+ description: "An array of names of attributes to return in the response.",
required: false,
},
ensure_unique_name: {
type: "boolean",
- description: "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ description: "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.",
location: "query",
required: false,
default: false
},
cluster_id: {
type: 'string',
- description: "Create object on a remote federated cluster instead of the current one.",
+ description: "Cluster ID of a federated cluster where this object should be created.",
location: "query",
required: false,
},
@@ -685,7 +710,7 @@ class ApplicationController < ActionController::Base
{
select: {
type: 'array',
- description: "Attributes of the updated object to return in the response.",
+ description: "An array of names of attributes to return in the response.",
required: false,
},
}
@@ -695,7 +720,7 @@ class ApplicationController < ActionController::Base
{
select: {
type: 'array',
- description: "Attributes of the object to return in the response.",
+ description: "An array of names of attributes to return in the response.",
required: false,
},
}
@@ -703,28 +728,94 @@ class ApplicationController < ActionController::Base
def self._index_requires_parameters
{
- filters: { type: 'array', required: false },
- where: { type: 'object', required: false },
- order: { type: 'array', required: false },
+ filters: {
+ type: 'array',
+ required: false,
+ description: "Filters to limit which objects are returned by their attributes.
+Refer to the [filters reference][] for more information about how to write filters.
+
+[filters reference]: https://doc.arvados.org/api/methods.html#filters
+",
+ },
+ where: {
+ type: 'object',
+ required: false,
+ description: "An object to limit which objects are returned by their attributes.
+The keys of this object are attribute names.
+Each value is either a single matching value or an array of matching values for that attribute.
+The `filters` parameter is more flexible and preferred.
+",
+ },
+ order: {
+ type: 'array',
+ required: false,
+ description: "An array of strings to set the order in which matching objects are returned.
+Each string has the format `<ATTRIBUTE> <DIRECTION>`.
+`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.
+",
+ },
select: {
type: 'array',
- description: "Attributes of each object to return in the response.",
+ description: "An array of names of attributes to return from each matching object.",
+ required: false,
+ },
+ distinct: {
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "If this is true, and multiple objects have the same values
+for the attributes that you specify in the `select` parameter, then each unique
+set of values will only be returned once in the result set.
+",
+ },
+ limit: {
+ type: 'integer',
+ required: false,
+ default: DEFAULT_LIMIT,
+ description: "The maximum number of objects to return in the result.
+Note that the API may return fewer results than this if your request hits other
+limits set by the administrator.
+",
+ },
+ offset: {
+ type: 'integer',
+ required: false,
+ default: 0,
+ description: "Return matching objects starting from this index.
+Note that result indexes may change if objects are modified in between a series
+of list calls.
+",
+ },
+ count: {
+ type: 'string',
required: false,
+ default: 'exact',
+ description: "A string to determine result counting behavior. Supported values are:
+
+ * `\"exact\"`: The response will include an `items_available` field that
+  counts the number of objects that matched these search criteria,
+ including ones not included in `items`.
+
+ * `\"none\"`: The response will not include an `items_avaliable`
+ field. This improves performance by returning a result as soon as enough
+ `items` have been loaded for this result.
+
+",
},
- distinct: { type: 'boolean', required: false, default: false },
- limit: { type: 'integer', required: false, default: DEFAULT_LIMIT },
- offset: { type: 'integer', required: false, default: 0 },
- count: { type: 'string', required: false, default: 'exact' },
cluster_id: {
type: 'string',
- description: "List objects on a remote federated cluster instead of the current one.",
+ description: "Cluster ID of a federated cluster to return objects from",
location: "query",
required: false,
},
bypass_federation: {
type: 'boolean',
required: false,
- description: 'bypass federation behavior, list items from local instance database only'
+ default: false,
+ description: "If true, do not return results from other clusters in the
+federation, only the cluster that received the request.
+You must be an administrator to use this flag.
+",
}
}
end
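
The list parameters documented in this hunk (filters, order, limit, offset, count) compose into a single query. A minimal client-side sketch, assuming the Arvados Ruby SDK (`arvados` gem) with symbolized response keys; the filter and order values are placeholders:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')  # reads ARVADOS_API_HOST and ARVADOS_API_TOKEN
    result = arv.collection.list(
      filters: [['name', 'ilike', '%sample%']].to_json,  # see the filters reference
      order: ['created_at desc'].to_json,                # `<ATTRIBUTE> <DIRECTION>` strings
      limit: 10,
      offset: 0,
      count: 'none')  # skip the items_available tally for a faster response
    result[:items].each { |c| puts c[:uuid] }
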
diff --git a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
index da7e11cd9f..9822861dce 100644
--- a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
+++ b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
@@ -6,22 +6,36 @@ require 'safe_json'
class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
accept_attribute_as_json :scopes, Array
- before_action :current_api_client_is_trusted, :except => [:current]
+ before_action :check_issue_trusted_tokens, :except => [:current]
before_action :admin_required, :only => :create_system_auth
skip_before_action :render_404_if_no_object, :only => [:create_system_auth, :current]
skip_before_action :find_object_by_uuid, :only => [:create_system_auth, :current]
+ def self._create_system_auth_method_description
+ "Create a token for the system (\"root\") user."
+ end
+
def self._create_system_auth_requires_parameters
{
- api_client_id: {type: 'integer', required: false},
- scopes: {type: 'array', required: false}
+ scopes: {
+ type: 'array',
+ required: false,
+ default: ["all"],
+ description: "An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.
+
+[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes
+",
+ }
}
end
+ def self._current_method_description
+ "Return all metadata for the token used to authorize this request."
+ end
+
def create_system_auth
@object = ApiClientAuthorization.
new(user_id: system_user.id,
- api_client_id: params[:api_client_id] || current_api_client.andand.id,
created_by_ip_address: remote_ip,
scopes: SafeJSON.load(params[:scopes] || '["all"]'))
@object.save!
@@ -41,10 +55,9 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
# translate UUID to numeric ID here.
resource_attrs[:user_id] =
User.where(uuid: resource_attrs.delete(:owner_uuid)).first.andand.id
- elsif not resource_attrs[:user_id]
+ else
resource_attrs[:user_id] = current_user.id
end
- resource_attrs[:api_client_id] = Thread.current[:api_client].id
super
end
@@ -81,7 +94,6 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
wanted_scopes << @where['scopes']
@where.select! { |attr, val|
# "where":{"uuid":"zzzzz-zzzzz-zzzzzzzzzzzzzzz"} is OK but
- # "where":{"api_client_id":1} is not supported
# "where":{"uuid":["contains","-"]} is not supported
# "where":{"uuid":["uuid1","uuid2","uuid3"]} is not supported
val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
@@ -131,7 +143,7 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
def find_object_by_uuid(with_lock: false)
uuid_param = params[:uuid] || params[:id]
if (uuid_param != current_api_client_authorization.andand.uuid &&
- !Thread.current[:api_client].andand.is_trusted)
+ !Rails.configuration.Login.IssueTrustedTokens)
return forbidden
end
@limit = 1
@@ -147,34 +159,13 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
@object = query.first
end
- def current_api_client_is_trusted
- if Thread.current[:api_client].andand.is_trusted
- return true
- end
- # A non-trusted client can do a search for its own token if it
- # explicitly restricts the search to its own UUID or api_token.
- # Any other kind of query must return 403, even if it matches only
- # the current token, because that's currently how Workbench knows
- # (after searching on scopes) the difference between "the token
- # I'm using now *is* the only sharing token for this collection"
- # (403) and "my token is trusted, and there is one sharing token
- # for this collection" (200).
- #
- # The @filters test here also prevents a non-trusted token from
- # filtering on its own scopes, and discovering whether any _other_
- # equally scoped tokens exist (403=yes, 200=no).
- return forbidden if !@objects
- full_set = @objects.except(:limit).except(:offset) if @objects
- if (full_set.count == 1 and
- full_set.first.uuid == current_api_client_authorization.andand.uuid and
- (@filters.map(&:first) & %w(uuid api_token)).any?)
- return true
- end
- forbidden
+ def check_issue_trusted_tokens
+ return true if current_api_client_authorization.andand.api_token == Rails.configuration.SystemRootToken
+ return forbidden if !Rails.configuration.Login.IssueTrustedTokens
end
def forbidden
- send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
+ send_error('Action prohibited by IssueTrustedTokens configuration.',
status: 403)
end
end
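
As an illustration of the scopes parameter documented above, a client holding a token permitted by check_issue_trusted_tokens (i.e. the SystemRootToken, or any token when IssueTrustedTokens is enabled) could create a restricted token roughly as follows. A sketch assuming the Ruby SDK; the scope strings are placeholders in the format described in the linked scopes reference:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')
    token = arv.api_client_authorization.create(
      api_client_authorization: {
        # Allow listing collections and fetching individual ones, nothing else.
        scopes: ['GET /arvados/v1/collections',
                 'GET /arvados/v1/collections/']
      })
    puts token[:api_token]
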
diff --git a/services/api/app/controllers/arvados/v1/api_clients_controller.rb b/services/api/app/controllers/arvados/v1/api_clients_controller.rb
deleted file mode 100644
index b459c51915..0000000000
--- a/services/api/app/controllers/arvados/v1/api_clients_controller.rb
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::ApiClientsController < ApplicationController
- before_action :admin_required
-end
diff --git a/services/api/app/controllers/arvados/v1/collections_controller.rb b/services/api/app/controllers/arvados/v1/collections_controller.rb
index ad1771a87e..1c9fdce29a 100644
--- a/services/api/app/controllers/arvados/v1/collections_controller.rb
+++ b/services/api/app/controllers/arvados/v1/collections_controller.rb
@@ -13,10 +13,16 @@ class Arvados::V1::CollectionsController < ApplicationController
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, default: false, description: "Include collections whose is_trashed attribute is true.",
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "Include collections whose `is_trashed` attribute is true.",
},
include_old_versions: {
- type: 'boolean', required: false, default: false, description: "Include past collection versions.",
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "Include past collection versions.",
},
})
end
@@ -25,10 +31,10 @@ class Arvados::V1::CollectionsController < ApplicationController
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, default: false, description: "Show collection even if its is_trashed attribute is true.",
- },
- include_old_versions: {
- type: 'boolean', required: false, default: true, description: "Include past collection versions.",
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "Show collection even if its `is_trashed` attribute is true.",
},
})
end
@@ -61,6 +67,9 @@ class Arvados::V1::CollectionsController < ApplicationController
end
def find_object_by_uuid(with_lock: false)
+ # We are always willing to return an old version by UUID.
+ # We set the parameter so it gets used correctly by super methods.
+ params[:include_old_versions] = true
if loc = Keep::Locator.parse(params[:id])
loc.strip_hints!
@@ -172,17 +181,7 @@ class Arvados::V1::CollectionsController < ApplicationController
end
if direction == :search_up
- # Search upstream for jobs where this locator is the output of some job
- if !Rails.configuration.API.DisabledAPIs["jobs.list"]
- Job.readable_by(*@read_users).where(output: loc.to_s).each do |job|
- search_edges(visited, job.uuid, :search_up)
- end
-
- Job.readable_by(*@read_users).where(log: loc.to_s).each do |job|
- search_edges(visited, job.uuid, :search_up)
- end
- end
-
+      # Search upstream for containers that produced this locator as output
Container.readable_by(*@read_users).where(output: loc.to_s).pluck(:uuid).each do |c_uuid|
search_edges(visited, c_uuid, :search_up)
end
@@ -196,17 +195,7 @@ class Arvados::V1::CollectionsController < ApplicationController
return
end
- # Search downstream for jobs where this locator is in script_parameters
- if !Rails.configuration.API.DisabledAPIs["jobs.list"]
- Job.readable_by(*@read_users).where(["jobs.script_parameters like ?", "%#{loc.to_s}%"]).each do |job|
- search_edges(visited, job.uuid, :search_down)
- end
-
- Job.readable_by(*@read_users).where(["jobs.docker_image_locator = ?", "#{loc.to_s}"]).each do |job|
- search_edges(visited, job.uuid, :search_down)
- end
- end
-
+    # Search downstream for containers where this locator appears in mounts
Container.readable_by(*@read_users).where([Container.full_text_trgm + " like ?", "%#{loc.to_s}%"]).select("output, log, uuid").each do |c|
if c.output != loc.to_s && c.log != loc.to_s
search_edges(visited, c.uuid, :search_down)
@@ -216,21 +205,7 @@ class Arvados::V1::CollectionsController < ApplicationController
else
# uuid is a regular Arvados UUID
rsc = ArvadosModel::resource_class_for_uuid uuid
- if rsc == Job
- Job.readable_by(*@read_users).where(uuid: uuid).each do |job|
- visited[uuid] = job.as_api_response
- if direction == :search_up
- # Follow upstream collections referenced in the script parameters
- find_collections(visited, job) do |hash, col_uuid|
- search_edges(visited, hash, :search_up) if hash
- search_edges(visited, col_uuid, :search_up) if col_uuid
- end
- elsif direction == :search_down
- # Follow downstream job output
- search_edges(visited, job.output, direction)
- end
- end
- elsif rsc == Container
+ if rsc == Container
c = Container.readable_by(*@read_users).where(uuid: uuid).limit(1).first
if c
visited[uuid] = c.as_api_response
@@ -266,16 +241,6 @@ class Arvados::V1::CollectionsController < ApplicationController
if direction == :search_up
visited[c.uuid] = c.as_api_response
- if !Rails.configuration.API.DisabledAPIs["jobs.list"]
- Job.readable_by(*@read_users).where(output: c.portable_data_hash).each do |job|
- search_edges(visited, job.uuid, :search_up)
- end
-
- Job.readable_by(*@read_users).where(log: c.portable_data_hash).each do |job|
- search_edges(visited, job.uuid, :search_up)
- end
- end
-
ContainerRequest.readable_by(*@read_users).where(output_uuid: uuid).pluck(:uuid).each do |cr_uuid|
search_edges(visited, cr_uuid, :search_up)
end
@@ -313,6 +278,10 @@ class Arvados::V1::CollectionsController < ApplicationController
end
end
+ def self._provenance_method_description
+ "Detail the provenance of a given collection."
+ end
+
def provenance
visited = {}
if @object[:uuid]
@@ -323,6 +292,10 @@ class Arvados::V1::CollectionsController < ApplicationController
send_json visited
end
+ def self._used_by_method_description
+ "Detail where a given collection has been used."
+ end
+
def used_by
visited = {}
if @object[:uuid]
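
Since find_object_by_uuid above now always sets include_old_versions, a past collection version can be fetched directly by its UUID, while listing past versions still needs the explicit flag. A hedged sketch assuming the Ruby SDK; the UUID is a placeholder:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')
    old = arv.collection.get(uuid: 'zzzzz-4zz18-xxxxxxxxxxxxxxx')  # placeholder
    versions = arv.collection.list(
      filters: [['current_version_uuid', '=', old[:current_version_uuid]]].to_json,
      include_old_versions: true)
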
diff --git a/services/api/app/controllers/arvados/v1/computed_permissions_controller.rb b/services/api/app/controllers/arvados/v1/computed_permissions_controller.rb
new file mode 100644
index 0000000000..778967c832
--- /dev/null
+++ b/services/api/app/controllers/arvados/v1/computed_permissions_controller.rb
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::ComputedPermissionsController < ApplicationController
+ before_action :admin_required
+
+ def object_list(**args)
+ if !['none', '', nil].include?(params[:count])
+ raise ArgumentError.new("count parameter must be 'none'")
+ end
+ params[:count] = 'none'
+
+ if !['0', 0, nil].include?(params[:offset])
+ raise ArgumentError.new("non-zero offset parameter #{params[:offset].inspect} is not supported")
+ end
+
+ super
+ end
+
+ def limit_database_read(**args)
+ # This is counterproductive for this table, and the default
+ # implementation doesn't work because it relies on some
+ # real-model-like behavior that ComputedPermission does not offer.
+ end
+end
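
Per the object_list override above, this endpoint only accepts count=none and offset=0, so clients page by filtering rather than offsetting. A sketch, assuming the Ruby SDK exposes the new resource as computed_permission and the caller holds an admin token:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')  # must be an admin token
    perms = arv.computed_permission.list(
      count: 'none',
      limit: 100,
      filters: [['user_uuid', '=', 'zzzzz-tpzed-xxxxxxxxxxxxxxx']].to_json)  # placeholder UUID
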
diff --git a/services/api/app/controllers/arvados/v1/container_requests_controller.rb b/services/api/app/controllers/arvados/v1/container_requests_controller.rb
index f99a0a55a9..8344c93954 100644
--- a/services/api/app/controllers/arvados/v1/container_requests_controller.rb
+++ b/services/api/app/controllers/arvados/v1/container_requests_controller.rb
@@ -35,7 +35,7 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
(super rescue {}).
merge({
uuid: {
- type: 'string', required: true, description: "The UUID of the ContainerRequest in question.",
+ type: 'string', required: true, description: "The UUID of the container request to query.",
},
})
end
@@ -43,6 +43,10 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
# This API is handled entirely by controller, so this method is
# never called -- it's only here for the sake of adding the API to
# the generated discovery document.
+ def self._container_status_method_description
+ "Return scheduling details for a container request."
+ end
+
def container_status
send_json({"errors" => "controller-only API, not handled by rails"}, status: 400)
end
diff --git a/services/api/app/controllers/arvados/v1/containers_controller.rb b/services/api/app/controllers/arvados/v1/containers_controller.rb
index 13aa478d26..558e49423e 100644
--- a/services/api/app/controllers/arvados/v1/containers_controller.rb
+++ b/services/api/app/controllers/arvados/v1/containers_controller.rb
@@ -15,6 +15,10 @@ class Arvados::V1::ContainersController < ApplicationController
skip_before_action :find_object_by_uuid, only: [:current]
skip_before_action :render_404_if_no_object, only: [:current]
+ def self._auth_method_description
+ "Get the API client authorization token associated with this container."
+ end
+
def auth
if @object.locked_by_uuid != Thread.current[:api_client_authorization].uuid
raise ArvadosModel::PermissionDeniedError.new("Not locked by your token")
@@ -65,21 +69,37 @@ class Arvados::V1::ContainersController < ApplicationController
end
end
+ def self._lock_method_description
+ "Lock a container (for a dispatcher to begin running it)."
+ end
+
def lock
@object.lock
show
end
+ def self._unlock_method_description
+ "Unlock a container (for a dispatcher to stop running it)."
+ end
+
def unlock
@object.unlock
show
end
+ def self._update_priority_method_description
+ "Recalculate and return the priority of a given container."
+ end
+
def update_priority
@object.update_priority!
show
end
+ def self._current_method_description
+ "Return the container record associated with the API token authorizing this request."
+ end
+
def current
if Thread.current[:api_client_authorization].nil?
send_error("Not logged in", status: 401)
@@ -93,6 +113,10 @@ class Arvados::V1::ContainersController < ApplicationController
end
end
+ def self._secret_mounts_method_description
+ "Return secret mount information for the container associated with the API token authorizing this request."
+ end
+
def secret_mounts
c = Container.for_current_token
if @object && c && @object.uuid == c.uuid
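
The lock/unlock/auth trio above is the dispatcher-facing lifecycle: a dispatcher locks a container before running it and unlocks it if it gives up. A sketch of that flow, assuming the Ruby SDK and a token with dispatch privileges; the container UUID is a placeholder:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')  # dispatcher token
    container_uuid = 'zzzzz-dz642-xxxxxxxxxxxxxxx'  # placeholder
    arv.container.lock(uuid: container_uuid)      # transition Queued -> Locked
    begin
      # ... start the container via the local execution backend ...
    rescue
      arv.container.unlock(uuid: container_uuid)  # return it to the queue
      raise
    end
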
diff --git a/services/api/app/controllers/arvados/v1/credentials_controller.rb b/services/api/app/controllers/arvados/v1/credentials_controller.rb
new file mode 100644
index 0000000000..a3d2539d44
--- /dev/null
+++ b/services/api/app/controllers/arvados/v1/credentials_controller.rb
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::CredentialsController < ApplicationController
+
+ # "secret" is not returned in API calls, but we also want
+ # to disallow its use in queries in general.
+
+ def load_where_param
+ super
+ if @where[:secret]
+ raise ArvadosModel::PermissionDeniedError.new "Cannot use 'secret' in where clause"
+ end
+ end
+
+ def load_filters_param
+ super
+ @filters.map do |k|
+ if k[0] =~ /secret/
+ raise ArvadosModel::PermissionDeniedError.new "Cannot filter on 'secret'"
+ end
+ end
+ end
+
+ def load_limit_offset_order_params
+ super
+ @orders.each do |ord|
+ if ord =~ /secret/
+ raise ArvadosModel::PermissionDeniedError.new "Cannot order by 'secret'"
+ end
+ end
+ end
+
+ def self._secret_method_description
+ "Fetch the secret part of the credential (can only be invoked by running containers)."
+ end
+
+ def secret
+ # Should have previously determined the user can read the credential in @object
+ c = Container.for_current_token
+ if !@object || !c || c.state != Container::Running
+ send_error("Token is not associated with a running container.", status: 403)
+ return
+ end
+
+ if Time.now >= @object.expires_at
+ send_error("Credential has expired.", status: 403)
+ return
+ end
+
+ lg = Log.new(event_type: "secret_access")
+ lg.object_uuid = @object.uuid
+ lg.object_owner_uuid = @object.owner_uuid
+ lg.properties = {
+ "name": @object.name,
+ "credential_class": @object.credential_class,
+ "external_id": @object.external_id,
+ }
+ lg.save!
+ send_json({"external_id" => @object.external_id, "secret" => @object.secret})
+ end
+end
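
The secret action above only answers a token belonging to a running container, refuses expired credentials, and writes a secret_access log entry before returning the external_id/secret pair. From inside a container, usage might look like the following sketch; the credential name is a placeholder, and the SDK method names assume the discovery document added by this patch:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')  # the container's own API token
    cred = arv.credential.list(
      filters: [['name', '=', 'my-registry']].to_json)[:items].first  # placeholder name
    resp = arv.credential.secret(uuid: cred[:uuid])
    # resp[:external_id] identifies the account; resp[:secret] is the secret itself.
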
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index c362cf32d7..aaefc5bfc4 100644
--- a/services/api/app/controllers/arvados/v1/groups_controller.rb
+++ b/services/api/app/controllers/arvados/v1/groups_controller.rb
@@ -7,16 +7,18 @@ require "trashable"
class Arvados::V1::GroupsController < ApplicationController
include TrashableController
+ before_action :load_include_param, only: [:shared, :contents]
skip_before_action :find_object_by_uuid, only: :shared
skip_before_action :render_404_if_no_object, only: :shared
- TRASHABLE_CLASSES = ['project']
-
def self._index_requires_parameters
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, default: false, description: "Include items whose is_trashed attribute is true.",
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "Include items whose `is_trashed` attribute is true.",
},
})
end
@@ -25,28 +27,57 @@ class Arvados::V1::GroupsController < ApplicationController
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, default: false, description: "Show group/project even if its is_trashed attribute is true.",
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "Return group/project even if its `is_trashed` attribute is true.",
},
})
end
def self._contents_requires_parameters
- params = _index_requires_parameters.
- merge({
- uuid: {
- type: 'string', required: false, default: '',
- },
- recursive: {
- type: 'boolean', required: false, default: false, description: 'Include contents from child groups recursively.',
- },
- include: {
- type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid).',
- },
- include_old_versions: {
- type: 'boolean', required: false, default: false, description: 'Include past collection versions.',
- }
- })
- params
+ _index_requires_parameters.merge(
+ {
+ uuid: {
+ type: 'string',
+ required: false,
+ default: '',
+ description: "If given, limit the listing to objects owned by the
+user or group with this UUID.",
+ },
+ recursive: {
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: 'If true, include contents from child groups recursively.',
+ },
+ include: {
+ type: 'array',
+ required: false,
+ description: "An array of referenced objects to include in the `included` field of the response. Supported values in the array are:
+
+ * `\"container_uuid\"`
+ * `\"owner_uuid\"`
+ * `\"collection_uuid\"`
+
+",
+ },
+ include_old_versions: {
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: 'If true, include past versions of collections in the listing.',
+ },
+ exclude_home_project: {
+ type: "boolean",
+ required: false,
+ default: false,
+ description: "If true, exclude contents of the user's home project from the listing.
+Calling this method with this flag set is how clients enumerate objects shared
+with the current user.",
+ },
+ }
+ )
end
def self._create_requires_parameters
@@ -57,7 +88,7 @@ class Arvados::V1::GroupsController < ApplicationController
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update',
+          description: 'If true, cluster permissions will not be updated immediately, but instead at the next configured update interval.',
}
}
)
@@ -71,7 +102,7 @@ class Arvados::V1::GroupsController < ApplicationController
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update',
+          description: 'If true, cluster permissions will not be updated immediately, but instead at the next configured update interval.',
}
}
)
@@ -100,15 +131,6 @@ class Arvados::V1::GroupsController < ApplicationController
end
end
- def destroy
- if !TRASHABLE_CLASSES.include?(@object.group_class)
- @object.destroy
- show
- else
- super # Calls destroy from TrashableController module
- end
- end
-
def render_404_if_no_object
if params[:action] == 'contents'
if !params[:uuid]
@@ -129,7 +151,12 @@ class Arvados::V1::GroupsController < ApplicationController
end
end
+ def self._contents_method_description
+ "List objects that belong to a group."
+ end
+
def contents
+ @orig_select = @select
load_searchable_objects
list = {
:kind => "arvados#objectList",
@@ -143,11 +170,22 @@ class Arvados::V1::GroupsController < ApplicationController
list[:items_available] = @items_available
end
if @extra_included
- list[:included] = @extra_included.as_api_response(nil, {select: @select})
+ if @orig_select.nil?
+ @orig_select = User.selectable_attributes.concat(
+ Group.selectable_attributes,
+ Container.selectable_attributes,
+ Collection.selectable_attributes - ["unsigned_manifest_text"])
+ end
+ @orig_select = @orig_select - ["manifest_text"]
+ list[:included] = @extra_included.as_api_response(nil, {select: @orig_select})
end
send_json(list)
end
+ def self._shared_method_description
+ "List groups that the current user can access via permission links."
+ end
+
def shared
# The purpose of this endpoint is to return the toplevel set of
# groups which are *not* reachable through a direct ownership
@@ -158,7 +196,6 @@ class Arvados::V1::GroupsController < ApplicationController
# This also returns (in the "included" field) the objects that own
# those projects (users or non-project groups).
#
- #
# The intended use of this endpoint is to support clients which
# wish to browse those projects which are visible to the user but
# are not part of the "home" project.
@@ -170,41 +207,75 @@ class Arvados::V1::GroupsController < ApplicationController
apply_where_limit_order_params
- if params["include"] == "owner_uuid"
+ if @include.include?("owner_uuid")
owners = @objects.map(&:owner_uuid).to_set
- @extra_included = []
+ @extra_included ||= []
[Group, User].each do |klass|
@extra_included += klass.readable_by(*@read_users).where(uuid: owners.to_a).to_a
end
end
+ if @include.include?("container_uuid")
+ @extra_included ||= []
+ container_uuids = @objects.map { |o|
+ o.respond_to?(:container_uuid) ? o.container_uuid : nil
+ }.compact.to_set.to_a
+ @extra_included += Container.where(uuid: container_uuids).to_a
+ end
+
+ if @include.include?("collection_uuid")
+ @extra_included ||= []
+ collection_uuids = @objects.map { |o|
+ o.respond_to?(:collection_uuid) ? o.collection_uuid : nil
+ }.compact.to_set.to_a
+ @extra_included += Collection.where(uuid: collection_uuids).to_a
+ end
+
index
end
def self._shared_requires_parameters
- rp = self._index_requires_parameters
- rp[:include] = { type: 'string', required: false }
- rp
+ self._index_requires_parameters.merge(
+ {
+ include: {
+ type: 'string',
+ required: false,
+ description: "A string naming referenced objects to include in the `included` field of the response. Supported values are:
+
+ * `\"owner_uuid\"`
+
+",
+ },
+ }
+ )
end
protected
+ def load_include_param
+ @include = params[:include]
+ if @include.nil? || @include == ""
+ @include = Set[]
+ elsif @include.is_a?(String) && @include.start_with?('[')
+ @include = SafeJSON.load(@include).to_set
+ elsif @include.is_a?(String)
+ @include = Set[@include]
+ else
+ return send_error("'include' parameter must be a string or array", status: 422)
+ end
+ end
+
def load_searchable_objects
all_objects = []
@items_available = 0
# Reload the orders param, this time without prefixing unqualified
# columns ("name" => "groups.name"). Here, unqualified orders
- # apply to each table being searched, not "groups".
+ # apply to each table being searched, not just "groups", as
+ # fill_table_names would assume. Instead, table names are added
+ # inside the klasses loop below (see request_order).
load_limit_offset_order_params(fill_table_names: false)
- if params['count'] == 'none' and @offset != 0 and (params['last_object_class'].nil? or params['last_object_class'].empty?)
- # can't use offset without getting counts, so
- # fall back to count=exact behavior.
- params['count'] = 'exact'
- set_count_none = true
- end
-
# Trick apply_where_limit_order_params into applying suitable
# per-table values. *_all are the real ones we'll apply to the
# aggregate set.
@@ -218,10 +289,7 @@ class Arvados::V1::GroupsController < ApplicationController
request_filters = @filters
- klasses = [Group,
- Job, PipelineInstance, PipelineTemplate, ContainerRequest, Workflow,
- Collection,
- Human, Specimen, Trait]
+ klasses = [Group, ContainerRequest, Workflow, Collection]
table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
@@ -230,8 +298,15 @@ class Arvados::V1::GroupsController < ApplicationController
klasses = avail_klasses.keys
request_filters.each do |col, op, val|
- if col.index('.') && !table_names.values.include?(col.split('.', 2)[0])
- raise ArgumentError.new("Invalid attribute '#{col}' in filter")
+ if col.index('.')
+ filter_table = col.split('.', 2)[0]
+ # singular "container" is valid as a special case for
+ # filtering container requests by their associated
+ # container_uuid, similarly singular "collection" for
+ # workflows.
+ if filter_table != "container" && filter_table != "collection" && !table_names.values.include?(filter_table)
+ raise ArgumentError.new("Invalid attribute '#{col}' in filter")
+ end
end
end
@@ -265,6 +340,12 @@ class Arvados::V1::GroupsController < ApplicationController
klasses.each do |klass|
all_attributes.concat klass.selectable_attributes
end
+ if klasses.include?(ContainerRequest) && @include.include?("container_uuid")
+ all_attributes.concat Container.selectable_attributes
+ end
+ if klasses.include?(Workflow) && @include.include?("collection_uuid")
+ all_attributes.concat Collection.selectable_attributes
+ end
@select.each do |check|
if !all_attributes.include? check
raise ArgumentError.new "Invalid attribute '#{check}' in select"
@@ -275,34 +356,34 @@ class Arvados::V1::GroupsController < ApplicationController
included_by_uuid = {}
- seen_last_class = false
error_by_class = {}
any_success = false
klasses.each do |klass|
- # check if current klass is same as params['last_object_class']
- seen_last_class = true if((params['count'].andand.==('none')) and
- (params['last_object_class'].nil? or
- params['last_object_class'].empty? or
- params['last_object_class'] == klass.to_s))
-
# if klasses are specified, skip all other klass types
next if wanted_klasses.any? and !wanted_klasses.include?(klass.to_s)
- # if specified, and count=none, then only look at the klass in
- # last_object_class.
- # for whatever reason, this parameter exists separately from 'wanted_klasses'
- next if params['count'] == 'none' and !seen_last_class
-
# don't process rest of object types if we already have needed number of objects
break if params['count'] == 'none' and all_objects.size >= limit_all
# If the currently requested orders specifically match the
# table_name for the current klass, apply that order.
# Otherwise, order by recency.
- request_order =
- request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i || r !~ /\./ } ||
- klass.default_orders.join(", ")
+ request_order = request_orders.andand.map do |r|
+ if r =~ /^#{klass.table_name}\./i
+ r
+ elsif r !~ /\./
+        # The caller provided an unqualified column like
+        # "created_by desc". We might be joining another table
+        # that also has that column, so we need to specify that we
+        # mean this table.
+ klass.table_name + '.' + r
+ else
+ # Only applies to a different table / object type.
+ nil
+ end
+ end.compact
+ request_order = optimize_orders(request_order, model_class: klass)
@select = select_for_klass any_selections, klass, false
@@ -322,8 +403,12 @@ class Arvados::V1::GroupsController < ApplicationController
@filters = request_filters.map do |col, op, val|
if !col.index('.')
[col, op, val]
- elsif (col = col.split('.', 2))[0] == klass.table_name
- [col[1], op, val]
+ elsif (colsp = col.split('.', 2))[0] == klass.table_name
+ [colsp[1], op, val]
+ elsif klass == ContainerRequest && colsp[0] == "container"
+ [col, op, val]
+ elsif klass == Workflow && colsp[0] == "collection"
+ [col, op, val]
else
nil
end
@@ -358,11 +443,36 @@ class Arvados::V1::GroupsController < ApplicationController
# This actually fetches the objects
klass_object_list = object_list(model_class: klass)
- # If count=none, :items_available will be nil, and offset is
- # required to be 0.
- klass_items_available = klass_object_list[:items_available] || 0
- @items_available += klass_items_available
- @offset = [@offset - klass_items_available, 0].max
+ # The appropriate @offset for querying the next table depends on
+ # how many matching rows in this table were skipped due to the
+ # current @offset. If we retrieved any items (or @offset is
+ # already zero), then clearly exactly @offset rows were skipped,
+ # and the correct @offset for the next table is zero.
+ # Otherwise, we need to count all matching rows in the current
+ # table, and subtract that from @offset. If our previous query
+ # used count=none, we will need an additional query to get that
+ # count.
+ if params['count'] == 'none' and @offset > 0 and klass_object_list[:items].length == 0
+ # Just get the count.
+ klass_object_list[:items_available] = @objects.
+ except(:limit).except(:offset).
+ count(@distinct ? :id : '*')
+ end
+
+ klass_items_available = klass_object_list[:items_available]
+ if klass_items_available.nil?
+ # items_available may be nil if count=none and a non-zero
+ # number of items were returned. One of these cases must be true:
+ #
+ # items returned >= limit, so we won't go to the next table, offset doesn't matter
+ # items returned < limit, so we want to start at the beginning of the next table, offset = 0
+ #
+ @offset = 0
+ else
+        # We have the exact count, so update the total and offset accordingly.
+ @items_available += klass_items_available
+ @offset = [@offset - klass_items_available, 0].max
+ end
# Add objects to the list of objects to be returned.
all_objects += klass_object_list[:items]
@@ -374,7 +484,7 @@ class Arvados::V1::GroupsController < ApplicationController
limit_all = all_objects.count
end
- if params["include"] == "owner_uuid"
+ if @include.include?("owner_uuid")
owners = klass_object_list[:items].map {|i| i[:owner_uuid]}.to_set
[Group, User].each do |ownerklass|
ownerklass.readable_by(*@read_users).where(uuid: owners.to_a).each do |ow|
@@ -382,6 +492,20 @@ class Arvados::V1::GroupsController < ApplicationController
end
end
end
+
+ if @include.include?("container_uuid") && klass == ContainerRequest
+ containers = klass_object_list[:items].collect { |cr| cr[:container_uuid] }.to_set
+ Container.where(uuid: containers.to_a).each do |c|
+ included_by_uuid[c.uuid] = c
+ end
+ end
+
+ if @include.include?("collection_uuid") && klass == Workflow
+ collections = klass_object_list[:items].collect { |wf| wf[:collection_uuid] }.to_set
+ Collection.where(uuid: collections.to_a).each do |c|
+ included_by_uuid[c.uuid] = c
+ end
+ end
end
# Only error out when every searchable object type errored out
@@ -392,14 +516,10 @@ class Arvados::V1::GroupsController < ApplicationController
raise ArgumentError.new(error_msg)
end
- if params["include"]
+ if !@include.empty?
@extra_included = included_by_uuid.values
end
- if set_count_none
- params['count'] = 'none'
- end
-
@objects = all_objects
@limit = limit_all
@offset = offset_all
@@ -423,5 +543,4 @@ class Arvados::V1::GroupsController < ApplicationController
"EXISTS(SELECT 1 FROM groups as gp where gp.uuid=#{klass.table_name}.owner_uuid and gp.group_class != 'project')",
user_uuid: current_user.uuid)
end
-
end
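
With load_include_param in place, contents accepts either a single name or a JSON array for include, and the referenced Container, Collection, or owner records arrive in the response's `included` field. An illustrative sketch with a placeholder project UUID, assuming the Ruby SDK:

    require 'arvados'

    arv = Arvados.new(api_version: 'v1')
    listing = arv.group.contents(
      uuid: 'zzzzz-j7d0g-xxxxxxxxxxxxxxx',                # placeholder project
      include: ['owner_uuid', 'container_uuid'].to_json)  # JSON array form
    (listing[:included] || []).each { |obj| puts "#{obj[:uuid]} (#{obj[:kind]})" }
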
diff --git a/services/api/app/controllers/arvados/v1/job_tasks_controller.rb b/services/api/app/controllers/arvados/v1/job_tasks_controller.rb
deleted file mode 100644
index b960d2e9e4..0000000000
--- a/services/api/app/controllers/arvados/v1/job_tasks_controller.rb
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::JobTasksController < ApplicationController
- accept_attribute_as_json :parameters, Hash
-
- def create
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/jobs_controller.rb b/services/api/app/controllers/arvados/v1/jobs_controller.rb
deleted file mode 100644
index 2d6b05269d..0000000000
--- a/services/api/app/controllers/arvados/v1/jobs_controller.rb
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::JobsController < ApplicationController
- accept_attribute_as_json :components, Hash
- accept_attribute_as_json :script_parameters, Hash
- accept_attribute_as_json :runtime_constraints, Hash
- accept_attribute_as_json :tasks_summary, Hash
- skip_before_action :find_object_by_uuid, :only => [:queue, :queue_size]
- skip_before_action :render_404_if_no_object, :only => [:queue, :queue_size]
-
- include DbCurrentTime
-
- def create
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-
- def cancel
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-
- def lock
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-
- def queue
- @objects = []
- index
- end
-
- def queue_size
- render :json => {:queue_size => 0}
- end
-
- def self._create_requires_parameters
- (super rescue {}).
- merge({
- find_or_create: {
- type: 'boolean', required: false, default: false,
- },
- filters: {
- type: 'array', required: false,
- },
- minimum_script_version: {
- type: 'string', required: false,
- },
- exclude_script_versions: {
- type: 'array', required: false,
- },
- })
- end
-
- def self._queue_requires_parameters
- self._index_requires_parameters
- end
-
- protected
-
- def load_filters_param
- begin
- super
- attrs = resource_attrs rescue {}
- @filters = Job.load_job_specific_filters attrs, @filters, @read_users
- rescue ArgumentError => error
- send_error(error.message)
- false
- else
- true
- end
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/keep_disks_controller.rb b/services/api/app/controllers/arvados/v1/keep_disks_controller.rb
deleted file mode 100644
index b8aa09650f..0000000000
--- a/services/api/app/controllers/arvados/v1/keep_disks_controller.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::KeepDisksController < ApplicationController
- skip_before_action :require_auth_scope, only: :ping
- skip_before_action :render_404_if_no_object, only: :ping
-
- def self._ping_requires_parameters
- {
- uuid: {required: false},
- ping_secret: {required: true},
- node_uuid: {required: false},
- filesystem_uuid: {required: false},
- service_host: {required: false},
- service_port: {required: true},
- service_ssl_flag: {required: true}
- }
- end
-
- def ping
- params[:service_host] ||= request.env['REMOTE_ADDR']
- if !params[:uuid] && current_user.andand.is_admin
- # Create a new KeepDisk and ping it.
- @object = KeepDisk.new(filesystem_uuid: params[:filesystem_uuid])
- @object.save!
-
- # In the first ping from this new filesystem_uuid, we can't
- # expect the keep node to know the ping_secret so we made sure
- # we got an admin token. Here we add ping_secret to params so
- # the ping call below is properly authenticated.
- params[:ping_secret] = @object.ping_secret
- end
- act_as_system_user do
- if !@object.andand.ping(params)
- return render_not_found "object not found"
- end
- # Render the :superuser view (i.e., include the ping_secret) even
- # if !current_user.is_admin. This is safe because @object.ping's
- # success implies the ping_secret was already known by the client.
- send_json @object.as_api_response(:superuser)
- end
- end
-
- def find_objects_for_index
- # all users can list all keep disks
- @objects = model_class.where('1=1')
- super
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/keep_services_controller.rb b/services/api/app/controllers/arvados/v1/keep_services_controller.rb
index c6e8894352..eca56bf7a8 100644
--- a/services/api/app/controllers/arvados/v1/keep_services_controller.rb
+++ b/services/api/app/controllers/arvados/v1/keep_services_controller.rb
@@ -14,6 +14,10 @@ class Arvados::V1::KeepServicesController < ApplicationController
super
end
+ def self._accessible_method_description
+ "List Keep services that the current client can access."
+ end
+
def accessible
if request.headers['X-External-Client'] == '1'
@objects = KeepService.where('service_type=?', 'proxy')
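
The accessible action returns proxy-type services when the request carries the X-External-Client: 1 header, and directly reachable services otherwise. A plain-HTTP sketch of the external case, using only the Ruby standard library (the header name comes from the code above):

    require 'net/http'
    require 'json'

    uri = URI("https://#{ENV['ARVADOS_API_HOST']}/arvados/v1/keep_services/accessible")
    req = Net::HTTP::Get.new(uri)
    req['Authorization'] = "Bearer #{ENV['ARVADOS_API_TOKEN']}"
    req['X-External-Client'] = '1'  # ask for keepproxy services only
    res = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(req) }
    JSON.parse(res.body)['items'].each { |ks| puts ks['service_type'] }
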
diff --git a/services/api/app/controllers/arvados/v1/links_controller.rb b/services/api/app/controllers/arvados/v1/links_controller.rb
index c956bfc9b4..3258cd8a26 100644
--- a/services/api/app/controllers/arvados/v1/links_controller.rb
+++ b/services/api/app/controllers/arvados/v1/links_controller.rb
@@ -59,6 +59,10 @@ class Arvados::V1::LinksController < ApplicationController
super
end
+ def self._get_permissions_method_description
+ "List permissions granted on an Arvados object."
+ end
+
def get_permissions
if current_user.andand.can?(manage: @object)
# find all links and return them
diff --git a/services/api/app/controllers/arvados/v1/nodes_controller.rb b/services/api/app/controllers/arvados/v1/nodes_controller.rb
deleted file mode 100644
index 2510fd49fa..0000000000
--- a/services/api/app/controllers/arvados/v1/nodes_controller.rb
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::NodesController < ApplicationController
- skip_before_action :require_auth_scope, :only => :ping
- skip_before_action :find_object_by_uuid, :only => :ping
- skip_before_action :render_404_if_no_object, :only => :ping
-
- include DbCurrentTime
-
- def self._ping_requires_parameters
- { ping_secret: {required: true} }
- end
-
- def self._create_requires_parameters
- super.merge(
- { assign_slot: {required: false, type: 'boolean', description: 'assign slot and hostname'} })
- end
-
- def self._update_requires_parameters
- super.merge(
- { assign_slot: {required: false, type: 'boolean', description: 'assign slot and hostname'} })
- end
-
- def create
- @object = model_class.new(resource_attrs)
- @object.assign_slot if params[:assign_slot]
- @object.save!
- show
- end
-
- def update
- if resource_attrs[:job_uuid].is_a? String
- @object.job_readable = readable_job_uuids([resource_attrs[:job_uuid]]).any?
- end
- attrs_to_update = resource_attrs.reject { |k,v|
- [:kind, :etag, :href].index k
- }
- @object.update!(attrs_to_update)
- @object.assign_slot if params[:assign_slot]
- @object.save!
- show
- end
-
- def ping
- act_as_system_user do
- @object = Node.where(uuid: (params[:id] || params[:uuid])).first
- if !@object
- return render_not_found
- end
- ping_data = {
- ip: params[:local_ipv4] || request.remote_ip,
- ec2_instance_id: params[:instance_id]
- }
- [:ping_secret, :total_cpu_cores, :total_ram_mb, :total_scratch_mb]
- .each do |key|
- ping_data[key] = params[key] if params[key]
- end
- @object.ping(ping_data)
- if @object.info['ping_secret'] == params[:ping_secret]
- send_json @object.as_api_response(:superuser)
- else
- raise "Invalid ping_secret after ping"
- end
- end
- end
-
- def find_objects_for_index
- if !current_user.andand.is_admin && current_user.andand.is_active
- # active non-admin users can list nodes that are (or were
- # recently) working
- @objects = model_class.where('last_ping_at >= ?', db_current_time - 1.hours)
- end
- super
- if @select.nil? or @select.include? 'job_uuid'
- job_uuids = @objects.map { |n| n[:job_uuid] }.compact
- assoc_jobs = readable_job_uuids(job_uuids)
- @objects.each do |node|
- node.job_readable = assoc_jobs.include?(node[:job_uuid])
- end
- end
- end
-
- protected
-
- def readable_job_uuids(uuids)
- Job.readable_by(*@read_users).select(:uuid).where(uuid: uuids).map(&:uuid)
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb
deleted file mode 100644
index 166f71049b..0000000000
--- a/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::PipelineInstancesController < ApplicationController
- accept_attribute_as_json :components, Hash
- accept_attribute_as_json :properties, Hash
- accept_attribute_as_json :components_summary, Hash
-
- def create
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-
- def cancel
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb
deleted file mode 100644
index 4a5e724ee6..0000000000
--- a/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::PipelineTemplatesController < ApplicationController
- accept_attribute_as_json :components, Hash
-
- def create
- return send_error("Unsupported legacy jobs API",
- status: 400)
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/repositories_controller.rb b/services/api/app/controllers/arvados/v1/repositories_controller.rb
deleted file mode 100644
index 9dff6227bc..0000000000
--- a/services/api/app/controllers/arvados/v1/repositories_controller.rb
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::RepositoriesController < ApplicationController
- skip_before_action :find_object_by_uuid, :only => :get_all_permissions
- skip_before_action :render_404_if_no_object, :only => :get_all_permissions
- before_action :admin_required, :only => :get_all_permissions
-
- def get_all_permissions
- # user_aks is a map of {user_uuid => array of public keys}
- user_aks = {}
- # admins is an array of user_uuids
- admins = []
- User.
- where('users.is_active = ? or users.uuid = ?', true, anonymous_user_uuid).
- eager_load(:authorized_keys).find_each do |u|
- user_aks[u.uuid] = u.authorized_keys.collect do |ak|
- {
- public_key: ak.public_key,
- authorized_key_uuid: ak.uuid
- }
- end
- admins << u.uuid if u.is_admin
- end
- all_group_permissions = User.all_group_permissions
- @repo_info = {}
- Repository.eager_load(:permissions).find_each do |repo|
- @repo_info[repo.uuid] = {
- uuid: repo.uuid,
- name: repo.name,
- push_url: repo.push_url,
- fetch_url: repo.fetch_url,
- user_permissions: {},
- }
- # evidence is an array of {name: 'can_xxx', user_uuid: 'x-y-z'},
- # one entry for each piece of evidence we find in the permission
- # database that establishes that a user can access this
- # repository. Multiple entries can be added for a given user,
- # possibly with different access levels; these will be compacted
- # below.
- evidence = []
- repo.permissions.each do |perm|
- if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
- # A group has permission. Each user who has access to this
- # group also has access to the repository. Access level is
- # min(group-to-repo permission, user-to-group permission).
- user_aks.each do |user_uuid, _|
- perm_mask = all_group_permissions[user_uuid].andand[perm.tail_uuid]
- if not perm_mask
- next
- elsif perm_mask[:manage] and perm.name == 'can_manage'
- evidence << {name: 'can_manage', user_uuid: user_uuid}
- elsif perm_mask[:write] and ['can_manage', 'can_write'].index perm.name
- evidence << {name: 'can_write', user_uuid: user_uuid}
- elsif perm_mask[:read]
- evidence << {name: 'can_read', user_uuid: user_uuid}
- end
- end
- elsif user_aks.has_key?(perm.tail_uuid)
- # A user has permission; the user exists; and either the
- # user is active, or it's the special case of the anonymous
- # user which is never "active" but is allowed to read
- # content from public repositories.
- evidence << {name: perm.name, user_uuid: perm.tail_uuid}
- end
- end
- # Owner of the repository, and all admins, can do everything.
- ([repo.owner_uuid] | admins).each do |user_uuid|
- # Except: no permissions for inactive users, even if they own
- # repositories.
- next unless user_aks.has_key?(user_uuid)
- evidence << {name: 'can_manage', user_uuid: user_uuid}
- end
- # Distill all the evidence about permissions on this repository
- # into one hash per user, of the form {'can_xxx' => true, ...}.
- # The hash is nil for a user who has no permissions at all on
- # this particular repository.
- evidence.each do |perm|
- user_uuid = perm[:user_uuid]
- user_perms = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
- user_perms[perm[:name]] = true
- end
- end
- # Revisit each {'can_xxx' => true, ...} hash for some final
- # cleanup to make life easier for the requestor.
- #
- # Add a 'gitolite_permissions' key alongside the 'can_xxx' keys,
- # for the convenience of the gitolite config file generator.
- #
- # Add all lesser permissions when a greater permission is
- # present. If the requestor only wants to know who can write, it
- # only has to test for 'can_write' in the response.
- @repo_info.values.each do |repo|
- repo[:user_permissions].each do |user_uuid, user_perms|
- if user_perms['can_manage']
- user_perms['gitolite_permissions'] = 'RW+'
- user_perms['can_write'] = true
- user_perms['can_read'] = true
- elsif user_perms['can_write']
- user_perms['gitolite_permissions'] = 'RW+'
- user_perms['can_read'] = true
- elsif user_perms['can_read']
- user_perms['gitolite_permissions'] = 'R'
- end
- end
- end
- # The response looks like
- # {"kind":"...",
- # "repositories":[r1,r2,r3,...],
- # "user_keys":usermap}
- # where each of r1,r2,r3 looks like
- # {"uuid":"repo-uuid-1",
- # "name":"username/reponame",
- # "push_url":"...",
- # "user_permissions":{"user-uuid-a":{"can_read":true,"gitolite_permissions":"R"}}}
- # and usermap looks like
- # {"user-uuid-a":[{"public_key":"ssh-rsa g...","authorized_key_uuid":"ak-uuid-g"},...],
- # "user-uuid-b":[{"public_key":"ssh-rsa h...","authorized_key_uuid":"ak-uuid-h"},...],...}
- send_json(kind: 'arvados#RepositoryPermissionSnapshot',
- repositories: @repo_info.values,
- user_keys: user_aks)
- end
-end
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 74aa4078cb..f65ff0993d 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -23,6 +23,510 @@ class Arvados::V1::SchemaController < ApplicationController
protected
+ ActionNameMap = {
+ 'destroy' => 'delete',
+ 'index' => 'list',
+ 'show' => 'get',
+ }
+
+ HttpMethodDescriptionMap = {
+ "DELETE" => "delete",
+ "GET" => "query",
+ "POST" => "update",
+ "PUT" => "create",
+ }
+
+ ModelHumanNameMap = {
+ # The discovery document has code to humanize most model names.
+ # These are exceptions that require some capitalization.
+ "ApiClientAuthorization" => "API client authorization",
+ "KeepService" => "Keep service",
+ }
+
+ SchemaDescriptionMap = {
+ # This hash contains descriptions for everything in the schema.
+ # Schemas are looked up by their model name.
+ # Schema properties are looked up by "{model_name}.{property_name}"
+ # and fall back to just the property name if that doesn't exist.
+ "ApiClientAuthorization" => "Arvados API client authorization token
+
+This resource represents an API token a user may use to authenticate an
+Arvados API request.",
+ "AuthorizedKey" => "Arvados authorized public key
+
+This resource represents a public key a user may use to authenticate themselves
+to services on the cluster. Its primary use today is to store SSH keys for
+virtual machines (\"shell nodes\"). It may be extended to store other keys in
+the future.",
+ "Collection" => "Arvados data collection
+
+A collection describes how a set of files is stored in data blocks in Keep,
+along with associated metadata.",
+ "ComputedPermission" => "Arvados computed permission
+
+Computed permissions do not correspond directly to any Arvados resource, but
+provide a simple way to query the entire graph of permissions granted to
+users and groups.",
+ "ContainerRequest" => "Arvados container request
+
+A container request represents a user's request that Arvados do some compute
+work, along with full details about what work should be done. Arvados will
+attempt to fulfill the request by mapping it to a matching container record,
+running the work on demand if necessary.",
+ "Container" => "Arvados container record
+
+A container represents compute work that has been or should be dispatched,
+along with its results. A container can satisfy one or more container requests.",
+ "Group" => "Arvados group
+
+Groups provide a way to organize users or data together, depending on their
+`group_class`.",
+ "KeepService" => "Arvados Keep service
+
+This resource stores information about a single Keep service in this Arvados
+cluster that clients can contact to retrieve and store data.",
+ "Link" => "Arvados object link
+
+A link provides a way to define relationships between Arvados objects,
+depending on their `link_class`.",
+ "Log" => "Arvados log record
+
+This resource represents a single log record about an event in this Arvados
+cluster. Some individual Arvados services create log records. Users can also
+create custom logs.",
+ "UserAgreement" => "Arvados user agreement
+
+A user agreement is a collection with terms that users must agree to before
+they can use this Arvados cluster.",
+ "User" => "Arvados user
+
+A user represents a single individual or role who may be authorized to access
+this Arvados cluster.",
+ "VirtualMachine" => "Arvados virtual machine (\"shell node\")
+
+This resource stores information about a virtual machine or \"shell node\"
+hosted on this Arvados cluster where users can log in and use preconfigured
+Arvados client tools.",
+ "Workflow" => "Arvados workflow
+
+A workflow contains workflow definition source code that Arvados can execute
+along with associated metadata for users.",
+
+ # This section contains:
+ # * attributes shared across most resources
+ # * attributes shared across Collections and UserAgreements
+ # * attributes shared across Containers and ContainerRequests
+ "command" =>
+ "An array of strings that defines the command that the dispatcher should
+execute inside this container.",
+ "container_image" =>
+ "The portable data hash of the Arvados collection that contains the image
+to use for this container.",
+ "created_at" => "The time this %s was created.",
+ "current_version_uuid" => "The UUID of the current version of this %s.",
+ "cwd" =>
+ "A string that the defines the working directory that the dispatcher should
+use when it executes the command inside this container.",
+ "delete_at" => "The time this %s will be permanently deleted.",
+ "description" =>
+ "A longer HTML description of this %s assigned by a user.
+Allowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,
+`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,
+`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,
+`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.",
+ "environment" =>
+ "A hash of string keys and values that defines the environment variables
+for the dispatcher to set when it executes this container.",
+ "file_count" =>
+ "The number of files represented in this %s's `manifest_text`.
+This attribute is read-only.",
+ "file_size_total" =>
+ "The total size in bytes of files represented in this %s's `manifest_text`.
+This attribute is read-only.",
+ "is_trashed" => "A boolean flag to indicate whether or not this %s is trashed.",
+ "manifest_text" =>
+ "The manifest text that describes how files are constructed from data blocks
+in this %s. Refer to the [manifest format][] reference for details.
+
+[manifest format]: https://doc.arvados.org/architecture/manifest-format.html
+
+",
+ "modified_at" => "The time this %s was last updated.",
+ "modified_by_user_uuid" => "The UUID of the user that last updated this %s.",
+ "mounts" =>
+ "A hash where each key names a directory inside this container, and its
+value is an object that defines the mount source for that directory. Refer
+to the [mount types reference][] for details.
+
+[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types
+
+",
+ "name" => "The name of this %s assigned by a user.",
+ "output_glob" =>
+ "An array of strings of shell-style glob patterns that define which file(s)
+and subdirectory(ies) under the `output_path` directory should be recorded in
+the container's final output. Refer to the [glob patterns reference][] for details.
+
+[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns
+
+",
+ "output_path" =>
+ "A string that defines the file or directory path where the command
+writes output that should be saved from this container.",
+ "output_properties" =>
+"A hash of arbitrary metadata to set on the output collection of this %s.
+Some keys may be reserved by Arvados or defined by a configured vocabulary.
+Refer to the [metadata properties reference][] for details.
+
+[metadata properties reference]: https://doc.arvados.org/api/properties.html
+
+",
+ "output_storage_classes" =>
+ "An array of strings identifying the storage class(es) that should be set
+on the output collection of this %s. Storage classes are configured by
+the cluster administrator.",
+ "owner_uuid" => "The UUID of the user or group that owns this %s.",
+ "portable_data_hash" =>
+ "The portable data hash of this %s. This string provides a unique
+and stable reference to these contents.",
+ "preserve_version" =>
+ "A boolean flag to indicate whether this specific version of this %s
+should be persisted in cluster storage.",
+ "priority" =>
+ "An integer between 0 and 1000 (inclusive) that represents this %s's
+scheduling priority. 0 represents a request to be cancelled. Higher
+values represent higher priority. Refer to the [priority reference][] for details.
+
+[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority
+
+",
+ "properties" =>
+ "A hash of arbitrary metadata for this %s.
+Some keys may be reserved by Arvados or defined by a configured vocabulary.
+Refer to the [metadata properties reference][] for details.
+
+[metadata properties reference]: https://doc.arvados.org/api/properties.html
+
+",
+ "replication_confirmed" =>
+ "The number of copies of data in this %s that the cluster has confirmed
+exist in storage.",
+ "replication_confirmed_at" =>
+ "The last time the cluster confirmed that it met `replication_confirmed`
+for this %s.",
+ "replication_desired" =>
+ "The number of copies that should be made for data in this %s.",
+ "runtime_auth_scopes" =>
+ "The `scopes` from the API client authorization token used to run this %s.",
+ "runtime_constraints" =>
+ "A hash that identifies compute resources this container requires to run
+successfully. See the [runtime constraints reference][] for details.
+
+[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints
+
+",
+ "runtime_token" =>
+ "The `api_token` from an Arvados API client authorization token that a
+dispatcher should use to set up this container.",
+ "runtime_user_uuid" =>
+ "The UUID of the Arvados user associated with the API client authorization
+token used to run this container.",
+ "secret_mounts" =>
+ "A hash like `mounts`, but this attribute is only available through a
+dedicated API before the container is run.",
+ "scheduling_parameters" =>
+ "A hash of scheduling parameters that should be passed to the underlying
+dispatcher when this container is run.
+See the [scheduling parameters reference][] for details.
+
+[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters
+
+",
+ "storage_classes_desired" =>
+ "An array of strings identifying the storage class(es) that should be used
+for data in this %s. Storage classes are configured by the cluster administrator.",
+ "storage_classes_confirmed" =>
+ "An array of strings identifying the storage class(es) the cluster has
+confirmed have a copy of this %s's data.",
+ "storage_classes_confirmed_at" =>
+ "The last time the cluster confirmed that data was stored on the storage
+class(es) in `storage_classes_confirmed`.",
+ "trash_at" => "The time this %s will be trashed.",
+
+ "ApiClientAuthorization.api_token" =>
+ "The secret token that can be used to authorize Arvados API requests.",
+ "ApiClientAuthorization.created_by_ip_address" =>
+ "The IP address of the client that created this token.",
+ "ApiClientAuthorization.expires_at" =>
+ "The time after which this token is no longer valid for authorization.",
+ "ApiClientAuthorization.last_used_at" =>
+ "The last time this token was used to authorize a request.",
+ "ApiClientAuthorization.last_used_by_ip_address" =>
+ "The IP address of the client that last used this token.",
+ "ApiClientAuthorization.refreshes_at" =>
+ "The time at which the token will be revalidated if it is a cached token issued by a remote cluster, otherise null.",
+ "ApiClientAuthorization.scopes" =>
+ "An array of strings identifying HTTP methods and API paths this token is
+authorized to use. Refer to the [scopes reference][] for details.
+
+[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes
+
+",
+ "version" =>
+ "An integer that counts which version of a %s this record
+represents. Refer to [collection versioning][] for details. This attribute is
+read-only.
+
+[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html
+
+",
+
+ "AuthorizedKey.authorized_user_uuid" =>
+ "The UUID of the Arvados user that is authorized by this key.",
+ "AuthorizedKey.expires_at" =>
+ "The time after which this key is no longer valid for authorization.",
+ "AuthorizedKey.key_type" =>
+ "A string identifying what type of service uses this key. Supported values are:
+
+ * `\"SSH\"`
+
+",
+ "AuthorizedKey.public_key" =>
+ "The full public key, in the format referenced by `key_type`.",
+
+ "ComputedPermission.user_uuid" =>
+ "The UUID of the Arvados user who has this permission.",
+ "ComputedPermission.target_uuid" =>
+ "The UUID of the Arvados object the user has access to.",
+ "ComputedPermission.perm_level" =>
+ "A string representing the user's level of access to the target object.
+Possible values are:
+
+ * `\"can_read\"`
+ * `\"can_write\"`
+ * `\"can_manage\"`
+
+",
+
+ "Container.auth_uuid" =>
+ "The UUID of the Arvados API client authorization token that a dispatcher
+should use to set up this container. This token is automatically created by
+Arvados and this attribute automatically assigned unless a container is
+created with `runtime_token`.",
+ "Container.cost" =>
+ "A float with the estimated cost of the cloud instance used to run this
+container. The value is `0` if cost estimation is not available on this cluster.",
+ "Container.exit_code" =>
+ "An integer that records the Unix exit code of the `command` from a
+finished container.",
+ "Container.gateway_address" =>
+ "A string with the address of the Arvados gateway server, in `HOST:PORT`
+format. This is for internal use only.",
+ "Container.interactive_session_started" =>
+ "This flag is set true if any user starts an interactive shell inside the
+running container.",
+ "Container.lock_count" =>
+ "The number of times this container has been locked by a dispatcher. This
+may be greater than 1 if a dispatcher locks a container but then execution is
+interrupted for any reason.",
+ "Container.locked_by_uuid" =>
+ "The UUID of the Arvados API client authorization token that successfully
+locked this container in preparation to execute it.",
+ "Container.log" =>
+ "The portable data hash of the Arvados collection that contains this
+container's logs.",
+ "Container.output" =>
+ "The portable data hash of the Arvados collection that contains this
+container's output file(s).",
+ "Container.progress" =>
+ "A float between 0.0 and 1.0 (inclusive) that represents the container's
+execution progress. This attribute is not implemented yet.",
+ "Container.runtime_status" =>
+ "A hash with status updates from a running container.
+Refer to the [runtime status reference][] for details.
+
+[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status
+
+",
+ "Container.subrequests_cost" =>
+ "A float with the estimated cost of all cloud instances used to run this
+container and all its subrequests. The value is `0` if cost estimation is not
+available on this cluster.",
+ "Container.state" =>
+ "A string representing the container's current execution status. Possible
+values are:
+
+ * `\"Queued\"` --- This container has not been dispatched yet.
+ * `\"Locked\"` --- A dispatcher has claimed this container in preparation to run it.
+ * `\"Running\"` --- A dispatcher is running this container.
+ * `\"Cancelled\"` --- Container execution has been cancelled by user request.
+ * `\"Complete\"` --- A dispatcher ran this container to completion and recorded the results.
+
+",
+ "Container.service" =>
+ "A boolean flag. If set, it informs the system that this is a long-running container
+that functions as a system service or web app, rather than a once-through batch operation.",
+ "Container.published_ports" =>
+ "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados
+will proxy HTTP requests to these ports. Values are hashes with the following keys:
+
+ * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.
+ * `\"label\"` --- A human readable label describing the service, for display in Workbench.
+ * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
+
+ "ContainerRequest.auth_uuid" =>
+ "The UUID of the Arvados API client authorization token that a
+dispatcher should use to set up a corresponding container. This token is
+automatically created by Arvados and this attribute automatically assigned
+unless a container request is created with `runtime_token`.",
+ "ContainerRequest.container_count" =>
+ "An integer that records how many times Arvados has attempted to dispatch
+a container to fulfill this container request.",
+ "ContainerRequest.container_count_max" =>
+ "An integer that defines the maximum number of times Arvados should attempt
+to dispatch a container to fulfill this container request.",
+ "ContainerRequest.container_uuid" =>
+ "The UUID of the container that fulfills this container request, if any.",
+ "ContainerRequest.cumulative_cost" =>
+ "A float with the estimated cost of all cloud instances used to run
+container(s) to fulfill this container request and their subrequests.
+The value is `0` if cost estimation is not available on this cluster.",
+ "ContainerRequest.expires_at" =>
+ "The time after which this %s will no longer be fulfilled.",
+ "ContainerRequest.filters" =>
+ "Filters that limit which existing containers are eligible to satisfy this
+container request. This attribute is not implemented yet and should be null.",
+ "ContainerRequest.log_uuid" =>
+ "The UUID of the Arvados collection that contains logs for all the
+container(s) that were dispatched to fulfill this container request.",
+ "ContainerRequest.output_name" =>
+ "The name to set on the output collection of this container request.",
+ "ContainerRequest.output_ttl" =>
+ "An integer in seconds. If greater than zero, when an output collection is
+created for this container request, its `trash_at` and `delete_at` attributes
+will be set this far in the future.",
+ "ContainerRequest.output_uuid" =>
+ "The UUID of the Arvados collection that contains output for all the
+container(s) that were dispatched to fulfill this container request.",
+ "ContainerRequest.requesting_container_uuid" =>
+ "The UUID of the container that created this container request, if any.",
+ "ContainerRequest.state" =>
+ "A string indicating where this container request is in its lifecycle.
+Possible values are:
+
+ * `\"Uncommitted\"` --- The container request has not been finalized and can still be edited.
+ * `\"Committed\"` --- The container request is ready to be fulfilled.
+ * `\"Final\"` --- The container request has been fulfilled or cancelled.
+
+",
+ "ContainerRequest.use_existing" =>
+ "A boolean flag. If set, Arvados may choose to satisfy this container
+request with an eligible container that already exists. Otherwise, Arvados will
+satisfy this container request with a newer container, which will usually result
+in the container running again.",
+ "ContainerRequest.service" =>
+ "A boolean flag. If set, it informs the system that this request is for a long-running container
+that functions as a system service or web app, rather than a once-through batch operation.",
+ "ContainerRequest.published_ports" =>
+ "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados
+will proxy HTTP requests to these ports. Values are hashes with the following keys:
+
+ * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.
+ * `\"label\"` --- A human readable label describing the service, for display in Workbench.
+ * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.",
+
+ "Group.group_class" =>
+ "A string representing which type of group this is. One of:
+
+ * `\"filter\"` --- A virtual project whose contents are selected dynamically by filters.
+ * `\"project\"` --- An Arvados project that can contain collections,
+ container records, workflows, and subprojects.
+ * `\"role\"` --- A group of users that can be granted permissions in Arvados.
+
+",
+ "Group.frozen_by_uuid" =>
+ "The UUID of the user that has frozen this group, if any. Frozen projects
+cannot have their contents or metadata changed, even by admins.",
+
+ "KeepService.service_host" => "The DNS hostname of this %s.",
+ "KeepService.service_port" => "The TCP port where this %s listens.",
+ "KeepService.service_ssl_flag" =>
+ "A boolean flag that indicates whether or not this %s uses TLS/SSL.",
+ "KeepService.service_type" =>
+ "A string that describes which type of %s this is. One of:
+
+ * `\"disk\"` --- A service that stores blocks on a local filesystem.
+ * `\"blob\"` --- A service that stores blocks in a cloud object store.
+ * `\"proxy\"` --- A keepproxy service.
+
+",
+ "KeepService.read_only" =>
+ "A boolean flag. If set, this %s does not accept requests to write data
+blocks; it only serves blocks it already has.",
+
+ "Link.head_uuid" =>
+ "The UUID of the Arvados object that is the originator or actor in this
+relationship. May be null.",
+ "Link.link_class" =>
+ "A string that defines which kind of link this is. One of:
+
+ * `\"permission\"` --- This link grants a permission to the user or group
+ referenced by `head_uuid` to the object referenced by `tail_uuid`. The
+ access level is set by `name`.
+ * `\"star\"` --- This link represents a \"favorite.\" The user referenced
+ by `head_uuid` wants quick access to the object referenced by `tail_uuid`.
+ * `\"tag\"` --- This link represents an unstructured metadata tag. The object
+ referenced by `tail_uuid` has the tag defined by `name`.
+
+",
+ "Link.name" =>
+ "The primary value of this link. For `\"permission\"` links, this is one of
+`\"can_read\"`, `\"can_write\"`, or `\"can_manage\"`.",
+ "Link.tail_uuid" =>
+ "The UUID of the Arvados object that is the target of this relationship.",
+
+ "Log.id" =>
+ "The serial number of this log. You can use this in filters to query logs
+that were created before/after another.",
+ "Log.event_type" =>
+ "An arbitrary short string that classifies what type of log this is.",
+ "Log.object_owner_uuid" =>
+ "The `owner_uuid` of the object referenced by `object_uuid` at the time
+this log was created.",
+ "Log.object_uuid" =>
+ "The UUID of the Arvados object that this log pertains to, such as a user
+or container.",
+ "Log.summary" =>
+ "A text string that describes the logged event. This is the primary
+attribute for simple logs.",
+
+ "User.email" => "This user's email address.",
+ "User.first_name" => "This user's first name.",
+ "User.identity_url" =>
+ "A URL that represents this user with the cluster's identity provider.",
+ "User.is_active" =>
+ "A boolean flag. If unset, this user is not permitted to make any Arvados
+API requests.",
+ "User.is_admin" =>
+ "A boolean flag. If set, this user is an administrator of the Arvados
+cluster, and automatically passes most permissions checks.",
+ "User.last_name" => "This user's last name.",
+ "User.prefs" => "A hash that stores cluster-wide user preferences.",
+ "User.username" => "This user's Unix username on virtual machines.",
+
+ "VirtualMachine.hostname" =>
+ "The DNS hostname where users should access this %s.",
+
+ "Workflow.definition" => "A string with the CWL source of this %s.",
+ "Workflow.collection_uuid" => "The collection this workflow is linked to, containing the definition of the workflow.",
+
+ "Credential.credential_class" => "The type of credential being stored.",
+ "Credential.scopes" => "The resources the credential applies to or should be used with.",
+ "Credential.external_id" => "The non-secret external identifier associated with a credential, e.g. a username.",
+ "Credential.secret" => "The secret part of the credential, e.g. a password.",
+ "Credential.expires_at" => "Date after which the credential_secret field is no longer valid.",
+ }
+
def discovery_doc
Rails.application.eager_load!
remoteHosts = {}
@@ -36,7 +540,7 @@ class Arvados::V1::SchemaController < ApplicationController
# format is YYYYMMDD, must be fixed width (needs to be lexically
# sortable), updated manually, may be used by clients to
# determine availability of API server features.
- revision: "20231117",
+ revision: "20250402",
source_version: AppVersion.hash,
sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
packageVersion: AppVersion.package_version,
@@ -57,13 +561,6 @@ class Arvados::V1::SchemaController < ApplicationController
maxRequestSize: Rails.configuration.API.MaxRequestSize,
maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats.keys,
- crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
- crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
- crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
- crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes,
- crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines,
- crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob,
- crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod,
crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,
crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,
remoteHosts: remoteHosts,
@@ -72,7 +569,6 @@ class Arvados::V1::SchemaController < ApplicationController
workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,
keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
- gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
parameters: {
alt: {
type: "string",
@@ -125,31 +621,35 @@ class Arvados::V1::SchemaController < ApplicationController
# No controller -> no discovery.
next
end
+ human_name = ModelHumanNameMap[k.to_s] || k.to_s.underscore.humanize.downcase
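+ # e.g. "ContainerRequest" becomes "container request"; ModelHumanNameMap
+ # overrides cases that need capitalization, like "KeepService" => "Keep service".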
object_properties = {}
k.columns.
select { |col| k.selectable_attributes.include? col.name }.
collect do |col|
if k.serialized_attributes.has_key? col.name
- object_properties[col.name] = {
- type: k.serialized_attributes[col.name].object_class.to_s
- }
+ col_type = k.serialized_attributes[col.name].object_class.to_s
elsif k.attribute_types[col.name].is_a? JsonbType::Hash
- object_properties[col.name] = {
- type: Hash.to_s
- }
+ col_type = Hash.to_s
elsif k.attribute_types[col.name].is_a? JsonbType::Array
- object_properties[col.name] = {
- type: Array.to_s
- }
+ col_type = Array.to_s
else
- object_properties[col.name] = {
- type: col.type
- }
+ col_type = col.type
+ end
+ desc_fmt =
+ SchemaDescriptionMap["#{k}.#{col.name}"] ||
+ SchemaDescriptionMap[col.name] ||
+ ""
+ if k.attribute_types[col.name].type == :datetime
+ desc_fmt += " The string encodes a UTC date and time in ISO 8601 format."
end
+ object_properties[col.name] = {
+ description: desc_fmt % human_name,
+ type: col_type,
+ }
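+ # e.g. "The name of this %s assigned by a user." % "collection"
+ # => "The name of this collection assigned by a user."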
end
discovery[:schemas][k.to_s + 'List'] = {
id: k.to_s + 'List',
- description: k.to_s + ' list',
+ description: "A list of #{k} objects.",
type: "object",
properties: {
kind: {
@@ -159,56 +659,47 @@ class Arvados::V1::SchemaController < ApplicationController
},
etag: {
type: "string",
- description: "List version."
+ description: "List cache version."
},
items: {
type: "array",
- description: "The list of #{k.to_s.pluralize}.",
+ description: "An array of matching #{k} objects.",
items: {
"$ref" => k.to_s
}
},
- next_link: {
- type: "string",
- description: "A link to the next page of #{k.to_s.pluralize}."
- },
- next_page_token: {
- type: "string",
- description: "The page token for the next page of #{k.to_s.pluralize}."
- },
- selfLink: {
- type: "string",
- description: "A link back to this list."
- }
}
}
discovery[:schemas][k.to_s] = {
id: k.to_s,
- description: k.to_s,
+ description: SchemaDescriptionMap[k.to_s] || "Arvados #{human_name}.",
type: "object",
- uuidPrefix: (k.respond_to?(:uuid_prefix) ? k.uuid_prefix : nil),
+ uuidPrefix: nil,
properties: {
- uuid: {
- type: "string",
- description: "Object ID."
- },
etag: {
type: "string",
- description: "Object version."
+ description: "Object cache version."
}
}.merge(object_properties)
}
+ if k.respond_to? :uuid_prefix
+ discovery[:schemas][k.to_s][:uuidPrefix] ||= k.uuid_prefix
+ discovery[:schemas][k.to_s][:properties][:uuid] ||= {
+ type: "string",
+ description: "This #{human_name}'s Arvados UUID, like `zzzzz-#{k.uuid_prefix}-12345abcde67890`."
+ }
+ end
discovery[:resources][k.to_s.underscore.pluralize] = {
methods: {
get: {
id: "arvados.#{k.to_s.underscore.pluralize}.get",
path: "#{k.to_s.underscore.pluralize}/{uuid}",
httpMethod: "GET",
- description: "Gets a #{k.to_s}'s metadata by UUID.",
+ description: "Get a #{k.to_s} record by UUID.",
parameters: {
uuid: {
type: "string",
- description: "The UUID of the #{k.to_s} in question.",
+ description: "The UUID of the #{k.to_s} to return.",
required: true,
location: "path"
}
@@ -224,32 +715,11 @@ class Arvados::V1::SchemaController < ApplicationController
"https://api.arvados.org/auth/arvados.readonly"
]
},
- index: {
- id: "arvados.#{k.to_s.underscore.pluralize}.index",
+ list: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.list",
path: k.to_s.underscore.pluralize,
httpMethod: "GET",
- description:
- %|Index #{k.to_s.pluralize}.
-
- The index method returns a
- resource list of
- matching #{k.to_s.pluralize}. For example:
-
-
- {
- "kind":"arvados##{k.to_s.camelcase(:lower)}List",
- "etag":"",
- "self_link":"",
- "next_page_token":"",
- "next_link":"",
- "items":[
- ...
- ],
- "items_available":745,
- "_profile":{
- "request_time":0.157236317
- }
-
- |,
+ description: "Retrieve a #{k.to_s}List.",
parameters: {
},
response: {
@@ -289,7 +759,7 @@ class Arvados::V1::SchemaController < ApplicationController
parameters: {
uuid: {
type: "string",
- description: "The UUID of the #{k.to_s} in question.",
+ description: "The UUID of the #{k.to_s} to update.",
required: true,
location: "path"
}
@@ -317,7 +787,7 @@ class Arvados::V1::SchemaController < ApplicationController
parameters: {
uuid: {
type: "string",
- description: "The UUID of the #{k.to_s} in question.",
+ description: "The UUID of the #{k.to_s} to delete.",
required: true,
location: "path"
}
@@ -339,39 +809,47 @@ class Arvados::V1::SchemaController < ApplicationController
httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|
method if route.verb.match(method)
}.compact.first
- if httpMethod and
- route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
- ctl_class.action_methods.include? action
- if !d_methods[action.to_sym]
+ if httpMethod &&
+ route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize &&
+ ctl_class.action_methods.include?(action)
+ method_name = ActionNameMap[action] || action
+ method_key = method_name.to_sym
+ if !d_methods[method_key]
method = {
- id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
+ id: "arvados.#{k.to_s.underscore.pluralize}.#{method_name}",
path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
httpMethod: httpMethod,
- description: "#{action} #{k.to_s.underscore.pluralize}",
+ description: ctl_class.send("_#{method_name}_method_description".to_sym),
parameters: {},
response: {
- "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
+ "$ref" => (method_name == 'list' ? "#{k.to_s}List" : k.to_s)
},
scopes: [
"https://api.arvados.org/auth/arvados"
]
}
route.segment_keys.each do |key|
- if key != :format
- key = :uuid if key == :id
- method[:parameters][key] = {
- type: "string",
- description: "",
- required: true,
- location: "path"
- }
+ case key
+ when :format
+ next
+ when :id, :uuid
+ key = :uuid
+ description = "The UUID of the #{k} to #{HttpMethodDescriptionMap[httpMethod]}."
+ else
+ description = ""
end
+ method[:parameters][key] = {
+ type: "string",
+ description: description,
+ required: true,
+ location: "path",
+ }
end
else
# We already built a generic method description, but we
# might find some more required parameters through
# introspection.
- method = d_methods[action.to_sym]
+ method = d_methods[method_key]
end
if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
ctl_class.send("_#{action}_requires_parameters".to_sym).each do |l, v|
@@ -392,26 +870,53 @@ class Arvados::V1::SchemaController < ApplicationController
end
end
end
- d_methods[action.to_sym] = method
-
- if action == 'index'
- list_method = method.dup
- list_method[:id].sub!('index', 'list')
- list_method[:description].sub!('Index', 'List')
- list_method[:description].sub!('index', 'list')
- d_methods[:list] = list_method
- end
+ d_methods[method_key] = method
end
end
end
- # The 'replace_files' option is implemented in lib/controller,
- # not Rails -- we just need to add it here so discovery-aware
- # clients know how to validate it.
+ # The computed_permissions controller does not offer all of the
+ # usual methods and attributes. Modify discovery doc accordingly.
+ discovery[:resources]['computed_permissions'][:methods].select! do |method|
+ method == :list
+ end
+ discovery[:resources]['computed_permissions'][:methods][:list][:parameters].reject! do |param|
+ [:cluster_id, :bypass_federation, :offset].include?(param)
+ end
+ discovery[:schemas]['ComputedPermission'].delete(:uuidPrefix)
+ discovery[:schemas]['ComputedPermission'][:properties].reject! do |prop|
+ [:uuid, :etag].include?(prop)
+ end
+ discovery[:schemas]['ComputedPermission'][:properties]['perm_level'][:type] = 'string'
+
+ # The 'replace_files' and 'replace_segments' options are
+ # implemented in lib/controller, not Rails -- we just need to add
+ # them here so discovery-aware clients know how to validate them.
[:create, :update].each do |action|
discovery[:resources]['collections'][:methods][action][:parameters]['replace_files'] = {
type: 'object',
- description: 'Files and directories to initialize/replace with content from other collections.',
+ description:
+ "Add, delete, and replace files and directories with new content
+and/or content from other collections. Refer to the
+[replace_files reference][] for details.
+
+[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files
+
+",
+ required: false,
+ location: 'query',
+ properties: {},
+ additionalProperties: {type: 'string'},
+ }
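+ # Illustrative replace_files argument (assumed example): copy a directory
+ # from another collection by portable data hash, and delete a path:
+ #   {"/docs" => "fa7aeb5140e2848d39b416daeef4ffc5+45/docs", "/old" => ""}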
+ discovery[:resources]['collections'][:methods][action][:parameters]['replace_segments'] = {
+ type: 'object',
+ description:
+ "Replace existing block segments in the collection with new segments.
+Refer to the [replace_segments reference][] for details.
+
+[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments
+
+",
required: false,
location: 'query',
properties: {},
@@ -425,7 +930,7 @@ class Arvados::V1::SchemaController < ApplicationController
id: "arvados.configs.get",
path: "config",
httpMethod: "GET",
- description: "Get public config",
+ description: "Get this cluster's public configuration settings.",
parameters: {
},
parameterOrder: [
@@ -446,7 +951,13 @@ class Arvados::V1::SchemaController < ApplicationController
id: "arvados.vocabularies.get",
path: "vocabulary",
httpMethod: "GET",
- description: "Get vocabulary definition",
+ description: "Get this cluster's configured vocabulary definition.
+
+Refer to [metadata vocabulary documentation][] for details.
+
+[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html
+
+",
parameters: {
},
parameterOrder: [
@@ -467,7 +978,8 @@ class Arvados::V1::SchemaController < ApplicationController
id: "arvados.sys.trash_sweep",
path: "sys/trash_sweep",
httpMethod: "POST",
- description: "apply scheduled trash and delete operations",
+ description:
+ "Run scheduled data trash and sweep operations across this cluster's Keep services.",
parameters: {
},
parameterOrder: [
@@ -484,6 +996,7 @@ class Arvados::V1::SchemaController < ApplicationController
Rails.configuration.API.DisabledAPIs.each do |method, _|
ctrl, action = method.to_s.split('.', 2)
+ next if ctrl.in?(['api_clients', 'job_tasks', 'jobs', 'keep_disks', 'nodes', 'pipeline_instances', 'pipeline_templates', 'repositories'])
discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
discovery
diff --git a/services/api/app/controllers/arvados/v1/specimens_controller.rb b/services/api/app/controllers/arvados/v1/specimens_controller.rb
deleted file mode 100644
index b1e50a7e3e..0000000000
--- a/services/api/app/controllers/arvados/v1/specimens_controller.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::SpecimensController < ApplicationController
-end
diff --git a/services/api/app/controllers/arvados/v1/traits_controller.rb b/services/api/app/controllers/arvados/v1/traits_controller.rb
deleted file mode 100644
index 7aaed5c4d4..0000000000
--- a/services/api/app/controllers/arvados/v1/traits_controller.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Arvados::V1::TraitsController < ApplicationController
-end
diff --git a/services/api/app/controllers/arvados/v1/user_agreements_controller.rb b/services/api/app/controllers/arvados/v1/user_agreements_controller.rb
index 748eb06f0a..bfe6ec5cd9 100644
--- a/services/api/app/controllers/arvados/v1/user_agreements_controller.rb
+++ b/services/api/app/controllers/arvados/v1/user_agreements_controller.rb
@@ -15,6 +15,15 @@ class Arvados::V1::UserAgreementsController < ApplicationController
'links'
end
+ def limit_database_read(model_class:)
+ # Because we implement a custom index method that takes no arguments,
+ # there's nothing to limit. Explicitly override; the superclass isn't
+ # prepared for the case where model_class is not the type of @objects.
+ # This should be safe since administrators are expected to select a (very)
+ # limited number of agreements.
+ return
+ end
+
def index
if not current_user.is_invited
# New users cannot see user agreements until/unless invited to
@@ -36,6 +45,10 @@ class Arvados::V1::UserAgreementsController < ApplicationController
super
end
+ def self._signatures_method_description
+ "List all user agreement signature links from a user."
+ end
+
def signatures
current_user_uuid = (current_user.andand.is_admin && params[:uuid]) ||
current_user.uuid
@@ -51,6 +64,10 @@ class Arvados::V1::UserAgreementsController < ApplicationController
render_list
end
+ def self._sign_method_description
+ "Create a signature link from the current user for a given user agreement."
+ end
+
def sign
current_user_uuid = current_user.uuid
act_as_system_user do
@@ -65,10 +82,6 @@ class Arvados::V1::UserAgreementsController < ApplicationController
def create
usage_error
end
-
- def new
- usage_error
- end
def update
usage_error
diff --git a/services/api/app/controllers/arvados/v1/users_controller.rb b/services/api/app/controllers/arvados/v1/users_controller.rb
index 031dd2e4f9..a5329b7e97 100644
--- a/services/api/app/controllers/arvados/v1/users_controller.rb
+++ b/services/api/app/controllers/arvados/v1/users_controller.rb
@@ -30,6 +30,10 @@ class Arvados::V1::UsersController < ApplicationController
render_list
end
+ def self._current_method_description
+ "Return the user record associated with the API token authorizing this request."
+ end
+
def current
if current_user
@object = current_user
@@ -39,11 +43,19 @@ class Arvados::V1::UsersController < ApplicationController
end
end
+ def self._system_method_description
+ "Return this cluster's system (\"root\") user record."
+ end
+
def system
@object = system_user
show
end
+ def self._activate_method_description
+ "Set the `is_active` flag on a user record."
+ end
+
def activate
if params[:id] and params[:id].match(/\D/)
params[:uuid] = params.delete :id
@@ -92,6 +104,10 @@ class Arvados::V1::UsersController < ApplicationController
show
end
+ def self._setup_method_description
+ "Convenience method to \"fully\" set up a user record with a virtual machine login and notification email."
+ end
+
# create user object and all the needed links
def setup
if params[:uuid]
@@ -107,29 +123,16 @@ class Arvados::V1::UsersController < ApplicationController
@object = model_class.create! resource_attrs
end
- # It's not always possible for the client to know the user's
- # username when submitting this request: the username might have
- # been assigned automatically in create!() above. If client
- # provided a plain repository name, prefix it with the username
- # now that we know what it is.
- if params[:repo_name].nil?
- full_repo_name = nil
- elsif @object.username.nil?
- raise ArgumentError.
- new("cannot setup a repository because user has no username")
- elsif params[:repo_name].index("/")
- full_repo_name = params[:repo_name]
- else
- full_repo_name = "#{@object.username}/#{params[:repo_name]}"
- end
-
- @response = @object.setup(repo_name: full_repo_name,
- vm_uuid: params[:vm_uuid],
+ @response = @object.setup(vm_uuid: params[:vm_uuid],
send_notification_email: params[:send_notification_email])
send_json kind: "arvados#HashList", items: @response.as_api_response(nil)
end
+ def self._unsetup_method_description
+ "Unset a user's active flag and delete associated records."
+ end
+
# delete user agreements, vm, repository, login links; set state to inactive
def unsetup
reload_object_before_update
@@ -137,6 +140,10 @@ class Arvados::V1::UsersController < ApplicationController
show
end
+ def self._merge_method_description
+ "Transfer ownership of one user's data to another."
+ end
+
def merge
if (params[:old_user_uuid] || params[:new_user_uuid])
if !current_user.andand.is_admin
@@ -154,9 +161,7 @@ class Arvados::V1::UsersController < ApplicationController
return send_error("User in old_user_uuid not found", status: 422)
end
else
- if !Thread.current[:api_client].andand.is_trusted
- return send_error("supplied API token is not from a trusted client", status: 403)
- elsif Thread.current[:api_client_authorization].scopes != ['all']
+ if Thread.current[:api_client_authorization].scopes != ['all']
return send_error("cannot merge with a scoped token", status: 403)
end
@@ -166,9 +171,7 @@ class Arvados::V1::UsersController < ApplicationController
end
if new_auth.user.uuid[0..4] == Rails.configuration.ClusterID
- if !new_auth.api_client.andand.is_trusted
- return send_error("supplied new_user_token is not from a trusted client", status: 403)
- elsif new_auth.scopes != ['all']
+ if new_auth.scopes != ['all']
return send_error("supplied new_user_token has restricted scope", status: 403)
end
end
@@ -201,19 +204,30 @@ class Arvados::V1::UsersController < ApplicationController
def self._merge_requires_parameters
{
new_owner_uuid: {
- type: 'string', required: true,
+ type: 'string',
+ required: true,
+ description: "UUID of the user or group that will take ownership of data owned by the old user.",
},
new_user_token: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.",
},
redirect_to_new_user: {
- type: 'boolean', required: false, default: false,
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "If true, authorization attempts for the old user will be redirected to the new user.",
},
old_user_uuid: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.",
},
new_user_uuid: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "UUID of the user receiving ownership. You must be an admin to use this option.",
}
}
end
@@ -221,19 +235,30 @@ class Arvados::V1::UsersController < ApplicationController
def self._setup_requires_parameters
{
uuid: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "UUID of an existing user record to set up."
},
user: {
- type: 'object', required: false,
+ type: 'object',
+ required: false,
+ description: "Attributes of a new user record to set up.",
},
repo_name: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "This parameter is obsolete and ignored.",
},
vm_uuid: {
- type: 'string', required: false,
+ type: 'string',
+ required: false,
+ description: "If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.",
},
send_notification_email: {
- type: 'boolean', required: false, default: false,
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "If true, send an email to the user notifying them they can now access this Arvados cluster.",
},
}
end
@@ -241,7 +266,12 @@ class Arvados::V1::UsersController < ApplicationController
def self._update_requires_parameters
super.merge({
bypass_federation: {
- type: 'boolean', required: false, default: false,
+ type: 'boolean',
+ required: false,
+ default: false,
+ description: "If true, do not try to update the user on any other clusters in the federation,
+only the cluster that received the request.
+You must be an administrator to use this flag.",
},
})
end
diff --git a/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb b/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb
index cb637c2fda..ead78cc393 100644
--- a/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb
+++ b/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb
@@ -10,10 +10,18 @@ class Arvados::V1::VirtualMachinesController < ApplicationController
# Get all login permissons (user uuid, login account, SSH key) for a
# single VM
+ def self._logins_method_description
+ "List login permission links for a given virtual machine."
+ end
+
def logins
render_logins_for VirtualMachine.where(uuid: @object.uuid)
end
+ def self._get_all_logins_method_description
+ "List login permission links for all virtual machines."
+ end
+
# Get all login permissons for all VMs
def get_all_logins
render_logins_for VirtualMachine
diff --git a/services/api/app/controllers/arvados/v1/workflows_controller.rb b/services/api/app/controllers/arvados/v1/workflows_controller.rb
index 7cfdd9d741..407d9acd26 100644
--- a/services/api/app/controllers/arvados/v1/workflows_controller.rb
+++ b/services/api/app/controllers/arvados/v1/workflows_controller.rb
@@ -3,4 +3,12 @@
# SPDX-License-Identifier: AGPL-3.0
class Arvados::V1::WorkflowsController < ApplicationController
+ def update
+ if @object.collection_uuid.nil?
+ # Only allowed to update directly when collection_uuid is nil (legacy behavior)
+ super
+ else
+ raise ArvadosModel::PermissionDeniedError.new("Cannot directly update Workflow records that have collection_uuid set, must update the linked collection (#{@object.collection_uuid})")
+ end
+ end
end
diff --git a/services/api/app/controllers/sys_controller.rb b/services/api/app/controllers/sys_controller.rb
index 7d20cf77fd..6592a8d8e1 100644
--- a/services/api/app/controllers/sys_controller.rb
+++ b/services/api/app/controllers/sys_controller.rb
@@ -19,19 +19,35 @@ class SysController < ApplicationController
in_batches(of: 15).
update_all('is_trashed = true')
- # Sweep trashed projects and their contents (as well as role
- # groups that were trashed before #18340 when that was
- # disallowed)
+ # Want to make sure the #update_trash hook on the Group class
+ # runs. It does a couple of important things:
+ #
+ # - For projects, puts all the subprojects in the trashed_groups table.
+ #
+ # - For role groups, outbound permissions are deleted.
Group.
- where('delete_at is not null and delete_at < statement_timestamp()').each do |project|
- delete_project_and_contents(project.uuid)
+ where("is_trashed = false and trash_at < statement_timestamp()").each do |grp|
+ grp.is_trashed = true
+ grp.save
end
+
+ # Sweep groups and their contents that are ready to be deleted
Group.
- where('is_trashed = false and trash_at < statement_timestamp()').
- update_all('is_trashed = true')
+ where('delete_at is not null and delete_at < statement_timestamp()').each do |group|
+ delete_project_and_contents(group.uuid)
+ end
# Sweep expired tokens
ActiveRecord::Base.connection.execute("DELETE from api_client_authorizations where expires_at <= statement_timestamp()")
+ ActiveRecord::Base.connection.execute("DELETE from api_client_authorizations where refreshes_at <= statement_timestamp()")
+
+ # Sweep unused uuid_locks entries
+ ActiveRecord::Base.connection.execute("DELETE FROM uuid_locks WHERE uuid IN (SELECT uuid FROM uuid_locks FOR UPDATE SKIP LOCKED)")
+
+ # forget expired credential secrets
+ Credential.
+ where("expires_at < statement_timestamp() and secret != ''").
+ update_all("secret = ''")
end
head :no_content
end
@@ -43,19 +59,21 @@ class SysController < ApplicationController
if !p
raise "can't sweep group '#{p_uuid}', it may not exist"
end
- # First delete sub projects
- Group.where({group_class: 'project', owner_uuid: p_uuid}).each do |sub_project|
- delete_project_and_contents(sub_project.uuid)
- end
- # Next, iterate over all tables which have owner_uuid fields, with some
- # exceptions, and delete records owned by this project
- skipped_classes = ['Group', 'User']
- ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
- if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
- klass.where({owner_uuid: p_uuid}).in_batches(of: 15).destroy_all
+ if p.group_class == 'project'
+ # First delete sub projects and owned filter groups
+ Group.where({owner_uuid: p_uuid}).each do |sub_project|
+ delete_project_and_contents(sub_project.uuid)
+ end
+ # Next, iterate over all tables which have owner_uuid fields, with some
+ # exceptions, and delete records owned by this project
+ skipped_classes = ['Group', 'User']
+ ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+ if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
+ klass.where({owner_uuid: p_uuid}).in_batches(of: 15).destroy_all
+ end
end
end
- # Finally delete the project itself
+ # Finally delete the group itself
p.destroy
end
end
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index 0c67c9c9d8..a7b43a4399 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -76,12 +76,6 @@ class UserSessionsController < ApplicationController
# Give the API client a token for making API calls on behalf of
# the authenticated user
- # Stub: automatically register all new API clients
- api_client_url_prefix = callback_url.match(%r{^.*?://[^/]+})[0] + '/'
- act_as_system_user do
- @api_client = ApiClient.
- find_or_create_by(url_prefix: api_client_url_prefix)
- end
if Rails.configuration.Login.TokenLifetime > 0
if token_expiration == nil
token_expiration = db_current_time + Rails.configuration.Login.TokenLifetime
@@ -92,7 +86,6 @@ class UserSessionsController < ApplicationController
@api_client_auth = ApiClientAuthorization.
new(user: user,
- api_client: @api_client,
created_by_ip_address: remote_ip,
expires_at: token_expiration,
scopes: ["all"])
diff --git a/services/api/app/helpers/api_client_authorizations_helper.rb b/services/api/app/helpers/api_client_authorizations_helper.rb
deleted file mode 100644
index e1066badc8..0000000000
--- a/services/api/app/helpers/api_client_authorizations_helper.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module ApiClientAuthorizationsHelper
-end
diff --git a/services/api/app/helpers/api_clients_helper.rb b/services/api/app/helpers/api_clients_helper.rb
deleted file mode 100644
index 9604777598..0000000000
--- a/services/api/app/helpers/api_clients_helper.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module ApiClientsHelper
-end
diff --git a/services/api/app/helpers/authorized_keys_helper.rb b/services/api/app/helpers/authorized_keys_helper.rb
deleted file mode 100644
index 665fff7f7c..0000000000
--- a/services/api/app/helpers/authorized_keys_helper.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module AuthorizedKeysHelper
-end
diff --git a/services/api/app/helpers/collections_helper.rb b/services/api/app/helpers/collections_helper.rb
deleted file mode 100644
index ca44f474b9..0000000000
--- a/services/api/app/helpers/collections_helper.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module CollectionsHelper
-end
diff --git a/services/api/app/helpers/commits_helper.rb b/services/api/app/helpers/commits_helper.rb
deleted file mode 100644
index fdb83a0375..0000000000
--- a/services/api/app/helpers/commits_helper.rb
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module CommitsHelper
- extend CurrentApiClient
-
- class GitError < RequestError
- def http_status
- 422
- end
- end
-
- def self.git_check_ref_format(e)
- if !e or e.empty? or e[0] == '-' or e[0] == '$'
- # definitely not valid
- false
- else
- `git check-ref-format --allow-onelevel #{e.shellescape}`
- $?.success?
- end
- end
-
- # Return an array of commits (each a 40-char sha1) satisfying the
- # given criteria.
- #
- # Return [] if the revisions given in minimum/maximum are invalid or
- # don't exist in the given repository.
- #
- # Raise ArgumentError if the given repository is invalid, does not
- # exist, or cannot be read for any reason. (Any transient error that
- # prevents commit ranges from resolving must raise rather than
- # returning an empty array.)
- #
- # repository can be the name of a locally hosted repository or a git
- # URL (see git-fetch(1)). Currently http, https, and git schemes are
- # supported.
- def self.find_commit_range repository, minimum, maximum, exclude
- if minimum and minimum.empty?
- minimum = nil
- end
-
- if minimum and !git_check_ref_format(minimum)
- Rails.logger.warn "find_commit_range called with invalid minimum revision: '#{minimum}'"
- return []
- end
-
- if maximum and !git_check_ref_format(maximum)
- Rails.logger.warn "find_commit_range called with invalid maximum revision: '#{maximum}'"
- return []
- end
-
- if !maximum
- maximum = "HEAD"
- end
-
- gitdir, is_remote = git_dir_for repository
- fetch_remote_repository gitdir, repository if is_remote
- ENV['GIT_DIR'] = gitdir
-
- commits = []
-
- # Get the commit hash for the upper bound
- max_hash = nil
- git_max_hash_cmd = "git rev-list --max-count=1 #{maximum.shellescape} --"
- IO.foreach("|#{git_max_hash_cmd}") do |line|
- max_hash = line.strip
- end
-
- # If not found, nothing else to do
- if !max_hash
- Rails.logger.warn "no refs found looking for max_hash: `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` returned no output"
- return []
- end
-
- # If string is invalid, nothing else to do
- if !git_check_ref_format(max_hash)
- Rails.logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` was invalid for max_hash: #{max_hash}"
- return []
- end
-
- resolved_exclude = nil
- if exclude
- resolved_exclude = []
- exclude.each do |e|
- if git_check_ref_format(e)
- IO.foreach("|git rev-list --max-count=1 #{e.shellescape} --") do |line|
- resolved_exclude.push(line.strip)
- end
- else
- Rails.logger.warn "find_commit_range called with invalid exclude invalid characters: '#{exclude}'"
- return []
- end
- end
- end
-
- if minimum
- # Get the commit hash for the lower bound
- min_hash = nil
- git_min_hash_cmd = "git rev-list --max-count=1 #{minimum.shellescape} --"
- IO.foreach("|#{git_min_hash_cmd}") do |line|
- min_hash = line.strip
- end
-
- # If not found, nothing else to do
- if !min_hash
- Rails.logger.warn "no refs found looking for min_hash: `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` returned no output"
- return []
- end
-
- # If string is invalid, nothing else to do
- if !git_check_ref_format(min_hash)
- Rails.logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` was invalid for min_hash: #{min_hash}"
- return []
- end
-
- # Now find all commits between them
- IO.foreach("|git rev-list #{min_hash.shellescape}..#{max_hash.shellescape} --") do |line|
- hash = line.strip
- commits.push(hash) if !resolved_exclude or !resolved_exclude.include? hash
- end
-
- commits.push(min_hash) if !resolved_exclude or !resolved_exclude.include? min_hash
- else
- commits.push(max_hash) if !resolved_exclude or !resolved_exclude.include? max_hash
- end
-
- commits
- end
-
- # Given a repository (url, or name of hosted repo) and commit sha1,
- # copy the commit into the internal git repo (if necessary), and tag
- # it with the given tag (typically a job UUID).
- #
- # The repo can be a remote url, but in this case sha1 must already
- # be present in our local cache for that repo: e.g., sha1 was just
- # returned by find_commit_range.
- def self.tag_in_internal_repository repo_name, sha1, tag
- unless git_check_ref_format tag
- raise ArgumentError.new "invalid tag #{tag}"
- end
- unless /^[0-9a-f]{40}$/ =~ sha1
- raise ArgumentError.new "invalid sha1 #{sha1}"
- end
- src_gitdir, _ = git_dir_for repo_name
- unless src_gitdir
- raise ArgumentError.new "no local repository for #{repo_name}"
- end
- dst_gitdir = Rails.configuration.Containers.JobsAPI.GitInternalDir
-
- begin
- commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
- rescue GitError
- commit_in_dst = false
- end
-
- tag_cmd = "tag --force #{tag.shellescape} #{sha1.shellescape}^{commit}"
- if commit_in_dst == sha1
- must_git(dst_gitdir, tag_cmd)
- else
- # git-fetch is faster than pack-objects|unpack-objects, but
- # git-fetch can't fetch by sha1. So we first try to fetch a
- # branch that has the desired commit, and if that fails (there
- # is no such branch, or the branch we choose changes under us in
- # race), we fall back to pack|unpack.
- begin
- branches = must_git(src_gitdir,
- "branch --contains #{sha1.shellescape}")
- m = branches.match(/^. (\w+)\n/)
- if !m
- raise GitError.new "commit is not on any branch"
- end
- branch = m[1]
- must_git(dst_gitdir,
- "fetch file://#{src_gitdir.shellescape} #{branch.shellescape}")
- # Even if all of the above steps succeeded, we might still not
- # have the right commit due to a race, in which case tag_cmd
- # will fail, and we'll need to fall back to pack|unpack. So
- # don't be tempted to condense this tag_cmd and the one in the
- # rescue block into a single attempt.
- must_git(dst_gitdir, tag_cmd)
- rescue GitError
- must_pipe("echo #{sha1.shellescape}",
- "git --git-dir #{src_gitdir.shellescape} pack-objects -q --revs --stdout",
- "git --git-dir #{dst_gitdir.shellescape} unpack-objects -q")
- must_git(dst_gitdir, tag_cmd)
- end
- end
- end
-
- protected
-
- def self.remote_url? repo_name
- /^(https?|git):\/\// =~ repo_name
- end
-
- # Return [local_git_dir, is_remote]. If is_remote, caller must use
- # fetch_remote_repository to ensure content is up-to-date.
- #
- # Raises an exception if the latest content could not be fetched for
- # any reason.
- def self.git_dir_for repo_name
- if remote_url? repo_name
- return [cache_dir_for(repo_name), true]
- end
- repos = Repository.readable_by(current_user).where(name: repo_name)
- if repos.count == 0
- raise ArgumentError.new "Repository not found: '#{repo_name}'"
- elsif repos.count > 1
- Rails.logger.error "Multiple repositories with name=='#{repo_name}'!"
- raise ArgumentError.new "Name conflict"
- else
- return [repos.first.server_path, false]
- end
- end
-
- def self.cache_dir_for git_url
- File.join(cache_dir_base, Digest::SHA1.hexdigest(git_url) + ".git").to_s
- end
-
- def self.cache_dir_base
- Rails.root.join 'tmp', 'git-cache'
- end
-
- def self.fetch_remote_repository gitdir, git_url
- # Caller decides which protocols are worth using. This is just a
- # safety check to ensure we never use urls like "--flag" or wander
- # into git's hardlink features by using bare "/path/foo" instead
- # of "file:///path/foo".
- unless /^[a-z]+:\/\// =~ git_url
- raise ArgumentError.new "invalid git url #{git_url}"
- end
- begin
- must_git gitdir, "branch"
- rescue GitError => e
- raise unless /Not a git repository/i =~ e.to_s
- # OK, this just means we need to create a blank cache repository
- # before fetching.
- FileUtils.mkdir_p gitdir
- must_git gitdir, "init"
- end
- must_git(gitdir,
- "fetch --no-progress --tags --prune --force --update-head-ok #{git_url.shellescape} 'refs/heads/*:refs/heads/*'")
- end
-
- def self.must_git gitdir, *cmds
- # Clear token in case a git helper tries to use it as a password.
- orig_token = ENV['ARVADOS_API_TOKEN']
- ENV['ARVADOS_API_TOKEN'] = ''
- last_output = ''
- begin
- git = "git --git-dir #{gitdir.shellescape}"
- cmds.each do |cmd|
- last_output = must_pipe git+" "+cmd
- end
- ensure
- ENV['ARVADOS_API_TOKEN'] = orig_token
- end
- return last_output
- end
-
- def self.must_pipe *cmds
- cmd = cmds.join(" 2>&1 |") + " 2>&1"
- out = IO.read("| CURRENT_TIMESTAMP)', token_uuid).
+ includes(:user).
+ where('uuid=?', token_uuid).
+ where('expires_at is null or expires_at > CURRENT_TIMESTAMP').
+ where('refreshes_at is null or refreshes_at > CURRENT_TIMESTAMP').
first
if auth && auth.user &&
(secret == auth.api_token ||
@@ -243,8 +234,10 @@ class ApiClientAuthorization < ArvadosModel
# and then insert a local row for a faster lookup next time.
hmac = OpenSSL::HMAC.hexdigest('sha256', Rails.configuration.SystemRootToken, token)
auth = ApiClientAuthorization.
- includes(:user, :api_client).
- where('api_token in (?, ?) and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token, hmac).
+ includes(:user).
+ where('api_token in (?, ?)', token, hmac).
+ where('expires_at is null or expires_at > CURRENT_TIMESTAMP').
+ where('refreshes_at is null or refreshes_at > CURRENT_TIMESTAMP').
first
if auth && auth.user
return auth
@@ -388,18 +381,32 @@ class ApiClientAuthorization < ArvadosModel
stored_secret = stored_secret || secret
# We will accept this token (and avoid reloading the user
- # record) for 'RemoteTokenRefresh' (default 5 minutes).
- exp = [db_current_time + Rails.configuration.Login.RemoteTokenRefresh,
- remote_token.andand['expires_at']].compact.min
+ # record) for at most 'RemoteTokenRefresh' (default 5 minutes).
+ cache_expires = remote_token.andand['expires_at']
+ cache_refreshes = db_current_time + Rails.configuration.Login.RemoteTokenRefresh
scopes = remote_token.andand['scopes'] || ['all']
+ retries = 0
begin
- retries ||= 0
- auth = ApiClientAuthorization.find_or_create_by(uuid: token_uuid) do |auth|
- auth.user = user
- auth.api_token = stored_secret
- auth.api_client_id = 0
- auth.scopes = scopes
- auth.expires_at = exp
+ # In older versions of Rails, `find_or_create_by` did not try to
+ # address race conditions, and the rescue logic below expects that
+ # behavior. This block reimplements the old method so we can handle
+ # races ourselves.
+ if auth = ApiClientAuthorization.find_by(uuid: token_uuid)
+ auth.update!(
+ user: user,
+ api_token: stored_secret,
+ scopes: scopes,
+ expires_at: cache_expires,
+ refreshes_at: cache_refreshes,
+ )
+ else
+ auth = ApiClientAuthorization.create(uuid: token_uuid) do |auth|
+ auth.user = user
+ auth.api_token = stored_secret
+ auth.scopes = scopes
+ auth.expires_at = cache_expires
+ auth.refreshes_at = cache_refreshes
+ end
end
rescue ActiveRecord::RecordNotUnique
Rails.logger.debug("cached remote token #{token_uuid} already exists, retrying...")
@@ -420,11 +427,6 @@ class ApiClientAuthorization < ArvadosModel
return nil
end
end
- auth.update!(user: user,
- api_token: stored_secret,
- api_client_id: 0,
- scopes: scopes,
- expires_at: exp)
Rails.logger.debug "cached remote token #{token_uuid} with secret #{stored_secret} and scopes #{scopes} in local db"
auth.api_token = secret
return auth
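The hunks above split the old single `expires_at` check into two columns: `expires_at` still records the token's own expiry, while the new `refreshes_at` bounds how long a locally cached copy of a remote cluster's token is trusted before it must be revalidated. A rough standalone sketch of the resulting validity predicate (plain Ruby, not the shipped code):

```ruby
# A cached token row is usable only while neither timestamp has passed;
# nil means "no limit". expires_at is the token's own expiry;
# refreshes_at is the local cache deadline (now + Login.RemoteTokenRefresh,
# 5 minutes by default).
def cached_token_usable?(expires_at, refreshes_at, now: Time.now.utc)
  [expires_at, refreshes_at].all? { |t| t.nil? || t > now }
end

now = Time.utc(2024, 1, 1, 12, 0, 0)
p cached_token_usable?(nil, now + 300, now: now)       # => true
p cached_token_usable?(now - 60, now + 300, now: now)  # => false: token expired
p cached_token_usable?(now + 3600, now - 1, now: now)  # => false: stale, revalidate remotely
```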
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index 9ee2cca410..20251668a1 100644
--- a/services/api/app/models/arvados_model.rb
+++ b/services/api/app/models/arvados_model.rb
@@ -12,7 +12,7 @@ class ArvadosModel < ApplicationRecord
self.abstract_class = true
include ArvadosModelUpdates
- include CurrentApiClient # current_user, current_api_client, etc.
+ include CurrentApiClient # current_user, current_api_client_authorization, etc.
include DbCurrentTime
extend RecordFilters
@@ -92,10 +92,6 @@ class ArvadosModel < ApplicationRecord
kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
end
- def href
- "#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}"
- end
-
def self.permit_attribute_params raw_params
# strong_parameters does not provide security: permissions are
# implemented with before_save hooks.
@@ -170,8 +166,9 @@ class ArvadosModel < ApplicationRecord
end.map(&:name)
end
- def self.attribute_column attr
- self.columns.select { |col| col.name == attr.to_s }.first
+ def self.any_searchable_columns operator
+ datetime_columns = self.columns.select { |col| col.type == :datetime }.map(&:name)
+ self.searchable_columns(operator) - datetime_columns
end
def self.attributes_required_columns
@@ -233,7 +230,18 @@ class ArvadosModel < ApplicationRecord
# If an index request reads that column from the database,
# APIs that return lists will only fetch objects until reaching
# max_index_database_read bytes of data from those columns.
- []
+ # This default implementation returns all columns that aren't "small".
+ self.columns.select do |col|
+ col_meta = col.sql_type_metadata
+ case col_meta.type
+ when :boolean, :datetime, :float, :integer
+ false
+ else
+ # 1024 is a semi-arbitrary choice. As of Arvados 3.0.0, "regular"
+ # strings are typically 255, and big strings are much larger (512K).
+ col_meta.limit.nil? or (col_meta.limit > 1024)
+ end
+ end.map(&:name)
end
# If current user can manage the object, return an array of uuids of
@@ -539,7 +547,8 @@ class ArvadosModel < ApplicationRecord
def self.full_text_searchable_columns
self.columns.select do |col|
- [:string, :text, :jsonb].include?(col.type)
+ [:string, :text, :jsonb].include?(col.type) and
+ col.name !~ /(^|_)(^container_image|hash|uuid)$/
end.map(&:name)
end
@@ -564,18 +573,6 @@ class ArvadosModel < ApplicationRecord
"to_tsvector('english', substr(#{parts.join(" || ' ' || ")}, 0, 8000))"
end
- def self.apply_filters query, filters
- ft = record_filters filters, self
- if not ft[:cond_out].any?
- return query
- end
- ft[:joins].each do |t|
- query = query.joins(t)
- end
- query.where('(' + ft[:cond_out].join(') AND (') + ')',
- *ft[:param_out])
- end
-
@_add_uuid_to_name = false
def add_uuid_to_make_unique_name
@_add_uuid_to_name = true
@@ -752,14 +749,13 @@ class ArvadosModel < ApplicationRecord
current_time = db_current_time
self.created_at ||= created_at_was || current_time
self.updated_at = current_time
- self.owner_uuid ||= current_default_owner if self.respond_to? :owner_uuid=
+ self.owner_uuid ||= current_user.uuid if current_user && self.respond_to?(:owner_uuid=)
if !anonymous_updater
self.modified_by_user_uuid = current_user ? current_user.uuid : nil
end
if !timeless_updater
self.modified_at = current_time
end
- self.modified_by_client_uuid = current_api_client ? current_api_client.uuid : nil
true
end
@@ -814,7 +810,7 @@ class ArvadosModel < ApplicationRecord
coder = Serializer[type]
@serialized_attributes ||= {}
@serialized_attributes[colname.to_s] = coder
- super(colname, coder)
+ super(colname, coder: coder)
end
def self.serialized_attributes
@@ -970,10 +966,12 @@ class ArvadosModel < ApplicationRecord
# in the database.
self.runtime_constraints = {
'API' => false,
- 'cuda' => {
+ 'gpu' => {
'device_count' => 0,
'driver_version' => '',
- 'hardware_capability' => '',
+ 'hardware_target' => [],
+ 'stack' => '',
+ 'vram' => 0,
},
'keep_cache_disk' => 0,
'keep_cache_ram' => 0,
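Earlier in this file's diff, the default `limit_index_columns_read` implementation changes from an empty list to a size heuristic: any column whose type or declared limit suggests it can hold large values falls under the `max_index_database_read` byte budget. A standalone sketch of that selection logic, using a stand-in for ActiveRecord's column metadata:

```ruby
# Stand-in for ActiveRecord::ConnectionAdapters::Column#sql_type_metadata.
ColumnMeta = Struct.new(:name, :type, :limit)

# Mirrors the heuristic above: scalar types are "small"; everything else
# counts as big unless its declared limit is 1024 or less.
def big_value_columns(columns)
  columns.select do |col|
    case col.type
    when :boolean, :datetime, :float, :integer
      false
    else
      col.limit.nil? || col.limit > 1024
    end
  end.map(&:name)
end

cols = [ColumnMeta.new('uuid', :string, 255),
        ColumnMeta.new('manifest_text', :text, nil),
        ColumnMeta.new('modified_at', :datetime, nil),
        ColumnMeta.new('file_count', :integer, nil)]
p big_value_columns(cols)  # => ["manifest_text"]
```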
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index 16e85c0dd9..ce6bb34dfd 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -34,10 +34,18 @@ class Collection < ArvadosModel
validate :versioning_metadata_updates, on: :update
validate :past_versions_cannot_be_updated, on: :update
validate :protected_managed_properties_updates, on: :update
+ validate :workflow_type_property, on: :update
after_validation :set_file_count_and_total_size
before_save :set_file_names
+ after_save :check_linked_workflows
around_update :manage_versioning, unless: :is_past_version?
+ has_many :workflows,
+ class_name: 'Workflow',
+ foreign_key: 'collection_uuid',
+ primary_key: 'uuid',
+ dependent: :destroy
+
api_accessible :user, extend: :common do |t|
t.add lambda { |x| x.name || "" }, as: :name
t.add :description
@@ -84,10 +92,6 @@ class Collection < ArvadosModel
super + ["updated_at", "file_names"]
end
- def self.limit_index_columns_read
- ["manifest_text"]
- end
-
FILE_TOKEN = /^[[:digit:]]+:[[:digit:]]+:/
def check_signatures
throw(:abort) if self.manifest_text.nil?
@@ -605,6 +609,84 @@ class Collection < ArvadosModel
super - ["manifest_text", "storage_classes_desired", "storage_classes_confirmed", "current_version_uuid"]
end
+ def check_linked_workflows
+ # - can't be linked (yet) if it is a new record.
+ #
+ # - properties["type"]=>"workflow" is protected by the
+ # "workflow_type_property" validation and can't be changed or removed as
+ # long as there are linked workflows
+ #
+ # - "workflows" is provided by the ActiveRecord association at the
+  #   top of the file; we only want to do this (including
+ # enforcement of property contents) if the collection is linked.
+ if !new_record? && properties["type"] == "workflow" && workflows.any?
+ update_linked_workflows(workflows, true)
+ end
+ end
+
+ def update_linked_workflows(workflows_to_update, should_save)
+ workflowMain = self.properties["arv:workflowMain"]
+ inputs = self.properties["arv:cwl_inputs"]
+ outputs = self.properties["arv:cwl_outputs"]
+ requirements = self.properties["arv:cwl_requirements"]
+ hints = self.properties["arv:cwl_hints"]
+
+ [['arv:workflowMain', workflowMain, String],
+ ['arv:cwl_inputs', inputs, Array],
+ ['arv:cwl_outputs', outputs, Array],
+ ['arv:cwl_requirements', requirements, Array],
+ ['arv:cwl_hints', hints, Array],
+ ].each do |key, val, type|
+ if val.nil?
+ raise "missing field '#{key}' in collection properties"
+ end
+ if !val.is_a?(type)
+ raise "expected field '#{key}' in collection properties to be a #{type}"
+ end
+ end
+
+ step = {
+ id: "#main/" + workflowMain,
+ in: [],
+ out: [],
+ run: "keep:#{self.portable_data_hash}/#{workflowMain}",
+ label: name
+ }
+
+ inputs.each do |i|
+ step[:in].push({id: "#main/step/#{Collection.cwl_shortname(i['id'])}",
+ source: i['id']})
+ end
+
+ outputs.each do |i|
+ outid = "#main/step/#{Collection.cwl_shortname(i['id'])}"
+ step[:out].push({"id": outid})
+ i['outputSource'] = outid
+ end
+
+ wrapper = {
+ class: "Workflow",
+ id: "#main",
+ inputs: inputs,
+ outputs: outputs,
+ steps: [step],
+ requirements: requirements + [{"class": "SubworkflowFeatureRequirement"}],
+ hints: hints,
+ }
+
+ doc = SafeJSON.dump({cwlVersion: "v1.2", "$graph": [wrapper]})
+
+ workflows_to_update.each do |w|
+ w.name = self.name
+ w.description = self.description
+ w.definition = doc
+ w.owner_uuid = self.owner_uuid
+ w.save! if should_save
+ end
+
+ true
+ end
+
protected
# Although the defaults for these columns is already set up on the schema,
@@ -714,6 +796,19 @@ class Collection < ArvadosModel
true
end
+ def workflow_type_property
+ return if properties["type"] == properties_was["type"] || properties_was["type"] != "workflow"
+
+ # properties["type"] changed and the previous value of
+ # properties["type"] was "workflow"
+
+ linked_workflows = Workflow.where(collection_uuid: self.uuid)
+ if !linked_workflows.empty?
+ errors.add(:properties, "cannot change 'type' property when there are linked workflows")
+ return false
+ end
+ end
+
def versioning_metadata_updates
valid = true
if !is_past_version? && current_version_uuid_changed?
@@ -736,4 +831,8 @@ class Collection < ArvadosModel
def log_update
super unless (saved_changes.keys - UNLOGGED_CHANGES).empty?
end
+
+ def self.cwl_shortname inputid
+ inputid.split("/")[-1]
+ end
end
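The new `check_linked_workflows`/`update_linked_workflows` path regenerates each linked Workflow record from the collection's properties, so those properties become the source of truth. A hedged example of the property shape the validations above expect — the keys and types come from the diff, the values are invented:

```ruby
# Invented values; only the keys and their types are dictated by
# update_linked_workflows. A missing or mistyped field raises.
properties = {
  "type"                 => "workflow",
  "arv:workflowMain"     => "main.cwl",                      # String
  "arv:cwl_inputs"       => [{"id" => "#main/input_fastq"}], # Array
  "arv:cwl_outputs"      => [{"id" => "#main/out_bam"}],     # Array
  "arv:cwl_requirements" => [],                              # Array
  "arv:cwl_hints"        => [],                              # Array
}
```

The generated wrapper then runs `keep:<portable_data_hash>/main.cwl` as a single subworkflow step, which is why `SubworkflowFeatureRequirement` is appended to the requirements.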
diff --git a/services/api/app/models/computed_permission.rb b/services/api/app/models/computed_permission.rb
new file mode 100644
index 0000000000..c89860c48e
--- /dev/null
+++ b/services/api/app/models/computed_permission.rb
@@ -0,0 +1,62 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'record_filters'
+
+class ComputedPermission < ApplicationRecord
+ self.table_name = 'materialized_permissions'
+ include CurrentApiClient
+ include CommonApiTemplate
+ extend RecordFilters
+
+ PERM_LEVEL_S = ['none', 'can_read', 'can_write', 'can_manage']
+
+ api_accessible :user do |t|
+ t.add :user_uuid
+ t.add :target_uuid
+ t.add :perm_level_s, as: :perm_level
+ end
+
+ protected
+
+ def perm_level_s
+ PERM_LEVEL_S[perm_level]
+ end
+
+ def self.default_orders
+ ["#{table_name}.user_uuid", "#{table_name}.target_uuid"]
+ end
+
+ def self.readable_by(*args)
+ self
+ end
+
+ def self.searchable_columns(operator)
+ if !operator.match(/[<=>]/) && !operator.in?(['in', 'not in'])
+ []
+ else
+ ['user_uuid', 'target_uuid']
+ end
+ end
+
+ def self.limit_index_columns_read
+ []
+ end
+
+ def self.selectable_attributes
+ %w(user_uuid target_uuid perm_level)
+ end
+
+ def self.columns_for_attributes(select_attributes)
+ select_attributes
+ end
+
+ def self.serialized_attributes
+ {}
+ end
+
+ def self.unique_columns
+ []
+ end
+end
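`ComputedPermission` is a read-only view over the `materialized_permissions` table, which stores permission levels as small integers; `perm_level_s` renders them for the API. A sketch of the mapping, with made-up UUIDs:

```ruby
PERM_LEVEL_S = ['none', 'can_read', 'can_write', 'can_manage']

rows = [
  {user_uuid: 'zzzzz-tpzed-aaaaaaaaaaaaaaa', target_uuid: 'zzzzz-j7d0g-bbbbbbbbbbbbbbb', perm_level: 3},
  {user_uuid: 'zzzzz-tpzed-ccccccccccccccc', target_uuid: 'zzzzz-j7d0g-bbbbbbbbbbbbbbb', perm_level: 1},
]
rows.each do |r|
  puts "#{r[:user_uuid]} -> #{r[:target_uuid]}: #{PERM_LEVEL_S[r[:perm_level]]}"
end
# zzzzz-tpzed-aaa... -> zzzzz-j7d0g-bbb...: can_manage
# zzzzz-tpzed-ccc... -> zzzzz-j7d0g-bbb...: can_read
```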
diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb
index ee338b81ff..594fa1d20f 100644
--- a/services/api/app/models/container.rb
+++ b/services/api/app/models/container.rb
@@ -30,6 +30,7 @@ class Container < ArvadosModel
serialize :runtime_constraints, Hash
serialize :command, Array
serialize :scheduling_parameters, Hash
+ serialize :output_glob, Array
after_find :fill_container_defaults_after_find
before_validation :fill_field_defaults, :if => :new_record?
@@ -48,6 +49,7 @@ class Container < ArvadosModel
before_save :update_secret_mounts_md5
before_save :scrub_secrets
before_save :clear_runtime_status_when_queued
+ before_save :assign_external_ports
after_save :update_cr_logs
after_save :handle_completed
@@ -73,6 +75,7 @@ class Container < ArvadosModel
t.add :mounts
t.add :output
t.add :output_path
+ t.add :output_glob
t.add :priority
t.add :progress
t.add :runtime_constraints
@@ -90,6 +93,8 @@ class Container < ArvadosModel
t.add :output_properties
t.add :cost
t.add :subrequests_cost
+ t.add :service
+ t.add :published_ports
end
# Supported states for a container
@@ -110,10 +115,6 @@ class Container < ArvadosModel
Complete => [Cancelled]
}
- def self.limit_index_columns_read
- ["mounts"]
- end
-
def self.full_text_searchable_columns
super - ["secret_mounts", "secret_mounts_md5", "runtime_token", "gateway_address", "output_storage_classes"]
end
@@ -164,6 +165,7 @@ class Container < ArvadosModel
cwd: req.cwd,
environment: req.environment,
output_path: req.output_path,
+ output_glob: req.output_glob,
container_image: resolve_container_image(req.container_image),
mounts: resolve_mounts(req.mounts),
runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
@@ -173,6 +175,8 @@ class Container < ArvadosModel
runtime_user_uuid: runtime_user.uuid,
runtime_auth_scopes: runtime_auth_scopes,
output_storage_classes: req.output_storage_classes,
+ service: req.service,
+ published_ports: req.published_ports,
}
end
act_as_system_user do
@@ -208,7 +212,8 @@ class Container < ArvadosModel
if rc['keep_cache_disk'] == 0 and rc['keep_cache_ram'] == 0
rc['keep_cache_disk'] = bound_keep_cache_disk(rc['ram'])
end
- rc
+ ContainerRequest.translate_cuda_to_gpu rc
+ self.deep_sort_hash(rc)
end
# Return a mounts hash suitable for a Container, i.e., with every
@@ -263,6 +268,9 @@ class Container < ArvadosModel
candidates = candidates.where('output_path = ?', attrs[:output_path])
log_reuse_info(candidates) { "after filtering on output_path #{attrs[:output_path].inspect}" }
+ candidates = candidates.where_serialized(:output_glob, attrs[:output_glob], md5: true)
+ log_reuse_info(candidates) { "after filtering on output_glob #{attrs[:output_glob].inspect}" }
+
image = resolve_container_image(attrs[:container_image])
candidates = candidates.where('container_image = ?', image)
log_reuse_info(candidates) { "after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})" }
@@ -303,6 +311,28 @@ class Container < ArvadosModel
resolved_runtime_constraints.delete('keep_cache_ram'),
].uniq,
}
+
+ resolved_gpu = resolved_runtime_constraints['gpu']
+ if resolved_gpu.nil? or resolved_gpu['device_count'] == 0
+ runtime_constraint_variations[:gpu] = [
+ # Check for constraints without gpu
+ # (containers that predate the constraint)
+ nil,
+ # The default "don't need GPUs" value
+ {
+ 'device_count' => 0,
+ 'driver_version' => '',
+ 'hardware_target' => [],
+ 'stack' => '',
+ 'vram' => 0,
+ },
+ # The requested value
+ resolved_runtime_constraints.delete('gpu')
+ ].uniq
+ end
+
+ # Note: deprecated in favor of the more general "GPU" constraint above
+  # Kept for backwards compatibility.
resolved_cuda = resolved_runtime_constraints['cuda']
if resolved_cuda.nil? or resolved_cuda['device_count'] == 0
runtime_constraint_variations[:cuda] = [
@@ -318,7 +348,17 @@ class Container < ArvadosModel
# The requested value
resolved_runtime_constraints.delete('cuda')
].uniq
+ else
+ # Need to check
+ # a) for legacy containers that only mention CUDA
+ # b) for new containers that were submitted with the old API that
+ # list both CUDA and GPU
+ runtime_constraint_variations[:gpu] = [
+ nil,
+ resolved_runtime_constraints.delete('gpu')
+ ]
end
+
reusable_runtime_constraints = hash_product(**runtime_constraint_variations)
.map { |v| resolved_runtime_constraints.merge(v) }
@@ -421,15 +461,7 @@ class Container < ArvadosModel
end
def self.readable_by(*users_list)
- # Load optional keyword arguments, if they exist.
- if users_list.last.is_a? Hash
- kwargs = users_list.pop
- else
- kwargs = {}
- end
- if users_list.select { |u| u.is_admin }.any?
- return super
- end
+ return super if users_list.select { |u| u.is_a?(User) && u.is_admin }.any?
Container.where(ContainerRequest.readable_by(*users_list).where("containers.uuid = container_requests.container_uuid").arel.exists)
end
@@ -482,6 +514,7 @@ class Container < ArvadosModel
self.environment ||= {}
self.runtime_constraints ||= {}
self.mounts ||= {}
+ self.output_glob ||= []
self.cwd ||= "."
self.priority ||= 0
self.scheduling_parameters ||= {}
@@ -531,11 +564,12 @@ class Container < ArvadosModel
if self.new_record?
permitted.push(:owner_uuid, :command, :container_image, :cwd,
- :environment, :mounts, :output_path, :priority,
- :runtime_constraints, :scheduling_parameters,
- :secret_mounts, :runtime_token,
- :runtime_user_uuid, :runtime_auth_scopes,
- :output_storage_classes)
+ :environment, :mounts, :output_path, :output_glob,
+ :priority, :runtime_constraints,
+ :scheduling_parameters, :secret_mounts,
+ :runtime_token, :runtime_user_uuid,
+ :runtime_auth_scopes, :output_storage_classes,
+ :service, :published_ports)
end
case self.state
@@ -677,7 +711,6 @@ class Container < ArvadosModel
# dispatcher user, so expires_at isn't enforced by API.MaxTokenLifetime.
self.auth = ApiClientAuthorization.
create!(user_id: User.find_by_uuid(self.runtime_user_uuid).id,
- api_client_id: 0,
scopes: self.runtime_auth_scopes)
end
end
@@ -724,6 +757,65 @@ class Container < ArvadosModel
end
end
+ def assign_external_ports
+ if state_was == Running && state != Running
+ ActiveRecord::Base.connection.exec_query(
+ 'delete from container_ports where container_uuid=$1',
+ 'assign_external_ports',
+ [uuid])
+ elsif state_was != Running && state == Running
+ exturl = Rails.configuration.Services.ContainerWebServices.ExternalURL
+ port_min = Rails.configuration.Services.ContainerWebServices.ExternalPortMin
+ port_max = Rails.configuration.Services.ContainerWebServices.ExternalPortMax
+ if port_min.andand > 0 &&
+ port_max.andand > 0 &&
+ !exturl.andand.host.andand.starts_with?("*")
+ ActiveRecord::Base.connection.execute(
+ 'lock table container_ports in exclusive mode')
+ published_ports.each do |ppkey, ppvalue|
+ external_port = nil
+ ActiveRecord::Base.connection.exec_query(
+ 'select * from generate_series($1::int, $2::int) as port ' +
+ 'where port not in (select external_port from container_ports) ' +
+ 'limit 1',
+ 'assign_external_ports',
+ [port_min, port_max]).each do |row|
+ external_port = row['port']
+ end
+ if !external_port
+ Rails.logger.debug("no ports available for #{uuid} port #{ppkey}")
+ break
+ end
+ ActiveRecord::Base.connection.exec_query(
+ 'insert into container_ports ' +
+ '(external_port, container_uuid, container_port) ' +
+ 'values ($1, $2, $3)',
+ 'assign_external_ports',
+ [external_port, uuid, ppkey.to_i])
+ ppvalue['external_port'] = external_port
+ published_ports[ppkey] = ppvalue
+ end
+ end
+ published_ports.each do |ppkey, ppvalue|
+ baseurl = exturl.dup
+ if baseurl.host.starts_with?("*")
+ baseurl.host = "#{uuid}-#{ppkey}#{baseurl.host[1..]}"
+ elsif ppvalue['external_port'].andand > 0
+ baseurl.port = ppvalue['external_port'].to_s
+ else
+ next
+ end
+ ppvalue['base_url'] = baseurl.to_s
+ initialurl = baseurl
+ if ppvalue['initial_path'] && ppvalue['initial_path'] != ""
+ initialurl.path = "/" + ppvalue['initial_path'].delete_prefix("/")
+ end
+ ppvalue['initial_url'] = initialurl.to_s
+ published_ports[ppkey] = ppvalue
+ end
+ end
+ end
+
def handle_completed
# This container is finished so finalize any associated container requests
# that are associated with this container.
@@ -798,6 +890,7 @@ class Container < ArvadosModel
cwd: self.cwd,
environment: self.environment,
output_path: self.output_path,
+ output_glob: self.output_glob,
container_image: self.container_image,
mounts: self.mounts,
runtime_constraints: self.runtime_constraints,
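`assign_external_ports` above allocates one external port per published container port under an exclusive lock on `container_ports`, using `generate_series` to find the first unused value in the configured range. The same search, reduced to plain Ruby (the database version exists so the scan and insert stay atomic across dispatchers):

```ruby
require 'set'

# First port in [port_min, port_max] not already allocated, or nil if the
# range is exhausted (the model then logs "no ports available" and stops).
def first_free_port(port_min, port_max, used)
  (port_min..port_max).find { |port| !used.include?(port) }
end

used = Set[2000, 2001, 2003]
p first_free_port(2000, 2005, used)  # => 2002
p first_free_port(2000, 2001, used)  # => nil
```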
diff --git a/services/api/app/controllers/arvados/v1/humans_controller.rb b/services/api/app/models/container_port.rb
similarity index 56%
rename from services/api/app/controllers/arvados/v1/humans_controller.rb
rename to services/api/app/models/container_port.rb
index 88eee3058d..0e61218a57 100644
--- a/services/api/app/controllers/arvados/v1/humans_controller.rb
+++ b/services/api/app/models/container_port.rb
@@ -2,5 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-class Arvados::V1::HumansController < ApplicationController
+class ContainerPort < ApplicationRecord
+ self.table_name = 'container_ports'
end
diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb
index f5789f31f6..91c5cae774 100644
--- a/services/api/app/models/container_request.rb
+++ b/services/api/app/models/container_request.rb
@@ -28,16 +28,19 @@ class ContainerRequest < ArvadosModel
attribute :secret_mounts, :jsonbHash, default: {}
attribute :output_storage_classes, :jsonbArray, default: lambda { Rails.configuration.DefaultStorageClasses }
attribute :output_properties, :jsonbHash, default: {}
+ attribute :published_ports, :jsonbHash, default: {}
serialize :environment, Hash
serialize :mounts, Hash
serialize :runtime_constraints, Hash
serialize :command, Array
serialize :scheduling_parameters, Hash
+ serialize :output_glob, Array
after_find :fill_container_defaults_after_find
after_initialize { @state_was_when_initialized = self.state_was } # see finalize_if_needed
before_validation :fill_field_defaults, :if => :new_record?
+ before_validation :fill_cuda_to_gpu
before_validation :fill_container_defaults
validates :command, :container_image, :output_path, :cwd, :presence => true
validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
@@ -49,6 +52,7 @@ class ContainerRequest < ArvadosModel
validate :check_update_whitelist
validate :secret_mounts_key_conflict
validate :validate_runtime_token
+ validate :validate_published_ports
after_validation :scrub_secrets
after_validation :set_preemptible
after_validation :set_container
@@ -73,6 +77,7 @@ class ContainerRequest < ArvadosModel
t.add :name
t.add :output_name
t.add :output_path
+ t.add :output_glob
t.add :output_uuid
t.add :output_ttl
t.add :priority
@@ -85,6 +90,8 @@ class ContainerRequest < ArvadosModel
t.add :output_storage_classes
t.add :output_properties
t.add :cumulative_cost
+ t.add :service
+ t.add :published_ports
end
# Supported states for a container request
@@ -104,10 +111,10 @@ class ContainerRequest < ArvadosModel
AttrsPermittedAlways = [:owner_uuid, :state, :name, :description, :properties]
AttrsPermittedBeforeCommit = [:command, :container_count_max,
:container_image, :cwd, :environment, :filters, :mounts,
- :output_path, :priority, :runtime_token,
+ :output_path, :output_glob, :priority, :runtime_token,
:runtime_constraints, :state, :container_uuid, :use_existing,
:scheduling_parameters, :secret_mounts, :output_name, :output_ttl,
- :output_storage_classes, :output_properties]
+ :output_storage_classes, :output_properties, :service, :published_ports]
def self.any_preemptible_instances?
Rails.configuration.InstanceTypes.any? do |k, v|
@@ -115,10 +122,6 @@ class ContainerRequest < ArvadosModel
end
end
- def self.limit_index_columns_read
- ["mounts"]
- end
-
def logged_attributes
super.except('secret_mounts', 'runtime_token')
end
@@ -307,7 +310,7 @@ class ContainerRequest < ArvadosModel
end
def self.full_text_searchable_columns
- super - ["mounts", "secret_mounts", "secret_mounts_md5", "runtime_token", "output_storage_classes"]
+ super - ["mounts", "secret_mounts", "secret_mounts_md5", "runtime_token", "output_storage_classes", "output_glob", "service", "published_ports"]
end
def set_priority_zero
@@ -326,9 +329,28 @@ class ContainerRequest < ArvadosModel
self.container_count_max ||= Rails.configuration.Containers.MaxRetryAttempts
self.scheduling_parameters ||= {}
self.output_ttl ||= 0
+ self.output_glob ||= []
self.priority ||= 0
end
+ def fill_cuda_to_gpu
+ ContainerRequest.translate_cuda_to_gpu attributes['runtime_constraints']
+ end
+
+ def self.translate_cuda_to_gpu rc
+ if rc['cuda'] && rc['cuda']['device_count'] > 0
+ # Legacy API to request Nvidia GPUs, convert it so downstream
+ # code only has to handle generic GPU requests.
+ rc['gpu'] = {
+ 'device_count' => rc['cuda']['device_count'],
+ 'driver_version' => rc['cuda']['driver_version'],
+ 'hardware_target' => [rc['cuda']['hardware_capability']],
+ 'stack' => 'cuda',
+ 'vram' => 0,
+ }
+ end
+ end
+
def set_container
if (container_uuid_changed? and
not current_user.andand.is_admin and
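The new `translate_cuda_to_gpu` above folds a legacy `cuda` request into the generic `gpu` constraint so downstream code only handles one shape. A runnable restatement — the hash keys mirror the diff, but this is illustrative, not the shipped method:

```ruby
def translate_cuda_to_gpu(rc)
  cuda = rc['cuda']
  if cuda && cuda['device_count'] > 0
    rc['gpu'] = {
      'device_count'    => cuda['device_count'],
      'driver_version'  => cuda['driver_version'],
      'hardware_target' => [cuda['hardware_capability']],
      'stack'           => 'cuda',
      'vram'            => 0,
    }
  end
  rc
end

rc = {'cuda' => {'device_count' => 1, 'driver_version' => '11.0',
                 'hardware_capability' => '8.0'}}
p translate_cuda_to_gpu(rc)['gpu']
# => {"device_count"=>1, "driver_version"=>"11.0",
#     "hardware_target"=>["8.0"], "stack"=>"cuda", "vram"=>0}
```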
@@ -428,6 +450,46 @@ class ContainerRequest < ArvadosModel
end
end
end
+
+ if runtime_constraints['gpu']
+ k = 'stack'
+ v = runtime_constraints['gpu'][k]
+ if not [nil, '', 'cuda', 'rocm'].include? v
+ errors.add(:runtime_constraints,
+ "[gpu.#{k}]=#{v.inspect} must be one of 'cuda' or 'rocm' or be empty")
+ end
+
+ ['device_count', 'vram'].each do |k|
+ v = runtime_constraints['gpu'][k]
+ if !v.is_a?(Integer) || v < 0
+ errors.add(:runtime_constraints,
+ "[gpu.#{k}]=#{v.inspect} must be a positive or zero integer")
+ end
+ end
+
+ if runtime_constraints['gpu']['device_count'] > 0
+ k = 'driver_version'
+ v = runtime_constraints['gpu'][k]
+ if !v.is_a?(String) || v.to_f == 0.0
+ errors.add(:runtime_constraints,
+ "[gpu.#{k}]=#{v.inspect} must be a string in format 'X.Y'")
+ end
+
+ k = 'hardware_target'
+ v = runtime_constraints['gpu'][k]
+ if v.is_a?(Array)
+ v.each do |tgt|
+ if !tgt.is_a?(String)
+ errors.add(:runtime_constraints,
+ "[gpu.#{k}]=#{v.inspect} must be an array of strings")
+ end
+ end
+ else
+ errors.add(:runtime_constraints,
+ "[gpu.#{k}]=#{v.inspect} must be an array of strings")
+ end
+ end
+ end
end
end
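Putting the validation rules above together, a `gpu` runtime constraint that passes looks like this (hedged example; the values are invented, the required keys follow the checks above):

```ruby
runtime_constraints = {
  'gpu' => {
    'stack'           => 'cuda',   # 'cuda', 'rocm', or '' (empty disables)
    'device_count'    => 1,        # integer >= 0; > 0 activates the checks below
    'driver_version'  => '11.4',   # "X.Y" string, required when device_count > 0
    'hardware_target' => ['8.6'],  # array of strings, required when device_count > 0
    'vram'            => 8 << 30,  # bytes, integer >= 0
  },
}
```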
@@ -442,6 +504,11 @@ class ContainerRequest < ArvadosModel
errors.add(:environment, "must be an map of String to String but has entry #{k.class} to #{v.class}")
end
end
+ output_glob.each do |g|
+ if !g.is_a? String
+ errors.add(:output_glob, "must be an array of strings but has entry #{g.class}")
+ end
+ end
[:mounts, :secret_mounts].each do |m|
self[m].each do |k, v|
if !k.is_a?(String) || !v.is_a?(Hash)
@@ -564,6 +631,45 @@ class ContainerRequest < ArvadosModel
end
end
+ def validate_published_ports
+ if self.service and self.use_existing
+ errors.add :use_existing, "cannot be true if 'service' is true"
+ end
+
+ self.published_ports.each do |k,v|
+ if !/^[0-9]+$/.match?(k)
+ errors.add :published_ports, "entry #{k} must be a decimal port number in the range 1-65535"
+ next
+ end
+ i = k.to_i
+ if i < 1 || i > 65535
+ errors.add :published_ports, "entry #{k} must be a decimal port number in the range 1-65535"
+ next
+ end
+
+ if v.is_a?(Hash)
+ v.each do |vkey, _|
+ if !["access", "label", "initial_path"].include? vkey
+ errors.add :published_ports, "entry #{k} has invalid key: #{vkey.inspect}"
+ end
+ end
+ if v["access"] != "private" && v["access"] != "public"
+ errors.add :published_ports, "entry #{k} 'access' must be one of 'public' or 'private' but was: #{v["access"].inspect}"
+ end
+ if !v["label"].is_a?(String)
+ errors.add :published_ports, "entry #{k} 'label' must be a string but was: #{v["label"].inspect}"
+ elsif v["label"].empty?
+ errors.add :published_ports, "entry #{k} 'label' cannot be empty"
+ end
+ if !v["initial_path"].is_a?(String)
+ errors.add :published_ports, "entry #{k} 'initial_path' must be a string but was: #{v["initial_path"].inspect}"
+ end
+ else
+ errors.add :published_ports, "entry #{k} must be an hash: #{v.inspect}"
+ end
+ end
+ end
+
def scrub_secrets
if self.state == Final
self.secret_mounts = {}
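`validate_published_ports` above accepts only decimal port-number keys (1-65535) mapping to hashes with exactly the keys `access`, `label`, and `initial_path`. A request fragment that passes (values invented):

```ruby
published_ports = {
  "8888" => {
    "access"       => "private",  # or "public"
    "label"        => "Jupyter",  # required, non-empty String
    "initial_path" => "/lab",     # String; may be empty
  },
}
```

Note also the rule enforced at the top of the validator: a request with `service` set cannot also set `use_existing`.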
diff --git a/services/api/app/models/credential.rb b/services/api/app/models/credential.rb
new file mode 100644
index 0000000000..98bac50522
--- /dev/null
+++ b/services/api/app/models/credential.rb
@@ -0,0 +1,62 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Credential < ArvadosModel
+ include HasUuid
+ include KindAndEtag
+ include CommonApiTemplate
+
+ attribute :scopes, :jsonbArray, default: []
+
+ after_create :add_credential_manage_link
+
+ api_accessible :user, extend: :common do |t|
+ t.add :name
+ t.add :description
+ t.add :credential_class
+ t.add :scopes
+ t.add :external_id
+ t.add :expires_at
+ end
+
+ def updated_at=(v)
+ # no-op
+ end
+
+ def logged_attributes
+ super.except('secret')
+ end
+
+ def self.full_text_searchable_columns
+ super - ["credential_class", "external_id", "secret", "expires_at"]
+ end
+
+ def self.searchable_columns *args
+ super - ["secret"]
+ end
+
+ def ensure_owner_uuid_is_permitted
+ if new_record?
+ @requested_manager_uuid = owner_uuid
+ self.owner_uuid = system_user_uuid
+ return true
+ end
+
+ if self.owner_uuid != system_user_uuid
+ raise "Owner uuid for credential must be system user"
+ end
+ end
+
+ def add_credential_manage_link
+ if @requested_manager_uuid
+ act_as_system_user do
+ Link.create!(tail_uuid: @requested_manager_uuid,
+ head_uuid: self.uuid,
+ link_class: "permission",
+ name: "can_manage")
+ end
+ end
+ end
+
+end
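In the new Credential model, `ensure_owner_uuid_is_permitted` reassigns every new record to the system user, and `add_credential_manage_link` grants the originally requested owner `can_manage` through a permission link instead. A hypothetical console sketch — all names and attribute values here are invented:

```ruby
cred = Credential.create!(
  owner_uuid: 'zzzzz-tpzed-examplexample1',  # requester; becomes the link tail
  name: 'registry-login',
  credential_class: 'basic_auth',            # hypothetical class name
  external_id: 'registry-bot',
  secret: 's3cret',
)
cred.owner_uuid
# => UUID of the system user, not the requester
Link.where(head_uuid: cred.uuid, link_class: 'permission', name: 'can_manage').
  pluck(:tail_uuid)
# => ["zzzzz-tpzed-examplexample1"]
```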
diff --git a/services/api/app/models/database_seeds.rb b/services/api/app/models/database_seeds.rb
index e0ae850ae7..c31c2f5c63 100644
--- a/services/api/app/models/database_seeds.rb
+++ b/services/api/app/models/database_seeds.rb
@@ -4,6 +4,11 @@
require 'update_permissions'
+# Seed database with default/initial data if needed.
+#
+# This runs before db:migrate in
+# build/rails-package-scripts/postinst.sh so it must only do things
+# that are safe in an in-use/production database.
class DatabaseSeeds
extend CurrentApiClient
def self.install
@@ -14,8 +19,6 @@ class DatabaseSeeds
anonymous_group
anonymous_group_read_permission
anonymous_user
- anonymous_user_token_api_client
- system_root_token_api_client
public_project_group
public_project_read_permission
empty_collection
diff --git a/services/api/app/models/group.rb b/services/api/app/models/group.rb
index d4c81fe9d1..6d30fe1bab 100644
--- a/services/api/app/models/group.rb
+++ b/services/api/app/models/group.rb
@@ -49,6 +49,30 @@ class Group < ArvadosModel
t.add :can_manage
end
+ def default_delete_after_trash_interval
+ if self.group_class == 'role'
+ ActiveSupport::Duration.build(0)
+ else
+ super
+ end
+ end
+
+ def minimum_delete_after_trash_interval
+ if self.group_class == 'role'
+ ActiveSupport::Duration.build(0)
+ else
+ super
+ end
+ end
+
+ def validate_trash_and_delete_timing
+ if self.group_class == 'role' && delete_at && delete_at != trash_at
+ errors.add :delete_at, "must be == trash_at for role groups"
+ else
+ super
+ end
+ end
+
# check if admins are allowed to make changes to the project, e.g. it
# isn't trashed or frozen.
def admin_change_permitted
@@ -171,10 +195,17 @@ with temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)
[self.uuid,
TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at,
self.trash_at])
+
if frozen_descendants.any?
raise ArgumentError.new("cannot trash project containing frozen project #{frozen_descendants[0]["uuid"]}")
end
+ if self.trash_at and self.group_class == 'role'
+ # if this is a role group that is now in the trash, it loses all
+ # of its outgoing permissions.
+ Link.where(link_class: 'permission', tail_uuid: self.uuid).destroy_all
+ end
+
ActiveRecord::Base.connection.exec_query(%{
with temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp)),
@@ -231,7 +262,7 @@ insert into frozen_groups (uuid) select uuid from temptable where is_frozen on c
def before_ownership_change
if owner_uuid_changed? and !self.owner_uuid_was.nil?
- MaterializedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all
+ ComputedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all
update_permissions self.owner_uuid_was, self.uuid, REVOKE_PERM
end
end
@@ -243,7 +274,8 @@ insert into frozen_groups (uuid) select uuid from temptable where is_frozen on c
end
def clear_permissions_trash_frozen
- MaterializedPermission.where(target_uuid: uuid).delete_all
+ Link.where(link_class: 'permission', tail_uuid: self.uuid).destroy_all
+ ComputedPermission.where(target_uuid: uuid).delete_all
ActiveRecord::Base.connection.exec_delete(
"delete from trashed_groups where group_uuid=$1",
"Group.clear_permissions_trash_frozen",
diff --git a/services/api/app/models/human.rb b/services/api/app/models/human.rb
deleted file mode 100644
index 68972825f9..0000000000
--- a/services/api/app/models/human.rb
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Human < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :properties, Hash
-
- api_accessible :user, extend: :common do |t|
- t.add :properties
- end
-end
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
deleted file mode 100644
index 029a313285..0000000000
--- a/services/api/app/models/job.rb
+++ /dev/null
@@ -1,564 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-#
-#
-# Legacy jobs API aka crunch v1
-#
-# This is superceded by containers / container_requests (aka crunch v2)
-#
-# Arvados installations since the end of 2017 should have never
-# used jobs, and are unaffected by this change.
-#
-# So that older Arvados sites don't lose access to legacy records, the
-# API has been converted to read-only. Creating and updating jobs
-# (and related types job_task, pipeline_template and
-# pipeline_instance) is disabled and much of the business logic
-# related has been removed, along with the crunch-dispatch.rb and
-# various other code specific to the jobs API.
-#
-# If you need to resurrect any of this code, here is the last commit
-# on master before the branch removing jobs API support:
-#
-# Wed Aug 7 14:49:38 2019 -0400 07d92519438a592d531f2c7558cd51788da262ca
-
-require 'log_reuse_info'
-require 'safe_json'
-
-class Job < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- extend CurrentApiClient
- extend LogReuseInfo
- serialize :components, Hash
- serialize :script_parameters, Hash
- serialize :runtime_constraints, Hash
- serialize :tasks_summary, Hash
- before_create :ensure_unique_submit_id
- before_validation :set_priority
- before_validation :update_state_from_old_state_attrs
- before_validation :update_script_parameters_digest
- validate :ensure_script_version_is_commit
- validate :find_docker_image_locator
- validate :find_arvados_sdk_version
- validate :validate_status
- validate :validate_state_change
- validate :ensure_no_collection_uuids_in_script_params
- before_save :tag_version_in_internal_repository
- before_save :update_timestamps_when_state_changes
- before_create :create_disabled
- before_update :update_disabled
-
- has_many(:nodes, foreign_key: 'job_uuid', primary_key: 'uuid')
-
- class SubmitIdReused < RequestError
- end
-
- api_accessible :user, extend: :common do |t|
- t.add :submit_id
- t.add :priority
- t.add :script
- t.add :script_parameters
- t.add :script_version
- t.add :cancelled_at
- t.add :cancelled_by_client_uuid
- t.add :cancelled_by_user_uuid
- t.add :started_at
- t.add :finished_at
- t.add :output
- t.add :success
- t.add :running
- t.add :state
- t.add :is_locked_by_uuid
- t.add :log
- t.add :runtime_constraints
- t.add :tasks_summary
- t.add :nondeterministic
- t.add :repository
- t.add :supplied_script_version
- t.add :arvados_sdk_version
- t.add :docker_image_locator
- t.add :queue_position
- t.add :node_uuids
- t.add :description
- t.add :components
- end
-
- # Supported states for a job
- States = [
- (Queued = 'Queued'),
- (Running = 'Running'),
- (Cancelled = 'Cancelled'),
- (Failed = 'Failed'),
- (Complete = 'Complete'),
- ]
-
- after_initialize do
- @need_crunch_dispatch_trigger = false
- end
-
- def self.limit_index_columns_read
- ["components"]
- end
-
- def self.protected_attributes
- [:arvados_sdk_version, :docker_image_locator]
- end
-
- def assert_finished
- update(finished_at: finished_at || db_current_time,
- success: success.nil? ? false : success,
- running: false)
- end
-
- def node_uuids
- nodes.map(&:uuid)
- end
-
- def self.queue
- self.where('state = ?', Queued).order('priority desc, created_at')
- end
-
- def queue_position
- # We used to report this accurately, but the implementation made queue
- # API requests O(n**2) for the size of the queue. See #8800.
- # We've soft-disabled it because it's not clear we even want this
- # functionality: now that we have Node Manager with support for multiple
- # node sizes, "queue position" tells you very little about when a job will
- # run.
- state == Queued ? 0 : nil
- end
-
- def self.running
- self.where('running = ?', true).
- order('priority desc, created_at')
- end
-
- def lock locked_by_uuid
- with_lock do
- unless self.state == Queued and self.is_locked_by_uuid.nil?
- raise AlreadyLockedError
- end
- self.state = Running
- self.is_locked_by_uuid = locked_by_uuid
- self.save!
- end
- end
-
- def update_script_parameters_digest
- self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
- end
-
- def self.searchable_columns operator
- super - ["script_parameters_digest"]
- end
-
- def self.full_text_searchable_columns
- super - ["script_parameters_digest"]
- end
-
- def self.load_job_specific_filters attrs, orig_filters, read_users
- # Convert Job-specific @filters entries into general SQL filters.
- script_info = {"repository" => nil, "script" => nil}
- git_filters = Hash.new do |hash, key|
- hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
- end
- filters = []
- orig_filters.each do |attr, operator, operand|
- if (script_info.has_key? attr) and (operator == "=")
- if script_info[attr].nil?
- script_info[attr] = operand
- elsif script_info[attr] != operand
- raise ArgumentError.new("incompatible #{attr} filters")
- end
- end
- case operator
- when "in git"
- git_filters[attr]["min_version"] = operand
- when "not in git"
- git_filters[attr]["exclude_versions"] += Array.wrap(operand)
- when "in docker", "not in docker"
- image_hashes = Array.wrap(operand).flat_map do |search_term|
- image_search, image_tag = search_term.split(':', 2)
- Collection.
- find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
- map(&:portable_data_hash)
- end
- filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
- else
- filters << [attr, operator, operand]
- end
- end
-
- # Build a real script_version filter from any "not? in git" filters.
- git_filters.each_pair do |attr, filter|
- case attr
- when "script_version"
- script_info.each_pair do |key, value|
- if value.nil?
- raise ArgumentError.new("script_version filter needs #{key} filter")
- end
- end
- filter["repository"] = script_info["repository"]
- if attrs[:script_version]
- filter["max_version"] = attrs[:script_version]
- else
- # Using HEAD, set earlier by the hash default, is fine.
- end
- when "arvados_sdk_version"
- filter["repository"] = "arvados"
- else
- raise ArgumentError.new("unknown attribute for git filter: #{attr}")
- end
- revisions = CommitsHelper::find_commit_range(filter["repository"],
- filter["min_version"],
- filter["max_version"],
- filter["exclude_versions"])
- if revisions.empty?
- raise ArgumentError.
- new("error searching #{filter['repository']} from " +
- "'#{filter['min_version']}' to '#{filter['max_version']}', " +
- "excluding #{filter['exclude_versions']}")
- end
- filters.append([attr, "in", revisions])
- end
-
- filters
- end
-
- def self.default_git_filters(attr_name, repo_name, refspec)
- # Add a filter to @filters for `attr_name` = the latest commit available
- # in `repo_name` at `refspec`. No filter is added if refspec can't be
- # resolved.
- commits = CommitsHelper::find_commit_range(repo_name, nil, refspec, nil)
- if commit_hash = commits.first
- [[attr_name, "=", commit_hash]]
- else
- []
- end
- end
-
- def cancel(cascade: false, need_transaction: true)
- raise "No longer supported"
- end
-
- protected
-
- def self.sorted_hash_digest h
- Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
- end
-
- def foreign_key_attributes
- super + %w(output log)
- end
-
- def skip_uuid_read_permission_check
- super + %w(cancelled_by_client_uuid)
- end
-
- def skip_uuid_existence_check
- super + %w(output log)
- end
-
- def set_priority
- if self.priority.nil?
- self.priority = 0
- end
- true
- end
-
- def ensure_script_version_is_commit
- if state == Running
- # Apparently client has already decided to go for it. This is
- # needed to run a local job using a local working directory
- # instead of a commit-ish.
- return true
- end
- if new_record? or repository_changed? or script_version_changed?
- sha1 = CommitsHelper::find_commit_range(repository,
- nil, script_version, nil).first
- if not sha1
- errors.add :script_version, "#{script_version} does not resolve to a commit"
- return false
- end
- if supplied_script_version.nil? or supplied_script_version.empty?
- self.supplied_script_version = script_version
- end
- self.script_version = sha1
- end
- true
- end
-
- def tag_version_in_internal_repository
- if state == Running
- # No point now. See ensure_script_version_is_commit.
- true
- elsif errors.any?
- # Won't be saved, and script_version might not even be valid.
- true
- elsif new_record? or repository_changed? or script_version_changed?
- uuid_was = uuid
- begin
- assign_uuid
- CommitsHelper::tag_in_internal_repository repository, script_version, uuid
- rescue
- self.uuid = uuid_was
- raise
- end
- end
- end
-
- def ensure_unique_submit_id
- if !submit_id.nil?
- if Job.where('submit_id=?',self.submit_id).first
- raise SubmitIdReused.new
- end
- end
- true
- end
-
- def resolve_runtime_constraint(key, attr_sym)
- if ((runtime_constraints.is_a? Hash) and
- (search = runtime_constraints[key]))
- ok, result = yield search
- else
- ok, result = true, nil
- end
- if ok
- send("#{attr_sym}=".to_sym, result)
- else
- errors.add(attr_sym, result)
- end
- ok
- end
-
- def find_arvados_sdk_version
- resolve_runtime_constraint("arvados_sdk_version",
- :arvados_sdk_version) do |git_search|
- commits = CommitsHelper::find_commit_range("arvados",
- nil, git_search, nil)
- if commits.empty?
- [false, "#{git_search} does not resolve to a commit"]
- elsif not runtime_constraints["docker_image"]
- [false, "cannot be specified without a Docker image constraint"]
- else
- [true, commits.first]
- end
- end
- end
-
- def find_docker_image_locator
- if runtime_constraints.is_a? Hash and Rails.configuration.Containers.JobsAPI.DefaultDockerImage != ""
- runtime_constraints['docker_image'] ||=
- Rails.configuration.Containers.JobsAPI.DefaultDockerImage
- end
-
- resolve_runtime_constraint("docker_image",
- :docker_image_locator) do |image_search|
- image_tag = runtime_constraints['docker_image_tag']
- if coll = Collection.for_latest_docker_image(image_search, image_tag)
- [true, coll.portable_data_hash]
- else
- [false, "not found for #{image_search}"]
- end
- end
- end
-
- def permission_to_update
- if is_locked_by_uuid_was and !(current_user and
- (current_user.uuid == is_locked_by_uuid_was or
- current_user.uuid == system_user.uuid))
- if script_changed? or
- script_parameters_changed? or
- script_version_changed? or
- (!cancelled_at_was.nil? and
- (cancelled_by_client_uuid_changed? or
- cancelled_by_user_uuid_changed? or
- cancelled_at_changed?)) or
- started_at_changed? or
- finished_at_changed? or
- running_changed? or
- success_changed? or
- output_changed? or
- log_changed? or
- tasks_summary_changed? or
- (state_changed? && state != Cancelled) or
- components_changed?
- logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
- return false
- end
- end
- if !is_locked_by_uuid_changed?
- super
- else
- if !current_user
- logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
- false
- elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
- logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
- false
- elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
- logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
- false
- else
- super
- end
- end
- end
-
- def update_modified_by_fields
- if self.cancelled_at_changed?
- # Ensure cancelled_at cannot be set to arbitrary non-now times,
- # or changed once it is set.
- if self.cancelled_at and not self.cancelled_at_was
- self.cancelled_at = db_current_time
- self.cancelled_by_user_uuid = current_user.uuid
- self.cancelled_by_client_uuid = current_api_client.andand.uuid
- @need_crunch_dispatch_trigger = true
- else
- self.cancelled_at = self.cancelled_at_was
- self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
- self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
- end
- end
- super
- end
-
- def update_timestamps_when_state_changes
- return if not (state_changed? or new_record?)
-
- case state
- when Running
- self.started_at ||= db_current_time
- when Failed, Complete
- self.finished_at ||= db_current_time
- when Cancelled
- self.cancelled_at ||= db_current_time
- end
-
- # TODO: Remove the following case block when old "success" and
- # "running" attrs go away. Until then, this ensures we still
- # expose correct success/running flags to older clients, even if
- # some new clients are writing only the new state attribute.
- case state
- when Queued
- self.running = false
- self.success = nil
- when Running
- self.running = true
- self.success = nil
- when Cancelled, Failed
- self.running = false
- self.success = false
- when Complete
- self.running = false
- self.success = true
- end
- self.running ||= false # Default to false instead of nil.
-
- @need_crunch_dispatch_trigger = true
-
- true
- end
-
- def update_state_from_old_state_attrs
- # If a client has touched the legacy state attrs, update the
- # "state" attr to agree with the updated values of the legacy
- # attrs.
- #
- # TODO: Remove this method when old "success" and "running" attrs
- # go away.
- if cancelled_at_changed? or
- success_changed? or
- running_changed? or
- state.nil?
- if cancelled_at
- self.state = Cancelled
- elsif success == false
- self.state = Failed
- elsif success == true
- self.state = Complete
- elsif running == true
- self.state = Running
- else
- self.state = Queued
- end
- end
- true
- end
-
- def validate_status
- if self.state.in?(States)
- true
- else
- errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
- false
- end
- end
-
- def validate_state_change
- ok = true
- if self.state_changed?
- ok = case self.state_was
- when nil
- # state isn't set yet
- true
- when Queued
- # Permit going from queued to any state
- true
- when Running
- # From running, may only transition to a finished state
- [Complete, Failed, Cancelled].include? self.state
- when Complete, Failed, Cancelled
- # Once in a finished state, don't permit any more state changes
- false
- else
- # Any other state transition is also invalid
- false
- end
- if not ok
- errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
- end
- end
- ok
- end
-
- def ensure_no_collection_uuids_in_script_params
- # Fail validation if any script_parameters field includes a string containing a
- # collection uuid pattern.
- if self.script_parameters_changed?
- if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
- self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
- return false
- end
- end
- true
- end
-
- # recursive_hash_search searches recursively through hashes and
- # arrays in 'thing' for string fields matching regular expression
- # 'pattern'. Returns true if pattern is found, false otherwise.
- def recursive_hash_search thing, pattern
- if thing.is_a? Hash
- thing.each do |k, v|
- return true if recursive_hash_search v, pattern
- end
- elsif thing.is_a? Array
- thing.each do |k|
- return true if recursive_hash_search k, pattern
- end
- elsif thing.is_a? String
- return true if thing.match pattern
- end
- false
- end
-
- def create_disabled
- raise "Disabled"
- end
-
- def update_disabled
- raise "Disabled"
- end
-end
diff --git a/services/api/app/models/job_task.rb b/services/api/app/models/job_task.rb
deleted file mode 100644
index b181e76ccf..0000000000
--- a/services/api/app/models/job_task.rb
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class JobTask < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :parameters, Hash
- before_create :set_default_qsequence
- after_update :delete_created_job_tasks_if_failed
- before_create :create_disabled
- before_update :update_disabled
-
- api_accessible :user, extend: :common do |t|
- t.add :job_uuid
- t.add :created_by_job_task_uuid
- t.add :sequence
- t.add :qsequence
- t.add :parameters
- t.add :output
- t.add :progress
- t.add :success
- t.add :started_at
- t.add :finished_at
- end
-
- protected
-
- def delete_created_job_tasks_if_failed
- if self.success == false and self.success != self.success_was
- JobTask.delete_all ['created_by_job_task_uuid = ?', self.uuid]
- end
- end
-
- def set_default_qsequence
- self.qsequence ||= self.class.connection.
- select_value("SELECT nextval('job_tasks_qsequence_seq')")
- end
-
- def create_disabled
- raise "Disabled"
- end
-
- def update_disabled
- raise "Disabled"
- end
-end
diff --git a/services/api/app/models/keep_disk.rb b/services/api/app/models/keep_disk.rb
deleted file mode 100644
index 589936f845..0000000000
--- a/services/api/app/models/keep_disk.rb
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class KeepDisk < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- before_validation :ensure_ping_secret
-
- api_accessible :user, extend: :common do |t|
- t.add :node_uuid
- t.add :filesystem_uuid
- t.add :bytes_total
- t.add :bytes_free
- t.add :is_readable
- t.add :is_writable
- t.add :last_read_at
- t.add :last_write_at
- t.add :last_ping_at
- t.add :service_host
- t.add :service_port
- t.add :service_ssl_flag
- t.add :keep_service_uuid
- end
- api_accessible :superuser, :extend => :user do |t|
- t.add :ping_secret
- end
-
- def foreign_key_attributes
- super.reject { |a| a == "filesystem_uuid" }
- end
-
- def ping(o)
- raise "must have :service_host and :ping_secret" unless o[:service_host] and o[:ping_secret]
-
- if o[:ping_secret] != self.ping_secret
- logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.ping_secret}\""
- return nil
- end
-
- @bypass_arvados_authorization = true
- self.update!(o.select { |k,v|
- [:bytes_total,
- :bytes_free,
- :is_readable,
- :is_writable,
- :last_read_at,
- :last_write_at
- ].collect(&:to_s).index k
- }.merge(last_ping_at: db_current_time))
- end
-
- def service_host
- KeepService.find_by_uuid(self.keep_service_uuid).andand.service_host
- end
-
- def service_port
- KeepService.find_by_uuid(self.keep_service_uuid).andand.service_port
- end
-
- def service_ssl_flag
- KeepService.find_by_uuid(self.keep_service_uuid).andand.service_ssl_flag
- end
-
- protected
-
- def ensure_ping_secret
- self.ping_secret ||= rand(2**256).to_s(36)
- end
-
- def permission_to_update
- @bypass_arvados_authorization or super
- end
-
- def permission_to_create
- current_user and current_user.is_admin
- end
-end
diff --git a/services/api/app/models/link.rb b/services/api/app/models/link.rb
index 2eb6b88a0c..c7969d07ec 100644
--- a/services/api/app/models/link.rb
+++ b/services/api/app/models/link.rb
@@ -13,6 +13,7 @@ class Link < ArvadosModel
validate :name_links_are_obsolete
validate :permission_to_attach_to_objects
+ validate :validate_published_port, :if => Proc.new { link_class == 'published_port' }
before_update :restrict_alter_permissions
before_update :apply_max_overlapping_permissions
before_create :apply_max_overlapping_permissions
@@ -221,6 +222,12 @@ class Link < ArvadosModel
end
end
+ def validate_published_port
+ if head_uuid.length != 27 || head_uuid[6..10] != ContainerRequest.uuid_prefix
+ errors.add('head_uuid', 'must be a container request UUID')
+ end
+ end
+
# A user is permitted to create, update or modify a permission link
# if and only if they have "manage" permission on the object
# indicated by the permission link's head_uuid.
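The new `validate_published_port` above only checks the head UUID's shape: 27 characters, with the ContainerRequest type prefix in character positions 6-10. A plain-Ruby restatement, assuming `xvhdp` as the container request prefix:

```ruby
def container_request_uuid?(uuid)
  uuid.length == 27 && uuid[6..10] == 'xvhdp'
end

p container_request_uuid?('zzzzz-xvhdp-012345678901234')  # => true
p container_request_uuid?('zzzzz-dz642-012345678901234')  # => false (a container, not a request)
```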
diff --git a/services/api/app/models/materialized_permission.rb b/services/api/app/models/materialized_permission.rb
deleted file mode 100644
index 24ba6737ae..0000000000
--- a/services/api/app/models/materialized_permission.rb
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class MaterializedPermission < ApplicationRecord
-end
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
deleted file mode 100644
index f384ba582b..0000000000
--- a/services/api/app/models/node.rb
+++ /dev/null
@@ -1,295 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'tempfile'
-
-class Node < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
-
-  # PostgreSQL JSONB columns should NOT be declared as serialized; Rails 5
-  # already knows how to properly treat them.
- attribute :properties, :jsonbHash, default: {}
- attribute :info, :jsonbHash, default: {}
-
- before_validation :ensure_ping_secret
- after_update :dns_server_update
-
- # Only a controller can figure out whether or not the current API tokens
- # have access to the associated Job. They're expected to set
- # job_readable=true if the Job UUID can be included in the API response.
- belongs_to :job,
- foreign_key: 'job_uuid',
- primary_key: 'uuid',
- optional: true
- attr_accessor :job_readable
-
- UNUSED_NODE_IP = '127.40.4.0'
- MAX_VMS = 3
-
- api_accessible :user, :extend => :common do |t|
- t.add :hostname
- t.add :domain
- t.add :ip_address
- t.add :last_ping_at
- t.add :slot_number
- t.add :status
- t.add :api_job_uuid, as: :job_uuid
- t.add :crunch_worker_state
- t.add :properties
- end
- api_accessible :superuser, :extend => :user do |t|
- t.add :first_ping_at
- t.add :info
- t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers.keys }, :as => :nameservers
- end
-
- after_initialize do
- @bypass_arvados_authorization = false
- end
-
- def domain
- super || Rails.configuration.Containers.SLURM.Managed.ComputeNodeDomain
- end
-
- def api_job_uuid
- job_readable ? job_uuid : nil
- end
-
- def crunch_worker_state
- return 'down' if slot_number.nil?
- case self.info.andand['slurm_state']
- when 'alloc', 'comp', 'mix', 'drng'
- 'busy'
- when 'idle'
- 'idle'
- else
- 'down'
- end
- end
-
- def status
- if !self.last_ping_at
- if db_current_time - self.created_at > 5.minutes
- 'startup-fail'
- else
- 'pending'
- end
- elsif db_current_time - self.last_ping_at > 1.hours
- 'missing'
- else
- 'running'
- end
- end
-
- def ping(o)
- raise "must have :ip and :ping_secret" unless o[:ip] and o[:ping_secret]
-
- if o[:ping_secret] != self.info['ping_secret']
- logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.info['ping_secret']}\""
- raise ArvadosModel::UnauthorizedError.new("Incorrect ping_secret")
- end
-
- current_time = db_current_time
- self.last_ping_at = current_time
-
- @bypass_arvados_authorization = true
-
- # Record IP address
- if self.ip_address.nil?
- logger.info "#{self.uuid} ip_address= #{o[:ip]}"
- self.ip_address = o[:ip]
- self.first_ping_at = current_time
- end
-
- # Record instance ID if not already known
- if o[:ec2_instance_id]
- if !self.info['ec2_instance_id']
- self.info['ec2_instance_id'] = o[:ec2_instance_id]
- elsif self.info['ec2_instance_id'] != o[:ec2_instance_id]
- logger.debug "Multiple nodes have credentials for #{self.uuid}"
- raise "#{self.uuid} is already running at #{self.info['ec2_instance_id']} so rejecting ping from #{o[:ec2_instance_id]}"
- end
- end
-
- assign_slot
-
- # Record other basic stats
- ['total_cpu_cores', 'total_ram_mb', 'total_scratch_mb'].each do |key|
- if value = (o[key] or o[key.to_sym])
- self.properties[key] = value.to_i
- else
- self.properties.delete(key)
- end
- end
-
- save!
- end
-
- def assign_slot
- return if self.slot_number.andand > 0
- while true
- self.slot_number = self.class.available_slot_number
- if self.slot_number.nil?
- raise "No available node slots"
- end
- begin
- save!
- return assign_hostname
- rescue ActiveRecord::RecordNotUnique
- # try again
- end
- end
- end
-
- protected
-
- def assign_hostname
- if self.hostname.nil? and Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
- self.hostname = self.class.hostname_for_slot(self.slot_number)
- end
- end
-
- def self.available_slot_number
- # Join the sequence 1..max with the nodes table. Return the first
- # (i.e., smallest) value that doesn't match the slot_number of any
- # existing node.
- connection.exec_query('SELECT n FROM generate_series(1, $1) AS slot(n)
- LEFT JOIN nodes ON n=slot_number
- WHERE slot_number IS NULL
- LIMIT 1',
- # query label:
- 'Node.available_slot_number',
- # bind vars:
- [MAX_VMS],
- ).rows.first.andand.first
- end
-
- def ensure_ping_secret
- self.info['ping_secret'] ||= rand(2**256).to_s(36)
- end
-
- def dns_server_update
- if saved_change_to_ip_address? && ip_address
- Node.where('id != ? and ip_address = ?',
- id, ip_address).each do |stale_node|
- # One or more(!) stale node records have the same IP address
- # as the new node. Clear the ip_address field on the stale
- # nodes. Otherwise, we (via SLURM) might inadvertently connect
- # to the new node using the old node's hostname.
- stale_node.update!(ip_address: nil)
- end
- end
- if hostname_before_last_save && saved_change_to_hostname?
- self.class.dns_server_update(hostname_before_last_save, UNUSED_NODE_IP)
- end
- if hostname && (saved_change_to_hostname? || saved_change_to_ip_address?)
- self.class.dns_server_update(hostname, ip_address || UNUSED_NODE_IP)
- end
- end
-
- def self.dns_server_update hostname, ip_address
- ok = true
-
- ptr_domain = ip_address.
- split('.').reverse.join('.').concat('.in-addr.arpa')
-
- template_vars = {
- hostname: hostname,
- uuid_prefix: Rails.configuration.ClusterID,
- ip_address: ip_address,
- ptr_domain: ptr_domain,
- }
-
- if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
- !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty?)
- tmpfile = nil
- begin
- begin
- template = IO.read(Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate)
- rescue IOError, SystemCallError => e
- logger.error "Reading #{Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate}: #{e.message}"
- raise
- end
-
- hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
- Tempfile.open(["#{hostname}-", ".conf.tmp"],
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir) do |f|
- tmpfile = f.path
- f.puts template % template_vars
- end
- File.rename tmpfile, hostfile
- rescue IOError, SystemCallError => e
- logger.error "Writing #{hostfile}: #{e.message}"
- ok = false
- ensure
- if tmpfile and File.file? tmpfile
- # Cleanup remaining temporary file.
- File.unlink tmpfile
- end
- end
- end
-
- if !Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand.empty?
- cmd = Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand % template_vars
- if not system cmd
- logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
- ok = false
- end
- end
-
- if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
- !Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand.to_s.empty?)
- restartfile = File.join(Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, 'restart.txt')
- begin
- File.open(restartfile, 'w') do |f|
- # Typically, this is used to trigger a dns server restart
- f.puts Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand
- end
- rescue IOError, SystemCallError => e
- logger.error "Unable to write #{restartfile}: #{e.message}"
- ok = false
- end
- end
-
- ok
- end
-
- def self.hostname_for_slot(slot_number)
- config = Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
-
- return nil if !config
-
- sprintf(config, {:slot_number => slot_number})
- end
-
- # At startup, make sure all DNS entries exist. Otherwise, slurmctld
- # will refuse to start.
- if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
- !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty? and
- !Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname.empty?)
-
- (0..MAX_VMS-1).each do |slot_number|
- hostname = hostname_for_slot(slot_number)
- hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
- if !File.exist? hostfile
- n = Node.where(:slot_number => slot_number).first
- if n.nil? or n.ip_address.nil?
- dns_server_update(hostname, UNUSED_NODE_IP)
- else
- dns_server_update(hostname, n.ip_address)
- end
- end
- end
- end
-
- def permission_to_update
- @bypass_arvados_authorization or super
- end
-
- def permission_to_create
- current_user and current_user.is_admin
- end
-end
diff --git a/services/api/app/models/pipeline_instance.rb b/services/api/app/models/pipeline_instance.rb
deleted file mode 100644
index 0b0af8b87d..0000000000
--- a/services/api/app/models/pipeline_instance.rb
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class PipelineInstance < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :components, Hash
- serialize :properties, Hash
- serialize :components_summary, Hash
- belongs_to :pipeline_template,
- foreign_key: 'pipeline_template_uuid',
- primary_key: 'uuid',
- optional: true
-
- before_validation :bootstrap_components
- before_validation :update_state
- before_validation :verify_status
- before_validation :update_timestamps_when_state_changes
- before_create :set_state_before_save
- before_save :set_state_before_save
- before_create :create_disabled
- before_update :update_disabled
-
- api_accessible :user, extend: :common do |t|
- t.add :pipeline_template_uuid
- t.add :name
- t.add :components
- t.add :properties
- t.add :state
- t.add :components_summary
- t.add :description
- t.add :started_at
- t.add :finished_at
- end
-
- # Supported states for a pipeline instance
- States =
- [
- (New = 'New'),
- (Ready = 'Ready'),
- (RunningOnServer = 'RunningOnServer'),
- (RunningOnClient = 'RunningOnClient'),
- (Paused = 'Paused'),
- (Failed = 'Failed'),
- (Complete = 'Complete'),
- ]
-
- def self.limit_index_columns_read
- ["components"]
- end
-
- # if all components have input, the pipeline is Ready
- def components_look_ready?
- if !self.components || self.components.empty?
- return false
- end
-
- all_components_have_input = true
- self.components.each do |name, component|
- component['script_parameters'].andand.each do |parametername, parameter|
- parameter = { 'value' => parameter } unless parameter.is_a? Hash
- if parameter['value'].nil? and parameter['required']
- if parameter['output_of']
- next
- end
- all_components_have_input = false
- break
- end
- end
- end
- return all_components_have_input
- end
-
- def progress_table
- begin
- # v0 pipeline format
- nrow = -1
- components['steps'].collect do |step|
- nrow += 1
- row = [nrow, step['name']]
- if step['complete'] and step['complete'] != 0
- if step['output_data_locator']
- row << 1.0
- else
- row << 0.0
- end
- else
- row << 0.0
- if step['failed']
- self.state = Failed
- end
- end
- row << (step['warehousejob']['id'] rescue nil)
- row << (step['warehousejob']['revision'] rescue nil)
- row << step['output_data_locator']
- row << (Time.parse(step['warehousejob']['finishtime']) rescue nil)
- row
- end
- rescue
- []
- end
- end
-
- def progress_ratio
- t = progress_table
- return 0 if t.size < 1
- t.collect { |r| r[2] }.inject(0.0) { |sum,a| sum += a } / t.size
- end
-
- def self.queue
- self.where("state = 'RunningOnServer'")
- end
-
- def cancel(cascade: false, need_transaction: true)
- raise "No longer supported"
- end
-
- protected
- def bootstrap_components
- if pipeline_template and (!components or components.empty?)
- self.components = pipeline_template.components.deep_dup
- end
- end
-
- def update_state
- if components and progress_ratio == 1.0
- self.state = Complete
- end
- end
-
- def verify_status
- changed_attributes = self.changed
-
- if new_record? or 'components'.in? changed_attributes
- self.state ||= New
- if (self.state == New) and self.components_look_ready?
- self.state = Ready
- end
- end
-
- if !self.state.in?(States)
- errors.add :state, "'#{state.inspect} must be one of: [#{States.join ', '}]"
- throw(:abort)
- end
- end
-
- def set_state_before_save
- if self.components_look_ready? && (!self.state || self.state == New)
- self.state = Ready
- end
- end
-
- def update_timestamps_when_state_changes
- return if not (state_changed? or new_record?)
-
- case state
- when RunningOnServer, RunningOnClient
- self.started_at ||= db_current_time
- when Failed, Complete
- current_time = db_current_time
- self.started_at ||= current_time
- self.finished_at ||= current_time
- end
- end
-
-
- def create_disabled
- raise "Disabled"
- end
-
- def update_disabled
- raise "Disabled"
- end
-end
diff --git a/services/api/app/models/pipeline_template.rb b/services/api/app/models/pipeline_template.rb
deleted file mode 100644
index 7c694698e0..0000000000
--- a/services/api/app/models/pipeline_template.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class PipelineTemplate < ArvadosModel
- before_create :create_disabled
- before_update :update_disabled
-
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :components, Hash
-
- api_accessible :user, extend: :common do |t|
- t.add :name
- t.add :components
- t.add :description
- end
-
- def self.limit_index_columns_read
- ["components"]
- end
-
- def create_disabled
- raise "Disabled"
- end
-
- def update_disabled
- raise "Disabled"
- end
-end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
deleted file mode 100644
index 46f2de6ee4..0000000000
--- a/services/api/app/models/repository.rb
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Repository < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
-
- # Order is important here. We must validate the owner before we can
- # validate the name.
- validate :valid_owner
- validate :name_format, :if => Proc.new { |r| r.errors[:owner_uuid].empty? }
- validates(:name, uniqueness: true, allow_nil: false)
-
- api_accessible :user, extend: :common do |t|
- t.add :name
- t.add :fetch_url
- t.add :push_url
- t.add :clone_urls
- end
-
- def self.attributes_required_columns
- super.merge("clone_urls" => ["name"],
- "fetch_url" => ["name"],
- "push_url" => ["name"])
- end
-
- # Deprecated. Use clone_urls instead.
- def push_url
- ssh_clone_url
- end
-
- # Deprecated. Use clone_urls instead.
- def fetch_url
- ssh_clone_url
- end
-
- def clone_urls
- [ssh_clone_url, https_clone_url].compact
- end
-
- def server_path
- # Find where the repository is stored on the API server's filesystem,
- # and return that path, or nil if not found.
- # This method is only for the API server's internal use, and should not
- # be exposed through the public API. Following our current gitolite
- # setup, it searches for repositories stored by UUID, then name; and it
- # prefers bare repositories over checkouts.
- [["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
- [:uuid, :name].each do |path_attr|
- git_dir = File.join(Rails.configuration.Git.Repositories,
- repo_base % send(path_attr), *join_args)
- return git_dir if File.exist?(git_dir)
- end
- end
- nil
- end
-
- protected
-
- def permission_to_update
- if not super
- false
- elsif current_user.is_admin
- true
- elsif name_changed?
- current_user.uuid == owner_uuid
- else
- true
- end
- end
-
- def owner
- User.find_by_uuid(owner_uuid)
- end
-
- def valid_owner
- if owner.nil? or (owner.username.nil? and (owner.uuid != system_user_uuid))
- errors.add(:owner_uuid, "must refer to a user with a username")
- false
- end
- end
-
- def name_format
- if owner.uuid == system_user_uuid
- prefix_match = ""
- errmsg_start = "must be"
- else
- prefix_match = Regexp.escape(owner.username + "/")
- errmsg_start = "must be the owner's username, then '/', then"
- end
- if not (/^#{prefix_match}[A-Za-z][A-Za-z0-9]*$/.match(name))
- errors.add(:name,
- "#{errmsg_start} a letter followed by alphanumerics, expected pattern '#{prefix_match}[A-Za-z][A-Za-z0-9]*' but was '#{name}'")
- false
- end
- end
-
- def ssh_clone_url
- _clone_url Rails.configuration.Services.GitSSH.andand.ExternalURL, 'ssh://git@git.%s.arvadosapi.com'
- end
-
- def https_clone_url
- _clone_url Rails.configuration.Services.GitHTTP.andand.ExternalURL, 'https://git.%s.arvadosapi.com/'
- end
-
- def _clone_url config_var, default_base_fmt
- if not config_var
- return ""
- end
- prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
- if prefix == Rails.configuration.ClusterID and config_var != URI("")
- base = config_var
- else
- base = URI(default_base_fmt % prefix)
- end
- if base.path == ""
- base.path = "/"
- end
- if base.scheme == "ssh"
- '%s@%s:%s.git' % [base.user, base.host, name]
- else
- '%s%s.git' % [base, name]
- end
- end
-end
diff --git a/services/api/app/models/specimen.rb b/services/api/app/models/specimen.rb
deleted file mode 100644
index 32d5ed57f3..0000000000
--- a/services/api/app/models/specimen.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Specimen < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :properties, Hash
-
- api_accessible :user, extend: :common do |t|
- t.add :material
- t.add :properties
- end
-
- def properties
- @properties ||= Hash.new
- super
- end
-end
diff --git a/services/api/app/models/trait.rb b/services/api/app/models/trait.rb
deleted file mode 100644
index 2d3556b51d..0000000000
--- a/services/api/app/models/trait.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-class Trait < ArvadosModel
- include HasUuid
- include KindAndEtag
- include CommonApiTemplate
- serialize :properties, Hash
-
- api_accessible :user, extend: :common do |t|
- t.add :name
- t.add :properties
- end
-end
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 5a95fb0b88..2892c9ec03 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -25,9 +25,6 @@ class User < ArvadosModel
before_update :prevent_privilege_escalation
before_update :prevent_inactive_admin
before_update :prevent_nonadmin_system_root
- before_update :verify_repositories_empty, :if => Proc.new {
- username.nil? and username_changed?
- }
after_update :setup_on_activate
before_create :check_auto_admin
@@ -49,16 +46,10 @@ class User < ArvadosModel
before_update :before_ownership_change
after_update :after_ownership_change
after_update :send_profile_created_notification
- after_update :sync_repository_names, :if => Proc.new {
- (uuid != system_user_uuid) and
- saved_change_to_username? and
- (not username_before_last_save.nil?)
- }
before_destroy :clear_permissions
after_destroy :remove_self_from_permissions
has_many :authorized_keys, foreign_key: 'authorized_user_uuid', primary_key: 'uuid'
- has_many :repositories, foreign_key: 'owner_uuid', primary_key: 'uuid'
default_scope { where('redirect_to_user_uuid is null') }
@@ -180,7 +171,7 @@ SELECT 1 FROM #{PERMISSION_VIEW}
def before_ownership_change
if owner_uuid_changed? and !self.owner_uuid_was.nil?
- MaterializedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all
+ ComputedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all
update_permissions self.owner_uuid_was, self.uuid, REVOKE_PERM
end
end
@@ -192,7 +183,7 @@ SELECT 1 FROM #{PERMISSION_VIEW}
end
def clear_permissions
- MaterializedPermission.where("user_uuid = ? and target_uuid != ?", uuid, uuid).delete_all
+ ComputedPermission.where("user_uuid = ? and target_uuid != ?", uuid, uuid).delete_all
end
def forget_cached_group_perms
@@ -200,7 +191,7 @@ SELECT 1 FROM #{PERMISSION_VIEW}
end
def remove_self_from_permissions
- MaterializedPermission.where("target_uuid = ?", uuid).delete_all
+ ComputedPermission.where("target_uuid = ?", uuid).delete_all
check_permissions_against_full_refresh
end
@@ -261,7 +252,7 @@ SELECT target_uuid, perm_level
end
# create links
- def setup(repo_name: nil, vm_uuid: nil, send_notification_email: nil)
+ def setup(vm_uuid: nil, send_notification_email: nil)
newly_invited = Link.where(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
link_class: 'permission').empty?
@@ -271,12 +262,6 @@ SELECT target_uuid, perm_level
# direction which makes this user visible to other users.
group_perms = add_to_all_users_group
- # Add git repo
- repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
- repo_name ||= "#{username}/#{username}"
- create_user_repo_link repo_name
- end
-
# Add virtual machine
if vm_uuid.nil? and !Rails.configuration.Users.AutoSetupNewUsersWithVmUUID.empty?
vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID
@@ -288,7 +273,7 @@ SELECT target_uuid, perm_level
# Send welcome email
if send_notification_email.nil?
- send_notification_email = Rails.configuration.Mail.SendUserSetupNotificationEmail
+ send_notification_email = Rails.configuration.Users.SendUserSetupNotificationEmail
end
if newly_invited and send_notification_email and !Rails.configuration.Users.UserSetupMailText.empty?
@@ -301,10 +286,10 @@ SELECT target_uuid, perm_level
forget_cached_group_perms
- return [repo_perm, vm_login_perm, *group_perms, self].compact
+ return [vm_login_perm, *group_perms, self].compact
end
- # delete user signatures, login, repo, and vm perms, and mark as inactive
+ # delete user signatures, login, and vm perms, and mark as inactive
def unsetup
if self.uuid == system_user_uuid
raise "System root user cannot be deactivated"
@@ -483,30 +468,13 @@ SELECT target_uuid, perm_level
klass.where(column => uuid).update_all(column => new_user.uuid)
end
- # Need to update repository names to new username
- if username
- old_repo_name_re = /^#{Regexp.escape(username)}\//
- Repository.where(:owner_uuid => uuid).each do |repo|
- repo.owner_uuid = new_user.uuid
- repo_name_sub = "#{new_user.username}/"
- name = repo.name.sub(old_repo_name_re, repo_name_sub)
- while (conflict = Repository.where(:name => name).first) != nil
- repo_name_sub += "migrated"
- name = repo.name.sub(old_repo_name_re, repo_name_sub)
- end
- repo.name = name
- repo.save!
- end
- end
-
# References to the merged user's "home project" are updated to
# point to new_owner_uuid.
ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
next if [ApiClientAuthorization,
AuthorizedKey,
Link,
- Log,
- Repository].include?(klass)
+ Log].include?(klass)
next if !klass.columns.collect(&:name).include?('owner_uuid')
klass.where(owner_uuid: uuid).update_all(owner_uuid: new_owner_uuid)
end
@@ -758,6 +726,7 @@ SELECT target_uuid, perm_level
super.merge(
'can_write' => ['owner_uuid', 'uuid'],
'can_manage' => ['owner_uuid', 'uuid'],
+ 'full_name' => ['first_name', 'last_name'],
)
end
@@ -889,24 +858,8 @@ SELECT target_uuid, perm_level
merged
end
- def create_user_repo_link(repo_name)
- # repo_name is optional
- if not repo_name
- logger.warn ("Repository name not given for #{self.uuid}.")
- return
- end
-
- repo = Repository.where(owner_uuid: uuid, name: repo_name).first_or_create!
- logger.info { "repo uuid: " + repo[:uuid] }
- repo_perm = Link.where(tail_uuid: uuid, head_uuid: repo.uuid,
- link_class: "permission",
- name: "can_manage").first_or_create!
- logger.info { "repo permission: " + repo_perm[:uuid] }
- return repo_perm
- end
-
# create login permission for the given vm_uuid, if it does not already exist
- def create_vm_login_permission_link(vm_uuid, repo_name)
+ def create_vm_login_permission_link(vm_uuid, username)
# vm uuid is optional
return if vm_uuid == ""
@@ -924,11 +877,11 @@ SELECT target_uuid, perm_level
login_perm = Link.
where(login_attrs).
- select { |link| link.properties["username"] == repo_name }.
+ select { |link| link.properties["username"] == username }.
first
login_perm ||= Link.
- create(login_attrs.merge(properties: {"username" => repo_name}))
+ create(login_attrs.merge(properties: {"username" => username}))
logger.info { "login permission: " + login_perm[:uuid] }
login_perm
@@ -1001,22 +954,6 @@ SELECT target_uuid, perm_level
end
end
- def verify_repositories_empty
- unless repositories.first.nil?
- errors.add(:username, "can't be unset when the user owns repositories")
- throw(:abort)
- end
- end
-
- def sync_repository_names
- old_name_re = /^#{Regexp.escape(username_before_last_save)}\//
- name_sub = "#{username}/"
- repositories.find_each do |repo|
- repo.name = repo.name.sub(old_name_re, name_sub)
- repo.save!
- end
- end
-
def identity_url_nil_if_empty
if identity_url == ""
self.identity_url = nil
diff --git a/services/api/app/models/workflow.rb b/services/api/app/models/workflow.rb
index 0268c4e979..b7746a2f4f 100644
--- a/services/api/app/models/workflow.rb
+++ b/services/api/app/models/workflow.rb
@@ -8,12 +8,15 @@ class Workflow < ArvadosModel
include CommonApiTemplate
validate :validate_definition
+ validate :validate_collection_uuid
before_save :set_name_and_description
+ before_save :link_with_collection
api_accessible :user, extend: :common do |t|
t.add :name
t.add :description
t.add :definition
+ t.add :collection_uuid
end
def validate_definition
@@ -24,6 +27,21 @@ class Workflow < ArvadosModel
end
end
+ def validate_collection_uuid
+    return if !collection_uuid_changed? || collection_uuid.nil?
+
+ c = Collection.
+ readable_by(current_user).
+ find_by_uuid(collection_uuid)
+    if !c
+      errors.add :collection_uuid, "does not exist or you do not have permission to read it."
+    elsif c.properties["type"] != "workflow"
+      errors.add :collection_uuid, "must refer to a collection whose properties include type: workflow"
+    end
+ end
+
def set_name_and_description
old_wf = {}
begin
@@ -45,10 +63,17 @@ class Workflow < ArvadosModel
end
def self.full_text_searchable_columns
- super - ["definition"]
+ super - ["definition", "collection_uuid"]
+ end
+
+ def link_with_collection
+ return if collection_uuid.nil? || !collection_uuid_changed?
+ Collection.find_by_uuid(collection_uuid).update_linked_workflows([self], false)
end
- def self.limit_index_columns_read
- ["definition"]
+ def self.readable_by(*users_list)
+ return super if users_list.select { |u| u.is_a?(User) && u.is_admin }.any?
+ super.where(collection_uuid: nil).or(where(Collection.readable_by(*users_list).where("collections.uuid = workflows.collection_uuid").arel.exists))
end
+
end
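The overridden Workflow.readable_by delegates read permission to the linked
collection: admin users keep the unfiltered scope, while everyone else sees a
workflow only if it has no collection_uuid or if its collection is readable by
them. A hedged sketch of the resulting behavior (the user lookup below is
hypothetical):

    user = User.find_by_uuid("zzzzz-tpzed-000000000000000")  # hypothetical user
    Workflow.readable_by(user).to_sql
    # Roughly:
    #   SELECT workflows.* FROM workflows
    #   WHERE <readable-by scope>
    #     AND (workflows.collection_uuid IS NULL
    #          OR EXISTS (SELECT ... FROM collections
    #                     WHERE <readable-by scope>
    #                       AND collections.uuid = workflows.collection_uuid))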
diff --git a/services/api/arvados-railsapi.service b/services/api/arvados-railsapi.service
new file mode 100644
index 0000000000..b1f26cf6b7
--- /dev/null
+++ b/services/api/arvados-railsapi.service
@@ -0,0 +1,66 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados API server Rails backend
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+StartLimitIntervalSec=0
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+# It would be nicer to write a Type=simple process, but then Passenger
+# duplicates a lot of logs to stdout.
+Type=forking
+PIDFile=%t/%N/passenger.pid
+EnvironmentFile=-/etc/arvados/environment
+# Passenger web server settings come from (highest precedence first):
+# 1. Command line options
+# 2. PASSENGER_* environment variables
+# 3. /var/www/arvados-api/current/Passengerfile.json
+# You can change or add settings for this unit by running
+# `systemctl edit arvados-railsapi.service`.
+# Refer to the Passenger standalone configuration reference at
+# https://www.phusionpassenger.com/library/config/standalone/reference/
+# for more information about options.
+Environment=PASSENGER_ADDRESS=localhost
+Environment=PASSENGER_ENVIRONMENT=production
+Environment=PASSENGER_LOG_FILE=log/production.log
+Environment=PASSENGER_PORT=8004
+WorkingDirectory=/var/www/arvados-api/current
+ExecStartPre=+/bin/install -d log tmp
+ExecStartPre=+/bin/chmod g+srwx log tmp
+ExecStartPre=+-/bin/chmod g+rw ${PASSENGER_LOG_FILE}
+# Note that the `bundle exec` lines below are expected to be overridden by the
+# package with versions of `bundle` and `passenger` pinned to specific releases.
+ExecStart=/usr/bin/bundle exec passenger start --daemonize --pid-file %t/%N/passenger.pid
+ExecStop=/usr/bin/bundle exec passenger stop --pid-file %t/%N/passenger.pid
+ExecReload=/usr/bin/bundle exec passenger-config reopen-logs
+Restart=always
+RestartSec=1
+
+ReadWritePaths=/var/www/arvados-api/current/log
+ReadWritePaths=/var/www/arvados-api/current/tmp
+ReadWritePaths=/var/www/arvados-api/shared/log
+RuntimeDirectory=%N
+
+DynamicUser=true
+PrivateTmp=true
+ProtectControlGroups=true
+ProtectHome=true
+ProtectSystem=strict
+
+LockPersonality=true
+NoNewPrivileges=true
+MemoryDenyWriteExecute=true
+PrivateDevices=true
+ProtectKernelModules=true
+ProtectKernelTunables=true
+RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK
+RestrictNamespaces=true
+RestrictRealtime=true
+SystemCallFilter=@system-service
diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb
index f8b9ff8ecd..19a5dc8776 100644
--- a/services/api/config/arvados_config.rb
+++ b/services/api/config/arvados_config.rb
@@ -84,7 +84,6 @@ arvcfg = ConfigLoader.new
arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
arvcfg.declare_config "ManagementToken", String, :ManagementToken
arvcfg.declare_config "SystemRootToken", String
-arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
arvcfg.declare_config "API.DisabledAPIs", Hash, :disable_api_methods, ->(cfg, k, v) { arrayToHash cfg, "API.DisabledAPIs", v }
arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
@@ -94,7 +93,6 @@ arvcfg.declare_config "API.RequestTimeout", ActiveSupport::Duration
arvcfg.declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
-arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Hash, :auto_setup_name_blacklist, ->(cfg, k, v) { arrayToHash cfg, "Users.AutoSetupUsernameBlacklist", v }
arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
@@ -132,33 +130,15 @@ arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_defa
arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
arvcfg.declare_config "Containers.AlwaysUsePreemptibleInstances", Boolean, :preemptible_instances
-arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
-arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
-arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
-arvcfg.declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
-arvcfg.declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
-arvcfg.declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
-arvcfg.declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
arvcfg.declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
arvcfg.declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
-arvcfg.declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
-arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
-arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
-arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
-arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
-arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
-arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Hash, :compute_node_nameservers, ->(cfg, k, v) { arrayToHash cfg, "Containers.SLURM.Managed.ComputeNodeNameservers", v }
-arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
-arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
-arvcfg.declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
-arvcfg.declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
-arvcfg.declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+arvcfg.declare_config "Services.ContainerWebServices.ExternalURL", URI
+arvcfg.declare_config "Services.ContainerWebServices.ExternalPortMin", Integer
+arvcfg.declare_config "Services.ContainerWebServices.ExternalPortMax", Integer
arvcfg.declare_config "Services.Controller.ExternalURL", URI
arvcfg.declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
arvcfg.declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
-arvcfg.declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
-arvcfg.declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
arvcfg.declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
h = if cfg["RemoteClusters"] then
cfg["RemoteClusters"].deep_dup
@@ -258,14 +238,6 @@ if default_storage_classes.length == 0
end
$arvados_config["DefaultStorageClasses"] = default_storage_classes.sort
-#
-# Special case for test database where there's no database.yml,
-# because the Arvados config.yml doesn't have a concept of multiple
-# rails environments.
-#
-if ::Rails.env.to_s == "test" && db_config["test"].nil?
- $arvados_config["PostgreSQL"]["Connection"]["dbname"] = "arvados_test"
-end
if ::Rails.env.to_s == "test"
# Use template0 when creating a new database. Avoids
# character-encoding/collation problems.
@@ -301,9 +273,9 @@ end
database_url = "postgresql://#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["user"]}:"+
"#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["password"]}@"+
"#{dbhost}/#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["dbname"]}?"+
- "template=#{$arvados_config["PostgreSQL"]["Connection"]["template"]}&"+
- "encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+
- "collation=#{$arvados_config["PostgreSQL"]["Connection"]["collation"]}&"+
+ "template=#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["template"].to_s}&"+
+ "encoding=#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["client_encoding"].to_s}&"+
+ "collation=#{CGI.escape $arvados_config["PostgreSQL"]["Connection"]["collation"].to_s}&"+
"pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
ENV["DATABASE_URL"] = database_url
@@ -319,5 +291,5 @@ Server::Application.configure do
# We don't rely on cookies for authentication, so instead of
# requiring a signing key in config, we assign a new random one at
# startup.
- secrets.secret_key_base = rand(1<<255).to_s(36)
+ credentials.secret_key_base = rand(1<<255).to_s(36)
end
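The added CGI.escape calls matter because template, client_encoding, and
collation are interpolated into the DATABASE_URL query string; an unescaped
"&" or "@" would split the URL in the wrong place, and the .to_s guards keep a
missing setting from raising. A quick sketch of the escaping behavior:

    require "cgi"
    CGI.escape("p@ss&word")  # => "p%40ss%26word"
    CGI.escape(nil.to_s)     # => "" -- an unset value degrades to an empty parameter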
diff --git a/services/api/config/boot.rb b/services/api/config/boot.rb
index 282011619d..5841684dc3 100644
--- a/services/api/config/boot.rb
+++ b/services/api/config/boot.rb
@@ -1,3 +1,14 @@
ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__)
+# Setting an environment variable before loading rack is the only way
+# to change rack's request size limit for an urlencoded POST body.
+# Rack::QueryParser accepts an initialization argument to override the
+# default, but rack only ever uses its global default_parser, and
+# there is no facility for overriding that at runtime.
+#
+# Our strategy is to rely on the more configurable downstream servers
+# (Nginx and arvados-controller) to reject oversized requests before
+# they hit this server at all.
+ENV["RACK_QUERY_PARSER_BYTESIZE_LIMIT"] = (4 << 30).to_s
+
require "bundler/setup" # Set up gems listed in the Gemfile.
diff --git a/services/api/config/initializers/legacy_jobs_api.rb b/services/api/config/initializers/legacy_jobs_api.rb
deleted file mode 100644
index b6a2895f78..0000000000
--- a/services/api/config/initializers/legacy_jobs_api.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Config must be done before we files; otherwise they
-# won't be able to use Rails.configuration.* to initialize their
-# classes.
-
-require 'enable_jobs_api'
-
-Rails.application.configure do
- begin
- if ENV["ARVADOS_CONFIG"] != "none" && ActiveRecord::Base.connection.tables.include?('jobs')
- check_enable_legacy_jobs_api
- end
- rescue ActiveRecord::NoDatabaseError
- # Since rails 5.2, all initializers are run by rake tasks (like db:create),
- # see: https://github.com/rails/rails/issues/32870
- end
-end
diff --git a/services/api/config/routes.rb b/services/api/config/routes.rb
index b87e86f664..4f90a33be1 100644
--- a/services/api/config/routes.rb
+++ b/services/api/config/routes.rb
@@ -3,8 +3,6 @@
# SPDX-License-Identifier: AGPL-3.0
Rails.application.routes.draw do
- themes_for_rails
-
# OPTIONS requests are not allowed at routes that use cookies.
['/auth/*a', '/login', '/logout'].each do |nono|
match nono, to: 'user_sessions#cross_origin_forbidden', via: 'OPTIONS'
@@ -19,7 +17,6 @@ Rails.application.routes.draw do
post 'create_system_auth', on: :collection
get 'current', on: :collection
end
- resources :api_clients
resources :authorized_keys
resources :collections do
get 'provenance', on: :member
@@ -34,8 +31,6 @@ Rails.application.routes.draw do
post 'trash', on: :member
post 'untrash', on: :member
end
- resources :humans
- resources :job_tasks
resources :containers do
get 'auth', on: :member
post 'lock', on: :member
@@ -47,33 +42,11 @@ Rails.application.routes.draw do
resources :container_requests do
get 'container_status', on: :member
end
- resources :jobs do
- get 'queue', on: :collection
- get 'queue_size', on: :collection
- post 'cancel', on: :member
- post 'lock', on: :member
- end
- resources :keep_disks do
- post 'ping', on: :collection
- end
resources :keep_services do
get 'accessible', on: :collection
end
resources :links
resources :logs
- resources :nodes do
- post 'ping', on: :member
- end
- resources :pipeline_instances do
- post 'cancel', on: :member
- end
- resources :pipeline_templates
- resources :workflows
- resources :repositories do
- get 'get_all_permissions', on: :collection
- end
- resources :specimens
- resources :traits
resources :user_agreements do
get 'signatures', on: :collection
post 'sign', on: :collection
@@ -91,6 +64,11 @@ Rails.application.routes.draw do
get 'logins', on: :member
get 'get_all_logins', on: :collection
end
+ resources :workflows
+ resources :credentials do
+ get 'secret', on: :member
+ end
+ get '/computed_permissions', to: 'computed_permissions#index'
get '/permissions/:uuid', to: 'links#get_permissions'
end
end
diff --git a/services/api/db/migrate/20221230155924_bigint_id.rb b/services/api/db/migrate/20221230155924_bigint_id.rb
index 932cb025dc..20791ae017 100644
--- a/services/api/db/migrate/20221230155924_bigint_id.rb
+++ b/services/api/db/migrate/20221230155924_bigint_id.rb
@@ -5,9 +5,6 @@
class BigintId < ActiveRecord::Migration[5.2]
disable_ddl_transaction!
def up
- old_value = query_value('SHOW statement_timeout')
- execute "SET statement_timeout TO '0'"
-
change_column :api_client_authorizations, :id, :bigint
change_column :api_client_authorizations, :api_client_id, :bigint
change_column :api_client_authorizations, :user_id, :bigint
@@ -33,8 +30,6 @@ class BigintId < ActiveRecord::Migration[5.2]
change_column :traits, :id, :bigint
change_column :virtual_machines, :id, :bigint
change_column :workflows, :id, :bigint
-
- execute "SET statement_timeout TO #{quote(old_value)}"
end
def down
diff --git a/services/api/db/migrate/20240329173437_add_output_glob_to_containers.rb b/services/api/db/migrate/20240329173437_add_output_glob_to_containers.rb
new file mode 100644
index 0000000000..481cad123f
--- /dev/null
+++ b/services/api/db/migrate/20240329173437_add_output_glob_to_containers.rb
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputGlobToContainers < ActiveRecord::Migration[7.0]
+ def change
+ add_column :containers, :output_glob, :text, default: '[]'
+ add_column :container_requests, :output_glob, :text, default: '[]'
+ end
+end
diff --git a/services/api/db/migrate/20240402162733_add_output_glob_index_to_containers.rb b/services/api/db/migrate/20240402162733_add_output_glob_index_to_containers.rb
new file mode 100644
index 0000000000..6769601576
--- /dev/null
+++ b/services/api/db/migrate/20240402162733_add_output_glob_index_to_containers.rb
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputGlobIndexToContainers < ActiveRecord::Migration[4.2]
+ def up
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, md5(output_glob), container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'
+ end
+end
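Hashing the large text columns (command, environment, mounts, output_glob,
runtime_constraints) keeps each index entry small enough for PostgreSQL's
btree row-size limit while still supporting equality matches for container
reuse. A hedged sketch of the kind of query this index serves (the constraint
values are placeholders, not the actual reuse logic):

    Container.where(
      "md5(command) = md5(?) AND output_path = ? AND md5(output_glob) = md5(?)",
      ["echo", "hello"].to_json, "/out", ["*.txt"].to_json)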
diff --git a/services/api/db/migrate/20240604183200_exclude_uuids_and_hashes_from_text_search.rb b/services/api/db/migrate/20240604183200_exclude_uuids_and_hashes_from_text_search.rb
new file mode 100644
index 0000000000..e9f4374029
--- /dev/null
+++ b/services/api/db/migrate/20240604183200_exclude_uuids_and_hashes_from_text_search.rb
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ExcludeUuidsAndHashesFromTextSearch < ActiveRecord::Migration[7.0]
+ def trgm_indexes
+ [
+ # Table name, index name, pre-migration full_text_trgm
+ ["collections", "collections_trgm_text_search_idx", "(coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(portable_data_hash,'') || ' ' || coalesce(uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(properties::text,'') || ' ' || coalesce(file_names,''))"],
+ # container_requests handled by 20240820202230_exclude_container_image_from_text_search.rb
+ ["groups", "groups_trgm_text_search_idx", "(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(group_class,'') || ' ' || coalesce(properties::text,''))"],
+ ["workflows", "workflows_trgm_text_search_idx", "(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,''))"],
+ ]
+ end
+
+ def up
+ trgm_indexes.each do |model, indx, _|
+ execute "DROP INDEX IF EXISTS #{indx}"
+ execute "CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)"
+ end
+ end
+
+ def down
+ trgm_indexes.each do |model, indx, full_text_trgm|
+ execute "DROP INDEX IF EXISTS #{indx}"
+ execute "CREATE INDEX #{indx} ON #{model} USING gin((#{full_text_trgm}) gin_trgm_ops)"
+ end
+ end
+end
diff --git a/services/api/db/migrate/20240618121312_create_uuid_locks.rb b/services/api/db/migrate/20240618121312_create_uuid_locks.rb
new file mode 100644
index 0000000000..3c9c1c195c
--- /dev/null
+++ b/services/api/db/migrate/20240618121312_create_uuid_locks.rb
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateUuidLocks < ActiveRecord::Migration[7.0]
+ def change
+ create_table :uuid_locks, id: false do |t|
+ t.string :uuid, null: false, index: {unique: true}
+ t.integer :n, null: false, default: 0
+ end
+ end
+end
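The uuid_locks table acts as a set of per-UUID row locks: a transaction takes
the lock by upserting the row, and the lock is released when the transaction
commits or rolls back. A hedged sketch of the idiom, not necessarily the exact
code Arvados uses (some_uuid is a hypothetical variable):

    ActiveRecord::Base.transaction do
      # Blocks here if another transaction holds the row for some_uuid.
      ActiveRecord::Base.connection.execute(
        ActiveRecord::Base.sanitize_sql(
          ["INSERT INTO uuid_locks (uuid) VALUES (?)
            ON CONFLICT (uuid) DO UPDATE SET n = uuid_locks.n + 1", some_uuid]))
      # ... perform the update that must not race ...
    end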
diff --git a/services/api/db/migrate/20240627201747_set_default_api_client_id.rb b/services/api/db/migrate/20240627201747_set_default_api_client_id.rb
new file mode 100644
index 0000000000..f828cae296
--- /dev/null
+++ b/services/api/db/migrate/20240627201747_set_default_api_client_id.rb
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SetDefaultApiClientId < ActiveRecord::Migration[7.0]
+ def change
+ ActiveRecord::Base.connection.execute 'ALTER TABLE api_client_authorizations ALTER COLUMN api_client_id SET DEFAULT 0'
+ end
+end
diff --git a/services/api/db/migrate/20240820202230_exclude_container_image_from_text_search.rb b/services/api/db/migrate/20240820202230_exclude_container_image_from_text_search.rb
new file mode 100644
index 0000000000..625aa5cabf
--- /dev/null
+++ b/services/api/db/migrate/20240820202230_exclude_container_image_from_text_search.rb
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ExcludeContainerImageFromTextSearch < ActiveRecord::Migration[7.0]
+ def trgm_indexes
+ [
+ # Table name, index name, pre-migration full_text_trgm
+ ["container_requests", "container_requests_trgm_text_search_idx", "(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(properties::text,'') || ' ' || coalesce(state,'') || ' ' || coalesce(requesting_container_uuid,'') || ' ' || coalesce(container_uuid,'') || ' ' || coalesce(runtime_constraints::text,'') || ' ' || coalesce(container_image,'') || ' ' || coalesce(environment::text,'') || ' ' || coalesce(cwd,'') || ' ' || coalesce(command::text,'') || ' ' || coalesce(output_path,'') || ' ' || coalesce(filters,'') || ' ' || coalesce(scheduling_parameters::text,'') || ' ' || coalesce(output_uuid,'') || ' ' || coalesce(log_uuid,'') || ' ' || coalesce(output_name,'') || ' ' || coalesce(output_properties::text,''))"],
+ ]
+ end
+
+ def up
+ trgm_indexes.each do |model, indx, _|
+ execute "DROP INDEX IF EXISTS #{indx}"
+ execute "CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)"
+ end
+ end
+
+ def down
+ trgm_indexes.each do |model, indx, full_text_trgm|
+ execute "DROP INDEX IF EXISTS #{indx}"
+ execute "CREATE INDEX #{indx} ON #{model} USING gin((#{full_text_trgm}) gin_trgm_ops)"
+ end
+ end
+end
diff --git a/services/api/db/migrate/20241118110000_index_on_container_request_name.rb b/services/api/db/migrate/20241118110000_index_on_container_request_name.rb
new file mode 100644
index 0000000000..c739269ef3
--- /dev/null
+++ b/services/api/db/migrate/20241118110000_index_on_container_request_name.rb
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class IndexOnContainerRequestName < ActiveRecord::Migration[7.0]
+ def up
+ add_index :container_requests, ["name", "owner_uuid"]
+ end
+
+ def down
+ remove_index :container_requests, ["name", "owner_uuid"]
+ end
+end
diff --git a/services/api/db/migrate/20250115145250_drop_fts_index_again.rb b/services/api/db/migrate/20250115145250_drop_fts_index_again.rb
new file mode 100644
index 0000000000..ee4da68864
--- /dev/null
+++ b/services/api/db/migrate/20250115145250_drop_fts_index_again.rb
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Fulltext search indexes were removed in
+# 7f4d69cf43a7a743a491105665b3b878a3cfd11c (#15430), but then for no
+# apparent reason dcdf385b2852acf95f41e2340d07cd68cb34e371 (#12430)
+# re-added the FTS index for container_requests.
+class DropFtsIndexAgain < ActiveRecord::Migration[7.0]
+ def up
+ execute "DROP INDEX IF EXISTS container_requests_full_text_search_idx"
+ end
+
+ def down
+ # No-op because the index was not used by prior versions either.
+ end
+end
diff --git a/services/api/db/migrate/20250312141843_add_refreshes_at_to_api_client_authorizations.rb b/services/api/db/migrate/20250312141843_add_refreshes_at_to_api_client_authorizations.rb
new file mode 100644
index 0000000000..b9fc218014
--- /dev/null
+++ b/services/api/db/migrate/20250312141843_add_refreshes_at_to_api_client_authorizations.rb
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRefreshesAtToApiClientAuthorizations < ActiveRecord::Migration[7.1]
+ def change
+ add_column :api_client_authorizations, :refreshes_at, :timestamp, null: true
+ add_index :api_client_authorizations, :refreshes_at
+ end
+end
diff --git a/services/api/db/migrate/20250315222222_add_services_and_published_ports.rb b/services/api/db/migrate/20250315222222_add_services_and_published_ports.rb
new file mode 100644
index 0000000000..a1b7ff10e5
--- /dev/null
+++ b/services/api/db/migrate/20250315222222_add_services_and_published_ports.rb
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddServicesAndPublishedPorts < ActiveRecord::Migration[7.1]
+ def change
+ add_column :containers, :service, :boolean, null: false, :default => false
+ add_column :container_requests, :service, :boolean, null: false, :default => false
+
+ add_column :containers, :published_ports, :jsonb, :default => {}
+ add_column :container_requests, :published_ports, :jsonb, :default => {}
+
+ add_index :links, :name, :where => "link_class = 'published_port'", :unique => true
+ end
+end
diff --git a/services/api/db/migrate/20250402131700_add_collection_uuid_to_workflows.rb b/services/api/db/migrate/20250402131700_add_collection_uuid_to_workflows.rb
new file mode 100644
index 0000000000..f6cb1714de
--- /dev/null
+++ b/services/api/db/migrate/20250402131700_add_collection_uuid_to_workflows.rb
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCollectionUuidToWorkflows < ActiveRecord::Migration[7.1]
+
+ def up
+ remove_index :workflows, name: 'workflows_search_idx'
+ add_column :workflows, :collection_uuid, :string, null: true
+ add_index :workflows, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "collection_uuid"], name: 'workflows_search_index'
+ end
+
+ def down
+ remove_index :workflows, name: 'workflows_search_index'
+ remove_column :workflows, :collection_uuid
+ add_index :workflows, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"], name: 'workflows_search_idx'
+ end
+
+end
diff --git a/services/api/db/migrate/20250422103000_create_credentials_table.rb b/services/api/db/migrate/20250422103000_create_credentials_table.rb
new file mode 100644
index 0000000000..ccb96ca5d0
--- /dev/null
+++ b/services/api/db/migrate/20250422103000_create_credentials_table.rb
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateCredentialsTable < ActiveRecord::Migration[7.1]
+ def change
+ create_table :credentials, :id => :string, :primary_key => :uuid do |t|
+ t.string :owner_uuid, :null => false
+ t.datetime :created_at, :null => false
+ t.datetime :modified_at, :null => false
+ t.string :modified_by_user_uuid
+ t.string :name
+ t.text :description
+ t.string :credential_class
+ t.jsonb :scopes, :default => []
+ t.string :external_id
+ t.text :secret
+ t.datetime :expires_at, :null => false
+ end
+ add_index :credentials, :uuid, unique: true
+ add_index :credentials, :owner_uuid
+ add_index :credentials, [:owner_uuid, :name], unique: true
+ add_index :credentials, [:uuid, :owner_uuid, :modified_by_user_uuid, :name, :credential_class, :external_id]
+ end
+end
diff --git a/services/api/db/migrate/20250426201300_priority_update_check_trash_at.rb b/services/api/db/migrate/20250426201300_priority_update_check_trash_at.rb
new file mode 100644
index 0000000000..2e6908e6bb
--- /dev/null
+++ b/services/api/db/migrate/20250426201300_priority_update_check_trash_at.rb
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PriorityUpdateCheckTrashAt < ActiveRecord::Migration[7.1]
+ def up
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint
+ LANGUAGE sql
+ AS $$
+/* Determine the priority of an individual container.
+ The "inherited" priority comes from the path we followed from the root, the parent container
+ priority hasn't been updated in the table yet but we need to behave it like it has been.
+*/
+select coalesce(max(case when containers.uuid = inherited_from then inherited
+ when containers.priority is not NULL then containers.priority
+ else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint
+ end), 0) from
+ container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid
+ where container_requests.container_uuid = for_container_uuid and
+ container_requests.state = 'Committed' and
+ container_requests.priority > 0 and
+ container_requests.owner_uuid not in (select group_uuid from trashed_groups WHERE trash_at <= statement_timestamp());
+$$;
+}
+ end
+
+ def down
+ end
+end
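The magic constant in the priority formula is 2**50, so a container request's
priority occupies the high bits of the resulting bigint while the creation
timestamp (in milliseconds) only breaks ties within one priority level,
favoring older requests. A quick check of the arithmetic:

    1125899906842624 == 2 ** 50      # => true
    t = (Time.now.to_f * 1000).to_i  # epoch milliseconds, ~1.7e12, well below 2**50
    5 * 1125899906842624 - t         # still larger than any priority-4 value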
diff --git a/services/api/db/migrate/20250527181323_add_container_ports.rb b/services/api/db/migrate/20250527181323_add_container_ports.rb
new file mode 100644
index 0000000000..435c972b5b
--- /dev/null
+++ b/services/api/db/migrate/20250527181323_add_container_ports.rb
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddContainerPorts < ActiveRecord::Migration[7.1]
+ def change
+ create_table :container_ports, :id => false do |t|
+ t.integer :external_port, :null => false
+ t.integer :container_port, :null => false
+ t.string :container_uuid, :null => false
+ end
+ add_index :container_ports, :external_port, unique: true
+ add_index :container_ports, :container_uuid
+ end
+end
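Because external_port carries a unique index, port assignment can use the same
optimistic pattern as the removed Node#assign_slot: try a candidate, and on
ActiveRecord::RecordNotUnique move on to the next. A hedged sketch, assuming
the range comes from the Services.ContainerWebServices.ExternalPortMin/Max
settings declared in arvados_config.rb above (assign_external_port itself is a
hypothetical helper):

    def assign_external_port(container_uuid, container_port)
      cfg = Rails.configuration.Services.ContainerWebServices
      (cfg.ExternalPortMin..cfg.ExternalPortMax).each do |candidate|
        ActiveRecord::Base.connection.execute(
          ActiveRecord::Base.sanitize_sql(
            ["INSERT INTO container_ports (external_port, container_port, container_uuid)
              VALUES (?, ?, ?)", candidate, container_port, container_uuid]))
        return candidate
      rescue ActiveRecord::RecordNotUnique
        next  # candidate already taken; try the next port
      end
      raise "no external ports available"
    end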
diff --git a/services/api/db/seeds.rb b/services/api/db/seeds.rb
index b40bd4d0ed..e32f099215 100644
--- a/services/api/db/seeds.rb
+++ b/services/api/db/seeds.rb
@@ -2,7 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
-# This file seeds the database with initial/default values.
+# This file seeds the database with initial/default values if needed.
+# It is safe to re-run on an existing database.
#
# It is invoked by `rake db:seed` and `rake db:setup`.
diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql
index c0d4263d97..f48d9ad5a9 100644
--- a/services/api/db/structure.sql
+++ b/services/api/db/structure.sql
@@ -209,7 +209,7 @@ select coalesce(max(case when containers.uuid = inherited_from then inherited
where container_requests.container_uuid = for_container_uuid and
container_requests.state = 'Committed' and
container_requests.priority > 0 and
- container_requests.owner_uuid not in (select group_uuid from trashed_groups);
+ container_requests.owner_uuid not in (select group_uuid from trashed_groups WHERE trash_at <= statement_timestamp());
$$;
@@ -340,7 +340,7 @@ $$;
SET default_tablespace = '';
-SET default_with_oids = false;
+SET default_table_access_method = heap;
--
-- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -
@@ -349,7 +349,7 @@ SET default_with_oids = false;
CREATE TABLE public.api_client_authorizations (
id bigint NOT NULL,
api_token character varying(255) NOT NULL,
- api_client_id bigint NOT NULL,
+ api_client_id bigint DEFAULT 0 NOT NULL,
user_id bigint NOT NULL,
created_by_ip_address character varying(255),
last_used_by_ip_address character varying(255),
@@ -359,7 +359,8 @@ CREATE TABLE public.api_client_authorizations (
updated_at timestamp without time zone NOT NULL,
default_owner_uuid character varying(255),
scopes text DEFAULT '["all"]'::text,
- uuid character varying(255) NOT NULL
+ uuid character varying(255) NOT NULL,
+ refreshes_at timestamp without time zone
);
@@ -527,6 +528,17 @@ CREATE SEQUENCE public.collections_id_seq
ALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;
+--
+-- Name: container_ports; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.container_ports (
+ external_port integer NOT NULL,
+ container_port integer NOT NULL,
+ container_uuid character varying NOT NULL
+);
+
+
--
-- Name: container_requests; Type: TABLE; Schema: public; Owner: -
--
@@ -568,7 +580,10 @@ CREATE TABLE public.container_requests (
runtime_token text,
output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
output_properties jsonb DEFAULT '{}'::jsonb,
- cumulative_cost double precision DEFAULT 0.0 NOT NULL
+ cumulative_cost double precision DEFAULT 0.0 NOT NULL,
+ output_glob text DEFAULT '[]'::text,
+ service boolean DEFAULT false NOT NULL,
+ published_ports jsonb DEFAULT '{}'::jsonb
);
@@ -634,7 +649,10 @@ CREATE TABLE public.containers (
output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
output_properties jsonb DEFAULT '{}'::jsonb,
cost double precision DEFAULT 0.0 NOT NULL,
- subrequests_cost double precision DEFAULT 0.0 NOT NULL
+ subrequests_cost double precision DEFAULT 0.0 NOT NULL,
+ output_glob text DEFAULT '[]'::text,
+ service boolean DEFAULT false NOT NULL,
+ published_ports jsonb DEFAULT '{}'::jsonb
);
@@ -657,6 +675,26 @@ CREATE SEQUENCE public.containers_id_seq
ALTER SEQUENCE public.containers_id_seq OWNED BY public.containers.id;
+--
+-- Name: credentials; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.credentials (
+ uuid character varying NOT NULL,
+ owner_uuid character varying NOT NULL,
+ created_at timestamp(6) without time zone NOT NULL,
+ modified_at timestamp(6) without time zone NOT NULL,
+ modified_by_user_uuid character varying,
+ name character varying,
+ description text,
+ credential_class character varying,
+ scopes jsonb DEFAULT '[]'::jsonb,
+ external_id character varying,
+ secret text,
+ expires_at timestamp(6) without time zone NOT NULL
+);
+
+
--
-- Name: frozen_groups; Type: TABLE; Schema: public; Owner: -
--
@@ -1386,6 +1424,16 @@ CREATE SEQUENCE public.users_id_seq
ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
+--
+-- Name: uuid_locks; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.uuid_locks (
+ uuid character varying NOT NULL,
+ n integer DEFAULT 0 NOT NULL
+);
+
+
--
-- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -
--
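The new uuid_locks table above has no model class; one plausible use (an assumption, not shown in this diff) is as a per-UUID serialization point: an upsert takes a row lock that concurrent transactions on the same UUID must wait for.

    # Hypothetical usage sketch; the calling convention is assumed.
    ActiveRecord::Base.connection.exec_query(
      "INSERT INTO uuid_locks (uuid) VALUES ($1) " \
      "ON CONFLICT (uuid) DO UPDATE SET n = uuid_locks.n + 1",
      "lock_uuid", [target_uuid])
    # The row lock is held until the surrounding transaction commits.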
@@ -1437,7 +1485,8 @@ CREATE TABLE public.workflows (
name character varying(255),
description text,
definition text,
- updated_at timestamp without time zone NOT NULL
+ updated_at timestamp without time zone NOT NULL,
+ collection_uuid character varying
);
@@ -1677,6 +1726,14 @@ ALTER TABLE ONLY public.containers
ADD CONSTRAINT containers_pkey PRIMARY KEY (id);
+--
+-- Name: credentials credentials_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.credentials
+ ADD CONSTRAINT credentials_pkey PRIMARY KEY (uuid);
+
+
--
-- Name: groups groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -1852,7 +1909,7 @@ CREATE INDEX collections_search_index ON public.collections USING btree (owner_u
-- Name: collections_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX collections_trgm_text_search_idx ON public.collections USING gin (((((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, ''::text))) public.gin_trgm_ops);
+CREATE INDEX collections_trgm_text_search_idx ON public.collections USING gin (((((((((COALESCE(name, ''::character varying))::text || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, ''::text))) public.gin_trgm_ops);
--
@@ -1873,7 +1930,7 @@ CREATE INDEX container_requests_search_index ON public.container_requests USING
-- Name: container_requests_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX container_requests_trgm_text_search_idx ON public.container_requests USING gin (((((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text) || ' '::text) || COALESCE((output_properties)::text, ''::text))) public.gin_trgm_ops);
+CREATE INDEX container_requests_trgm_text_search_idx ON public.container_requests USING gin (((((((((((((((((((((((((((COALESCE(name, ''::character varying))::text || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_name, ''::character varying))::text) || ' '::text) || COALESCE((output_properties)::text, ''::text))) public.gin_trgm_ops);
--
@@ -1901,7 +1958,7 @@ CREATE INDEX groups_search_index ON public.groups USING btree (uuid, owner_uuid,
-- Name: groups_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX groups_trgm_text_search_idx ON public.groups USING gin (((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))) public.gin_trgm_ops);
+CREATE INDEX groups_trgm_text_search_idx ON public.groups USING gin (((((((((COALESCE(name, ''::character varying))::text || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))) public.gin_trgm_ops);
--
@@ -1911,6 +1968,13 @@ CREATE INDEX groups_trgm_text_search_idx ON public.groups USING gin ((((((((((((
CREATE INDEX humans_search_index ON public.humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
+--
+-- Name: idx_on_uuid_owner_uuid_modified_by_user_uuid_name_c_8f8cf5e570; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX idx_on_uuid_owner_uuid_modified_by_user_uuid_name_c_8f8cf5e570 ON public.credentials USING btree (uuid, owner_uuid, modified_by_user_uuid, name, credential_class, external_id);
+
+
--
-- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -
--
@@ -1932,6 +1996,13 @@ CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON public.api_c
CREATE INDEX index_api_client_authorizations_on_expires_at ON public.api_client_authorizations USING btree (expires_at);
+--
+-- Name: index_api_client_authorizations_on_refreshes_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_client_authorizations_on_refreshes_at ON public.api_client_authorizations USING btree (refreshes_at);
+
+
--
-- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -
--
@@ -2079,6 +2150,20 @@ CREATE INDEX index_collections_on_trash_at ON public.collections USING btree (tr
CREATE UNIQUE INDEX index_collections_on_uuid ON public.collections USING btree (uuid);
+--
+-- Name: index_container_ports_on_container_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_ports_on_container_uuid ON public.container_ports USING btree (container_uuid);
+
+
+--
+-- Name: index_container_ports_on_external_port; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_container_ports_on_external_port ON public.container_ports USING btree (external_port);
+
+
--
-- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2100,6 +2185,13 @@ CREATE INDEX index_container_requests_on_created_at_and_uuid ON public.container
CREATE INDEX index_container_requests_on_modified_at_and_uuid ON public.container_requests USING btree (modified_at, uuid);
+--
+-- Name: index_container_requests_on_name_and_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_name_and_owner_uuid ON public.container_requests USING btree (name, owner_uuid);
+
+
--
-- Name: index_container_requests_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2167,7 +2259,7 @@ CREATE INDEX index_containers_on_queued_state ON public.containers USING btree (
-- Name: index_containers_on_reuse_columns; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));
+CREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, md5(output_glob), container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));
--
@@ -2191,6 +2283,27 @@ CREATE INDEX index_containers_on_secret_mounts_md5 ON public.containers USING bt
CREATE UNIQUE INDEX index_containers_on_uuid ON public.containers USING btree (uuid);
+--
+-- Name: index_credentials_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_credentials_on_owner_uuid ON public.credentials USING btree (owner_uuid);
+
+
+--
+-- Name: index_credentials_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_credentials_on_owner_uuid_and_name ON public.credentials USING btree (owner_uuid, name);
+
+
+--
+-- Name: index_credentials_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_credentials_on_uuid ON public.credentials USING btree (uuid);
+
+
--
-- Name: index_frozen_groups_on_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2492,6 +2605,13 @@ CREATE INDEX index_links_on_head_uuid ON public.links USING btree (head_uuid);
CREATE INDEX index_links_on_modified_at_and_uuid ON public.links USING btree (modified_at, uuid);
+--
+-- Name: index_links_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_links_on_name ON public.links USING btree (name) WHERE ((link_class)::text = 'published_port'::text);
+
+
--
-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2842,6 +2962,13 @@ CREATE UNIQUE INDEX index_users_on_username ON public.users USING btree (usernam
CREATE UNIQUE INDEX index_users_on_uuid ON public.users USING btree (uuid);
+--
+-- Name: index_uuid_locks_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_uuid_locks_on_uuid ON public.uuid_locks USING btree (uuid);
+
+
--
-- Name: index_virtual_machines_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -3081,17 +3208,17 @@ CREATE INDEX virtual_machines_search_index ON public.virtual_machines USING btre
--
--- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -
+-- Name: workflows_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX workflows_search_idx ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX workflows_search_index ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, collection_uuid);
--
-- Name: workflows_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX workflows_trgm_text_search_idx ON public.workflows USING gin (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text))) public.gin_trgm_ops);
+CREATE INDEX workflows_trgm_text_search_idx ON public.workflows USING gin (((((COALESCE(name, ''::character varying))::text || ' '::text) || COALESCE(description, ''::text))) public.gin_trgm_ops);
--
@@ -3101,219 +3228,233 @@ CREATE INDEX workflows_trgm_text_search_idx ON public.workflows USING gin ((((((
SET search_path TO "$user", public;
INSERT INTO "schema_migrations" (version) VALUES
-('20121016005009'),
-('20130105203021'),
-('20130105224358'),
-('20130105224618'),
-('20130107181109'),
-('20130107212832'),
-('20130109175700'),
-('20130109220548'),
-('20130113214204'),
-('20130116024233'),
-('20130116215213'),
-('20130118002239'),
-('20130122020042'),
-('20130122201442'),
-('20130122221616'),
-('20130123174514'),
-('20130123180224'),
-('20130123180228'),
-('20130125220425'),
-('20130128202518'),
-('20130128231343'),
-('20130130205749'),
-('20130203104818'),
-('20130203104824'),
-('20130203115329'),
-('20130207195855'),
-('20130218181504'),
-('20130226170000'),
-('20130313175417'),
-('20130315155820'),
-('20130315183626'),
-('20130315213205'),
-('20130318002138'),
-('20130319165853'),
-('20130319180730'),
-('20130319194637'),
-('20130319201431'),
-('20130319235957'),
-('20130320000107'),
-('20130326173804'),
-('20130326182917'),
-('20130415020241'),
-('20130425024459'),
-('20130425214427'),
-('20130523060112'),
-('20130523060213'),
-('20130524042319'),
-('20130528134100'),
-('20130606183519'),
-('20130608053730'),
-('20130610202538'),
-('20130611163736'),
-('20130612042554'),
-('20130617150007'),
-('20130626002829'),
-('20130626022810'),
-('20130627154537'),
-('20130627184333'),
-('20130708163414'),
-('20130708182912'),
-('20130708185153'),
-('20130724153034'),
-('20131007180607'),
-('20140117231056'),
-('20140124222114'),
-('20140129184311'),
-('20140317135600'),
-('20140319160547'),
-('20140321191343'),
-('20140324024606'),
-('20140325175653'),
-('20140402001908'),
-('20140407184311'),
-('20140421140924'),
-('20140421151939'),
-('20140421151940'),
-('20140422011506'),
-('20140423132913'),
-('20140423133559'),
-('20140501165548'),
-('20140519205916'),
-('20140527152921'),
-('20140530200539'),
-('20140601022548'),
-('20140602143352'),
-('20140607150616'),
-('20140611173003'),
-('20140627210837'),
-('20140709172343'),
-('20140714184006'),
-('20140811184643'),
-('20140817035914'),
-('20140818125735'),
-('20140826180337'),
-('20140828141043'),
-('20140909183946'),
-('20140911221252'),
-('20140918141529'),
-('20140918153541'),
-('20140918153705'),
-('20140924091559'),
-('20141111133038'),
-('20141208164553'),
-('20141208174553'),
-('20141208174653'),
-('20141208185217'),
-('20150122175935'),
-('20150123142953'),
-('20150203180223'),
-('20150206210804'),
-('20150206230342'),
-('20150216193428'),
-('20150303210106'),
-('20150312151136'),
-('20150317132720'),
-('20150324152204'),
-('20150423145759'),
-('20150512193020'),
-('20150526180251'),
-('20151202151426'),
-('20151215134304'),
-('20151229214707'),
-('20160208210629'),
-('20160209155729'),
-('20160324144017'),
-('20160506175108'),
-('20160509143250'),
-('20160808151559'),
-('20160819195557'),
-('20160819195725'),
-('20160901210110'),
-('20160909181442'),
-('20160926194129'),
-('20161019171346'),
-('20161111143147'),
-('20161115171221'),
-('20161115174218'),
-('20161213172944'),
-('20161222153434'),
-('20161223090712'),
-('20170102153111'),
-('20170105160301'),
-('20170105160302'),
-('20170216170823'),
-('20170301225558'),
-('20170319063406'),
-('20170328215436'),
-('20170330012505'),
-('20170419173031'),
-('20170419173712'),
-('20170419175801'),
-('20170628185847'),
-('20170704160233'),
-('20170706141334'),
-('20170824202826'),
-('20170906224040'),
-('20171027183824'),
-('20171208203841'),
-('20171212153352'),
-('20180216203422'),
-('20180228220311'),
-('20180313180114'),
-('20180501182859'),
-('20180514135529'),
-('20180607175050'),
-('20180608123145'),
-('20180806133039'),
-('20180820130357'),
-('20180820132617'),
-('20180820135808'),
-('20180824152014'),
-('20180824155207'),
-('20180904110712'),
-('20180913175443'),
-('20180915155335'),
-('20180917200000'),
-('20180917205609'),
-('20180919001158'),
-('20181001175023'),
-('20181004131141'),
-('20181005192222'),
-('20181011184200'),
-('20181213183234'),
-('20190214214814'),
-('20190322174136'),
-('20190422144631'),
-('20190523180148'),
-('20190808145904'),
-('20190809135453'),
-('20190905151603'),
-('20200501150153'),
-('20200602141328'),
-('20200914203202'),
-('20201103170213'),
-('20201105190435'),
-('20201202174753'),
-('20210108033940'),
-('20210126183521'),
-('20210621204455'),
-('20210816191509'),
-('20211027154300'),
-('20220224203102'),
-('20220301155729'),
-('20220303204419'),
-('20220401153101'),
-('20220505112900'),
-('20220726034131'),
-('20220804133317'),
-('20221219165512'),
-('20221230155924'),
-('20230421142716'),
-('20230503224107'),
-('20230815160000'),
-('20230821000000'),
+('20250527181323'),
+('20250426201300'),
+('20250422103000'),
+('20250402131700'),
+('20250315222222'),
+('20250312141843'),
+('20250115145250'),
+('20241118110000'),
+('20240820202230'),
+('20240627201747'),
+('20240618121312'),
+('20240604183200'),
+('20240402162733'),
+('20240329173437'),
+('20231013000000'),
('20230922000000'),
-('20231013000000');
+('20230821000000'),
+('20230815160000'),
+('20230503224107'),
+('20230421142716'),
+('20221230155924'),
+('20221219165512'),
+('20220804133317'),
+('20220726034131'),
+('20220505112900'),
+('20220401153101'),
+('20220303204419'),
+('20220301155729'),
+('20220224203102'),
+('20211027154300'),
+('20210816191509'),
+('20210621204455'),
+('20210126183521'),
+('20210108033940'),
+('20201202174753'),
+('20201105190435'),
+('20201103170213'),
+('20200914203202'),
+('20200602141328'),
+('20200501150153'),
+('20190905151603'),
+('20190809135453'),
+('20190808145904'),
+('20190523180148'),
+('20190422144631'),
+('20190322174136'),
+('20190214214814'),
+('20181213183234'),
+('20181011184200'),
+('20181005192222'),
+('20181004131141'),
+('20181001175023'),
+('20180919001158'),
+('20180917205609'),
+('20180917200000'),
+('20180915155335'),
+('20180913175443'),
+('20180904110712'),
+('20180824155207'),
+('20180824152014'),
+('20180820135808'),
+('20180820132617'),
+('20180820130357'),
+('20180806133039'),
+('20180608123145'),
+('20180607175050'),
+('20180514135529'),
+('20180501182859'),
+('20180313180114'),
+('20180228220311'),
+('20180216203422'),
+('20171212153352'),
+('20171208203841'),
+('20171027183824'),
+('20170906224040'),
+('20170824202826'),
+('20170706141334'),
+('20170704160233'),
+('20170628185847'),
+('20170419175801'),
+('20170419173712'),
+('20170419173031'),
+('20170330012505'),
+('20170328215436'),
+('20170319063406'),
+('20170301225558'),
+('20170216170823'),
+('20170105160302'),
+('20170105160301'),
+('20170102153111'),
+('20161223090712'),
+('20161222153434'),
+('20161213172944'),
+('20161115174218'),
+('20161115171221'),
+('20161111143147'),
+('20161019171346'),
+('20160926194129'),
+('20160909181442'),
+('20160901210110'),
+('20160819195725'),
+('20160819195557'),
+('20160808151559'),
+('20160509143250'),
+('20160506175108'),
+('20160324144017'),
+('20160209155729'),
+('20160208210629'),
+('20151229214707'),
+('20151215134304'),
+('20151202151426'),
+('20150526180251'),
+('20150512193020'),
+('20150423145759'),
+('20150324152204'),
+('20150317132720'),
+('20150312151136'),
+('20150303210106'),
+('20150216193428'),
+('20150206230342'),
+('20150206210804'),
+('20150203180223'),
+('20150123142953'),
+('20150122175935'),
+('20141208185217'),
+('20141208174653'),
+('20141208174553'),
+('20141208164553'),
+('20141111133038'),
+('20140924091559'),
+('20140918153705'),
+('20140918153541'),
+('20140918141529'),
+('20140911221252'),
+('20140909183946'),
+('20140828141043'),
+('20140826180337'),
+('20140818125735'),
+('20140817035914'),
+('20140811184643'),
+('20140714184006'),
+('20140709172343'),
+('20140627210837'),
+('20140611173003'),
+('20140607150616'),
+('20140602143352'),
+('20140601022548'),
+('20140530200539'),
+('20140527152921'),
+('20140519205916'),
+('20140501165548'),
+('20140423133559'),
+('20140423132913'),
+('20140422011506'),
+('20140421151940'),
+('20140421151939'),
+('20140421140924'),
+('20140407184311'),
+('20140402001908'),
+('20140325175653'),
+('20140324024606'),
+('20140321191343'),
+('20140319160547'),
+('20140317135600'),
+('20140129184311'),
+('20140124222114'),
+('20140117231056'),
+('20131007180607'),
+('20130724153034'),
+('20130708185153'),
+('20130708182912'),
+('20130708163414'),
+('20130627184333'),
+('20130627154537'),
+('20130626022810'),
+('20130626002829'),
+('20130617150007'),
+('20130612042554'),
+('20130611163736'),
+('20130610202538'),
+('20130608053730'),
+('20130606183519'),
+('20130528134100'),
+('20130524042319'),
+('20130523060213'),
+('20130523060112'),
+('20130425214427'),
+('20130425024459'),
+('20130415020241'),
+('20130326182917'),
+('20130326173804'),
+('20130320000107'),
+('20130319235957'),
+('20130319201431'),
+('20130319194637'),
+('20130319180730'),
+('20130319165853'),
+('20130318002138'),
+('20130315213205'),
+('20130315183626'),
+('20130315155820'),
+('20130313175417'),
+('20130226170000'),
+('20130218181504'),
+('20130207195855'),
+('20130203115329'),
+('20130203104824'),
+('20130203104818'),
+('20130130205749'),
+('20130128231343'),
+('20130128202518'),
+('20130125220425'),
+('20130123180228'),
+('20130123180224'),
+('20130123174514'),
+('20130122221616'),
+('20130122201442'),
+('20130122020042'),
+('20130118002239'),
+('20130116215213'),
+('20130116024233'),
+('20130113214204'),
+('20130109220548'),
+('20130109175700'),
+('20130107212832'),
+('20130107181109'),
+('20130105224618'),
+('20130105224358'),
+('20130105203021'),
+('20121016005009');
diff --git a/services/api/fpm-info.sh b/services/api/fpm-info.sh
index cccbc1b56b..335232a3c0 100644
--- a/services/api/fpm-info.sh
+++ b/services/api/fpm-info.sh
@@ -2,13 +2,93 @@
#
# SPDX-License-Identifier: AGPL-3.0
-fpm_depends+=('git >= 1.7.10')
+fpm_depends+=(
+ # Dependencies to build gems
+ bison
+ make
+ "ruby >= 2.7.0"
+ # Postinst script dependencies
+ diffutils
+ # Passenger dependencies
+ curl
+ procps
+ tar
+ # Dependencies of our API server code
+ "git >= 1.7.10"
+ shared-mime-info
+)
case "$TARGET" in
- centos*|rocky*)
- fpm_depends+=(libcurl-devel postgresql-devel bison make automake gcc gcc-c++ postgresql shared-mime-info)
+ rocky9)
+ fpm_depends+=(
+ # Dependencies to build gems
+ automake
+ gcc-c++
+ libcurl-devel
+ libyaml-devel
+ postgresql
+ postgresql-devel
+ redhat-rpm-config
+ "ruby-devel >= 2.7.0"
+ zlib-devel
+ # Passenger runtime dependencies
+ libnsl
+ openssl-devel
+ rubygem-rake
+ # nginx compilation dependencies
+ pcre2-devel
+ )
+ ;;
+ rocky*)
+ fpm_depends+=(
+ # Dependencies to build gems
+ automake
+ gcc-c++
+ libcurl-devel
+ libyaml-devel
+ postgresql
+ postgresql-devel
+ redhat-rpm-config
+ "ruby-devel >= 2.7.0"
+ zlib-devel
+ # Passenger runtime dependencies
+ libnsl
+ openssl-devel
+ rubygem-rake
+ )
+ ;;
+ ubuntu2004)
+ fpm_depends+=(
+ # Dependencies to build gems
+ g++
+ libcurl-ssl-dev
+ libpq-dev
+ libyaml-dev
+ postgresql-client
+ "ruby-dev >= 2.7.0"
+ zlib1g-dev
+ # Passenger runtime dependencies
+ # libnsl2 is excluded because it was included as part of glibc
+ libnss-systemd
+ libssl-dev
+ rake
+ )
;;
debian* | ubuntu*)
- fpm_depends+=(libcurl-ssl-dev libpq-dev g++ bison zlib1g-dev make postgresql-client shared-mime-info)
+ fpm_depends+=(
+ # Dependencies to build gems
+ g++
+ libcurl-ssl-dev
+ libpq-dev
+ libyaml-dev
+ postgresql-client
+ "ruby-dev >= 2.7.0"
+ zlib1g-dev
+ # Passenger runtime dependencies
+ libnsl2
+ libnss-systemd
+ libssl-dev
+ rake
+ )
;;
esac
diff --git a/services/api/lib/can_be_an_owner.rb b/services/api/lib/can_be_an_owner.rb
index e09037819c..b6a7c768e3 100644
--- a/services/api/lib/can_be_an_owner.rb
+++ b/services/api/lib/can_be_an_owner.rb
@@ -14,11 +14,28 @@ module CanBeAnOwner
# record when other objects refer to it.
ActiveRecord::Base.connection.tables.each do |t|
next if t == base.table_name
- next if t == 'schema_migrations'
- next if t == 'permission_refresh_lock'
- next if t == 'ar_internal_metadata'
- next if t == 'commit_ancestors'
- next if t == 'commits'
+ next if t.in?([
+ # in-use tables that should be skipped
+ 'ar_internal_metadata',
+ 'permission_refresh_lock',
+ 'schema_migrations',
+ 'uuid_locks',
+ # obsolete tables from removed APIs
+ 'api_clients',
+ 'commit_ancestors',
+ 'commits',
+ 'humans',
+ 'jobs',
+ 'job_tasks',
+ 'keep_disks',
+ 'materialized_permissions',
+ 'nodes',
+ 'pipeline_instances',
+ 'pipeline_templates',
+ 'repositories',
+ 'specimens',
+ 'traits',
+ ])
klass = t.classify.constantize
next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
base.has_many(t.to_sym,
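The skip list matters because every remaining table name is turned into a model constant; an illustration of that derivation:

    "container_requests".classify              # => "ContainerRequest"
    "container_requests".classify.constantize  # => ContainerRequest (the model class)
    # Obsolete tables must be skipped because their model classes have been
    # removed, so constantize would raise NameError.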
diff --git a/services/api/lib/common_api_template.rb b/services/api/lib/common_api_template.rb
index 8aac264516..c50011771f 100644
--- a/services/api/lib/common_api_template.rb
+++ b/services/api/lib/common_api_template.rb
@@ -11,13 +11,11 @@ module CommonApiTemplate
end
base.extend(ClassMethods)
base.api_accessible :common do |t|
- t.add :href
t.add :kind
t.add :etag
t.add :uuid
t.add :owner_uuid
t.add :created_at
- t.add :modified_by_client_uuid
t.add :modified_by_user_uuid
t.add :modified_at
end
diff --git a/services/api/lib/create_superuser_token.rb b/services/api/lib/create_superuser_token.rb
deleted file mode 100755
index 7a18d97058..0000000000
--- a/services/api/lib/create_superuser_token.rb
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Install the supplied string (or a randomly generated token, if none
-# is given) as an API token that authenticates to the system user account.
-
-module CreateSuperUserToken
- require File.dirname(__FILE__) + '/../config/boot'
- require File.dirname(__FILE__) + '/../config/environment'
-
- include ApplicationHelper
-
- def create_superuser_token supplied_token=nil
- act_as_system_user do
- # If token is supplied, verify that it indeed is a superuser token
- if supplied_token
- api_client_auth = ApiClientAuthorization.
- where(api_token: supplied_token).
- first
- if !api_client_auth
- # fall through to create a token
- elsif !api_client_auth.user.uuid.match(/-000000000000000$/)
- raise "Token exists but is not a superuser token."
- elsif api_client_auth.scopes != ['all']
- raise "Token exists but has limited scope #{api_client_auth.scopes.inspect}."
- end
- end
-
- # need to create a token
- if !api_client_auth
- # Get (or create) trusted api client
- apiClient = ApiClient.
- find_or_create_by(url_prefix: "ssh://root@localhost/",
- is_trusted: true)
-
- # Check if there is an unexpired superuser token corresponding to this api client
- api_client_auth =
- ApiClientAuthorization.
- where(user_id: system_user.id).
- where(api_client_id: apiClient.id).
- where_serialized(:scopes, ['all']).
- where('(expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)').
- first
-
- # none exist; create one with the supplied token
- if !api_client_auth
- api_client_auth = ApiClientAuthorization.
- new(user: system_user,
- api_client_id: apiClient.id,
- created_by_ip_address: '::1',
- api_token: supplied_token)
- api_client_auth.save!
- end
- end
-
- "v2/" + api_client_auth.uuid + "/" + api_client_auth.api_token
- end
- end
-end
diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb
index 7c99c911f8..4e69754a8e 100644
--- a/services/api/lib/current_api_client.rb
+++ b/services/api/lib/current_api_client.rb
@@ -7,10 +7,6 @@ module CurrentApiClient
Thread.current[:user]
end
- def current_api_client
- Thread.current[:api_client]
- end
-
def current_api_client_authorization
Thread.current[:api_client_authorization]
end
@@ -19,15 +15,6 @@ module CurrentApiClient
Thread.current[:api_url_base]
end
- def current_default_owner
- # owner_uuid for newly created objects
- ((current_api_client_authorization &&
- current_api_client_authorization.default_owner_uuid) ||
- (current_user && current_user.default_owner_uuid) ||
- (current_user && current_user.uuid) ||
- nil)
- end
-
# Where is the client connecting from?
def current_api_client_ip_address
Thread.current[:api_client_ip_address]
@@ -213,26 +200,6 @@ module CurrentApiClient
end
end
- def anonymous_user_token_api_client
- $anonymous_user_token_api_client = check_cache($anonymous_user_token_api_client) do
- act_as_system_user do
- ActiveRecord::Base.transaction do
- ApiClient.find_or_create_by!(is_trusted: false, url_prefix: "", name: "AnonymousUserToken")
- end
- end
- end
- end
-
- def system_root_token_api_client
- $system_root_token_api_client = check_cache($system_root_token_api_client) do
- act_as_system_user do
- ActiveRecord::Base.transaction do
- ApiClient.find_or_create_by!(is_trusted: true, url_prefix: "", name: "SystemRootToken")
- end
- end
- end
- end
-
def empty_collection_pdh
'd41d8cd98f00b204e9800998ecf8427e+0'
end
@@ -287,8 +254,6 @@ module CurrentApiClient
$anonymous_user = nil
$public_project_group = nil
$public_project_group_read_permission = nil
- $anonymous_user_token_api_client = nil
- $system_root_token_api_client = nil
$empty_collection = nil
end
module_function :reset_system_globals
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
deleted file mode 100644
index 6718d384ee..0000000000
--- a/services/api/lib/enable_jobs_api.rb
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-Disable_update_jobs_api_method_list = ConfigLoader.to_OrderedOptions({
- "jobs.create"=>{},
- "pipeline_instances.create"=>{},
- "pipeline_templates.create"=>{},
- "jobs.update"=>{},
- "pipeline_instances.update"=>{},
- "pipeline_templates.update"=>{},
- "job_tasks.create"=>{},
- "job_tasks.update"=>{}
- })
-
-Disable_jobs_api_method_list = ConfigLoader.to_OrderedOptions({
- "jobs.create"=>{},
- "pipeline_instances.create"=>{},
- "pipeline_templates.create"=>{},
- "jobs.get"=>{},
- "pipeline_instances.get"=>{},
- "pipeline_templates.get"=>{},
- "jobs.list"=>{},
- "pipeline_instances.list"=>{},
- "pipeline_templates.list"=>{},
- "jobs.index"=>{},
- "pipeline_instances.index"=>{},
- "pipeline_templates.index"=>{},
- "jobs.update"=>{},
- "pipeline_instances.update"=>{},
- "pipeline_templates.update"=>{},
- "jobs.queue"=>{},
- "jobs.queue_size"=>{},
- "job_tasks.create"=>{},
- "job_tasks.get"=>{},
- "job_tasks.list"=>{},
- "job_tasks.index"=>{},
- "job_tasks.update"=>{},
- "jobs.show"=>{},
- "pipeline_instances.show"=>{},
- "pipeline_templates.show"=>{},
- "job_tasks.show"=>{}})
-
-def check_enable_legacy_jobs_api
- # Create/update is permanently disabled (legacy functionality has been removed)
- Rails.configuration.API.DisabledAPIs.merge! Disable_update_jobs_api_method_list
-
- if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
- (Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
- ActiveRecord::Base.connection.select_value("SELECT COUNT(*) FROM jobs LIMIT 1") == 0)
- Rails.configuration.API.DisabledAPIs.merge! Disable_jobs_api_method_list
- end
-end
diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb
index 9a360c538b..df1b6a07a3 100644
--- a/services/api/lib/load_param.rb
+++ b/services/api/lib/load_param.rb
@@ -49,7 +49,12 @@ module LoadParam
end
# Load params[:limit], params[:offset] and params[:order]
- # into @limit, @offset, @orders
+ # into @limit, @offset, @orders.
+ #
+ # If fill_table_names is true, @orders will be populated with fully
+ # qualified columns (table_name.column_name). Otherwise, column
+ # names might be ambiguous and the caller should call
+ # optimize_orders(@orders) to fix them.
def load_limit_offset_order_params(fill_table_names: true)
if params[:limit]
unless params[:limit].to_s.match(/^\d+$/)
@@ -93,14 +98,14 @@ module LoadParam
# The attr can have its table unspecified if it happens to be for the current "model_class" (the first case)
# or it can be fully specified with the database tablename (the second case) (e.g. "collections.name").
# NB that the security check for the second case table_name will not work if the model
- # has used set_table_name to use an alternate table name from the Rails standard.
+  # has used table_name= to set an alternate table name that differs from the Rails default.
# I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
# would be a place to start if this ever becomes necessary.
if (attr.match(/^[a-z][_a-z0-9]+$/) &&
model_class.columns.collect(&:name).index(attr) &&
['asc','desc'].index(direction.downcase))
if fill_table_names
- @orders << "#{table_name}.#{attr} #{direction.downcase}"
+ @orders << "#{model_class.table_name}.#{attr} #{direction.downcase}"
else
@orders << "#{attr} #{direction.downcase}"
end
@@ -115,37 +120,48 @@ module LoadParam
end
end
- # If the client-specified orders don't amount to a full ordering
- # (e.g., [] or ['owner_uuid desc']), fall back on the default
- # orders to ensure repeating the same request (possibly with
- # different limit/offset) will return records in the same order.
- #
- # Clean up the resulting list of orders such that no column
- # uselessly appears twice (Postgres might not optimize this out
- # for us) and no columns uselessly appear after a unique column
- # (Postgres does not optimize this out for us; as of 9.2, "order
- # by id, modified_at desc, uuid" is slow but "order by id" is
- # fast).
- orders_given_and_default = @orders + model_class.default_orders
+ if fill_table_names
+ @orders = optimize_orders(@orders, model_class: model_class)
+ end
+
+ @distinct = params[:distinct] && true
+ end
+
+ # If the client-specified orders don't amount to a full ordering
+ # (e.g., [] or ['owner_uuid desc']), fall back on the default
+ # orders to ensure repeating the same request (possibly with
+ # different limit/offset) will return records in the same order.
+ #
+ # Clean up the resulting list of orders such that no column
+ # uselessly appears twice (Postgres might not optimize this out
+ # for us) and no columns uselessly appear after a unique column
+ # (Postgres does not optimize this out for us; as of 9.2, "order
+ # by id, modified_at desc, uuid" is slow but "order by id" is
+ # fast).
+ def optimize_orders(orders_given, model_class:)
+ orders_given_and_default = orders_given + model_class.default_orders
order_cols_used = {}
- @orders = []
+ optimized = []
orders_given_and_default.each do |order|
otablecol = order.split(' ')[0]
next if order_cols_used[otablecol]
order_cols_used[otablecol] = true
- @orders << order
+ optimized << order
- otable, ocol = otablecol.split('.')
- if otable == table_name and model_class.unique_columns.include?(ocol)
+ if otablecol.index('.')
+ otable, ocol = otablecol.split('.')
+ else
+ otable, ocol = model_class.table_name, otablecol
+ end
+ if otable == model_class.table_name && model_class.unique_columns.include?(ocol)
# we already have a full ordering; subsequent entries would be
# superfluous
break
end
end
-
- @distinct = params[:distinct] && true
+ return optimized
end
def load_select_param
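A sketch of what the extracted optimize_orders returns, assuming the model's default orders are "modified_at desc" followed by its unique "uuid" column (the exact defaults are an assumption):

    orders = ["collections.owner_uuid desc"]
    optimize_orders(orders, model_class: Collection)
    # => ["collections.owner_uuid desc",
    #     "collections.modified_at desc",
    #     "collections.uuid"]
    # Anything after the unique uuid column would be dropped as superfluous.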
@@ -161,6 +177,14 @@ module LoadParam
end
end
+ if @select
+ # The modified_by_client_uuid field is no longer offered. For
+ # the sake of compatibility with workbench2, ignore it when a
+ # client asks for it explicitly (rather than returning an
+ # "invalid field" error).
+ @select -= ['modified_by_client_uuid']
+ end
+
if @select && @orders
# Any ordering columns must be selected when doing select,
# otherwise it is an SQL error, so filter out invaliding orderings.
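The compatibility shim above silently drops the retired field from a client's select list instead of rejecting the request; for example:

    @select = ["uuid", "modified_by_client_uuid", "name"]
    @select -= ["modified_by_client_uuid"]
    @select  # => ["uuid", "name"]; the request succeeds with the remaining fields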
diff --git a/services/api/lib/record_filters.rb b/services/api/lib/record_filters.rb
index e51223254f..671fe2acb9 100644
--- a/services/api/lib/record_filters.rb
+++ b/services/api/lib/record_filters.rb
@@ -35,7 +35,7 @@ module RecordFilters
raise ArgumentError.new("Full text search operator is no longer supported")
end
if attrs_in == 'any'
- attrs = model_class.searchable_columns(operator)
+ attrs = model_class.any_searchable_columns(operator)
elsif attrs_in.is_a? Array
attrs = attrs_in
else
@@ -67,6 +67,12 @@ module RecordFilters
attr_model_class = Container
attr_table_name = "containers"
subproperty = subproperty[1].split(".", 2)
+ elsif subproperty.length == 2 && subproperty[0] == 'collection' && model_table_name == "workflows"
+ # attr is "tablename.colname" -- e.g., ["collection.properties.category", "=", "WGS"]
+ joins = ["JOIN collections ON workflows.collection_uuid = collections.uuid"]
+ attr_model_class = Collection
+ attr_table_name = "collections"
+ subproperty = subproperty[1].split(".", 2)
else
attr_model_class = model_class
attr_table_name = model_table_name
@@ -293,4 +299,19 @@ module RecordFilters
{:cond_out => conds_out, :param_out => param_out, :joins => joins}
end
+ def apply_filters query, filters
+ ft = record_filters filters, self
+ if not ft[:cond_out].any?
+ return query
+ end
+ ft[:joins].each do |t|
+ query = query.joins(t)
+ end
+ query.where('(' + ft[:cond_out].join(') AND (') + ')',
+ *ft[:param_out])
+ end
+
+ def attribute_column attr
+ self.columns.select { |col| col.name == attr.to_s }.first
+ end
end
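With the new join, a list request on workflows can filter on attributes and properties of the linked collection. A filter a client might send (values are illustrative):

    filters = [["collection.properties.category", "=", "WGS"]]
    # Handled above by adding
    #   JOIN collections ON workflows.collection_uuid = collections.uuid
    # and evaluating the property condition against collections.properties.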
diff --git a/services/api/lib/tasks/statement_timeout.rake b/services/api/lib/tasks/statement_timeout.rake
new file mode 100644
index 0000000000..52ed0ae014
--- /dev/null
+++ b/services/api/lib/tasks/statement_timeout.rake
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Rake::Task.tasks.each do |task|
+ if task.name =~ /^(db:migrate(:.*)?|db:rollback)$/
+ task.enhance(["db:disable_timeout"])
+ end
+end
+
+namespace :db do
+ desc 'disable postgresql statement_timeout and lock_timeout before running migrations'
+ task disable_timeout: :environment do
+ ActiveRecord::ConnectionAdapters::AbstractAdapter.set_callback :checkout, :before, ->(conn) do
+ # override the default timeouts set by
+ # config/initializers/db_timeout.rb
+ conn.execute "SET statement_timeout = 0"
+ conn.execute "SET lock_timeout = 0"
+ end
+ end
+end
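Rake's enhance() adds prerequisites to an already-defined task, which is how db:disable_timeout gets hooked in front of every migration task above. A minimal standalone demonstration of the mechanism:

    require 'rake'
    include Rake::DSL

    task(:prereq) { puts "runs first" }
    task(:work)   { puts "runs second" }
    Rake::Task[:work].enhance([:prereq])
    Rake::Task[:work].invoke   # prints "runs first", then "runs second"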
diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb
index 50611c305d..9dbd391bab 100644
--- a/services/api/lib/trashable.rb
+++ b/services/api/lib/trashable.rb
@@ -43,6 +43,14 @@ module Trashable
true
end
+ def default_delete_after_trash_interval
+ Rails.configuration.Collections.DefaultTrashLifetime
+ end
+
+ def minimum_delete_after_trash_interval
+ Rails.configuration.Collections.BlobSigningTTL
+ end
+
def default_trash_interval
if trash_at_changed? && !delete_at_changed?
# If trash_at is updated without touching delete_at,
@@ -50,7 +58,7 @@ module Trashable
if trash_at.nil?
self.delete_at = nil
else
- self.delete_at = trash_at + Rails.configuration.Collections.DefaultTrashLifetime.seconds
+ self.delete_at = trash_at + self.default_delete_after_trash_interval
end
elsif !trash_at || !delete_at || trash_at > delete_at
# Not trash, or bogus arguments? Just validate in
@@ -65,7 +73,7 @@ module Trashable
earliest_delete = [
@validation_timestamp,
trash_at_was,
- ].compact.min + Rails.configuration.Collections.BlobSigningTTL
+ ].compact.min + minimum_delete_after_trash_interval
# The previous value of delete_at is also an upper bound on the
# longest-lived permission token. For example, if TTL=14,
@@ -91,12 +99,22 @@ module Trashable
end
module TrashableController
+ def self.included(base)
+ def base._trash_method_description
+ match = name.match(/\b(\w+)Controller$/)
+ "Trash a #{match[1].singularize.underscore.humanize.downcase}."
+ end
+ def base._untrash_method_description
+ match = name.match(/\b(\w+)Controller$/)
+ "Untrash a #{match[1].singularize.underscore.humanize.downcase}."
+ end
+ end
+
def destroy
if !@object.is_trashed
@object.update!(trash_at: db_current_time)
end
- earliest_delete = (@object.trash_at +
- Rails.configuration.Collections.BlobSigningTTL)
+ earliest_delete = (@object.trash_at + @object.minimum_delete_after_trash_interval)
if @object.delete_at > earliest_delete
@object.update!(delete_at: earliest_delete)
end
@@ -111,18 +129,22 @@ module TrashableController
end
def untrash
- if @object.is_trashed
- @object.trash_at = nil
+ if !@object.is_trashed
+ raise ArvadosModel::InvalidStateTransitionError.new("Item is not trashed, cannot untrash")
+ end
- if params[:ensure_unique_name]
- @object.save_with_unique_name!
- else
- @object.save!
- end
+ if db_current_time >= @object.delete_at
+ raise ArvadosModel::InvalidStateTransitionError.new("delete_at time has already passed, cannot untrash")
+ end
+
+ @object.trash_at = nil
+
+ if params[:ensure_unique_name]
+ @object.save_with_unique_name!
else
- raise ArvadosModel::InvalidStateTransitionError.new("Item is not trashed, cannot untrash")
+ @object.save!
end
+
show
end
-
end
diff --git a/services/api/lib/update_priorities.rb b/services/api/lib/update_priorities.rb
index 94115340df..8fc21ab798 100644
--- a/services/api/lib/update_priorities.rb
+++ b/services/api/lib/update_priorities.rb
@@ -7,8 +7,9 @@ def row_lock_for_priority_update container_uuid
# immediate parent containers. This ensures we have locked
# everything that gets touched by either a priority update or state
# update.
+ # This method assumes we are already in a transaction.
ActiveRecord::Base.connection.exec_query %{
- select 1 from containers where containers.uuid in (
+ select containers.id from containers where containers.uuid in (
select pri_container_uuid from container_tree($1)
UNION
select container_requests.requesting_container_uuid from container_requests
@@ -16,16 +17,18 @@ UNION
and container_requests.state = 'Committed'
and container_requests.requesting_container_uuid is not NULL
)
- order by containers.uuid for update
+ order by containers.id for update of containers
}, 'select_for_update_priorities', [container_uuid]
end
def update_priorities starting_container_uuid
- # Ensure the row locks were taken in order
- row_lock_for_priority_update starting_container_uuid
+ Container.transaction do
+ # Ensure the row locks were taken in order
+ row_lock_for_priority_update starting_container_uuid
- ActiveRecord::Base.connection.exec_query %{
+ ActiveRecord::Base.connection.exec_query %{
update containers set priority=computed.upd_priority from container_tree_priorities($1) as computed
where containers.uuid = computed.pri_container_uuid and priority != computed.upd_priority
}, 'update_priorities', [starting_container_uuid]
+ end
end
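With the transaction now opened inside update_priorities, callers can invoke it directly; the ordered "for update of containers" row locks make concurrent updates over overlapping container trees queue up rather than deadlock.

    update_priorities(starting_container_uuid)
    # Internally: take ordered row locks on all affected containers, then
    # apply container_tree_priorities() within the same transaction.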
diff --git a/services/api/script/arvados-git-sync.rb b/services/api/script/arvados-git-sync.rb
deleted file mode 100755
index 9f8f050c10..0000000000
--- a/services/api/script/arvados-git-sync.rb
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/env ruby
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'rubygems'
-require 'pp'
-require 'arvados'
-require 'tempfile'
-require 'yaml'
-require 'fileutils'
-
-# This script does the actual gitolite config management on disk.
-#
-# Ward Vandewege
-
-# Default is development
-production = ARGV[0] == "production"
-
-ENV["RAILS_ENV"] = "development"
-ENV["RAILS_ENV"] = "production" if production
-
-DEBUG = 1
-
-# load and merge in the environment-specific application config info
-# if present, overriding base config parameters as specified
-path = File.absolute_path('../../config/arvados-clients.yml', __FILE__)
-if File.exist?(path) then
- cp_config = File.open(path) do |f|
- YAML.safe_load(f, filename: path)[ENV['RAILS_ENV']]
- end
-else
- puts "Please create a\n #{path}\n file"
- exit 1
-end
-
-gitolite_url = cp_config['gitolite_url']
-gitolite_arvados_git_user_key = cp_config['gitolite_arvados_git_user_key']
-
-gitolite_tmpdir = cp_config['gitolite_tmp']
-gitolite_admin = File.join(gitolite_tmpdir, 'gitolite-admin')
-gitolite_admin_keydir = File.join(gitolite_admin, 'keydir')
-gitolite_keydir = File.join(gitolite_admin, 'keydir', 'arvados')
-
-ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
-ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
-if cp_config['arvados_api_host_insecure']
- ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
-else
- ENV.delete('ARVADOS_API_HOST_INSECURE')
-end
-
-def ensure_directory(path, mode)
- begin
- Dir.mkdir(path, mode)
- rescue Errno::EEXIST
- end
-end
-
-def replace_file(path, contents)
- unlink_now = true
- dirname, basename = File.split(path)
- FileUtils.mkpath(dirname)
- new_file = Tempfile.new([basename, ".tmp"], dirname)
- begin
- new_file.write(contents)
- new_file.flush
- File.rename(new_file, path)
- unlink_now = false
- ensure
- new_file.close(unlink_now)
- end
-end
-
-def file_has_contents?(path, contents)
- begin
- IO.read(path) == contents
- rescue Errno::ENOENT
- false
- end
-end
-
-module TrackCommitState
- module ClassMethods
- # Note that all classes that include TrackCommitState will have
- # @@need_commit = true if any of them set it. Since this flag reports
- # a boolean state of the underlying git repository, that's OK in the
- # current implementation.
- @@need_commit = false
-
- def changed?
- @@need_commit
- end
-
- def ensure_in_git(path, contents)
- unless file_has_contents?(path, contents)
- replace_file(path, contents)
- system("git", "add", path)
- @@need_commit = true
- end
- end
- end
-
- def ensure_in_git(path, contents)
- self.class.ensure_in_git(path, contents)
- end
-
- def self.included(base)
- base.extend(ClassMethods)
- end
-end
-
-class UserSSHKeys
- include TrackCommitState
-
- def initialize(user_keys_map, key_dir)
- @user_keys_map = user_keys_map
- @key_dir = key_dir
- @installed = {}
- end
-
- def install(filename, pubkey)
- unless pubkey.nil?
- key_path = File.join(@key_dir, filename)
- ensure_in_git(key_path, pubkey)
- end
- @installed[filename] = true
- end
-
- def ensure_keys_for_user(user_uuid)
- return unless key_list = @user_keys_map.delete(user_uuid)
- key_list.map { |k| k[:public_key] }.compact.each_with_index do |pubkey, ii|
- # Handle putty-style ssh public keys
- pubkey.sub!(/^(Comment: "r[^\n]*\n)(.*)$/m,'ssh-rsa \2 \1')
- pubkey.sub!(/^(Comment: "d[^\n]*\n)(.*)$/m,'ssh-dss \2 \1')
- pubkey.gsub!(/\n/,'')
- pubkey.strip!
- install("#{user_uuid}@#{ii}.pub", pubkey)
- end
- end
-
- def installed?(filename)
- @installed[filename]
- end
-end
-
-class Repository
- include TrackCommitState
-
- @@aliases = {}
-
- def initialize(arv_repo, user_keys)
- @arv_repo = arv_repo
- @user_keys = user_keys
- end
-
- def self.ensure_system_config(conf_root)
- ensure_in_git(File.join(conf_root, "conf", "gitolite.conf"),
- %Q{include "auto/*.conf"\ninclude "admin/*.conf"\n})
- ensure_in_git(File.join(conf_root, "arvadosaliases.pl"), alias_config)
-
- conf_path = File.join(conf_root, "conf", "admin", "arvados.conf")
- conf_file = %Q{
-@arvados_git_user = arvados_git_user
-
-repo gitolite-admin
- RW = @arvados_git_user
-
-}
- ensure_directory(File.dirname(conf_path), 0755)
- ensure_in_git(conf_path, conf_file)
- end
-
- def ensure_config(conf_root)
- if name and (File.exist?(auto_conf_path(conf_root, name)))
- # This gitolite installation knows the repository by name, rather than
- # UUID. Leave it configured that way until a separate migration is run.
- basename = name
- else
- basename = uuid
- @@aliases[name] = uuid unless name.nil?
- end
- conf_file = "\nrepo #{basename}\n"
- @arv_repo[:user_permissions].sort.each do |user_uuid, perm|
- conf_file += "\t#{perm[:gitolite_permissions]}\t= #{user_uuid}\n"
- @user_keys.ensure_keys_for_user(user_uuid)
- end
- ensure_in_git(auto_conf_path(conf_root, basename), conf_file)
- end
-
- private
-
- def auto_conf_path(conf_root, basename)
- File.join(conf_root, "conf", "auto", "#{basename}.conf")
- end
-
- def uuid
- @arv_repo[:uuid]
- end
-
- def name
- if @arv_repo[:name].nil?
- nil
- else
- @clean_name ||=
- @arv_repo[:name].sub(/^[^A-Za-z]+/, "").gsub(/[^\w\.\/]/, "")
- end
- end
-
- def self.alias_config
- conf_s = "{\n"
- @@aliases.sort.each do |(repo_name, repo_uuid)|
- conf_s += "\t'#{repo_name}' \t=> '#{repo_uuid}',\n"
- end
- conf_s += "};\n"
- conf_s
- end
-end
-
-begin
- # Get our local gitolite-admin repo up to snuff
- if not File.exist?(gitolite_admin) then
- ensure_directory(gitolite_tmpdir, 0700)
- Dir.chdir(gitolite_tmpdir)
- `git clone #{gitolite_url}`
- Dir.chdir(gitolite_admin)
- else
- Dir.chdir(gitolite_admin)
- `git pull`
- end
-
- arv = Arvados.new
- permissions = arv.repository.get_all_permissions
-
- ensure_directory(gitolite_keydir, 0700)
- admin_user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_admin_keydir)
- # Make sure the arvados_git_user key is installed; put it in gitolite_admin_keydir
- # because that is where gitolite will try to put it if we do not.
- admin_user_ssh_keys.install('arvados_git_user.pub', gitolite_arvados_git_user_key)
-
- user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_keydir)
- permissions[:repositories].each do |repo_record|
- repo = Repository.new(repo_record, user_ssh_keys)
- repo.ensure_config(gitolite_admin)
- end
- Repository.ensure_system_config(gitolite_admin)
-
- # Clean up public key files that should not be present
- Dir.chdir(gitolite_keydir)
- stale_keys = Dir.glob('*.pub').reject do |key_file|
- user_ssh_keys.installed?(key_file)
- end
- if stale_keys.any?
- stale_keys.each { |key_file| puts "Extra file #{key_file}" }
- system("git", "rm", "--quiet", *stale_keys)
- end
-
- if UserSSHKeys.changed? or Repository.changed? or stale_keys.any?
- message = "#{Time.now().to_s}: update from API"
- Dir.chdir(gitolite_admin)
- `git add --all`
- `git commit -m '#{message}'`
- `git push`
- end
-
-rescue => bang
- puts "Error: " + bang.to_s
- puts bang.backtrace.join("\n")
- exit 1
-end
-
diff --git a/services/api/script/create_superuser_token.rb b/services/api/script/create_superuser_token.rb
deleted file mode 100755
index 3d5de35fd7..0000000000
--- a/services/api/script/create_superuser_token.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env ruby
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Install the supplied string (or a randomly generated token, if none
-# is given) as an API token that authenticates to the system user
-# account.
-#
-# Print the token on stdout.
-
-require './lib/create_superuser_token'
-include CreateSuperUserToken
-
-supplied_token = ARGV[0]
-
-token = CreateSuperUserToken.create_superuser_token supplied_token
-puts token
diff --git a/services/api/script/migrate-gitolite-to-uuid-storage.rb b/services/api/script/migrate-gitolite-to-uuid-storage.rb
deleted file mode 100755
index 98f25ca537..0000000000
--- a/services/api/script/migrate-gitolite-to-uuid-storage.rb
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env ruby
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-#
-# Prior to April 2015, Arvados Gitolite integration stored repositories by
-# name. To improve user repository management, we switched to storing
-# repositories by UUID, and aliasing them to names. This makes it easy to
-# have rich name hierarchies, and allow users to rename repositories.
-#
-# This script will migrate a name-based Gitolite configuration to a UUID-based
-# one. To use it:
-#
-# 1. Change the value of REPOS_DIR below, if needed.
-# 2. Install this script in the same directory as `update-gitolite.rb`.
-# 3. Ensure that no *other* users can access Gitolite: edit gitolite's
-# authorized_keys file so it only contains the arvados_git_user key,
-# and disable the update-gitolite cron job.
-# 4. Run this script: `ruby migrate-gitolite-to-uuid-storage.rb production`.
-# 5. Undo step 3.
-
-require 'rubygems'
-require 'pp'
-require 'arvados'
-require 'tempfile'
-require 'yaml'
-
-REPOS_DIR = "/var/lib/gitolite/repositories"
-
-# Default is development
-production = ARGV[0] == "production"
-
-ENV["RAILS_ENV"] = "development"
-ENV["RAILS_ENV"] = "production" if production
-
-DEBUG = 1
-
-# load and merge in the environment-specific application config info
-# if present, overriding base config parameters as specified
-path = File.dirname(__FILE__) + '/config/arvados-clients.yml'
-if File.exist?(path) then
- cp_config = File.open(path) do |f|
- YAML.safe_load(f, filename: path)[ENV['RAILS_ENV']]
- end
-else
- puts "Please create a\n " + File.dirname(__FILE__) + "/config/arvados-clients.yml\n file"
- exit 1
-end
-
-gitolite_url = cp_config['gitolite_url']
-gitolite_arvados_git_user_key = cp_config['gitolite_arvados_git_user_key']
-
-gitolite_tmpdir = File.join(File.absolute_path(File.dirname(__FILE__)),
- cp_config['gitolite_tmp'])
-gitolite_admin = File.join(gitolite_tmpdir, 'gitolite-admin')
-gitolite_keydir = File.join(gitolite_admin, 'keydir', 'arvados')
-
-ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
-ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
-if cp_config['arvados_api_host_insecure']
- ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
-else
- ENV.delete('ARVADOS_API_HOST_INSECURE')
-end
-
-def ensure_directory(path, mode)
- begin
- Dir.mkdir(path, mode)
- rescue Errno::EEXIST
- end
-end
-
-def replace_file(path, contents)
- unlink_now = true
- dirname, basename = File.split(path)
- new_file = Tempfile.new([basename, ".tmp"], dirname)
- begin
- new_file.write(contents)
- new_file.flush
- File.rename(new_file, path)
- unlink_now = false
- ensure
- new_file.close(unlink_now)
- end
-end
-
-def file_has_contents?(path, contents)
- begin
- IO.read(path) == contents
- rescue Errno::ENOENT
- false
- end
-end
-
-module TrackCommitState
- module ClassMethods
- # Note that all classes that include TrackCommitState will have
- # @@need_commit = true if any of them set it. Since this flag reports
- # a boolean state of the underlying git repository, that's OK in the
- # current implementation.
- @@need_commit = false
-
- def changed?
- @@need_commit
- end
-
- def ensure_in_git(path, contents)
- unless file_has_contents?(path, contents)
- replace_file(path, contents)
- system("git", "add", path)
- @@need_commit = true
- end
- end
- end
-
- def ensure_in_git(path, contents)
- self.class.ensure_in_git(path, contents)
- end
-
- def self.included(base)
- base.extend(ClassMethods)
- end
-end
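# The ClassMethods comment above relies on Ruby class-variable sharing;
# a minimal standalone sketch of that behavior (hypothetical names):
#
#   module Flagged
#     module ClassMethods
#       @@flag = false
#       def mark!; @@flag = true; end
#       def changed?; @@flag; end
#     end
#     def self.included(base); base.extend(ClassMethods); end
#   end
#
#   class IncluderA; include Flagged; end
#   class IncluderB; include Flagged; end
#   IncluderA.mark!
#   IncluderB.changed?  # => true: one @@flag serves every including class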
-
-class Repository
- include TrackCommitState
-
- @@aliases = {}
-
- def initialize(arv_repo)
- @arv_repo = arv_repo
- end
-
- def self.ensure_system_config(conf_root)
- ensure_in_git(File.join(conf_root, "arvadosaliases.pl"), alias_config)
- end
-
- def self.rename_repos(repos_root)
- @@aliases.each_pair do |uuid, name|
- begin
- File.rename(File.join(repos_root, "#{name}.git/"),
- File.join(repos_root, "#{uuid}.git"))
- rescue Errno::ENOENT
- end
- if name == "arvados"
- Dir.chdir(repos_root) { File.symlink("#{uuid}.git/", "arvados.git") }
- end
- end
- end
-
- def ensure_config(conf_root)
- return if name.nil?
- @@aliases[uuid] = name
- name_conf_path = auto_conf_path(conf_root, name)
- return unless File.exist?(name_conf_path)
- conf_file = IO.read(name_conf_path)
- conf_file.gsub!(/^repo #{Regexp.escape(name)}$/m, "repo #{uuid}")
- ensure_in_git(auto_conf_path(conf_root, uuid), conf_file)
- File.unlink(name_conf_path)
- system("git", "rm", "--quiet", name_conf_path)
- end
-
- private
-
- def auto_conf_path(conf_root, basename)
- File.join(conf_root, "conf", "auto", "#{basename}.conf")
- end
-
- def uuid
- @arv_repo[:uuid]
- end
-
- def name
- if @arv_repo[:name].nil?
- nil
- else
- @clean_name ||=
- @arv_repo[:name].sub(/^[^A-Za-z]+/, "").gsub(/[^\w\.\/]/, "")
- end
- end
-
- def self.alias_config
- conf_s = "{\n"
- @@aliases.sort.each do |(repo_name, repo_uuid)|
- conf_s += "\t'#{repo_name}' \t=> '#{repo_uuid}',\n"
- end
- conf_s += "};\n"
- conf_s
- end
-end
-
-begin
- # Get our local gitolite-admin repo up to snuff
- if not File.exist?(gitolite_admin) then
- ensure_directory(gitolite_tmpdir, 0700)
- Dir.chdir(gitolite_tmpdir)
- `git clone #{gitolite_url}`
- Dir.chdir(gitolite_admin)
- else
- Dir.chdir(gitolite_admin)
- `git pull`
- end
-
- arv = Arvados.new
- permissions = arv.repository.get_all_permissions
-
- permissions[:repositories].each do |repo_record|
- repo = Repository.new(repo_record)
- repo.ensure_config(gitolite_admin)
- end
- Repository.ensure_system_config(gitolite_admin)
-
- message = "#{Time.now().to_s}: migrate to storing repositories by UUID"
- Dir.chdir(gitolite_admin)
- `git add --all`
- `git commit -m '#{message}'`
- Repository.rename_repos(REPOS_DIR)
- `git push`
-
-rescue => bang
- puts "Error: " + bang.to_s
- puts bang.backtrace.join("\n")
- exit 1
-end
-
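
The replace_file helper in the deleted script is the standard atomic-write idiom: write to a tempfile in the destination directory, flush, then rename over the target so readers never observe a partial file. A condensed standalone sketch of the same pattern (atomic_write is a hypothetical name):

    require 'tempfile'

    def atomic_write(path, contents)
      dirname, basename = File.split(path)
      tmp = Tempfile.new([basename, '.tmp'], dirname)
      unlink_now = true
      begin
        tmp.write(contents)
        tmp.flush
        File.rename(tmp.path, path) # atomic replace on the same filesystem
        unlink_now = false          # the data now lives at `path`
      ensure
        tmp.close(unlink_now)       # unlink the tempfile only on failure
      end
    end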
diff --git a/services/api/test/factories/api_client.rb b/services/api/test/factories/api_client.rb
deleted file mode 100644
index ee2017bf72..0000000000
--- a/services/api/test/factories/api_client.rb
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FactoryBot.define do
- factory :api_client do
- is_trusted { false }
- to_create do |instance|
- CurrentApiClientHelper.act_as_system_user do
- instance.save!
- end
- end
- end
-end
diff --git a/services/api/test/factories/api_client_authorization.rb b/services/api/test/factories/api_client_authorization.rb
index af2660a5ff..2aa03ac2f0 100644
--- a/services/api/test/factories/api_client_authorization.rb
+++ b/services/api/test/factories/api_client_authorization.rb
@@ -4,12 +4,8 @@
FactoryBot.define do
factory :api_client_authorization do
- api_client
scopes { ['all'] }
- trait :trusted do
- association :api_client, factory: :api_client, is_trusted: true
- end
factory :token do
# Just provides shorthand for "create :api_client_authorization"
end
diff --git a/services/api/test/fixtures/api_client_authorizations.yml b/services/api/test/fixtures/api_client_authorizations.yml
index c6ade21f8b..112906729f 100644
--- a/services/api/test/fixtures/api_client_authorizations.yml
+++ b/services/api/test/fixtures/api_client_authorizations.yml
@@ -4,30 +4,33 @@
# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+# This record's api_token is the SystemRootToken used by the test
+# suite (in fact, sdk/python/tests/run_test_server.py copies it from
+# this file into the test suite config file). That token is accepted
+# regardless of database contents (see
+# ApiClientAuthorization.check_system_root_token) but having a fixture
+# for it allows test cases to access it the same way as other token
+# fixtures, i.e., api_client_authorizations(:system_user).
system_user:
- uuid: zzzzz-gj3su-017z32aux8dg2s1
- api_client: untrusted
+ uuid: zzzzz-gj3su-000000000000000
user: system_user
api_token: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
expires_at: 2038-01-01 00:00:00
admin:
uuid: zzzzz-gj3su-027z32aux8dg2s1
- api_client: untrusted
user: admin
api_token: 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h
expires_at: 2038-01-01 00:00:00
admin_trustedclient:
uuid: zzzzz-gj3su-037z32aux8dg2s1
- api_client: trusted_workbench
user: admin
api_token: 1a9ffdcga2o7cw8q12dndskomgs1ygli3ns9k2o9hgzgmktc78
expires_at: 2038-01-01 00:00:00
data_manager:
uuid: zzzzz-gj3su-047z32aux8dg2s1
- api_client: untrusted
user: system_user
api_token: 320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1
expires_at: 2038-01-01 00:00:00
@@ -40,35 +43,30 @@ data_manager:
miniadmin:
uuid: zzzzz-gj3su-057z32aux8dg2s1
- api_client: untrusted
user: miniadmin
api_token: 2zb2y9pw3e70270te7oe3ewaantea3adyxjascvkz0zob7q7xb
expires_at: 2038-01-01 00:00:00
rominiadmin:
uuid: zzzzz-gj3su-067z32aux8dg2s1
- api_client: untrusted
user: rominiadmin
api_token: 5tsb2pc3zlatn1ortl98s2tqsehpby88wmmnzmpsjmzwa6payh
expires_at: 2038-01-01 00:00:00
active:
uuid: zzzzz-gj3su-077z32aux8dg2s1
- api_client: untrusted
user: active
api_token: 3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
expires_at: 2038-01-01 00:00:00
active_trustedclient:
uuid: zzzzz-gj3su-087z32aux8dg2s1
- api_client: trusted_workbench
user: active
api_token: 27bnddk6x2nmq00a1e3gq43n9tsl5v87a3faqar2ijj8tud5en
expires_at: 2038-01-01 00:00:00
active_noscope:
uuid: zzzzz-gj3su-097z32aux8dg2s1
- api_client: untrusted
user: active
api_token: activenoscopeabcdefghijklmnopqrstuvwxyz12345678901
expires_at: 2038-01-01 00:00:00
@@ -76,28 +74,24 @@ active_noscope:
project_viewer:
uuid: zzzzz-gj3su-107z32aux8dg2s1
- api_client: untrusted
user: project_viewer
api_token: projectviewertoken1234567890abcdefghijklmnopqrstuv
expires_at: 2038-01-01 00:00:00
project_viewer_trustedclient:
uuid: zzzzz-gj3su-117z32aux8dg2s1
- api_client: trusted_workbench
user: project_viewer
api_token: projectviewertrustedtoken1234567890abcdefghijklmno
expires_at: 2038-01-01 00:00:00
subproject_admin:
uuid: zzzzz-gj3su-127z32aux8dg2s1
- api_client: untrusted
user: subproject_admin
api_token: subprojectadmintoken1234567890abcdefghijklmnopqrst
expires_at: 2038-01-01 00:00:00
admin_vm:
uuid: zzzzz-gj3su-137z32aux8dg2s1
- api_client: untrusted
user: admin
api_token: adminvirtualmachineabcdefghijklmnopqrstuvwxyz12345
expires_at: 2038-01-01 00:00:00
@@ -106,7 +100,6 @@ admin_vm:
admin_noscope:
uuid: zzzzz-gj3su-147z32aux8dg2s1
- api_client: untrusted
user: admin
api_token: adminnoscopeabcdefghijklmnopqrstuvwxyz123456789012
expires_at: 2038-01-01 00:00:00
@@ -114,7 +107,6 @@ admin_noscope:
active_all_collections:
uuid: zzzzz-gj3su-157z32aux8dg2s1
- api_client: untrusted
user: active
api_token: activecollectionsabcdefghijklmnopqrstuvwxyz1234567
expires_at: 2038-01-01 00:00:00
@@ -122,23 +114,13 @@ active_all_collections:
active_userlist:
uuid: zzzzz-gj3su-167z32aux8dg2s1
- api_client: untrusted
user: active
api_token: activeuserlistabcdefghijklmnopqrstuvwxyz1234568900
expires_at: 2038-01-01 00:00:00
scopes: ["GET /arvados/v1/users"]
-active_specimens:
- uuid: zzzzz-gj3su-177z32aux8dg2s1
- api_client: untrusted
- user: active
- api_token: activespecimensabcdefghijklmnopqrstuvwxyz123456890
- expires_at: 2038-01-01 00:00:00
- scopes: ["GET /arvados/v1/specimens/"]
-
active_apitokens:
uuid: zzzzz-gj3su-187z32aux8dg2s1
- api_client: trusted_workbench
user: active
api_token: activeapitokensabcdefghijklmnopqrstuvwxyz123456789
expires_at: 2038-01-01 00:00:00
@@ -147,7 +129,6 @@ active_apitokens:
active_readonly:
uuid: zzzzz-gj3su-197z32aux8dg2s1
- api_client: untrusted
user: active
api_token: activereadonlyabcdefghijklmnopqrstuvwxyz1234568790
expires_at: 2038-01-01 00:00:00
@@ -155,72 +136,68 @@ active_readonly:
spectator:
uuid: zzzzz-gj3su-207z32aux8dg2s1
- api_client: untrusted
user: spectator
api_token: zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu
expires_at: 2038-01-01 00:00:00
-spectator_specimens:
+foo:
+ uuid: zzzzz-gj3su-fohzae5ib1aseiv
+ user: user_foo_in_sharing_group
+ api_token: lokah4xip8ahgee8oof5zitah3ohdai6je9cu1uogh4bai3ohw
+ expires_at: 2038-01-01 00:00:00
+
+foo_collections:
uuid: zzzzz-gj3su-217z32aux8dg2s1
- api_client: untrusted
- user: spectator
- api_token: spectatorspecimensabcdefghijklmnopqrstuvwxyz123245
+ user: user_foo_in_sharing_group
+ api_token: spectatorcollectionscdefghijklmnopqrstuvwxyz123245
expires_at: 2038-01-01 00:00:00
- scopes: ["GET /arvados/v1/specimens", "GET /arvados/v1/specimens/",
- "POST /arvados/v1/specimens"]
+ scopes: ["GET /arvados/v1/collections", "GET /arvados/v1/collections/",
+ "POST /arvados/v1/collections"]
inactive:
uuid: zzzzz-gj3su-227z32aux8dg2s1
- api_client: untrusted
user: inactive
api_token: 5s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
expires_at: 2038-01-01 00:00:00
inactive_uninvited:
uuid: zzzzz-gj3su-237z32aux8dg2s1
- api_client: untrusted
user: inactive_uninvited
api_token: 62mhllc0otp78v08e3rpa3nsmf8q8ogk47f7u5z4erp5gpj9al
expires_at: 2038-01-01 00:00:00
inactive_uninvited_trustedclient:
uuid: zzzzz-gj3su-228z32aux8dg2s1
- api_client: trusted_workbench
user: inactive_uninvited
api_token: 7s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
expires_at: 2038-01-01 00:00:00
inactive_but_signed_user_agreement:
uuid: zzzzz-gj3su-247z32aux8dg2s1
- api_client: untrusted
user: inactive_but_signed_user_agreement
api_token: 64k3bzw37iwpdlexczj02rw3m333rrb8ydvn2qq99ohv68so5k
expires_at: 2038-01-01 00:00:00
expired:
uuid: zzzzz-gj3su-257z32aux8dg2s1
- api_client: untrusted
user: active
api_token: 2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx
expires_at: 1970-01-01 00:00:00
expired_trustedclient:
uuid: zzzzz-gj3su-267z32aux8dg2s1
- api_client: trusted_workbench
user: active
api_token: 5hpni7izokzcatku2896xxwqdbt5ptomn04r6auc7fohnli82v
expires_at: 1970-01-01 00:00:00
valid_token_deleted_user:
uuid: zzzzz-gj3su-277z32aux8dg2s1
- api_client: trusted_workbench
user_id: 1234567
api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b
expires_at: 2038-01-01 00:00:00
anonymous:
uuid: zzzzz-gj3su-287z32aux8dg2s1
- api_client: untrusted
user: anonymous
api_token: 4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi
expires_at: 2038-01-01 00:00:00
@@ -228,119 +205,96 @@ anonymous:
job_reader:
uuid: zzzzz-gj3su-297z32aux8dg2s1
- api_client: untrusted
user: job_reader
api_token: e99512cdc0f3415c2428b9758f33bdfb07bc3561b00e86e7e6
expires_at: 2038-01-01 00:00:00
job_reader2:
uuid: zzzzz-gj3su-jobreader2auth1
- api_client: untrusted
user: job_reader2
api_token: jobreader2415c2428b9758f33bdfb07bc3561b0jobreader2
expires_at: 2038-01-01 00:00:00
active_no_prefs:
uuid: zzzzz-gj3su-307z32aux8dg2s1
- api_client: untrusted
user: active_no_prefs
api_token: 3kg612cdc0f3415c2428b9758f33bdfb07bc3561b00e86qdmi
expires_at: 2038-01-01 00:00:00
active_no_prefs_profile_no_getting_started_shown:
uuid: zzzzz-gj3su-317z32aux8dg2s1
- api_client: untrusted
user: active_no_prefs_profile_no_getting_started_shown
api_token: 3kg612cdc0f3415c242856758f33bdfb07bc3561b00e86qdmi
expires_at: 2038-01-01 00:00:00
active_no_prefs_profile_with_getting_started_shown:
uuid: zzzzz-gj3su-327z32aux8dg2s1
- api_client: untrusted
user: active_no_prefs_profile_with_getting_started_shown
api_token: 3kg612cdc0f3415c245786758f33bdfb07babcd1b00e86qdmi
expires_at: 2038-01-01 00:00:00
active_with_prefs_profile_no_getting_started_shown:
uuid: zzzzz-gj3su-337z32aux8dg2s1
- api_client: untrusted
user: active_with_prefs_profile_no_getting_started_shown
api_token: 3kg612cdc0f3415c245786758f33bdfb07befgh1b00e86qdmi
expires_at: 2038-01-01 00:00:00
user_foo_in_sharing_group:
uuid: zzzzz-gj3su-347z32aux8dg2s1
- api_client: untrusted
user: user_foo_in_sharing_group
api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u
expires_at: 2038-01-01 00:00:00
user_bar_in_sharing_group:
uuid: zzzzz-gj3su-62hryf5fht531mz
- api_client: untrusted
user: user_bar_in_sharing_group
api_token: 5vy55akwq85vghh80wc2cuxl4p8psay73lkpqf5c2cxvp6rmm6
expires_at: 2038-01-01 00:00:00
user1_with_load:
uuid: zzzzz-gj3su-357z32aux8dg2s1
- api_client: untrusted
user: user1_with_load
api_token: 1234k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
expires_at: 2038-01-01 00:00:00
fuse:
uuid: zzzzz-gj3su-367z32aux8dg2s1
- api_client: untrusted
user: fuse
api_token: 4nagbkv8eap0uok7pxm72nossq5asihls3yn5p4xmvqx5t5e7p
expires_at: 2038-01-01 00:00:00
-dispatch1:
- uuid: zzzzz-gj3su-k9dvestay1plssr
- api_client: untrusted
- user: system_user
- api_token: kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw
- expires_at: 2038-01-01 00:00:00
-
dispatch2:
uuid: zzzzz-gj3su-jrriu629zljsnuf
- api_client: untrusted
user: system_user
api_token: pbe3v4v5oag83tjwxjh0a551j44xdu8t7ol5ljw3ixsq8oh50q
expires_at: 2038-01-01 00:00:00
running_container_auth:
uuid: zzzzz-gj3su-077z32aux8dg2s2
- api_client: untrusted
user: active
api_token: it2gl94mgu3rbn5s2d06vzh73ns1y6cthct0tvg82qdlsxvbwk
expires_at: 2038-01-01 00:00:00
running_container_with_logs_auth:
uuid: zzzzz-gj3su-n4xycwjpvvi776n
- api_client: untrusted
user: active
api_token: mkpdp5jbytt471lw9so1by2t5ylciojdur845rfn4dtm0etl33
expires_at: 2038-01-01 00:00:00
running_to_be_deleted_container_auth:
uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq
- api_client: untrusted
user: active
api_token: ge1pez7dkk7nqntwcsj922g2b7a2t27xz6nsx39r15kbcqmp55
expires_at: 2038-01-01 00:00:00
permission_perftest:
uuid: zzzzz-gj3su-077z32anoj93boo
- api_client: untrusted
user: permission_perftest
api_token: 3kg6k6lzmp9kjabonentustoecn5bahbt2fod9zru30k1jqdmi
expires_at: 2038-01-01 00:00:00
foo_collection_sharing_token:
uuid: zzzzz-gj3su-gf02tdm4g1z3e3u
- api_client: untrusted
user: active
api_token: iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss
expires_at: 2038-01-01 00:00:00
@@ -351,21 +305,18 @@ foo_collection_sharing_token:
container_runtime_token:
uuid: zzzzz-gj3su-2nj68s291f50gd9
- api_client: untrusted
user: container_runtime_token_user
api_token: 2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw
expires_at: 2038-01-01 00:00:00
crt_user:
uuid: zzzzz-gj3su-3r47qqy5ja5d54v
- api_client: untrusted
user: container_runtime_token_user
api_token: 13z1tz9deoryml3twep0vsahi4862097pe5lsmesugnkgpgpwk
expires_at: 2038-01-01 00:00:00
runtime_token_limited_scope:
uuid: zzzzz-gj3su-2fljvypjrr4yr9m
- api_client: untrusted
user: container_runtime_token_user
api_token: 1fwc3be1m13qkypix2gd01i4bq5ju483zjfc0cf4babjseirbm
expires_at: 2038-01-01 00:00:00
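
With the api_client column gone from these fixtures (and the association gone from the factory earlier in this diff), creating a test token reduces to the authorization record alone. A hedged sketch using the updated factory; some_user stands in for whatever user record the test supplies:

    auth = FactoryBot.create(:api_client_authorization,
                             user: some_user,  # hypothetical; supplied by the test
                             scopes: ['all'])
    token = FactoryBot.create(:token)          # still shorthand for the same factory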
diff --git a/services/api/test/fixtures/api_clients.yml b/services/api/test/fixtures/api_clients.yml
deleted file mode 100644
index 9965718f99..0000000000
--- a/services/api/test/fixtures/api_clients.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
-
-trusted_workbench:
- uuid: zzzzz-ozdt8-teyxzyd8qllg11h
- owner_uuid: zzzzz-tpzed-000000000000000
- name: Official Workbench
- url_prefix: https://official-workbench.local/
- is_trusted: true
-
-untrusted:
- uuid: zzzzz-ozdt8-obw7foaks3qjyej
- owner_uuid: zzzzz-tpzed-000000000000000
- name: Untrusted
- url_prefix: https://untrusted.local/
- is_trusted: false
-
-system_root_token_api_client:
- uuid: zzzzz-ozdt8-pbw7foaks3qjyej
- owner_uuid: zzzzz-tpzed-000000000000000
- name: SystemRootToken
- url_prefix: ""
- is_trusted: true
diff --git a/services/api/test/fixtures/collections.yml b/services/api/test/fixtures/collections.yml
index 72aad1d68e..85e8bbb3eb 100644
--- a/services/api/test/fixtures/collections.yml
+++ b/services/api/test/fixtures/collections.yml
@@ -8,7 +8,6 @@ user_agreement:
portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T19:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2013-12-26T19:22:54Z
updated_at: 2013-12-26T19:22:54Z
@@ -21,7 +20,6 @@ collection_owned_by_active:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T18:22:54Z
updated_at: 2014-02-03T18:22:54Z
@@ -35,7 +33,6 @@ collection_owned_by_active_with_file_stats:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -51,7 +48,6 @@ collection_owned_by_active_past_version_1:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T18:22:54Z
updated_at: 2014-02-03T18:22:54Z
@@ -65,7 +61,6 @@ foo_file:
portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2015-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-03T17:22:54Z
updated_at: 2015-02-03T17:22:54Z
@@ -78,7 +73,6 @@ bar_file:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2015-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-03T17:22:54Z
updated_at: 2015-02-03T17:22:54Z
@@ -91,7 +85,6 @@ baz_file:
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -104,7 +97,6 @@ w_a_z_file:
portable_data_hash: 44a8da9ec82098323895cd14e178386f+56
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-09T10:53:38Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-09T10:55:38Z
updated_at: 2015-02-09T10:55:38Z
@@ -118,7 +110,6 @@ w_a_z_file_version_1:
portable_data_hash: ba4ba4c7b99a58806b1ed70ea1263afe+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-09T10:53:38Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-09T10:55:38Z
updated_at: 2015-02-09T10:55:38Z
@@ -132,7 +123,6 @@ multilevel_collection_1:
portable_data_hash: f9ddda46bb293b6847da984e3aa735db+290
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -146,7 +136,6 @@ multilevel_collection_2:
portable_data_hash: 8591cc5caeca80fc62fd529ba1d63bf3+118
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -160,7 +149,6 @@ docker_image:
portable_data_hash: fa3c1a9cb6783f85f2ecda037e07b8c3+167
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-06-11T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-06-11T17:22:54Z
updated_at: 2014-06-11T17:22:54Z
@@ -174,7 +162,6 @@ docker_image_1_12:
portable_data_hash: d740a57097711e08eb9b2a93518f20ab+174
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2016-10-19 08:50:45.653552268 Z
- modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2016-10-19 08:50:45.652930000 Z
updated_at: 2016-10-19 08:50:45.652930000 Z
@@ -189,7 +176,6 @@ unlinked_docker_image:
portable_data_hash: 9ae44d5792468c58bcf85ce7353c7027+124
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-06-11T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-06-11T17:22:54Z
updated_at: 2014-06-11T17:22:54Z
@@ -202,7 +188,6 @@ empty:
portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-06-11T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-06-11T17:22:54Z
updated_at: 2014-06-11T17:22:54Z
@@ -224,7 +209,6 @@ fuse_filters_test_foo:
uuid: zzzzz-4zz18-4e2kjqv891jl3p3
current_version_uuid: zzzzz-4zz18-4e2kjqv891jl3p3
portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
owner_uuid: zzzzz-tpzed-fusefiltertest1
created_at: 2024-02-09T12:01:00Z
@@ -239,7 +223,6 @@ fuse_filters_test_bar:
uuid: zzzzz-4zz18-qpxqtq2wbjnu630
current_version_uuid: zzzzz-4zz18-qpxqtq2wbjnu630
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
owner_uuid: zzzzz-tpzed-fusefiltertest1
created_at: 2024-02-09T12:02:00Z
@@ -254,7 +237,6 @@ fuse_filters_test_baz:
uuid: zzzzz-4zz18-ls97ezovrkkpfxz
current_version_uuid: zzzzz-4zz18-ls97ezovrkkpfxz
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
owner_uuid: zzzzz-tpzed-fusefiltertest1
created_at: 2024-02-09T12:03:00Z
@@ -314,7 +296,6 @@ baz_file_in_asubproject:
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -327,7 +308,6 @@ collection_to_move_around_in_aproject:
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -342,7 +322,6 @@ expired_collection:
portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -359,7 +338,6 @@ expired_collection_past_version:
portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:12:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:17:54Z
updated_at: 2014-02-03T17:17:54Z
@@ -376,7 +354,6 @@ trashed_on_next_sweep:
portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2016-12-07T22:01:00.123456Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2016-12-27T22:01:30.123456Z
updated_at: 2016-12-27T22:01:30.123456Z
@@ -394,7 +371,6 @@ deleted_on_next_sweep:
portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2016-12-07T22:01:00.234567Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2016-12-27T22:01:30.234567Z
updated_at: 2016-12-27T22:01:30.234567Z
@@ -410,7 +386,6 @@ collection_expires_in_future:
portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -425,7 +400,6 @@ unique_expired_collection:
portable_data_hash: 4ad199f90029935844dc3f098f4fca2a+49
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -441,7 +415,6 @@ unique_expired_collection2:
portable_data_hash: 64a2bed1ef0f40fe3a7d39bcf2584cb8+50
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -451,29 +424,12 @@ unique_expired_collection2:
manifest_text: ". 29d7797f1888013986899bc9083783fa+3 0:3:expired2\n"
name: unique_expired_collection2
-# a collection with a log file that can be parsed by the log viewer
-# This collection hash matches the following log text:
-# 2014-01-01_12:00:01 zzzzz-8i9sb-abcdefghijklmno 0 log message 1
-# 2014-01-01_12:00:02 zzzzz-8i9sb-abcdefghijklmno 0 log message 2
-# 2014-01-01_12:00:03 zzzzz-8i9sb-abcdefghijklmno 0 log message 3
-#
-real_log_collection:
- uuid: zzzzz-4zz18-op4e2lbej01tcvu
- current_version_uuid: zzzzz-4zz18-op4e2lbej01tcvu
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-09-01 12:00:00
- modified_at: 2014-09-01 12:00:00
- portable_data_hash: 0b9a7787660e1fce4a93f33e01376ba6+81
- manifest_text: ". cdd549ae79fe6640fa3d5c6261d8303c+195 0:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt\n"
- name: real_log_collection
-
collection_in_home_project_with_same_name_as_in_aproject:
uuid: zzzzz-4zz18-12342x4u7ftabcd
current_version_uuid: zzzzz-4zz18-12342x4u7ftabcd
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -486,7 +442,6 @@ collection_in_aproject_with_same_name_as_in_home_project:
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -521,7 +476,6 @@ collection_with_files_in_subdir:
portable_data_hash: 7eb64275355980ebc93411b44050c137+281
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -563,7 +517,6 @@ collection_1_owned_by_fuse:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-0fusedrivertest
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -576,7 +529,6 @@ collection_2_owned_by_fuse:
portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
owner_uuid: zzzzz-tpzed-0fusedrivertest
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -589,7 +541,6 @@ collection_in_fuse_project:
portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -635,7 +586,6 @@ collection_with_unique_words_to_test_full_text_search:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -739,7 +689,6 @@ collection_with_empty_properties:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -753,7 +702,6 @@ collection_with_one_property:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -769,7 +717,6 @@ collection_with_repeated_filenames_and_contents_in_two_dirs_1:
portable_data_hash: ce437b12aa73ab34f7af5227f556c9e6+142
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -782,7 +729,6 @@ collection_with_repeated_filenames_and_contents_in_two_dirs_2:
portable_data_hash: f3a67fad3a19c31c658982fb8158fa58+144
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -795,7 +741,6 @@ foo_and_bar_files_in_dir:
portable_data_hash: 870369fc72738603c2fad16664e50e2d+58
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -808,7 +753,6 @@ multi_level_to_combine:
portable_data_hash: 7a6ef4c162a5c6413070a8bd0bffc818+150
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -822,7 +766,6 @@ collection_with_several_supported_file_types:
portable_data_hash: 020d82cf7dedb70fd2b7788b5d0634da+269
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -835,7 +778,6 @@ collection_with_several_unsupported_file_types:
portable_data_hash: 71ac42f87464ee5f9fd396d560d400c3+59
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -848,7 +790,6 @@ collection_not_readable_by_active:
portable_data_hash: b9e51a238ce08a698e7d7f8f101aee18+55
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -861,7 +802,6 @@ collection_to_remove_and_rename_files:
portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -874,7 +814,6 @@ collection_with_tags_owned_by_active:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -890,7 +829,6 @@ trashed_collection_to_test_name_conflict_on_untrash:
portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -906,7 +844,6 @@ same_name_as_trashed_coll_to_test_name_conflict_on_untrash:
portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -919,7 +856,6 @@ collection_in_trashed_subproject:
portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-j7d0g-trashedproject2
created_at: 2014-02-03T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
@@ -932,7 +868,6 @@ collection_with_prop1_value1:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -947,7 +882,6 @@ collection_with_prop1_value2:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -962,7 +896,6 @@ collection_with_prop1_value3:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -977,7 +910,6 @@ collection_with_prop1_other1:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -992,7 +924,6 @@ collection_with_prop2_1:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1007,7 +938,6 @@ collection_with_prop2_5:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1022,7 +952,6 @@ collection_with_list_prop_odd:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1037,7 +966,6 @@ collection_with_list_prop_even:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1052,7 +980,6 @@ collection_with_listprop_elem1:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1067,7 +994,6 @@ collection_with_uri_prop:
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-02-13T17:22:54Z
updated_at: 2015-02-13T17:22:54Z
@@ -1076,17 +1002,27 @@ collection_with_uri_prop:
properties:
"http://schema.org/example": "value1"
+container_log_collection:
+ uuid: zzzzz-4zz18-logcollection00
+ current_version_uuid: zzzzz-4zz18-logcollection00
+ portable_data_hash: b1e66f713c04d28ddbaced89096f4838+210
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2020-10-29T00:51:44.075594000Z
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2020-10-29T00:51:44.072109000Z
+ manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
+ name: a real log collection for a completed container
+
log_collection:
uuid: zzzzz-4zz18-logcollection01
current_version_uuid: zzzzz-4zz18-logcollection01
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-10-29T00:51:44.075594000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-10-29T00:51:44.072109000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
- name: a real log collection for a completed container
+ name: a real log collection for a completed container request
log_collection2:
uuid: zzzzz-4zz18-logcollection02
@@ -1094,7 +1030,6 @@ log_collection2:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-10-29T00:51:44.075594000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-10-29T00:51:44.072109000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
@@ -1106,7 +1041,6 @@ diagnostics_request_container_log_collection:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-11-02T00:20:44.007557000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-11-02T00:20:44.005381000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
@@ -1118,7 +1052,6 @@ hasher1_log_collection:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-11-02T00:16:55.272606000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-11-02T00:16:55.267006000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
@@ -1130,7 +1063,6 @@ hasher2_log_collection:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-11-02T00:20:23.547251000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-11-02T00:20:23.545275000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
@@ -1142,7 +1074,6 @@ hasher3_log_collection:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-11-02T00:20:38.789204000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-11-02T00:20:38.787329000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
@@ -1154,7 +1085,6 @@ diagnostics_request_container_log_collection2:
portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2020-11-03T16:17:53.351593000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2020-11-03T16:17:53.346969000Z
manifest_text: ". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\040for\\040container\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n"
diff --git a/services/api/test/fixtures/container_requests.yml b/services/api/test/fixtures/container_requests.yml
index 71c7a54df3..a96074f329 100644
--- a/services/api/test/fixtures/container_requests.yml
+++ b/services/api/test/fixtures/container_requests.yml
@@ -20,10 +20,12 @@ queued:
runtime_constraints:
vcpus: 1
ram: 123
- cuda:
- driver_version: ""
- hardware_capability: ""
+ gpu:
device_count: 0
+ driver_version: ""
+ hardware_target: []
+ stack: ""
+ vram: 0
mounts: {}
running:
@@ -1056,6 +1058,36 @@ runtime_token:
ram: 123
mounts: {}
+read_foo_write_bar:
+ uuid: zzzzz-xvdhp-readfoowritebar
+ owner_uuid: zzzzz-tpzed-000000000000000
+ state: Final
+ created_at: 2024-01-11 11:11:11.111111111 Z
+ updated_at: 2024-01-11 11:11:11.111111111 Z
+ modified_at: 2024-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ container_image: test
+ cwd: /
+ mounts:
+ stdin:
+ kind: collection
+ portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ path: /foo
+ stdout:
+ kind: file
+ path: /mnt/out/bar
+ /mnt/out:
+ kind: tmp
+ capacity: 1000
+ container_uuid: zzzzz-dz642-readfoowritebar
+ log_uuid: zzzzz-4zz18-logcollection01
+ output_uuid: zzzzz-4zz18-ehbhgtheo8909or
+ output_path: test
+ command: ["echo", "-n", "bar"]
+ runtime_constraints:
+ ram: 10000000
+ vcpus: 1
+
# Test Helper trims the rest of the file
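
The read_foo_write_bar fixture added above exercises stdin/stdout mounts. A sketch of the same mounts hash in plain Ruby, with values copied from the fixture: stdin is fed from /foo in the referenced collection, stdout is captured to /mnt/out/bar inside the tmp mount, and that captured file becomes the container's output.

    mounts = {
      "stdin"    => {"kind" => "collection",
                     "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
                     "path" => "/foo"},
      "stdout"   => {"kind" => "file", "path" => "/mnt/out/bar"},
      "/mnt/out" => {"kind" => "tmp", "capacity" => 1000},
    }
    # ["echo", "-n", "bar"] writes "bar" to stdout, so the resulting output
    # collection holds a single file "bar"; its portable data hash in the
    # container fixture matches the bar_file collection fixture above.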
diff --git a/services/api/test/fixtures/containers.yml b/services/api/test/fixtures/containers.yml
index 46bc1e50f9..0bca782946 100644
--- a/services/api/test/fixtures/containers.yml
+++ b/services/api/test/fixtures/containers.yml
@@ -16,10 +16,12 @@ queued:
runtime_constraints:
ram: 12000000000
vcpus: 4
- cuda:
+ gpu:
+ stack: ""
driver_version: ""
-      hardware_capability: ""
+      hardware_target: []
device_count: 0
+ vram: 0
mounts:
/tmp:
kind: tmp
@@ -80,7 +82,7 @@ locked:
uuid: zzzzz-dz642-lockedcontainer
owner_uuid: zzzzz-tpzed-000000000000000
state: Locked
- locked_by_uuid: zzzzz-gj3su-k9dvestay1plssr
+ locked_by_uuid: zzzzz-gj3su-000000000000000
priority: 0
created_at: <%= 2.minute.ago.to_fs(:db) %>
updated_at: <%= 2.minute.ago.to_fs(:db) %>
@@ -460,7 +462,7 @@ runtime_token:
kind: tmp
capacity: 24000000000
-cuda_container:
+legacy_cuda_container:
uuid: zzzzz-dz642-cudagpcontainer
owner_uuid: zzzzz-tpzed-000000000000000
state: Complete
@@ -470,6 +472,32 @@ cuda_container:
updated_at: 2016-01-11 11:11:11.111111111 Z
started_at: 2016-01-11 11:11:11.111111111 Z
finished_at: 2016-01-12 11:12:13.111111111 Z
+ container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ cwd: test
+ log: ea10d51bcf88862dbcc36eb292017dfd+45
+ output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ output_path: test
+ command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
+ runtime_constraints:
+ cuda:
+ device_count: 1
+ driver_version: "11.0"
+ hardware_capability: "9.0"
+ ram: 12000000000
+ vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+gpu_container:
+ uuid: zzzzz-dz642-gengpucontainer
+ owner_uuid: zzzzz-tpzed-000000000000000
+ state: Complete
+ exit_code: 0
+ priority: 1
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ started_at: 2016-01-11 11:11:11.111111111 Z
+ finished_at: 2016-01-12 11:12:13.111111111 Z
container_image: test
cwd: test
log: ea10d51bcf88862dbcc36eb292017dfd+45
@@ -479,9 +507,44 @@ cuda_container:
runtime_constraints:
ram: 12000000000
vcpus: 4
- cuda:
+ gpu:
driver_version: "11.0"
- hardware_capability: "9.0"
+ hardware_target: ["9.0"]
device_count: 1
+ stack: "cuda"
+ vram: 8000000000
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+read_foo_write_bar:
+ uuid: zzzzz-dz642-readfoowritebar
+ owner_uuid: zzzzz-tpzed-000000000000000
+ state: Complete
+ exit_code: 0
+ priority: 1
+ created_at: 2024-01-11 11:11:11.111111111 Z
+ updated_at: 2024-01-11 11:11:11.111111111 Z
+ started_at: 2024-01-11 11:11:11.111111111 Z
+ finished_at: 2024-01-12 11:12:13.111111111 Z
+ container_image: test
+ cwd: /
+ mounts:
+ stdin:
+ kind: collection
+ portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ path: /foo
+ stdout:
+ kind: file
+ path: /mnt/out/bar
+ /mnt/out:
+ kind: tmp
+ capacity: 1000
+ log: ea10d51bcf88862dbcc36eb292017dfd+45
+ output: fa7aeb5140e2848d39b416daeef4ffc5+45
+ output_path: test
+ command: ["echo", "-n", "bar"]
+ runtime_constraints:
+ ram: 10000000
+ vcpus: 1
secret_mounts: {}
secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
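
Taken together, legacy_cuda_container and gpu_container show the constraint migration these fixtures encode. As a hedged side-by-side sketch in Ruby hashes (values taken from the fixtures above): the deprecated cuda block's scalar hardware_capability becomes a hardware_target list, and the new gpu block adds stack and vram.

    old = {"cuda" => {"device_count" => 1,
                      "driver_version" => "11.0",
                      "hardware_capability" => "9.0"}}

    new = {"gpu"  => {"stack" => "cuda",
                      "device_count" => 1,
                      "driver_version" => "11.0",
                      "hardware_target" => ["9.0"],
                      "vram" => 8000000000}}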
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
index 9034ac6ee7..36c57aecc7 100644
--- a/services/api/test/fixtures/groups.yml
+++ b/services/api/test/fixtures/groups.yml
@@ -41,7 +41,6 @@ activeandfriends:
uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-08-22 14:02:18.481582707 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-08-22 14:02:18.481319501 Z
name: Active User and friends
@@ -87,7 +86,6 @@ aproject:
uuid: zzzzz-j7d0g-v955i6s2oi1cbso
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -99,7 +97,6 @@ asubproject:
uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -111,7 +108,6 @@ afiltergroup:
uuid: zzzzz-j7d0g-thisfiltergroup
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -124,7 +120,6 @@ afiltergroup2:
uuid: zzzzz-j7d0g-afiltergrouptwo
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -137,7 +132,6 @@ afiltergroup3:
uuid: zzzzz-j7d0g-filtergroupthre
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -150,7 +144,6 @@ afiltergroup4:
uuid: zzzzz-j7d0g-filtergroupfour
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -163,7 +156,6 @@ afiltergroup5:
uuid: zzzzz-j7d0g-filtergroupfive
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -175,7 +167,6 @@ afiltergroup5:
fuse_filters_test_project:
uuid: zzzzz-j7d0g-fusefiltertest1
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2024-02-09T12:00:00Z
modified_at: 2024-02-09T12:00:01Z
@@ -187,7 +178,6 @@ future_project_viewing_group:
uuid: zzzzz-j7d0g-futrprojviewgrp
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -198,7 +188,6 @@ future_project_viewing_group:
bad_group_has_ownership_cycle_a:
uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
owner_uuid: zzzzz-j7d0g-0077nzts8c178lw
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-05-03 18:50:08 -0400
modified_at: 2014-05-03 18:50:08 -0400
@@ -209,7 +198,6 @@ bad_group_has_ownership_cycle_a:
bad_group_has_ownership_cycle_b:
uuid: zzzzz-j7d0g-0077nzts8c178lw
owner_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-05-03 18:50:08 -0400
modified_at: 2014-05-03 18:50:08 -0400
@@ -265,7 +253,6 @@ empty_project:
uuid: zzzzz-j7d0g-9otoxmrksam74q6
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-12-16 15:56:27.967534940 Z
- modified_by_client_uuid: ~
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-12-16 15:56:27.967358199 Z
name: Empty project
@@ -277,7 +264,6 @@ project_with_10_collections:
uuid: zzzzz-j7d0g-0010collections
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -289,7 +275,6 @@ project_with_201_collections:
uuid: zzzzz-j7d0g-0201collections
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -301,7 +286,6 @@ project_with_10_pipelines:
uuid: zzzzz-j7d0g-000010pipelines
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -313,7 +297,6 @@ project_with_2_pipelines_and_60_crs:
uuid: zzzzz-j7d0g-nnncrspipelines
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -325,7 +308,6 @@ project_with_25_pipelines:
uuid: zzzzz-j7d0g-000025pipelines
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -337,7 +319,6 @@ fuse_owned_project:
uuid: zzzzz-j7d0g-0000ownedbyfuse
owner_uuid: zzzzz-tpzed-0fusedrivertest
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -351,7 +332,6 @@ project_owns_itself:
uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
owner_uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
created_at: 2014-11-05 22:31:24.258424340 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: 6pbr1-tpzed-000000000000000
modified_at: 2014-11-05 22:31:24.258242890 Z
name: zzzzz-j7d0g-7rqh7hdshd5yp5t
@@ -365,7 +345,6 @@ subproject_in_active_user_home_project_to_test_unique_key_violation:
uuid: zzzzz-j7d0g-subprojsamenam1
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2013-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2013-04-21 15:37:48 -0400
updated_at: 2013-04-21 15:37:48 -0400
@@ -377,7 +356,6 @@ subproject_in_asubproject_with_same_name_as_one_in_active_user_home:
uuid: zzzzz-j7d0g-subprojsamenam2
owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
created_at: 2013-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2013-04-21 15:37:48 -0400
updated_at: 2013-04-21 15:37:48 -0400
@@ -389,7 +367,6 @@ starred_and_shared_active_user_project:
uuid: zzzzz-j7d0g-starredshared01
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -403,7 +380,7 @@ trashed_project:
name: trashed project
group_class: project
trash_at: 2001-01-01T00:00:00Z
- delete_at: 2008-03-01T00:00:00Z
+ delete_at: 2038-03-01T00:00:00Z
is_trashed: true
modified_at: 2001-01-01T00:00:00Z
@@ -434,3 +411,13 @@ trashed_on_next_sweep:
delete_at: 2038-03-01T00:00:00Z
is_trashed: false
modified_at: 2001-01-01T00:00:00Z
+
+trashed_role_on_next_sweep:
+ uuid: zzzzz-j7d0g-soontobetrashd2
+ owner_uuid: zzzzz-tpzed-000000000000000
+ name: soon to be trashed role group
+ group_class: role
+ trash_at: 2001-01-01T00:00:00Z
+ delete_at: 2001-01-01T00:00:00Z
+ is_trashed: false
+ modified_at: 2001-01-01T00:00:00Z
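Aside on the new fixture above: it exercises trash-sweep semantics — trash_at and delete_at are already in the past while is_trashed is still false, so the periodic sweep is what should notice and flag the row. A rough sketch of that check follows; the field names mirror the fixture, but the logic is an assumption for illustration, not Arvados' actual sweep code.

# Illustrative sketch of one trash-sweep pass (assumed logic, not the
# real implementation): flag any row whose trash_at has passed.
now = Time.now.utc
row = { trash_at: Time.utc(2001, 1, 1), delete_at: Time.utc(2001, 1, 1), is_trashed: false }

row[:is_trashed] = true if row[:trash_at] && row[:trash_at] <= now
# A row whose delete_at has also passed would then be purged outright.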
diff --git a/services/api/test/fixtures/humans.yml b/services/api/test/fixtures/humans.yml
deleted file mode 100644
index eee61efefe..0000000000
--- a/services/api/test/fixtures/humans.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# File exists to ensure the table gets cleared during DatabaseController#reset
diff --git a/services/api/test/fixtures/job_tasks.yml b/services/api/test/fixtures/job_tasks.yml
deleted file mode 100644
index 6a857a02f2..0000000000
--- a/services/api/test/fixtures/job_tasks.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-running_job_task_1:
- uuid: zzzzz-ot0gb-runningjobtask1
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- job_uuid: zzzzz-8i9sb-with2components
-
-running_job_task_2:
- uuid: zzzzz-ot0gb-runningjobtask2
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- job_uuid: zzzzz-8i9sb-with2components
diff --git a/services/api/test/fixtures/jobs.yml b/services/api/test/fixtures/jobs.yml
deleted file mode 100644
index 54b38259ba..0000000000
--- a/services/api/test/fixtures/jobs.yml
+++ /dev/null
@@ -1,768 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-running:
- uuid: zzzzz-8i9sb-pshmckwoma9plh7
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 2.7.minute.ago.to_fs(:db) %>
- started_at: <%= 2.7.minute.ago.to_fs(:db) %>
- finished_at: ~
- script: hash
- repository: active/foo
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-running_cancelled:
- uuid: zzzzz-8i9sb-4cf0nhn6xte809j
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: <%= 1.minute.ago.to_fs(:db) %>
- cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: ~
- script: hash
- repository: active/foo
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Cancelled
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-uses_nonexistent_script_version:
- uuid: zzzzz-8i9sb-7m339pu0x9mla88
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- created_at: <%= 5.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: <%= 2.minute.ago.to_fs(:db) %>
- script: hash
- repository: active/foo
- running: false
- success: true
- output: d41d8cd98f00b204e9800998ecf8427e+0
- priority: 0
- log: d41d8cd98f00b204e9800998ecf8427e+0
- is_locked_by_uuid: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 0
- done: 1
- runtime_constraints: {}
- state: Complete
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-foobar:
- uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- script: hash
- repository: active/foo
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- script_parameters:
- input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: <%= 2.minute.ago.to_fs(:db) %>
- running: false
- success: true
- output: fa7aeb5140e2848d39b416daeef4ffc5+45
- priority: 0
- log: ea10d51bcf88862dbcc36eb292017dfd+45
- is_locked_by_uuid: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 0
- done: 1
- runtime_constraints: {}
- state: Complete
- script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
-
-barbaz:
- uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: 1
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: <%= 2.minute.ago.to_fs(:db) %>
- running: false
- success: true
- repository: active/foo
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- priority: 0
- log: d41d8cd98f00b204e9800998ecf8427e+0
- is_locked_by_uuid: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 0
- done: 1
- runtime_constraints: {}
- state: Complete
- script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
-
-runningbarbaz:
- uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: 1
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: <%= 2.minute.ago.to_fs(:db) %>
- running: true
- success: ~
- repository: active/foo
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- priority: 0
- log: d41d8cd98f00b204e9800998ecf8427e+0
- is_locked_by_uuid: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 1
- done: 0
- runtime_constraints: {}
- state: Running
- script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
-
-previous_job_run:
- uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- finished_at: <%= 13.minutes.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "1"
- success: true
- log: d41d8cd98f00b204e9800998ecf8427e+0
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- state: Complete
- script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
-
-previous_job_run_nil_log:
- uuid: zzzzz-8i9sb-cjs4pklxxjykqq3
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- finished_at: <%= 13.minutes.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "3"
- success: true
- log: ~
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- state: Complete
- script_parameters_digest: 445702df4029b8a6e7075b451ff1256a
-
-previous_ancient_job_run:
- uuid: zzzzz-8i9sb-ahd7cie8jah9qui
- created_at: <%= 366.days.ago.to_fs(:db) %>
- finished_at: <%= 365.days.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "2"
- success: true
- log: d41d8cd98f00b204e9800998ecf8427e+0
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- state: Complete
- script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
-
-previous_docker_job_run:
- uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "1"
- runtime_constraints:
- docker_image: arvados/apitestfixture
- success: true
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
- state: Complete
- script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
- log: ea10d51bcf88862dbcc36eb292017dfd+45
-
-previous_ancient_docker_image_job_run:
- uuid: zzzzz-8i9sb-t3b460aolxxuldl
- created_at: <%= 144.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "2"
- runtime_constraints:
- docker_image: arvados/apitestfixture
- success: true
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- docker_image_locator: b519d9cb706a29fc7ea24dbea2f05851+93
- state: Complete
- script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
-
-previous_job_run_with_arvados_sdk_version:
- uuid: zzzzz-8i9sb-eoo0321or2dw2jg
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 31ce37fe365b3dc204300a3e4c396ad333ed0556
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "1"
- runtime_constraints:
- arvados_sdk_version: commit2
- docker_image: arvados/apitestfixture
- arvados_sdk_version: 00634b2b8a492d6f121e3cf1d6587b821136a9a7
- docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
- success: true
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- state: Complete
- script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
- log: ea10d51bcf88862dbcc36eb292017dfd+45
-
-previous_job_run_no_output:
- uuid: zzzzz-8i9sb-cjs4pklxxjykppp
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "2"
- success: true
- output: ~
- state: Complete
- script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
-
-previous_job_run_superseded_by_hash_branch:
- # This supplied_script_version is a branch name with later commits.
- uuid: zzzzz-8i9sb-aeviezu5dahph3e
- created_at: <%= 15.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/shabranchnames
- script: testscript
- script_version: 7387838c69a21827834586cc42b467ff6c63293b
- supplied_script_version: 738783
- script_parameters: {}
- success: true
- output: d41d8cd98f00b204e9800998ecf8427e+0
- state: Complete
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-nondeterminisic_job_run:
- uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: hash2
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- an_integer: "1"
- success: true
- nondeterministic: true
- state: Complete
- script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
-
-nearly_finished_job:
- uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
- created_at: <%= 14.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: arvados
- script: doesnotexist
- script_version: 309e25a64fe994867db8459543af372f850e25b9
- script_parameters:
- input: b519d9cb706a29fc7ea24dbea2f05851+249025
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: ~
- running: true
- success: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 1
- done: 0
- runtime_constraints: {}
- state: Complete
- script_parameters_digest: 7ea26d58a79b7f5db9f90fb1e33d3006
-
-queued:
- uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
- created_at: <%= 1.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- started_at: ~
- finished_at: ~
- script: foo
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters: {}
- running: ~
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: ~
- tasks_summary: {}
- runtime_constraints: {}
- state: Queued
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-# A job with a log collection that can be parsed by the log viewer.
-job_with_real_log:
- uuid: zzzzz-8i9sb-0vsrcqi7whchuil
- created_at: 2014-09-01 12:00:00
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- log: 0b9a7787660e1fce4a93f33e01376ba6+81
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- state: Complete
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-cancelled:
- uuid: zzzzz-8i9sb-4cf0abc123e809j
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: <%= 1.minute.ago.to_fs(:db) %>
- cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: ~
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- running: false
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Cancelled
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-job_in_subproject:
- uuid: zzzzz-8i9sb-subprojectjob01
- created_at: 2014-10-15 12:00:00
- owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
- log: ~
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-job_in_trashed_project:
- uuid: zzzzz-8i9sb-subprojectjob02
- created_at: 2014-10-15 12:00:00
- owner_uuid: zzzzz-j7d0g-trashedproject2
- log: ~
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-running_will_be_completed:
- uuid: zzzzz-8i9sb-rshmckwoma9pjh8
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: ~
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-d9tiejq69daie8f
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-graph_stage1:
- uuid: zzzzz-8i9sb-graphstage10000
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- repository: active/foo
- script: hash
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- output: fa7aeb5140e2848d39b416daeef4ffc5+45
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-graph_stage2:
- uuid: zzzzz-8i9sb-graphstage20000
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- repository: active/foo
- script: hash2
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- input2: "stuff"
- output: 65b17c95fdbc9800fc48acda4e9dcd0b+93
- script_parameters_digest: 4900033ec5cfaf8a63566f3664aeaa70
-
-graph_stage3:
- uuid: zzzzz-8i9sb-graphstage30000
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- repository: active/foo
- script: hash2
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- input2: "stuff2"
- output: ea10d51bcf88862dbcc36eb292017dfd+45
- script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
-
-job_with_latest_version:
- uuid: zzzzz-8i9sb-nj8ioxnrvjtyk2b
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- script: hash
- repository: active/foo
- script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- supplied_script_version: main
- script_parameters:
- input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- started_at: <%= 2.minute.ago.to_fs(:db) %>
- finished_at: <%= 1.minute.ago.to_fs(:db) %>
- running: false
- success: true
- output: fa7aeb5140e2848d39b416daeef4ffc5+45
- priority: 0
- log: ea10d51bcf88862dbcc36eb292017dfd+45
- is_locked_by_uuid: ~
- tasks_summary:
- failed: 0
- todo: 0
- running: 0
- done: 1
- runtime_constraints: {}
- state: Complete
- script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
-
-running_job_in_publicly_accessible_project:
- uuid: zzzzz-8i9sb-n7omg50bvt0m1nf
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/bar
- script: running_job_script
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Running
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- input2: "stuff2"
- script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
-
-completed_job_in_publicly_accessible_project:
- uuid: zzzzz-8i9sb-jyq01m7in1jlofj
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: completed_job_script
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- input2: "stuff2"
- log: zzzzz-4zz18-4en62shvi99lxd4
- output: b519d9cb706a29fc7ea24dbea2f05851+93
- script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
- started_at: <%= 10.minute.ago.to_fs(:db) %>
- finished_at: <%= 5.minute.ago.to_fs(:db) %>
-
-job_in_publicly_accessible_project_but_other_objects_elsewhere:
- uuid: zzzzz-8i9sb-jyq01muyhgr4ofj
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- repository: active/foo
- script: completed_job_script
- script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
- state: Complete
- script_parameters:
- input: fa7aeb5140e2848d39b416daeef4ffc5+45
- input2: "stuff2"
- log: zzzzz-4zz18-fy296fx3hot09f7
- output: zzzzz-4zz18-bv31uwvy3neko21
- script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
-
-running_job_with_components:
- uuid: zzzzz-8i9sb-with2components
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- started_at: <%= 3.minute.ago.to_fs(:db) %>
- finished_at: ~
- script: hash
- repository: active/foo
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- components:
- component1: zzzzz-8i9sb-jyq01m7in1jlofj
- component2: zzzzz-d1hrv-partdonepipelin
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
-
-# This main-level job is in the Running state, with one job and one pipeline instance as components
-running_job_with_components_at_level_1:
- uuid: zzzzz-8i9sb-jobcomponentsl1
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- components:
- component1: zzzzz-8i9sb-jobcomponentsl2
- component2: zzzzz-d1hrv-picomponentsl02
-
-# This running job, a child of level_1, has one child component
-running_job_with_components_at_level_2:
- uuid: zzzzz-8i9sb-jobcomponentsl2
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- components:
- component1: zzzzz-8i9sb-job1atlevel3noc
-
-# The two running jobs below, children of level_2, have no child components
-running_job_1_with_components_at_level_3:
- uuid: zzzzz-8i9sb-job1atlevel3noc
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
-
-running_job_2_with_components_at_level_3:
- uuid: zzzzz-8i9sb-job2atlevel3noc
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
-
-# The two jobs below are so confused, they have a circular relationship
-running_job_1_with_circular_component_relationship:
- uuid: zzzzz-8i9sb-job1withcirculr
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- components:
- component1: zzzzz-8i9sb-job2withcirculr
-
-running_job_2_with_circular_component_relationship:
- uuid: zzzzz-8i9sb-job2withcirculr
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: ~
- cancelled_by_user_uuid: ~
- cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- finished_at: ~
- repository: active/foo
- script: hash
- script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
- script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
- running: true
- success: ~
- output: ~
- priority: 0
- log: ~
- is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- tasks_summary:
- failed: 0
- todo: 3
- running: 1
- done: 1
- runtime_constraints: {}
- state: Running
- components:
- component1: zzzzz-8i9sb-job1withcirculr
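Aside on the jobs.yml fixtures deleted above: they lean heavily on Rails' ERB preprocessing of YAML fixtures — expressions like <%= 3.minute.ago.to_fs(:db) %> are evaluated when the test database is loaded, so timestamps are always relative to the test run. A minimal sketch of what such an expression evaluates to, assuming ActiveSupport 7.0 or later (where to_fs exists):

# Sketch of the ERB helper chain used throughout these fixtures.
require "active_support/all"

Time.zone = "UTC"
# Integer#minutes builds a duration, #ago subtracts it from the current
# time, and to_fs(:db) renders the "YYYY-MM-DD HH:MM:SS" format Rails
# uses for database columns.
puts 3.minutes.ago.to_fs(:db)   # e.g. "2014-05-03 18:47:08"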
diff --git a/services/api/test/fixtures/keep_disks.yml b/services/api/test/fixtures/keep_disks.yml
deleted file mode 100644
index 5cccf498af..0000000000
--- a/services/api/test/fixtures/keep_disks.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-nonfull:
- uuid: zzzzz-penuu-5w2o2t1q5wy7fhn
- owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
- node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
- last_read_at: <%= 1.minute.ago.to_fs(:db) %>
- last_write_at: <%= 2.minute.ago.to_fs(:db) %>
- last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
- ping_secret: z9xz2tc69dho51g1dmkdy5fnupdhsprahcwxdbjs0zms4eo6i
-
-full:
- uuid: zzzzz-penuu-4kmq58ui07xuftx
- owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
- node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
- last_read_at: <%= 1.minute.ago.to_fs(:db) %>
- last_write_at: <%= 2.day.ago.to_fs(:db) %>
- last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
- ping_secret: xx3ieejcufbjy4lli6yt5ig4e8w5l2hhgmbyzpzuq38gri6lj
-
-nonfull2:
- uuid: zzzzz-penuu-1ydrih9k2er5j11
- owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
- node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
- keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
- last_read_at: <%= 1.minute.ago.to_fs(:db) %>
- last_write_at: <%= 2.minute.ago.to_fs(:db) %>
- last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
- ping_secret: 4rs260ibhdum1d242xy23qv320rlerc0j7qg9vyqnchbgmjeek
diff --git a/services/api/test/fixtures/links.yml b/services/api/test/fixtures/links.yml
index 00d5971534..f3abb43cee 100644
--- a/services/api/test/fixtures/links.yml
+++ b/services/api/test/fixtures/links.yml
@@ -6,7 +6,6 @@ user_agreement_required:
uuid: zzzzz-o0j2j-j2qe76q7s3c8aro
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T19:52:21Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2013-12-26T19:52:21Z
updated_at: 2013-12-26T19:52:21Z
@@ -20,7 +19,6 @@ user_agreement_readable:
uuid: zzzzz-o0j2j-qpf60gg4fwjlmex
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -34,7 +32,6 @@ all_users_can_read_anonymous_group:
uuid: zzzzz-o0j2j-0lhbqyjab4g0bwp
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2015-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2015-01-24 20:42:26 -0800
updated_at: 2015-01-24 20:42:26 -0800
@@ -48,7 +45,6 @@ active_user_member_of_all_users_group:
uuid: zzzzz-o0j2j-ctbysaduejxfrs5
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -62,7 +58,6 @@ active_user_can_manage_group:
uuid: zzzzz-o0j2j-3sa30nd3bqn1msh
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03 15:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-02-03 15:42:26 -0800
updated_at: 2014-02-03 15:42:26 -0800
@@ -76,7 +71,6 @@ user_agreement_signed_by_active:
uuid: zzzzz-o0j2j-4x85a69tqlrud1z
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T20:52:21Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
@@ -90,7 +84,6 @@ user_agreement_signed_by_inactive:
uuid: zzzzz-o0j2j-lh7er2o3k6bmetw
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T20:52:21Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
@@ -104,7 +97,6 @@ spectator_user_member_of_all_users_group:
uuid: zzzzz-o0j2j-0s8ql1redzf8kvn
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -118,7 +110,6 @@ inactive_user_member_of_all_users_group:
uuid: zzzzz-o0j2j-osckxpy5hl5fjk5
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T20:52:21Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
@@ -132,7 +123,6 @@ inactive_signed_ua_user_member_of_all_users_group:
uuid: zzzzz-o0j2j-qkhyjcr6tidk652
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T20:52:21Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
@@ -146,7 +136,6 @@ foo_file_readable_by_active:
uuid: zzzzz-o0j2j-dp1d8395ldqw22r
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -160,7 +149,6 @@ foo_file_readable_by_federated_active:
uuid: zzzzz-o0j2j-dp1d8395ldqw23r
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -174,7 +162,6 @@ foo_file_readable_by_active_duplicate_permission:
uuid: zzzzz-o0j2j-2qlmhgothiur55r
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-000000000000000
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -188,7 +175,6 @@ foo_file_readable_by_active_redundant_permission_via_private_group:
uuid: zzzzz-o0j2j-5s8ry7sn6bwxb7w
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-000000000000000
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -202,7 +188,6 @@ foo_file_readable_by_project_viewer:
uuid: zzzzz-o0j2j-fp1d8395ldqw22p
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -216,7 +201,6 @@ bar_file_readable_by_active:
uuid: zzzzz-o0j2j-8hppiuduf8eqdng
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -230,7 +214,6 @@ bar_file_readable_by_spectator:
uuid: zzzzz-o0j2j-0mhldkqozsltcli
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -244,7 +227,6 @@ baz_file_publicly_readable:
uuid: zzzzz-o0j2j-132ne3lk954vtoc
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -254,109 +236,10 @@ baz_file_publicly_readable:
head_uuid: zzzzz-4zz18-y9vne9npefyxh8g
properties: {}
-barbaz_job_readable_by_spectator:
- uuid: zzzzz-o0j2j-cpy7p41hpk531e1
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-01-24 20:42:26 -0800
- updated_at: 2014-01-24 20:42:26 -0800
- tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
- link_class: permission
- name: can_read
- head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
- properties: {}
-
-runningbarbaz_job_readable_by_spectator:
- uuid: zzzzz-o0j2j-cpy7p41hpk531e2
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-01-24 20:42:26 -0800
- updated_at: 2014-01-24 20:42:26 -0800
- tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
- link_class: permission
- name: can_read
- head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
- properties: {}
-
-arvados_repository_readable_by_all_users:
- uuid: zzzzz-o0j2j-allcanreadarvrp
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-01-24 20:42:26 -0800
- updated_at: 2014-01-24 20:42:26 -0800
- tail_uuid: zzzzz-j7d0g-fffffffffffffff
- link_class: permission
- name: can_read
- head_uuid: zzzzz-s0uqq-arvadosrepo0123
- properties: {}
-
-foo_repository_readable_by_spectator:
- uuid: zzzzz-o0j2j-cpy7p41hpk5xxx
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-01-24 20:42:26 -0800
- updated_at: 2014-01-24 20:42:26 -0800
- tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
- link_class: permission
- name: can_read
- head_uuid: zzzzz-s0uqq-382brsig8rp3666
- properties: {}
-
-foo_repository_manageable_by_active:
- uuid: zzzzz-o0j2j-8tdfjd8g0s4rn1k
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-01-24 20:42:26 -0800
- updated_at: 2014-01-24 20:42:26 -0800
- tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- link_class: permission
- name: can_manage
- head_uuid: zzzzz-s0uqq-382brsig8rp3666
- properties: {}
-
-repository3_readable_by_active:
- uuid: zzzzz-o0j2j-43iem9bdtefa76g
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-09-23 13:52:46 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-09-23 13:52:46 -0400
- updated_at: 2014-09-23 13:52:46 -0400
- tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- link_class: permission
- name: can_read
- head_uuid: zzzzz-s0uqq-38orljkqpyo1j61
- properties: {}
-
-repository4_writable_by_active:
- uuid: zzzzz-o0j2j-lio9debdt6yhkil
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-09-23 13:52:46 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-09-23 13:52:46 -0400
- updated_at: 2014-09-23 13:52:46 -0400
- tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- link_class: permission
- name: can_write
- head_uuid: zzzzz-s0uqq-38oru8hnk57ht34
- properties: {}
-
miniadmin_user_is_a_testusergroup_admin:
uuid: zzzzz-o0j2j-38vvkciz7qc12j9
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-04-01 13:53:33 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-04-01 13:53:33 -0400
updated_at: 2014-04-01 13:53:33 -0400
@@ -370,7 +253,6 @@ rominiadmin_user_is_a_testusergroup_admin:
uuid: zzzzz-o0j2j-6b0hz5hr107mc90
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-04-01 13:53:33 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-04-01 13:53:33 -0400
updated_at: 2014-04-01 13:53:33 -0400
@@ -384,7 +266,6 @@ testusergroup_can_manage_active_user:
uuid: zzzzz-o0j2j-2vaqhxz6hsf4k1d
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-04-01 13:56:10 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-04-01 13:56:10 -0400
updated_at: 2014-04-01 13:56:10 -0400
@@ -398,7 +279,6 @@ test_timestamps:
uuid: zzzzz-o0j2j-4abnk2w5t86x4uc
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-15 13:17:14 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-15 13:17:14 -0400
updated_at: 2014-04-15 13:17:14 -0400
@@ -413,7 +293,6 @@ admin_can_write_aproject:
uuid: zzzzz-o0j2j-adminmgsproject
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -427,7 +306,6 @@ project_viewer_member_of_all_users_group:
uuid: zzzzz-o0j2j-cdnq6627g0h0r2x
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2015-07-28T21:34:41.361747000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2015-07-28T21:34:41.361747000Z
updated_at: 2015-07-28T21:34:41.361747000Z
@@ -441,7 +319,6 @@ project_viewer_can_read_project:
uuid: zzzzz-o0j2j-projviewerreadp
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -455,7 +332,6 @@ subproject_admin_can_manage_subproject:
uuid: zzzzz-o0j2j-subprojadminlnk
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-10-15 10:00:00 -0000
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-10-15 10:00:00 -0000
updated_at: 2014-10-15 10:00:00 -0000
@@ -469,7 +345,6 @@ foo_collection_tag:
uuid: zzzzz-o0j2j-eedahfaho8aphiv
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -483,7 +358,6 @@ active_user_can_manage_bad_group_cx2al9cqkmsf1hs:
uuid: zzzzz-o0j2j-ezv55ahzc9lvjwe
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-03 18:50:08 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-03 18:50:08 -0400
updated_at: 2014-05-03 18:50:08 -0400
@@ -497,7 +371,6 @@ multilevel_collection_1_readable_by_active:
uuid: zzzzz-o0j2j-dp1d8395ldqw22j
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -520,7 +393,6 @@ has_symbol_keys_in_database_somehow:
uuid: zzzzz-o0j2j-enl1wg58310loc6
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-28 16:24:02.314722162 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-28 16:24:02.314484982 Z
tail_uuid: ~
@@ -543,7 +415,6 @@ bug2931_link_with_null_head_uuid:
uuid: zzzzz-o0j2j-uru66qok2wruasb
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-30 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-30 14:30:00.184019565 Z
updated_at: 2014-05-30 14:30:00.183829316 Z
@@ -557,7 +428,6 @@ anonymous_group_can_read_anonymously_accessible_project:
uuid: zzzzz-o0j2j-15gpzezqjg4bc4z
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-30 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-30 14:30:00.184019565 Z
updated_at: 2014-05-30 14:30:00.183829316 Z
@@ -571,7 +441,6 @@ anonymous_user_can_read_anonymously_accessible_project:
uuid: zzzzz-o0j2j-82nbli3jptwksj1
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-30 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-30 14:30:00.184019565 Z
updated_at: 2014-05-30 14:30:00.183829316 Z
@@ -585,7 +454,6 @@ user_agreement_readable_by_anonymously_accessible_project:
uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h
owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-06-13 20:42:26 -0800
updated_at: 2014-06-13 20:42:26 -0800
@@ -596,7 +464,6 @@ active_user_permission_to_docker_image_collection:
uuid: zzzzz-o0j2j-dp1d8395ldqw33s
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -610,7 +477,6 @@ active_user_permission_to_unlinked_docker_image_collection:
uuid: zzzzz-o0j2j-g5i0sa8cr3b1psf
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -624,7 +490,6 @@ crt_user_permission_to_unlinked_docker_image_collection:
uuid: zzzzz-o0j2j-20zvdi9b4odcfz3
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -638,7 +503,6 @@ docker_image_collection_hash:
uuid: zzzzz-o0j2j-dockercollhasha
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-11 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-11 14:30:00.184019565 Z
updated_at: 2014-06-11 14:30:00.183829316 Z
@@ -653,7 +517,6 @@ docker_image_collection_tag:
uuid: zzzzz-o0j2j-dockercolltagbb
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-11 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-11 14:30:00.184019565 Z
updated_at: 2014-06-11 14:30:00.183829316 Z
@@ -668,7 +531,6 @@ docker_image_collection_tag2:
uuid: zzzzz-o0j2j-dockercolltagbc
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-11 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-11 14:30:00.184019565 Z
updated_at: 2014-06-11 14:30:00.183829316 Z
@@ -683,7 +545,6 @@ docker_image_collection_hextag:
uuid: zzzzz-o0j2j-2591ao7zubhaoxh
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2017-02-13 21:41:06.769936997 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2017-02-13 21:41:06.769422000 Z
tail_uuid: ~
@@ -697,7 +558,6 @@ docker_1_12_image_hash:
uuid: zzzzz-o0j2j-f58l58fn65n8v6k
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2017-02-13 21:35:12.602828136 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2017-02-13 21:35:12.602309000 Z
tail_uuid: ~
@@ -711,7 +571,6 @@ docker_1_12_image_tag:
uuid: zzzzz-o0j2j-dybsy0m3u96jkbv
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2017-02-13 21:37:47.441406362 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2017-02-13 21:37:47.440882000 Z
tail_uuid: ~
@@ -725,7 +584,6 @@ docker_1_12_image_hextag:
uuid: zzzzz-o0j2j-06hzef4u1hbk1g5
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2017-02-13 21:37:47.441406362 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2017-02-13 21:37:47.440882000 Z
tail_uuid: ~
@@ -743,7 +601,6 @@ ancient_docker_image_collection_hash:
uuid: zzzzz-o0j2j-dockercollhashz
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-12 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-12 14:30:00.184019565 Z
updated_at: 2014-06-12 14:30:00.183829316 Z
@@ -758,7 +615,6 @@ ancient_docker_image_collection_tag:
uuid: zzzzz-o0j2j-dockercolltagzz
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-12 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-12 14:30:00.184019565 Z
updated_at: 2014-06-12 14:30:00.183829316 Z
@@ -773,7 +629,6 @@ docker_image_tag_like_hash:
uuid: zzzzz-o0j2j-dockerhashtagaa
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-06-11 14:30:00.184389725 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-06-11 14:30:00.184019565 Z
updated_at: 2014-06-11 14:30:00.183829316 Z
@@ -784,86 +639,10 @@ docker_image_tag_like_hash:
properties:
image_timestamp: "2014-06-10T14:30:00.184019565Z"
-job_reader_can_read_previous_job_run:
- # Permission link giving job_reader permission
- # to read previous_job_run
- uuid: zzzzz-o0j2j-8bbd851795ebafd
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-tpzed-000000000000000
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-06-13 20:42:26 -0800
- updated_at: 2014-06-13 20:42:26 -0800
- link_class: permission
- name: can_read
- tail_uuid: zzzzz-tpzed-905b42d1dd4a354
- head_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
-
-job_reader_can_read_foo_repo:
- # Permission link giving job_reader permission
- # to read foo_repo
- uuid: zzzzz-o0j2j-072ec05dc9487f8
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-tpzed-000000000000000
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-06-13 20:42:26 -0800
- updated_at: 2014-06-13 20:42:26 -0800
- link_class: permission
- name: can_read
- tail_uuid: zzzzz-tpzed-905b42d1dd4a354
- head_uuid: zzzzz-s0uqq-382brsig8rp3666
-
-job_reader2_can_read_job_with_components:
- # Permission link giving job_reader2 permission
- # to read running_job_with_components
- uuid: zzzzz-o0j2j-jobcomps4jobrdr
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-tpzed-000000000000000
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-06-13 20:42:26 -0800
- updated_at: 2014-06-13 20:42:26 -0800
- link_class: permission
- name: can_read
- tail_uuid: zzzzz-tpzed-readjobwithcomp
- head_uuid: zzzzz-8i9sb-with2components
-
-job_reader2_can_read_pipeline_from_job_with_components:
- # Permission link giving job_reader2 permission
-  # to read the pipeline component of running_job_with_components
- uuid: zzzzz-o0j2j-pi4comps4jobrdr
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-tpzed-000000000000000
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-06-13 20:42:26 -0800
- updated_at: 2014-06-13 20:42:26 -0800
- link_class: permission
- name: can_read
- tail_uuid: zzzzz-tpzed-readjobwithcomp
- head_uuid: zzzzz-d1hrv-partdonepipelin
-
-job_reader2_can_read_first_job_from_pipeline_from_job_with_components:
- # Permission link giving job_reader2 permission
-  # to read the first job of the pipeline referenced by running_job_with_components
- uuid: zzzzz-o0j2j-job4pi4j4jobrdr
- owner_uuid: zzzzz-tpzed-000000000000000
- created_at: 2014-06-13 20:42:26 -0800
- modified_by_client_uuid: zzzzz-tpzed-000000000000000
- modified_by_user_uuid: zzzzz-tpzed-000000000000000
- modified_at: 2014-06-13 20:42:26 -0800
- updated_at: 2014-06-13 20:42:26 -0800
- link_class: permission
- name: can_read
- tail_uuid: zzzzz-tpzed-readjobwithcomp
- head_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
-
baz_collection_name_in_asubproject:
uuid: zzzzz-o0j2j-bazprojectname2
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-04-21 15:37:48 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-04-21 15:37:48 -0400
updated_at: 2014-04-21 15:37:48 -0400
@@ -879,7 +658,6 @@ empty_collection_name_in_active_user_home_project:
uuid: zzzzz-o0j2j-i3n6m552x6tmoi4
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -893,7 +671,6 @@ active_user_can_read_activeandfriends:
uuid: zzzzz-o0j2j-8184f5vk8c851ts
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-08-22 14:03:46.321059945 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-08-22 14:03:46.320865926 Z
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -907,7 +684,6 @@ active_user_joined_activeandfriends:
uuid: zzzzz-o0j2j-t63rdd7vupqvnco
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-08-22 14:03:28.835064240 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-08-22 14:03:28.834849409 Z
tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
@@ -921,7 +697,6 @@ future_project_can_read_activeandfriends:
uuid: zzzzz-o0j2j-bkdtnddpmwxqiza
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-08-22 14:04:18.811622057 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-08-22 14:04:18.811463859 Z
tail_uuid: zzzzz-tpzed-futureprojview2
@@ -935,7 +710,6 @@ future_project_user_joined_activeandfriends:
uuid: zzzzz-o0j2j-ksl8bo92eokv332
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-08-22 14:04:24.182103355 Z
- modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-08-22 14:04:24.181939129 Z
tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
@@ -949,7 +723,6 @@ auto_setup_vm_login_username_can_login_to_test_vm:
uuid: zzzzz-o0j2j-i3n6m98766tmoi4
owner_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
@@ -963,7 +736,6 @@ admin_can_login_to_testvm2:
uuid: zzzzz-o0j2j-peek9mecohgh3ai
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-d9tiejq69daie8f
@@ -978,7 +750,6 @@ active_can_login_to_testvm2:
uuid: zzzzz-o0j2j-rah2ya1ohx9xaev
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -993,7 +764,6 @@ spectator_login_link_for_testvm2_without_username:
uuid: zzzzz-o0j2j-aem0eilie1jigh9
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
@@ -1038,7 +808,6 @@ user1-with-load_member_of_all_users_group:
uuid: zzzzz-o0j2j-user1-with-load
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1052,7 +821,6 @@ empty_collection_name_in_fuse_user_home_project:
uuid: zzzzz-o0j2j-hw3mcg3c8pwo6ar
owner_uuid: zzzzz-tpzed-0fusedrivertest
created_at: 2014-08-06 22:11:51.242392533 Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
modified_at: 2014-08-06 22:11:51.242150425 Z
tail_uuid: zzzzz-tpzed-0fusedrivertest
@@ -1066,7 +834,6 @@ star_project_for_active_user:
uuid: zzzzz-o0j2j-starredbyactive
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1088,7 +855,6 @@ star_shared_project_for_project_viewer:
uuid: zzzzz-o0j2j-starredbyviewer
owner_uuid: zzzzz-tpzed-projectviewer1a
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1102,7 +868,6 @@ tagged_collection_readable_by_spectator:
uuid: zzzzz-o0j2j-readacl4tagcoll
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1116,7 +881,6 @@ active_manages_viewing_group:
uuid: zzzzz-o0j2j-activemanagesvi
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1130,7 +894,6 @@ public_favorites_permission_link:
uuid: zzzzz-o0j2j-testpublicfavor
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-01-24 20:42:26 -0800
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
@@ -1144,7 +907,6 @@ future_project_user_member_of_all_users_group:
uuid: zzzzz-o0j2j-cdnq6627g0h0r2a
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2015-07-28T21:34:41.361747000Z
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2015-07-28T21:34:41.361747000Z
updated_at: 2015-07-28T21:34:41.361747000Z
@@ -1153,3 +915,16 @@ future_project_user_member_of_all_users_group:
name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
+
+foo_file_readable_by_soon_to_be_trashed_role:
+ uuid: zzzzz-o0j2j-5s8ry7sn7bwxb7w
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-j7d0g-soontobetrashd2
+ link_class: permission
+ name: can_read
+ head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+ properties: {}
diff --git a/services/api/test/fixtures/logs.yml b/services/api/test/fixtures/logs.yml
index 3b41550ae7..ad265a2134 100644
--- a/services/api/test/fixtures/logs.yml
+++ b/services/api/test/fixtures/logs.yml
@@ -11,22 +11,22 @@ noop: # nothing happened ...to the 'spectator' user
event_at: <%= 1.minute.ago.to_fs(:db) %>
created_at: <%= 1.minute.ago.to_fs(:db) %>
-admin_changes_repository2: # admin changes repository2, which is owned by active user
+admin_changes_collection_owned_by_active:
id: 2
uuid: zzzzz-57u5n-pshmckwoma00002
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
- object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+ object_uuid: zzzzz-4zz18-bv31uwvy3neko21 # collection_owned_by_active
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
created_at: <%= 2.minute.ago.to_fs(:db) %>
event_at: <%= 2.minute.ago.to_fs(:db) %>
event_type: update
-admin_changes_specimen: # admin changes specimen owned_by_spectator
+admin_changes_collection_owned_by_foo:
id: 3
uuid: zzzzz-57u5n-pshmckwoma00003
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
- object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
- object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+ object_uuid: zzzzz-4zz18-50surkhkbhsp31b # collection_owned_by_foo
+ object_owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c # foo user
created_at: <%= 3.minute.ago.to_fs(:db) %>
event_at: <%= 3.minute.ago.to_fs(:db) %>
event_type: update
@@ -60,113 +60,17 @@ log_owned_by_active:
event_at: <%= 2.minute.ago.to_fs(:db) %>
  summary: non-admin user can read own logs
-crunchstat_for_running_job:
- id: 7
- uuid: zzzzz-57u5n-tmymyrojrbtnxh1
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- object_uuid: zzzzz-8i9sb-pshmckwoma9plh7
- event_at: 2014-11-07 23:33:42.347455000 Z
- event_type: stderr
- summary: ~
- properties:
- text: '2014-11-07_23:33:41 zzzzz-8i9sb-pshmckwoma9plh7 31708 1 stderr crunchstat:
- cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
- 0.9900 sys'
- created_at: 2014-11-07 23:33:42.351913000 Z
- updated_at: 2014-11-07 23:33:42.347455000 Z
- modified_at: 2014-11-07 23:33:42.347455000 Z
- object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
-
-log_line_for_pipeline_in_publicly_accessible_project:
- id: 8
- uuid: zzzzz-57u5n-tmymyrojrjyhb45
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- object_uuid: zzzzz-d1hrv-n68vc490mloy4fi
- event_at: 2014-11-07 23:33:42.347455000 Z
- event_type: stderr
- summary: ~
- properties:
- text: '2014-11-07_23:33:41 zzzzz-d1hrv-n68vc490mloy4fi 31708 1 stderr crunchstat:
- cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
- 0.9900 sys'
- created_at: 2014-11-07 23:33:42.351913000 Z
- updated_at: 2014-11-07 23:33:42.347455000 Z
- modified_at: 2014-11-07 23:33:42.347455000 Z
- object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
-
-log_line_for_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
- id: 9
- uuid: zzzzz-57u5n-tmyhy56k9lnhb45
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- object_uuid: zzzzz-d1hrv-pisharednotobjs
- event_at: 2014-11-07 23:33:42.347455000 Z
- event_type: stderr
- summary: ~
- properties:
- text: '2014-11-07_23:33:41 zzzzz-d1hrv-pisharednotobjs 31708 1 stderr crunchstat:
- cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
- 0.9900 sys'
- created_at: 2014-11-07 23:33:42.351913000 Z
- updated_at: 2014-11-07 23:33:42.347455000 Z
- modified_at: 2014-11-07 23:33:42.347455000 Z
- object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
-
-crunchstat_for_previous_job:
- id: 10
- uuid: zzzzz-57u5n-eir3aesha3kaene
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- object_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- event_at: 2014-11-07 23:33:42.347455000 Z
- event_type: stderr
- summary: ~
- properties:
- text: '2014-11-07_23:33:41 zzzzz-8i9sb-cjs4pklxxjykqqq 11592 1 stderr crunchstat:
- cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
- 0.9900 sys'
- created_at: 2014-11-07 23:33:42.351913000 Z
- updated_at: 2014-11-07 23:33:42.347455000 Z
- modified_at: 2014-11-07 23:33:42.347455000 Z
- object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
-
-crunchstat_for_ancient_job:
- id: 11
- uuid: zzzzz-57u5n-ixioph7ieb5ung8
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- object_uuid: zzzzz-8i9sb-ahd7cie8jah9qui
- event_at: 2013-11-07 23:33:42.347455000 Z
- event_type: stderr
- summary: ~
- properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
- cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
- 0.9900 sys'
- created_at: 2013-11-07 23:33:42.351913000 Z
- updated_at: 2013-11-07 23:33:42.347455000 Z
- modified_at: 2013-11-07 23:33:42.347455000 Z
- object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
-
stderr_for_ancient_container:
id: 12
uuid: zzzzz-57u5n-containerlog001
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer01
event_at: <%= 2.year.ago.to_fs(:db) %>
event_type: stderr
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer01 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 2.year.ago.to_fs(:db) %>
@@ -178,14 +82,13 @@ crunchstat_for_ancient_container:
id: 13
uuid: zzzzz-57u5n-containerlog002
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer01
event_at: <%= 2.year.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer01 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 2.year.ago.to_fs(:db) %>
@@ -197,14 +100,13 @@ stderr_for_previous_container:
id: 14
uuid: zzzzz-57u5n-containerlog003
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer02
event_at: <%= 1.month.ago.to_fs(:db) %>
event_type: stderr
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer02 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 1.month.ago.to_fs(:db) %>
@@ -216,14 +118,13 @@ crunchstat_for_previous_container:
id: 15
uuid: zzzzz-57u5n-containerlog004
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer02
event_at: <%= 1.month.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer02 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 1.month.ago.to_fs(:db) %>
@@ -235,14 +136,13 @@ stderr_for_running_container:
id: 16
uuid: zzzzz-57u5n-containerlog005
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer03
event_at: <%= 1.hour.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer03 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 1.hour.ago.to_fs(:db) %>
@@ -254,14 +154,13 @@ crunchstat_for_running_container:
id: 17
uuid: zzzzz-57u5n-containerlog006
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer03
event_at: <%= 1.hour.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
- text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+ text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer03 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
created_at: <%= 1.hour.ago.to_fs(:db) %>
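
The change repeated throughout these fixture files is the removal of the obsolete modified_by_client_uuid attribute. A hedged sketch of the kind of schema migration that would accompany such a cleanup; the class name, Rails version, and table list here are assumptions for illustration, not taken from this diff:

require 'active_record'

# Drop the retired modified_by_client_uuid column wherever it remains.
# if_exists/if_not_exists keep the migration safe to re-run.
class RemoveModifiedByClientUuid < ActiveRecord::Migration[7.0]
  TABLES = %w(users groups links logs collections).freeze

  def up
    TABLES.each do |table|
      remove_column table, :modified_by_client_uuid, if_exists: true
    end
  end

  def down
    TABLES.each do |table|
      add_column table, :modified_by_client_uuid, :string, if_not_exists: true
    end
  end
end
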
diff --git a/services/api/test/fixtures/nodes.yml b/services/api/test/fixtures/nodes.yml
deleted file mode 100644
index d4589ed705..0000000000
--- a/services/api/test/fixtures/nodes.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-busy:
- uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: compute0
- slot_number: 0
- domain: ""
- ip_address: 172.17.2.172
- last_ping_at: <%= 1.minute.ago.to_fs(:db) %>
- first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
- job_uuid: zzzzz-8i9sb-2gx6rz0pjl033w3 # nearly_finished_job
- properties: {}
- info:
- ping_secret: "48dpm3b8ijyj3jkr2yczxw0844dqd2752bhll7klodvgz9bg80"
- slurm_state: "alloc"
-
-down:
- uuid: zzzzz-7ekkf-2vbompg3ecc6e2s
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: compute1
- slot_number: 1
- domain: ""
- ip_address: 172.17.2.173
- last_ping_at: <%= 1.hour.ago.to_fs(:db) %>
- first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
- job_uuid: ~
- properties: {}
- info:
- ping_secret: "2k3i71depad36ugwmlgzilbi4e8n0illb2r8l4efg9mzkb3a1k"
-
-idle:
- uuid: zzzzz-7ekkf-2z3mc76g2q73aio
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: compute2
- slot_number: 2
- domain: ""
- ip_address: 172.17.2.174
- last_ping_at: <%= 2.minute.ago.to_fs(:db) %>
- first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
- job_uuid: ~
- info:
- ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
- slurm_state: "idle"
- properties:
- total_cpu_cores: 16
-
-was_idle_now_down:
- uuid: zzzzz-7ekkf-xuzpkdasl0uzwyz
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: compute3
- slot_number: ~
- domain: ""
- ip_address: 172.17.2.174
- last_ping_at: <%= 1.hour.ago.to_fs(:db) %>
- first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
- job_uuid: ~
- info:
- ping_secret: "1bd1yi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
- slurm_state: "idle"
- properties:
- total_cpu_cores: 16
-
-new_with_no_hostname:
- uuid: zzzzz-7ekkf-newnohostname00
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: ~
- slot_number: ~
- ip_address: 172.17.2.175
- last_ping_at: ~
- first_ping_at: ~
- job_uuid: ~
- properties: {}
- info:
- ping_secret: "abcdyi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
-
-new_with_custom_hostname:
- uuid: zzzzz-7ekkf-newwithhostname
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: custom1
- slot_number: 23
- ip_address: 172.17.2.176
- last_ping_at: ~
- first_ping_at: ~
- job_uuid: ~
- properties: {}
- info:
- ping_secret: "abcdyi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
-
-node_with_no_ip_address_yet:
- uuid: zzzzz-7ekkf-nodenoipaddryet
- owner_uuid: zzzzz-tpzed-000000000000000
- hostname: noipaddr
- slot_number: ~
- last_ping_at: ~
- first_ping_at: ~
- job_uuid: ~
- properties: {}
- info:
- ping_secret: "abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
diff --git a/services/api/test/fixtures/pipeline_instances.yml b/services/api/test/fixtures/pipeline_instances.yml
deleted file mode 100644
index 714fc60771..0000000000
--- a/services/api/test/fixtures/pipeline_instances.yml
+++ /dev/null
@@ -1,530 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-new_pipeline:
- state: New
- uuid: zzzzz-d1hrv-f4gneyn6br1xize
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 1.minute.ago.to_fs(:db) %>
-
-new_pipeline_in_subproject:
- state: New
- uuid: zzzzz-d1hrv-subprojpipeline
- owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
- created_at: <%= 1.minute.ago.to_fs(:db) %>
-
-has_component_with_no_script_parameters:
- state: Ready
- uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 10.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters: {}
-
-has_component_with_empty_script_parameters:
- state: Ready
- uuid: zzzzz-d1hrv-jq16l10gcsnyumo
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
-
-has_component_with_completed_jobs:
- # Test that the job "started_at" and "finished_at" fields are parsed
- # into Time fields when rendering. These jobs must *not* have their
- # own fixtures; the point is to force the
- # pipeline_instances_controller_test in Workbench to parse the
- # "components" field. (The relevant code paths are also used when a
- # user has permission to read the pipeline instance itself, but not
- # the jobs referenced by its components hash.)
- state: Complete
- uuid: zzzzz-d1hrv-i3e77t9z5y8j9cc
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 11.minute.ago.to_fs(:db) %>
- started_at: <%= 10.minute.ago.to_fs(:db) %>
- finished_at: <%= 9.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-rft1xdewxkwgxnz
- script_version: main
- created_at: <%= 10.minute.ago.to_fs(:db) %>
- started_at: <%= 10.minute.ago.to_fs(:db) %>
- finished_at: <%= 9.minute.ago.to_fs(:db) %>
- state: Complete
- tasks_summary:
- failed: 0
- todo: 0
- running: 0
- done: 1
- bar:
- script: bar
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-r2dtbzr6bfread7
- script_version: main
- created_at: <%= 9.minute.ago.to_fs(:db) %>
- started_at: <%= 9.minute.ago.to_fs(:db) %>
- state: Running
- tasks_summary:
- failed: 0
- todo: 1
- running: 2
- done: 3
- baz:
- script: baz
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-c7408rni11o7r6s
- script_version: main
- created_at: <%= 9.minute.ago.to_fs(:db) %>
- state: Queued
- tasks_summary: {}
-
-has_job:
- name: pipeline_with_job
- state: Ready
- uuid: zzzzz-d1hrv-1yfj6xkidf2muk3
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 2.9.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters: {}
- job: {
- uuid: zzzzz-8i9sb-pshmckwoma9plh7,
- script_version: main
- }
-
-components_is_jobspec:
- # Helps test that clients cope with funny-shaped components.
- # For an example, see #3321.
- uuid: zzzzz-d1hrv-1yfj61234abcdk4
- created_at: <%= 4.minute.ago.to_fs(:db) %>
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- state: RunningOnServer
- components:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
-
-pipeline_with_tagged_collection_input:
- name: pipeline_with_tagged_collection_input
- state: Ready
- uuid: zzzzz-d1hrv-1yfj61234abcdk3
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.2.minute.ago.to_fs(:db) %>
- components:
- part-one:
- script_parameters:
- input:
- value: zzzzz-4zz18-znfnqtbbv4spc3w
-
-pipeline_to_merge_params:
- name: pipeline_to_merge_params
- state: Ready
- uuid: zzzzz-d1hrv-1yfj6dcba4321k3
- pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.3.minute.ago.to_fs(:db) %>
- components:
- part-one:
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
- part-two:
- script_parameters:
- input:
- output_of: part-one
- integer_with_default:
- default: 123
- integer_with_value:
- value: 123
- string_with_default:
- default: baz
- string_with_value:
- value: baz
- plain_string: qux
- array_with_default:
- default: [1,1,2,3,5]
- array_with_value:
- value: [1,1,2,3,5]
-
-pipeline_with_newer_template:
- state: Complete
- uuid: zzzzz-d1hrv-9fm8l10i9z2kqc6
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
- created_at: 2014-09-15 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-
-pipeline_instance_owned_by_fuse:
- state: Complete
- uuid: zzzzz-d1hrv-ri9dvgkgqs9y09j
- owner_uuid: zzzzz-tpzed-0fusedrivertest
- pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
- created_at: 2014-09-16 12:00:00
- name: "pipeline instance owned by FUSE"
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-
-pipeline_instance_in_fuse_project:
- state: Complete
- uuid: zzzzz-d1hrv-scarxiyajtshq3l
- owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
- pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
- created_at: 2014-09-17 12:00:00
- name: "pipeline instance in FUSE project"
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-
-pipeline_owned_by_active_in_aproject:
- name: Completed pipeline in A Project
- state: Complete
- uuid: zzzzz-d1hrv-ju5ghi0i9z2kqc6
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-09-18 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-
-pipeline_owned_by_active_in_home:
- name: Completed pipeline in active user home
- state: Complete
- uuid: zzzzz-d1hrv-lihrbd0i9z2kqc6
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-09-19 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-
-pipeline_in_publicly_accessible_project:
- uuid: zzzzz-d1hrv-n68vc490mloy4fi
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- name: Pipeline in publicly accessible project
- pipeline_template_uuid: zzzzz-p5p6p-tmpltpublicproj
- state: Complete
- created_at: <%= 30.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
- job:
- uuid: zzzzz-8i9sb-jyq01m7in1jlofj
- repository: active/foo
- script: foo
- script_version: main
- script_parameters:
- input: zzzzz-4zz18-4en62shvi99lxd4
- log: zzzzz-4zz18-4en62shvi99lxd4
- output: b519d9cb706a29fc7ea24dbea2f05851+93
- state: Complete
-
-pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
- uuid: zzzzz-d1hrv-pisharednotobjs
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- name: Pipeline in public project with other objects elsewhere
- pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
- state: Complete
- created_at: 2014-09-20 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
- job:
- uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
- repository: active/foo
- script: foo
- script_version: main
- script_parameters:
- input: zzzzz-4zz18-bv31uwvy3neko21
- log: zzzzz-4zz18-bv31uwvy3neko21
- output: zzzzz-4zz18-bv31uwvy3neko21
- state: Complete
-
-new_pipeline_in_publicly_accessible_project:
- uuid: zzzzz-d1hrv-newpisharedobjs
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- name: Pipeline in New state in publicly accessible project
- pipeline_template_uuid: zzzzz-p5p6p-tmpltpublicproj
- state: New
- created_at: 2014-09-21 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- value: b519d9cb706a29fc7ea24dbea2f05851+93
-
-new_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
- uuid: zzzzz-d1hrv-newsharenotobjs
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- name: Pipeline in New state in public project with objects elsewhere
- pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
- state: New
- created_at: 2014-09-22 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- value: zzzzz-4zz18-bv31uwvy3neko21
-
-new_pipeline_in_publicly_accessible_project_with_dataclass_file_and_other_objects_elsewhere:
- uuid: zzzzz-d1hrv-newsharenotfile
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- name: Pipeline in public project in New state with file type data class with objects elsewhere
- pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
- state: New
- created_at: 2014-09-23 12:00:00
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: File
- value: zzzzz-4zz18-bv31uwvy3neko21/bar
-
-pipeline_in_running_state:
- name: running_with_job
- uuid: zzzzz-d1hrv-runningpipeline
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 2.8.minute.ago.to_fs(:db) %>
- started_at: <%= 2.8.minute.ago.to_fs(:db) %>
- state: RunningOnServer
- components:
- foo:
- script: foo
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-pshmckwoma9plh7
- script_version: main
-
-running_pipeline_with_complete_job:
- uuid: zzzzz-d1hrv-partdonepipelin
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- state: RunningOnServer
- created_at: <%= 15.minute.ago.to_fs(:db) %>
- components:
- previous:
- job:
- uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- log: zzzzz-4zz18-op4e2lbej01tcvu
- running:
- job:
- uuid: zzzzz-8i9sb-pshmckwoma9plh7
-
-complete_pipeline_with_two_jobs:
- uuid: zzzzz-d1hrv-twodonepipeline
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- state: Complete
- created_at: <%= 2.5.minute.ago.to_fs(:db) %>
- started_at: <%= 2.minute.ago.to_fs(:db) %>
- finished_at: <%= 1.minute.ago.to_fs(:db) %>
- components:
- ancient:
- job:
- uuid: zzzzz-8i9sb-ahd7cie8jah9qui
- log: zzzzz-4zz18-op4e2lbej01tcvu
- previous:
- job:
- uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- log: zzzzz-4zz18-op4e2lbej01tcvu
-
-failed_pipeline_with_two_jobs:
- uuid: zzzzz-d1hrv-twofailpipeline
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 55.minute.ago.to_fs(:db) %>
- state: Failed
- components:
- ancient:
- job:
- uuid: zzzzz-8i9sb-ahd7cie8jah9qui
- log: zzzzz-4zz18-op4e2lbej01tcvu
- previous:
- job:
- uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- log: zzzzz-4zz18-op4e2lbej01tcvu
-
-# This pipeline is a child of another running job and has its own running children
-job_child_pipeline_with_components_at_level_2:
- state: RunningOnServer
- uuid: zzzzz-d1hrv-picomponentsl02
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-job1atlevel3noc
- script_version: main
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- state: Running
- tasks_summary:
- failed: 0
- todo: 0
- running: 1
- done: 1
- bar:
- script: bar
- script_version: main
- script_parameters: {}
- job:
- uuid: zzzzz-8i9sb-job2atlevel3noc
- script_version: main
- created_at: <%= 12.hour.ago.to_fs(:db) %>
- started_at: <%= 12.hour.ago.to_fs(:db) %>
- state: Running
- tasks_summary:
- failed: 0
- todo: 1
- running: 2
- done: 3
-
-# Test Helper trims the rest of the file
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
-
-# pipelines in project_with_10_pipelines
-<% for i in 1..10 do %>
-pipeline_<%=i%>_of_10:
- name: pipeline_<%= i %>
- uuid: zzzzz-d1hrv-10pipelines0<%= i.to_s.rjust(3, '0') %>
- owner_uuid: zzzzz-j7d0g-000010pipelines
- created_at: <%= (2*(i-1)).hour.ago.to_fs(:db) %>
- started_at: <%= (2*(i-1)).hour.ago.to_fs(:db) %>
- finished_at: <%= (i-1).minute.ago.to_fs(:db) %>
- state: Failed
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
- job:
- state: Failed
-<% end %>
-
-# pipelines in project_with_2_pipelines_and_60_crs
-<% for i in 1..2 do %>
-pipeline_<%=i%>_of_2_pipelines_and_60_crs:
- name: pipeline_<%= i %>
- state: New
- uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
- owner_uuid: zzzzz-j7d0g-nnncrspipelines
- created_at: <%= i.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-<% end %>
-
-# pipelines in project_with_25_pipelines
-<% for i in 1..25 do %>
-pipeline_<%=i%>_of_25:
- name: pipeline_<%=i%>
- state: Failed
- uuid: zzzzz-d1hrv-25pipelines0<%= i.to_s.rjust(3, '0') %>
- owner_uuid: zzzzz-j7d0g-000025pipelines
- created_at: <%= i.hour.ago.to_fs(:db) %>
- started_at: <%= i.hour.ago.to_fs(:db) %>
- finished_at: <%= i.minute.ago.to_fs(:db) %>
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo instance input
-<% end %>
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
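
One detail worth noting in the deleted file above: Rails evaluates fixture YAML as ERB before parsing, which is how the <% for i in 1..10 do %> loops stamped out numbered fixtures. The same two-stage processing can be demonstrated in plain Ruby, no Rails required:

require 'erb'
require 'yaml'

# ERB runs first and emits YAML text; YAML parsing happens second.
template = <<~'YAML'
  <% 2.times do |i| %>
  pipeline_<%= i %>:
    name: pipeline_<%= i %>
    state: Failed
  <% end %>
YAML

fixtures = YAML.safe_load(ERB.new(template).result)
p fixtures.keys  #=> ["pipeline_0", "pipeline_1"]
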
diff --git a/services/api/test/fixtures/pipeline_templates.yml b/services/api/test/fixtures/pipeline_templates.yml
deleted file mode 100644
index 0c185eeb80..0000000000
--- a/services/api/test/fixtures/pipeline_templates.yml
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-two_part:
- uuid: zzzzz-p5p6p-aox0k0ofxrystgw
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Two Part Pipeline Template
- components:
- part-one:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- part-two:
- script: bar
- script_version: main
- script_parameters:
- input:
- output_of: part-one
- integer_with_default:
- default: 123
- integer_with_value:
- value: 123
- string_with_default:
- default: baz
- string_with_value:
- value: baz
- plain_string: qux
- array_with_default: # important to test repeating values in the array!
- default: [1,1,2,3,5]
- array_with_value: # important to test repeating values in the array!
- value: [1,1,2,3,5]
-
-components_is_jobspec:
- # Helps test that clients cope with funny-shaped components.
- # For an example, see #3321.
- uuid: zzzzz-p5p6p-jobspeccomponts
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Pipeline Template with Jobspec Components
- components:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
-
-parameter_with_search:
- uuid: zzzzz-p5p6p-paramwsearch345
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Pipeline Template with Input Parameter with Search
- components:
- with-search:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
- search_for: sometime # Matches baz_collection_in_asubproject
-
-new_pipeline_template:
- # This template must include components that are not
- # present in the pipeline instance 'pipeline_with_newer_template',
- # at least one of which has a script_parameter that is a hash
- # with a 'dataclass' field (ticket #4000)
- uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-09-14 12:00:00
- modified_at: 2014-09-16 12:00:00
- name: Pipeline Template Newer Than Instance
- components:
- foo:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: foo template input
- bar:
- script: bar
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: bar template input
-
-pipeline_template_in_fuse_project:
- uuid: zzzzz-p5p6p-templinfuseproj
- owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
- name: pipeline template in FUSE project
- components:
- foo_component:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "default input"
- description: "input collection"
-
-template_with_dataclass_file:
- uuid: zzzzz-p5p6p-k0xoa0ofxrystgw
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Two Part Template with dataclass File
- components:
- part-one:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: File
- title: "Foo/bar pair"
- description: "Provide an input file"
- part-two:
- script: bar
- script_version: main
- script_parameters:
- input:
- output_of: part-one
- integer_with_default:
- default: 123
- integer_with_value:
- value: 123
- string_with_default:
- default: baz
- string_with_value:
- value: baz
- plain_string: qux
- array_with_default: # important to test repeating values in the array!
- default: [1,1,2,3,5]
- array_with_value: # important to test repeating values in the array!
- value: [1,1,2,3,5]
-
-template_with_dataclass_number:
- uuid: zzzzz-p5p6p-numbertemplatea
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2015-01-14 12:35:04 -0400
- updated_at: 2015-01-14 12:35:04 -0400
- modified_at: 2015-01-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Template with dataclass number
- components:
- work:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: number
- title: "Input number"
-
-pipeline_template_in_publicly_accessible_project:
- uuid: zzzzz-p5p6p-tmpltpublicproj
- owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Pipeline template in publicly accessible project
- components:
- foo_component:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "default input"
- description: "input collection"
-
-# Used to test renaming when removed from the "aproject" subproject
-# while another such object with same name exists in home project.
-template_in_active_user_home_project_to_test_unique_key_violation:
- uuid: zzzzz-p5p6p-templatsamenam1
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2013-04-14 12:35:04 -0400
- updated_at: 2013-04-14 12:35:04 -0400
- modified_at: 2013-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Template to test owner uuid and name unique key violation upon removal
- components:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
-
-template_in_asubproject_with_same_name_as_one_in_active_user_home:
- uuid: zzzzz-p5p6p-templatsamenam2
- owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
- created_at: 2013-04-14 12:35:04 -0400
- updated_at: 2013-04-14 12:35:04 -0400
- modified_at: 2013-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Template to test owner uuid and name unique key violation upon removal
- components:
- script: foo
- script_version: main
- script_parameters:
- input:
- required: true
- dataclass: Collection
- title: "Foo/bar pair"
- description: "Provide a collection containing at least two files."
-
-workflow_with_input_defaults:
- uuid: zzzzz-p5p6p-aox0k0ofxrystg2
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-04-14 12:35:04 -0400
- updated_at: 2014-04-14 12:35:04 -0400
- modified_at: 2014-04-14 12:35:04 -0400
- modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
- modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- name: Pipeline with default input specifications
- components:
- part-one:
- script: foo
- script_version: main
- script_parameters:
- ex_string:
- required: true
- dataclass: string
- ex_string_def:
- required: true
- dataclass: string
- default: hello-testing-123
diff --git a/services/api/test/fixtures/repositories.yml b/services/api/test/fixtures/repositories.yml
deleted file mode 100644
index e4fe71e402..0000000000
--- a/services/api/test/fixtures/repositories.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-crunch_dispatch_test:
- uuid: zzzzz-s0uqq-382brsig8rp3665
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- name: active/crunchdispatchtest
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-arvados:
- uuid: zzzzz-s0uqq-arvadosrepo0123
- owner_uuid: zzzzz-tpzed-000000000000000 # root
- name: arvados
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-foo:
- uuid: zzzzz-s0uqq-382brsig8rp3666
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- name: active/foo
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-repository2:
- uuid: zzzzz-s0uqq-382brsig8rp3667
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- name: active/foo2
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-repository3:
- uuid: zzzzz-s0uqq-38orljkqpyo1j61
- owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
- name: admin/foo3
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-repository4:
- uuid: zzzzz-s0uqq-38oru8hnk57ht34
- owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
- name: admin/foo4
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
-
-has_branch_with_commit_hash_name:
- uuid: zzzzz-s0uqq-382brsig8rp3668
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- name: active/shabranchnames
- created_at: 2015-01-01T00:00:00.123456Z
- modified_at: 2015-01-01T00:00:00.123456Z
diff --git a/services/api/test/fixtures/specimens.yml b/services/api/test/fixtures/specimens.yml
deleted file mode 100644
index bcae020812..0000000000
--- a/services/api/test/fixtures/specimens.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-owned_by_active_user:
- uuid: zzzzz-j58dm-3zx463qyo0k4xrn
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: 2014-04-21 15:37:48 -0400
- modified_at: 2014-04-21 15:37:48 -0400
-
-owned_by_private_group:
- uuid: zzzzz-j58dm-5m3qwg45g3nlpu6
- owner_uuid: zzzzz-j7d0g-rew6elm53kancon
- created_at: 2014-04-21 15:37:48 -0400
- modified_at: 2014-04-21 15:37:48 -0400
-
-owned_by_spectator:
- uuid: zzzzz-j58dm-3b0xxwzlbzxq5yr
- owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
- created_at: 2014-04-21 15:37:48 -0400
- modified_at: 2014-04-21 15:37:48 -0400
-
-in_aproject:
- uuid: zzzzz-j58dm-7r18rnd5nzhg5yk
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-04-21 15:37:48 -0400
- modified_at: 2014-04-21 15:37:48 -0400
-
-in_asubproject:
- uuid: zzzzz-j58dm-c40lddwcqqr1ffs
- owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
- created_at: 2014-04-21 15:37:48 -0400
- modified_at: 2014-04-21 15:37:48 -0400
diff --git a/services/api/test/fixtures/traits.yml b/services/api/test/fixtures/traits.yml
deleted file mode 100644
index 83beb7087d..0000000000
--- a/services/api/test/fixtures/traits.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-owned_by_aproject_with_no_name:
- uuid: zzzzz-q1cn2-ypsjlol9dofwijz
- owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: 2014-05-05 04:11:52 -0400
- modified_at: 2014-05-05 04:11:52 -0400
diff --git a/services/api/test/fixtures/users.yml b/services/api/test/fixtures/users.yml
index 56f44551e1..6c0a6e2c15 100644
--- a/services/api/test/fixtures/users.yml
+++ b/services/api/test/fixtures/users.yml
@@ -8,7 +8,6 @@ system_user:
uuid: zzzzz-tpzed-000000000000000
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-11-27 06:38:21.215463000 Z
- modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-11-27 06:38:21.208036000 Z
email: root
@@ -72,7 +71,6 @@ active:
owner_uuid: zzzzz-tpzed-000000000000000
uuid: zzzzz-tpzed-xurymjxw79nv3jz
email: active-user@arvados.local
- modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
first_name: Active
last_name: User
@@ -429,7 +427,6 @@ has_can_login_permission:
owner_uuid: zzzzz-tpzed-000000000000000
uuid: zzzzz-tpzed-xabcdjxw79nv3jz
email: can-login-user@arvados.local
- modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
first_name: Can_login
last_name: User
diff --git a/services/api/test/functional/application_controller_test.rb b/services/api/test/functional/application_controller_test.rb
index af7882141e..ecd1c7cdd7 100644
--- a/services/api/test/functional/application_controller_test.rb
+++ b/services/api/test/functional/application_controller_test.rb
@@ -13,8 +13,8 @@ class ApplicationControllerTest < ActionController::TestCase
setup do
# These tests are meant to check behavior in ApplicationController.
- # We instantiate a small concrete controller for convenience.
- @controller = Arvados::V1::SpecimensController.new
+ # We instantiate an arbitrary concrete controller.
+ @controller = Arvados::V1::CollectionsController.new
@start_stamp = now_timestamp
end
@@ -42,13 +42,13 @@ class ApplicationControllerTest < ActionController::TestCase
test "requesting object without read permission returns 404 error" do
authorize_with :spectator
- get(:show, params: {id: specimens(:owned_by_active_user).uuid})
+ get(:show, params: {id: collections(:collection_owned_by_active).uuid})
check_404
end
test "submitting bad object returns error" do
authorize_with :spectator
- post(:create, params: {specimen: {badattr: "badvalue"}})
+ post(:create, params: {collection: {badattr: "badvalue"}})
assert_response 422
check_error_token
end
@@ -98,6 +98,18 @@ class ApplicationControllerTest < ActionController::TestCase
end
end
+ [[500, ActiveRecord::Deadlocked],
+ [500, ActiveRecord::QueryAborted],
+ [422, ActiveRecord::RecordNotUnique]].each do |status, etype|
+ test "return status #{status} for #{etype}" do
+ Group.stubs(:new).raises(etype)
+ @controller = Arvados::V1::GroupsController.new
+ authorize_with :active
+ post :create, params: {group: {}}
+ assert_response status
+ end
+ end
+
test "exceptions with backtraces get logged at exception_backtrace key" do
Group.stubs(:new).raises(Exception, 'Whoops')
Rails.logger.expects(:info).with(any_parameters) do |param|
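
The block added above pins down how low-level ActiveRecord failures surface in the API: aborted queries and deadlocks are transient server-side problems reported as 500 (so clients can retry), while a uniqueness violation is the client's mistake and reported as 422. A minimal sketch of such a mapping using rescue_from; this illustrates the behavior under test, not the actual Arvados error-handling code:

class ExampleApiController < ActionController::Base
  # Transient database failures: let the client retry.
  rescue_from ActiveRecord::QueryAborted, ActiveRecord::Deadlocked,
              with: :render_retryable
  # Constraint violations: the request itself is unprocessable.
  rescue_from ActiveRecord::RecordNotUnique, with: :render_unprocessable

  private

  def render_retryable(e)
    render json: {errors: [e.class.name]}, status: 500
  end

  def render_unprocessable(e)
    render json: {errors: [e.class.name]}, status: 422
  end
end
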
diff --git a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
index 60b4133f9a..5312b61994 100644
--- a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
@@ -17,12 +17,6 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
assert_response 401
end
- test "should not get index from untrusted client" do
- authorize_with :active
- get :index
- assert_response 403
- end
-
test "create system auth" do
authorize_with :admin_trustedclient
post :create_system_auth, params: {scopes: '["test"]'}
@@ -30,12 +24,6 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
assert_not_nil JSON.parse(@response.body)['uuid']
end
- test "prohibit create system auth with token from non-trusted client" do
- authorize_with :admin
- post :create_system_auth, params: {scopes: '["test"]'}
- assert_response 403
- end
-
test "prohibit create system auth by non-admin" do
authorize_with :active
post :create_system_auth, params: {scopes: '["test"]'}
@@ -93,43 +81,42 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
[# anyone can look up the token they're currently using
[:admin, :admin, 200, 200, 1],
[:active, :active, 200, 200, 1],
- # cannot look up other tokens (even for same user) if not trustedclient
- [:admin, :active, 403, 403],
- [:admin, :admin_vm, 403, 403],
- [:active, :admin, 403, 403],
- # cannot look up other tokens for other users, regardless of trustedclient
+ # cannot look up other tokens for other users
[:admin_trustedclient, :active, 404, 200, 0],
[:active_trustedclient, :admin, 404, 200, 0],
- ].each do |user, token, expect_get_response, expect_list_response, expect_list_items|
- test "using '#{user}', get '#{token}' by uuid" do
- authorize_with user
+ # system root token is always trusted
+ [:system_user, :active, 200, 200, 1],
+ [:system_user, :admin, 200, 200, 1],
+ ].each do |auth_token, target_token, expect_get_response, expect_list_response, expect_list_items|
+ test "using '#{auth_token}', get '#{target_token}' by uuid" do
+ authorize_with auth_token
get :show, params: {
- id: api_client_authorizations(token).uuid,
+ id: api_client_authorizations(target_token).uuid,
}
assert_response expect_get_response
end
- test "using '#{user}', update '#{token}' by uuid" do
- authorize_with user
+ test "using '#{auth_token}', update '#{target_token}' by uuid" do
+ authorize_with auth_token
put :update, params: {
- id: api_client_authorizations(token).uuid,
+ id: api_client_authorizations(target_token).uuid,
api_client_authorization: {},
}
assert_response expect_get_response
end
- test "using '#{user}', delete '#{token}' by uuid" do
- authorize_with user
+ test "using '#{auth_token}', delete '#{target_token}' by uuid" do
+ authorize_with auth_token
post :destroy, params: {
- id: api_client_authorizations(token).uuid,
+ id: api_client_authorizations(target_token).uuid,
}
assert_response expect_get_response
end
- test "using '#{user}', list '#{token}' by uuid" do
- authorize_with user
+ test "using '#{auth_token}', list '#{target_token}' by uuid" do
+ authorize_with auth_token
get :index, params: {
- filters: [['uuid','=',api_client_authorizations(token).uuid]],
+ filters: [['uuid','=',api_client_authorizations(target_token).uuid]],
}
assert_response expect_list_response
if expect_list_items
@@ -139,10 +126,10 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
end
if expect_list_items
- test "using '#{user}', list '#{token}' by uuid with offset" do
- authorize_with user
+ test "using '#{auth_token}', list '#{target_token}' by uuid with offset" do
+ authorize_with auth_token
get :index, params: {
- filters: [['uuid','=',api_client_authorizations(token).uuid]],
+ filters: [['uuid','=',api_client_authorizations(target_token).uuid]],
offset: expect_list_items,
}
assert_response expect_list_response
@@ -151,10 +138,10 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
end
end
- test "using '#{user}', list '#{token}' by token" do
- authorize_with user
+ test "using '#{auth_token}', list '#{target_token}' by token" do
+ authorize_with auth_token
get :index, params: {
- filters: [['api_token','=',api_client_authorizations(token).api_token]],
+ filters: [['api_token','=',api_client_authorizations(target_token).api_token]],
}
assert_response expect_list_response
if expect_list_items
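
The deleted cases above all exercised the old "trusted client" distinction, which no longer exists: whether a token can manage other tokens now depends only on whose token it is, with the system root token allowed to manage any token (note that even admins get a 404 for other users' tokens in the updated table). A sketch of the resulting rule, with assumed names, runnable on its own:

SYSTEM_ROOT_UUID = "zzzzz-tpzed-000000000000000"

# Who may show/update/delete a given token? The system root user may
# touch any token; everyone else, only their own.
def can_manage_token?(current_user_uuid, token_owner_uuid)
  current_user_uuid == SYSTEM_ROOT_UUID ||
    current_user_uuid == token_owner_uuid
end

p can_manage_token?(SYSTEM_ROOT_UUID, "zzzzz-tpzed-xurymjxw79nv3jz")               #=> true
p can_manage_token?("zzzzz-tpzed-d9tiejq69daie8f", "zzzzz-tpzed-xurymjxw79nv3jz")  #=> false
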
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
index 43797035bc..d68cba99e2 100644
--- a/services/api/test/functional/arvados/v1/collections_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/collections_controller_test.rb
@@ -165,6 +165,22 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
end
end
+ test "ignore modified_by_client_uuid in select param" do
+ authorize_with :active
+ get :index, params: {select: ["uuid", "modified_by_client_uuid"]}
+ assert_response :success
+ json_response['items'].each do |coll|
+ assert_includes(coll.keys, 'uuid')
+ refute_includes(coll.keys, 'name')
+ end
+ end
+
+ test "reject invalid field in select param" do
+ authorize_with :active
+ get :index, params: {select: ["uuid", "field_does_not_exist"]}
+ assert_response 422
+ end
+
[0,1,2].each do |limit|
test "get index with limit=#{limit}" do
authorize_with :active
@@ -516,14 +532,10 @@ EOS
test "get full provenance for baz file" do
authorize_with :active
- get :provenance, params: {id: 'ea10d51bcf88862dbcc36eb292017dfd+45'}
+ get :provenance, params: {id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}
assert_response :success
resp = JSON.parse(@response.body)
- assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
- assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
- assert_not_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
- assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq'] # bar->baz
- assert_not_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon'] # foo->bar
+ assert_not_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # baz collection
end
test "get no provenance for foo file" do
@@ -540,10 +552,7 @@ EOS
assert_response :success
resp = JSON.parse(@response.body)
assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
- assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
- assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq'] # bar->baz
- assert_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon'] # foo->bar
- assert_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
+    assert_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
end
test "search collections with 'any' operator" do
diff --git a/services/api/test/functional/arvados/v1/commits_controller_test.rb b/services/api/test/functional/arvados/v1/commits_controller_test.rb
deleted file mode 100644
index bf285b06e0..0000000000
--- a/services/api/test/functional/arvados/v1/commits_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::CommitsControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/computed_permissions_controller_test.rb b/services/api/test/functional/arvados/v1/computed_permissions_controller_test.rb
new file mode 100644
index 0000000000..6c89e90b63
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/computed_permissions_controller_test.rb
@@ -0,0 +1,90 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::ComputedPermissionsControllerTest < ActionController::TestCase
+ test "require auth" do
+ get :index, params: {}
+ assert_response 401
+ end
+
+ test "require admin" do
+ authorize_with :active
+ get :index, params: {}
+ assert_response 403
+ end
+
+ test "index with no options" do
+ authorize_with :admin
+ get :index, params: {}
+ assert_response :success
+ assert_operator 0, :<, json_response['items'].length
+
+ last_user = ''
+ last_target = ''
+ json_response['items'].each do |item|
+ assert_not_empty item['user_uuid']
+ assert_not_empty item['target_uuid']
+ assert_not_empty item['perm_level']
+ # check default ordering
+ assert_operator last_user, :<=, item['user_uuid']
+ if last_user == item['user_uuid']
+ assert_operator last_target, :<=, item['target_uuid']
+ end
+ last_user = item['user_uuid']
+ last_target = item['target_uuid']
+ end
+ end
+
+ test "index with limit" do
+ authorize_with :admin
+ get :index, params: {limit: 10}
+ assert_response :success
+ assert_equal 10, json_response['items'].length
+ end
+
+ test "index with filter on user_uuid" do
+ user_uuid = users(:active).uuid
+ authorize_with :admin
+ get :index, params: {filters: [['user_uuid', '=', user_uuid]]}
+ assert_response :success
+ assert_not_equal 0, json_response['items'].length
+ json_response['items'].each do |item|
+ assert_equal user_uuid, item['user_uuid']
+ end
+ end
+
+ test "index with filter on user_uuid and target_uuid" do
+ user_uuid = users(:active).uuid
+ target_uuid = groups(:aproject).uuid
+ authorize_with :admin
+ get :index, params: {filters: [
+ ['user_uuid', '=', user_uuid],
+ ['target_uuid', '=', target_uuid],
+ ]}
+ assert_response :success
+ assert_equal([{"user_uuid" => user_uuid,
+ "target_uuid" => target_uuid,
+ "perm_level" => "can_manage",
+ }],
+ json_response['items'])
+ end
+
+ test "index with disallowed filters" do
+ authorize_with :admin
+ get :index, params: {filters: [['perm_level', '=', 'can_manage']]}
+ assert_response 422
+ end
+
+ %w(user_uuid target_uuid perm_level).each do |attr|
+ test "select only #{attr}" do
+ authorize_with :admin
+ get :index, params: {select: [attr], limit: 1}
+ assert_response :success
+ assert_operator 0, :<, json_response['items'][0][attr].length
+ assert_equal([{attr => json_response['items'][0][attr]}], json_response['items'])
+ end
+ end
+end
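
The new test file above implies a small, admin-only, read-only endpoint. A hypothetical client call, assuming the conventional Arvados REST path for this controller; the path, environment variables, and filter values are illustrative:

require 'json'
require 'net/http'
require 'uri'

uri = URI("https://#{ENV['ARVADOS_API_HOST']}/arvados/v1/computed_permissions")
uri.query = URI.encode_www_form(
  filters: [["user_uuid", "=", "zzzzz-tpzed-xurymjxw79nv3jz"]].to_json,
  limit: 10,
)
req = Net::HTTP::Get.new(uri)
req["Authorization"] = "Bearer #{ENV['ARVADOS_API_TOKEN']}"  # must be an admin token
res = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(req) }
JSON.parse(res.body)["items"].each do |item|
  puts "#{item['user_uuid']} #{item['perm_level']} #{item['target_uuid']}"
end
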
diff --git a/services/api/test/functional/arvados/v1/containers_controller_test.rb b/services/api/test/functional/arvados/v1/containers_controller_test.rb
index 07fa5c3211..619567de21 100644
--- a/services/api/test/functional/arvados/v1/containers_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/containers_controller_test.rb
@@ -19,14 +19,14 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
[Container::Queued, Container::Complete].each do |state|
test "cannot get auth in #{state} state" do
- authorize_with :dispatch1
+ authorize_with :system_user
get :auth, params: {id: containers(:queued).uuid}
assert_response 403
end
end
test 'cannot get auth with wrong token' do
- authorize_with :dispatch1
+ authorize_with :dispatch2
c = containers(:queued)
assert c.lock, show_errors(c)
@@ -36,7 +36,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test 'get auth' do
- authorize_with :dispatch1
+ authorize_with :system_user
c = containers(:queued)
assert c.lock, show_errors(c)
get :auth, params: {id: c.uuid}
@@ -46,7 +46,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test 'no auth or secret_mounts in container response' do
- authorize_with :dispatch1
+ authorize_with :system_user
c = containers(:queued)
assert c.lock, show_errors(c)
get :show, params: {id: c.uuid}
@@ -56,7 +56,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test "lock container" do
- authorize_with :dispatch1
+ authorize_with :system_user
uuid = containers(:queued).uuid
post :lock, params: {id: uuid}
assert_response :success
@@ -75,7 +75,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test "unlock container" do
- authorize_with :dispatch1
+ authorize_with :system_user
uuid = containers(:locked).uuid
post :unlock, params: {id: uuid}
assert_response :success
@@ -108,7 +108,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
[:running, :unlock, 422, 'Running'],
].each do |fixture, action, response, state|
test "state transitions from #{fixture} to #{action}" do
- authorize_with :dispatch1
+ authorize_with :system_user
uuid = containers(fixture).uuid
post action, params: {id: uuid}
assert_response response
@@ -124,7 +124,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test 'no container associated with token' do
- authorize_with :dispatch1
+ authorize_with :system_user
get :current
assert_response 404
end
@@ -170,21 +170,21 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
end
test 'update runtime_status, runtime_status is toplevel key' do
- authorize_with :dispatch1
+ authorize_with :system_user
c = containers(:running)
patch :update, params: {id: containers(:running).uuid, runtime_status: {activity: "foo", activityDetail: "bar"}}
assert_response :success
end
test 'update runtime_status, container is toplevel key' do
- authorize_with :dispatch1
+ authorize_with :system_user
c = containers(:running)
patch :update, params: {id: containers(:running).uuid, container: {runtime_status: {activity: "foo", activityDetail: "bar"}}}
assert_response :success
end
test 'update state, state is toplevel key' do
- authorize_with :dispatch1
+ authorize_with :system_user
c = containers(:running)
patch :update, params: {id: containers(:running).uuid, state: "Complete", runtime_status: {activity: "finishing"}}
assert_response :success
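
These tests now authorize as :system_user where they previously used :dispatch1; the container state transitions they enumerate are unchanged and can be summarized as a lookup table (a sketch for orientation, not API server code):

LEGAL_TRANSITIONS = {
  "Queued"  => ["Locked", "Cancelled"],
  "Locked"  => ["Queued", "Running", "Cancelled"],
  "Running" => ["Complete", "Cancelled"],
}.freeze

def transition_ok?(from, to)
  LEGAL_TRANSITIONS.fetch(from, []).include?(to)
end

p transition_ok?("Queued", "Locked")   #=> true  (post :lock succeeds)
p transition_ok?("Running", "Queued")  #=> false (unlock on Running is a 422)
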
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index ee7f716c80..6777fe3d97 100644
--- a/services/api/test/functional/arvados/v1/groups_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb
@@ -65,12 +65,12 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
assert_equal 0, json_response['items_available']
end
- def check_project_contents_response disabled_kinds=[]
+ def check_project_contents_response
assert_response :success
assert_operator 2, :<=, json_response['items_available']
assert_operator 2, :<=, json_response['items'].count
kinds = json_response['items'].collect { |i| i['kind'] }.uniq
- expect_kinds = %w'arvados#group arvados#specimen arvados#pipelineTemplate arvados#job' - disabled_kinds
+ expect_kinds = %w'arvados#group'
assert_equal expect_kinds, (expect_kinds & kinds)
json_response['items'].each do |i|
@@ -79,10 +79,6 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
"group#contents returned a non-project group")
end
end
-
- disabled_kinds.each do |d|
- assert_equal true, !kinds.include?(d)
- end
end
test 'get group-owned objects' do
@@ -107,17 +103,17 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
authorize_with :project_viewer
get :contents, params: {
format: :json,
- filters: [['uuid', 'is_a', 'arvados#specimen']]
+ filters: [['uuid', 'is_a', 'arvados#collection']]
}
assert_response :success
found_uuids = json_response['items'].collect { |i| i['uuid'] }
- [[:in_aproject, true],
- [:in_asubproject, true],
- [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
+ [[:foo_collection_in_aproject, true],
+ [:baz_collection_name_in_asubproject, true],
+ [:collection_not_readable_by_active, false]].each do |collection_fixture, should_find|
if should_find
- assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
+ assert_includes found_uuids, collections(collection_fixture).uuid, "did not find collection fixture '#{collection_fixture}'"
else
- refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+ refute_includes found_uuids, collections(collection_fixture).uuid, "found collection fixture '#{collection_fixture}'"
end
end
end
@@ -150,8 +146,8 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
}
assert_response :success
found_uuids = json_response['items'].collect { |i| i['uuid'] }
- assert_includes found_uuids, specimens(:owned_by_active_user).uuid, "specimen did not appear in home project"
- refute_includes found_uuids, specimens(:in_asubproject).uuid, "specimen appeared unexpectedly in home project"
+ assert_includes found_uuids, collections(:collection_owned_by_active).uuid, "collection did not appear in home project"
+ refute_includes found_uuids, collections(:foo_collection_in_aproject).uuid, "collection appeared unexpectedly in home project"
end
test "list collections in home project" do
@@ -279,20 +275,20 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
test "user with project read permission can't rename items in it" do
authorize_with :project_viewer
- @controller = Arvados::V1::LinksController.new
+ @controller = Arvados::V1::CollectionsController.new
post :update, params: {
- id: jobs(:running).uuid,
+ id: collections(:collection_to_search_for_in_aproject).uuid,
name: "Denied test name",
}
assert_includes(403..404, response.status)
end
test "user with project read permission can't remove items from it" do
- @controller = Arvados::V1::PipelineTemplatesController.new
+ @controller = Arvados::V1::CollectionsController.new
authorize_with :project_viewer
post :update, params: {
- id: pipeline_templates(:two_part).uuid,
- pipeline_template: {
+ id: collections(:collection_to_search_for_in_aproject).uuid,
+ collection: {
owner_uuid: users(:project_viewer).uuid,
}
}
@@ -339,8 +335,8 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
select: ["uuid", "storage_classes_desired"]
}
assert_response :success
- assert_equal 17, json_response['items_available']
- assert_equal 17, json_response['items'].count
+ assert_equal 6, json_response['items_available']
+ assert_equal 6, json_response['items'].count
json_response['items'].each do |item|
# Expect collections to have a storage_classes field, other items should not.
if item["kind"] == "arvados#collection"
@@ -351,6 +347,21 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
end
end
+ test 'get objects with ambiguous column name in order param' do
+ authorize_with :active
+ get :contents, params: {
+ format: :json,
+ # bug #22785 ("ambiguous column reference") only occurred when
+ # filtering by a column in a joined table...
+ filters: [["uuid", "is_a", "arvados#containerRequest"],
+ ["container.state", "in", ["Queued","Locked"]]],
+ # ...and ordering by a column that is in both primary and joined
+ # tables.
+ order: "created_at desc",
+ }
+ assert_response :success
+ end
+
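
For context on the regression the new test above guards against (bug #22785): once groups#contents joins container_requests to containers, an unqualified order column that exists in both tables is ambiguous to PostgreSQL. A minimal ActiveRecord sketch of the failure mode, assuming a container association on ContainerRequest matching the container_uuid linkage used here:

    scope = ContainerRequest.joins(:container).
              where(containers: {state: ["Queued", "Locked"]})
    scope.order("created_at desc")
    # => PG::AmbiguousColumn: both tables have a created_at column
    scope.order("container_requests.created_at desc")
    # => unambiguous; qualifying the sort column is the behavior being tested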
test 'get group-owned objects with invalid field in select' do
authorize_with :active
get :contents, params: {
@@ -480,15 +491,17 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
[
[['owner_uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 200,
- 'zzzzz-d1hrv-subprojpipeline', 'zzzzz-d1hrv-1xfj6xkicf2muk2'],
- [["pipeline_instances.state", "not in", ["Complete", "Failed"]], 200,
- 'zzzzz-d1hrv-1xfj6xkicf2muk2', 'zzzzz-d1hrv-i3e77t9z5y8j9cc'],
+ 'zzzzz-j7d0g-publicfavorites', 'zzzzz-xvhdp-cr4queuedcontnr'],
+ [["container_requests.state", "not in", ["Final"]], 200,
+ 'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4completedctr'],
[['container_requests.requesting_container_uuid', '=', nil], 200,
'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4requestercn2'],
[['container_requests.no_such_column', '=', nil], 422],
[['container_requests.', '=', nil], 422],
[['.requesting_container_uuid', '=', nil], 422],
[['no_such_table.uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 422],
+ [["container.state", "=", "Complete"], 200,
+ 'zzzzz-xvhdp-cr4completedctr', 'zzzzz-xvhdp-cr5trashedcontr'],
].each do |filter, expect_code, expect_uuid, not_expect_uuid|
test "get contents with '#{filter}' filter" do
authorize_with :active
@@ -503,25 +516,17 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
end
end
- test 'get contents with jobs and pipeline instances disabled' do
- Rails.configuration.API.DisabledAPIs = ConfigLoader.to_OrderedOptions(
- {'jobs.index'=>{}, 'pipeline_instances.index'=>{}})
-
- authorize_with :active
- get :contents, params: {
- id: groups(:aproject).uuid,
- format: :json,
- }
- check_project_contents_response %w'arvados#pipelineInstance arvados#job'
- end
-
test 'get contents with low max_index_database_read' do
# Some result will certainly have at least 12 bytes in a
- # restricted column
+ # restricted column.
+ #
+ # We cannot use collections.manifest_text to test this:
+ # GroupsController refuses to select manifest_text, because it
+ # does not sign manifests in a groups#contents response.
Rails.configuration.API.MaxIndexDatabaseRead = 12
authorize_with :active
get :contents, params: {
- id: groups(:aproject).uuid,
+ uuid: users(:active).uuid,
format: :json,
}
assert_response :success
@@ -587,8 +592,8 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
format: :json,
}
assert_response :success
- # Should not be trashed
- assert_nil Group.find_by_uuid(groups(grp).uuid)
+ # Should be trashed
+ assert Group.find_by_uuid(groups(grp).uuid).is_trashed
end
end
@@ -959,6 +964,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
end
get :contents, params: {:include => "owner_uuid", :exclude_home_project => true}
+ assert_response 200
assert_equal 1, json_response['items'].length
assert_equal groups(:project_owned_by_foo).uuid, json_response['items'][0]["uuid"]
@@ -975,6 +981,42 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
assert_response 422
end
+ [[false, 'owner_uuid'],
+ [false, []],
+ [false, ''],
+ [true, 'container_uuid'],
+ [true, ['container_uuid']],
+ [true, ['owner_uuid', 'container_uuid'], ['uuid', 'container_uuid', 'state', 'output']],
+ ].each do |check_container_included, include_param, select_param|
+ test "contents, include=#{include_param.inspect}" do
+ authorize_with :active
+ get :contents, params: {
+ :id => users(:active).uuid,
+ :include => include_param,
+ :limit => 1000,
+ :select => select_param,
+ }
+ assert_response 200
+ if include_param.empty?
+ assert_equal false, json_response.include?('included')
+ return
+ end
+ incl = {}
+ json_response['included'].andand.each do |ctr|
+ incl[ctr['uuid']] = ctr
+ end
+ next if !check_container_included
+ checked_crs = 0
+ json_response['items'].each do |item|
+ next if !item['container_uuid']
+ assert_equal item['container_uuid'], incl[item['container_uuid']]['uuid']
+ assert_not_empty incl[item['container_uuid']]['state']
+ checked_crs += 1
+ end
+ assert_operator 0, :<, checked_crs
+ end
+ end
+
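
The include= tests added above also fix the response shape for consumers: related containers arrive once each in a top-level "included" array rather than being embedded in every item. A consumer-side sketch, assuming resp is a parsed groups#contents JSON response requested with include=container_uuid:

    included_by_uuid = resp["included"].to_h { |c| [c["uuid"], c] }
    resp["items"].each do |cr|
      ctr = included_by_uuid[cr["container_uuid"]]
      puts "#{cr['uuid']} -> container state #{ctr['state']}" if ctr
    end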
test "include_trash does not return trash inside frozen project" do
authorize_with :active
trashtime = Time.now - 1.second
diff --git a/services/api/test/functional/arvados/v1/humans_controller_test.rb b/services/api/test/functional/arvados/v1/humans_controller_test.rb
deleted file mode 100644
index d73fb30513..0000000000
--- a/services/api/test/functional/arvados/v1/humans_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::HumansControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb b/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
deleted file mode 100644
index 46cfac5c9a..0000000000
--- a/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'helpers/git_test_helper'
-
-class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
- fixtures :repositories, :users, :jobs, :links, :collections
-
- setup do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
- end
-
- BASE_FILTERS = {
- 'repository' => ['=', 'active/foo'],
- 'script' => ['=', 'hash'],
- 'script_version' => ['in git', 'main'],
- 'docker_image_locator' => ['=', nil],
- 'arvados_sdk_version' => ['=', nil],
- }
-
- def filters_from_hash(hash)
- hash.each_pair.map { |name, filter| [name] + filter }
- end
-
- test "find Job with script version range" do
- get :index, params: {
- filters: [["repository", "=", "active/foo"],
- ["script", "=", "hash"],
- ["script_version", "in git", "tag1"]]
- }
- assert_response :success
- assert_not_nil assigns(:objects)
- assert_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- end
-
- test "find Job with script version range exclusions" do
- get :index, params: {
- filters: [["repository", "=", "active/foo"],
- ["script", "=", "hash"],
- ["script_version", "not in git", "tag1"]]
- }
- assert_response :success
- assert_not_nil assigns(:objects)
- refute_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- end
-
- test "find Job with Docker image range" do
- get :index, params: {
- filters: [["docker_image_locator", "in docker",
- "arvados/apitestfixture"]]
- }
- assert_response :success
- assert_not_nil assigns(:objects)
- assert_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_docker_job_run).uuid)
- refute_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- end
-
- test "find Job with Docker image using reader tokens" do
- authorize_with :inactive
- get(:index, params: {
- filters: [["docker_image_locator", "in docker",
- "arvados/apitestfixture"]],
- reader_tokens: [api_token(:active)],
- })
- assert_response :success
- assert_not_nil assigns(:objects)
- assert_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_docker_job_run).uuid)
- refute_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- end
-
- test "'in docker' filter accepts arrays" do
- get :index, params: {
- filters: [["docker_image_locator", "in docker",
- ["_nonesuchname_", "arvados/apitestfixture"]]]
- }
- assert_response :success
- assert_not_nil assigns(:objects)
- assert_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_docker_job_run).uuid)
- refute_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- end
-
- test "'not in docker' filter accepts arrays" do
- get :index, params: {
- filters: [["docker_image_locator", "not in docker",
- ["_nonesuchname_", "arvados/apitestfixture"]]]
- }
- assert_response :success
- assert_not_nil assigns(:objects)
- assert_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_job_run).uuid)
- refute_includes(assigns(:objects).map { |job| job.uuid },
- jobs(:previous_docker_job_run).uuid)
- end
-
-end
diff --git a/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb b/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb
deleted file mode 100644
index d6f4347b87..0000000000
--- a/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::JobTasksControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/jobs_controller_test.rb b/services/api/test/functional/arvados/v1/jobs_controller_test.rb
deleted file mode 100644
index 9298f23d54..0000000000
--- a/services/api/test/functional/arvados/v1/jobs_controller_test.rb
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'helpers/git_test_helper'
-
-class Arvados::V1::JobsControllerTest < ActionController::TestCase
-
- test "search jobs by uuid with >= query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- assert_equal false, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
- end
-
- test "search jobs by uuid with <= query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
- end
-
- test "search jobs by uuid with >= and <= query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7'],
- ['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal found, ['zzzzz-8i9sb-pshmckwoma9plh7']
- end
-
- test "search jobs by uuid with < query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', '<', 'zzzzz-8i9sb-pshmckwoma9plh7']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal false, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
- end
-
- test "search jobs by uuid with like query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', 'like', '%hmckwoma9pl%']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal found, ['zzzzz-8i9sb-pshmckwoma9plh7']
- end
-
- test "search jobs by uuid with 'in' query" do
- authorize_with :active
- get :index, params: {
- filters: [['uuid', 'in', ['zzzzz-8i9sb-4cf0nhn6xte809j',
- 'zzzzz-8i9sb-pshmckwoma9plh7']]]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal found.sort, ['zzzzz-8i9sb-4cf0nhn6xte809j',
- 'zzzzz-8i9sb-pshmckwoma9plh7']
- end
-
- test "search jobs by uuid with 'not in' query" do
- exclude_uuids = [jobs(:running).uuid,
- jobs(:running_cancelled).uuid]
- authorize_with :active
- get :index, params: {
- filters: [['uuid', 'not in', exclude_uuids]]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_not_empty found, "'not in' query returned nothing"
- assert_empty(found & exclude_uuids,
- "'not in' query returned uuids I asked not to get")
- end
-
- ['=', '!='].each do |operator|
- [['uuid', 'zzzzz-8i9sb-pshmckwoma9plh7'],
- ['output', nil]].each do |attr, operand|
- test "search jobs with #{attr} #{operator} #{operand.inspect} query" do
- authorize_with :active
- get :index, params: {
- filters: [[attr, operator, operand]]
- }
- assert_response :success
- values = assigns(:objects).collect { |x| x.send(attr) }
- assert_not_empty values, "query should return non-empty result"
- if operator == '='
- assert_empty values - [operand], "query results do not satisfy query"
- else
- assert_empty values & [operand], "query results do not satisfy query"
- end
- end
- end
- end
-
- test "search jobs by started_at with < query" do
- authorize_with :active
- get :index, params: {
- filters: [['started_at', '<', Time.now.to_s]]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- end
-
- test "search jobs by started_at with > query" do
- authorize_with :active
- get :index, params: {
- filters: [['started_at', '>', Time.now.to_s]]
- }
- assert_response :success
- assert_equal 0, assigns(:objects).count
- end
-
- test "search jobs by started_at with >= query on metric date" do
- authorize_with :active
- get :index, params: {
- filters: [['started_at', '>=', '2014-01-01']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- end
-
- test "search jobs by started_at with >= query on metric date and time" do
- authorize_with :active
- get :index, params: {
- filters: [['started_at', '>=', '2014-01-01 01:23:45']]
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- end
-
- test "search jobs with 'any' operator" do
- authorize_with :active
- get :index, params: {
- where: { any: ['contains', 'pshmckw'] }
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal 0, found.index('zzzzz-8i9sb-pshmckwoma9plh7')
- assert_equal 1, found.count
- end
-
- test "search jobs by nonexistent column with < query" do
- authorize_with :active
- get :index, params: {
- filters: [['is_borked', '<', 'fizzbuzz']]
- }
- assert_response 422
- end
-
- [:spectator, :admin].each_with_index do |which_token, i|
- test "get job queue as #{which_token} user" do
- authorize_with which_token
- get :queue
- assert_response :success
- assert_equal 0, assigns(:objects).count
- end
- end
-
- test "job includes assigned nodes" do
- authorize_with :active
- get :show, params: {id: jobs(:nearly_finished_job).uuid}
- assert_response :success
- assert_equal([nodes(:busy).uuid], json_response["node_uuids"])
- end
-
- test 'get job with components' do
- authorize_with :active
- get :show, params: {id: jobs(:running_job_with_components).uuid}
- assert_response :success
- assert_not_nil json_response["components"]
- assert_equal ["component1", "component2"], json_response["components"].keys
- end
-end
diff --git a/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb b/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb
deleted file mode 100644
index 9da9d01631..0000000000
--- a/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::KeepDisksControllerTest < ActionController::TestCase
-
- def default_ping_opts
- {ping_secret: '', service_ssl_flag: false, service_port: 1234}
- end
-
- test "add keep disk with admin token" do
- authorize_with :admin
- post :ping, params: default_ping_opts.
- merge(filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f')
- assert_response :success
- assert_not_nil assigns(:object)
- new_keep_disk = JSON.parse(@response.body)
- assert_not_nil new_keep_disk['uuid']
- assert_not_nil new_keep_disk['ping_secret']
- assert_not_equal '', new_keep_disk['ping_secret']
- end
-
- [
- {},
- {filesystem_uuid: ''},
- ].each do |opts|
- test "add keep disk with[out] filesystem_uuid #{opts}" do
- authorize_with :admin
- post :ping, params: default_ping_opts.merge(opts)
- assert_response :success
- assert_not_nil JSON.parse(@response.body)['uuid']
- end
- end
-
- test "refuse to add keep disk without admin token" do
- post :ping, params: default_ping_opts
- assert_response 404
- end
-
- test "ping keep disk" do
- post :ping, params: default_ping_opts.
- merge(id: keep_disks(:nonfull).uuid,
- ping_secret: keep_disks(:nonfull).ping_secret,
- filesystem_uuid: keep_disks(:nonfull).filesystem_uuid)
- assert_response :success
- assert_not_nil assigns(:object)
- keep_disk = JSON.parse(@response.body)
- assert_not_nil keep_disk['uuid']
- assert_not_nil keep_disk['ping_secret']
- end
-
- test "admin should get index with ping_secret" do
- authorize_with :admin
- get :index
- assert_response :success
- assert_not_nil assigns(:objects)
- items = JSON.parse(@response.body)['items']
- assert_not_equal 0, items.size
- assert_not_nil items[0]['ping_secret']
- end
-
- # inactive user sees keep disks
- test "inactive user should get index" do
- authorize_with :inactive
- get :index
- assert_response :success
- items = JSON.parse(@response.body)['items']
- assert_not_equal 0, items.size
-
- # Check these are still included
- assert items[0]['service_host']
- assert items[0]['service_port']
- end
-
- # active user sees non-secret attributes of keep disks
- test "active user should get non-empty index with no ping_secret" do
- authorize_with :active
- get :index
- assert_response :success
- items = JSON.parse(@response.body)['items']
- assert_not_equal 0, items.size
- items.each do |item|
- assert_nil item['ping_secret']
- assert_not_nil item['is_readable']
- assert_not_nil item['is_writable']
- assert_not_nil item['service_host']
- assert_not_nil item['service_port']
- end
- end
-
- test "search keep_services with 'any' operator" do
- authorize_with :active
- get :index, params: {
- where: { any: ['contains', 'o2t1q5w'] }
- }
- assert_response :success
- found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-penuu-5w2o2t1q5wy7fhn')
- end
-end
diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
deleted file mode 100644
index 47f6c5ff3f..0000000000
--- a/services/api/test/functional/arvados/v1/nodes_controller_test.rb
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::NodesControllerTest < ActionController::TestCase
-
- test "should get index with ping_secret" do
- authorize_with :admin
- get :index
- assert_response :success
- assert_not_nil assigns(:objects)
- node_items = JSON.parse(@response.body)['items']
- assert_not_equal 0, node_items.size
- assert_not_nil node_items[0]['info'].andand['ping_secret']
- end
-
- # inactive user does not see any nodes
- test "inactive user should get empty index" do
- authorize_with :inactive
- get :index
- assert_response :success
- assert_equal 0, json_response['items'].size
- assert_equal 0, json_response['items_available']
- end
-
- # active user sees non-secret attributes of up and recently-up nodes
- test "active user should get non-empty index with no ping_secret" do
- authorize_with :active
- get :index
- assert_response :success
- assert_operator 0, :<, json_response['items_available']
- node_items = json_response['items']
- assert_operator 0, :<, node_items.size
- found_busy_node = false
- node_items.each do |node|
- assert_nil node['info'].andand['ping_secret']
- assert_not_nil node['crunch_worker_state']
- if node['uuid'] == nodes(:busy).uuid
- found_busy_node = true
- assert_equal 'busy', node['crunch_worker_state']
- end
- end
- assert_equal true, found_busy_node
- end
-
- test "node should ping with ping_secret and no token" do
- post :ping, params: {
- id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
- instance_id: 'i-0000000',
- local_ipv4: '172.17.2.174',
- ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
- }
- assert_response :success
- response = JSON.parse(@response.body)
- assert_equal 'zzzzz-7ekkf-2z3mc76g2q73aio', response['uuid']
- # Ensure we are getting the "superuser" attributes, too
- assert_not_nil response['first_ping_at'], '"first_ping_at" attr missing'
- assert_not_nil response['info'], '"info" attr missing'
- assert_not_nil response['nameservers'], '"nameservers" attr missing'
- end
-
- test "node should fail ping with invalid ping_secret" do
- post :ping, params: {
- id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
- instance_id: 'i-0000000',
- local_ipv4: '172.17.2.174',
- ping_secret: 'dricrha4lcpi23pd69e44soanc069udawxvn3zzj45hs8bumvn'
- }
- assert_response 401
- end
-
- test "create node" do
- authorize_with :admin
- post :create, params: {node: {}}
- assert_response :success
- assert_not_nil json_response['uuid']
- assert_not_nil json_response['info'].is_a? Hash
- assert_not_nil json_response['info']['ping_secret']
- assert_nil json_response['slot_number']
- assert_nil json_response['hostname']
- end
-
- test "create node and assign slot" do
- authorize_with :admin
- post :create, params: {node: {}, assign_slot: true}
- assert_response :success
- assert_not_nil json_response['uuid']
- assert_not_nil json_response['info'].is_a? Hash
- assert_not_nil json_response['info']['ping_secret']
- assert_operator 0, :<, json_response['slot_number']
- n = json_response['slot_number']
- assert_equal "compute#{n}", json_response['hostname']
-
- node = Node.where(uuid: json_response['uuid']).first
- assert_equal n, node.slot_number
- assert_equal "compute#{n}", node.hostname
- end
-
- test "update node and assign slot" do
- authorize_with :admin
- node = nodes(:new_with_no_hostname)
- post :update, params: {id: node.uuid, node: {}, assign_slot: true}
- assert_response :success
- assert_operator 0, :<, json_response['slot_number']
- n = json_response['slot_number']
- assert_equal "compute#{n}", json_response['hostname']
-
- node.reload
- assert_equal n, node.slot_number
- assert_equal "compute#{n}", node.hostname
- end
-
- test "update node and assign slot, don't clobber hostname" do
- authorize_with :admin
- node = nodes(:new_with_custom_hostname)
- post :update, params: {id: node.uuid, node: {}, assign_slot: true}
- assert_response :success
- assert_operator 0, :<, json_response['slot_number']
- n = json_response['slot_number']
- assert_equal "custom1", json_response['hostname']
- end
-
- test "ping adds node stats to info" do
- authorize_with :admin
- node = nodes(:idle)
- post :ping, params: {
- id: node.uuid,
- ping_secret: node.info['ping_secret'],
- total_cpu_cores: 32,
- total_ram_mb: 1024,
- total_scratch_mb: 2048
- }
- assert_response :success
- info = JSON.parse(@response.body)['info']
- properties = JSON.parse(@response.body)['properties']
- assert_equal(node.info['ping_secret'], info['ping_secret'])
- assert_equal(32, properties['total_cpu_cores'].to_i)
- assert_equal(1024, properties['total_ram_mb'].to_i)
- assert_equal(2048, properties['total_scratch_mb'].to_i)
- end
-
- test "active user can see their assigned job" do
- authorize_with :active
- get :show, params: {id: nodes(:busy).uuid}
- assert_response :success
- assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"])
- end
-
- test "user without job read permission can't see job" do
- authorize_with :spectator
- get :show, params: {id: nodes(:busy).uuid}
- assert_response :success
- assert_nil(json_response["job"], "spectator can see node's assigned job")
- end
-
- [:admin, :spectator].each do |user|
- test "select param does not break node list for #{user}" do
- authorize_with user
- get :index, params: {select: ['domain']}
- assert_response :success
- assert_operator 0, :<, json_response['items_available']
- end
- end
-
- test "admin can associate a job with a node" do
- changed_node = nodes(:idle)
- assigned_job = jobs(:queued)
- authorize_with :admin
- post :update, params: {
- id: changed_node.uuid,
- node: {job_uuid: assigned_job.uuid},
- }
- assert_response :success
- assert_equal(changed_node.hostname, json_response["hostname"],
- "hostname mismatch after defining job")
- assert_equal(assigned_job.uuid, json_response["job_uuid"],
- "mismatch in node's assigned job UUID")
- end
-
- test "non-admin can't associate a job with a node" do
- authorize_with :active
- post :update, params: {
- id: nodes(:idle).uuid,
- node: {job_uuid: jobs(:queued).uuid},
- }
- assert_response 403
- end
-
- test "admin can unassign a job from a node" do
- changed_node = nodes(:busy)
- authorize_with :admin
- post :update, params: {
- id: changed_node.uuid,
- node: {job_uuid: nil},
- }
- assert_response :success
- assert_equal(changed_node.hostname, json_response["hostname"],
- "hostname mismatch after defining job")
- assert_nil(json_response["job_uuid"],
- "node still has job assignment after update")
- end
-
- test "non-admin can't unassign a job from a node" do
- authorize_with :project_viewer
- post :update, params: {
- id: nodes(:busy).uuid,
- node: {job_uuid: nil},
- }
- assert_response 403
- end
-
- test "node should fail ping with invalid hostname config format" do
- Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%04' # should end with "04d"
- post :ping, params: {
- id: nodes(:new_with_no_hostname).uuid,
- ping_secret: nodes(:new_with_no_hostname).info['ping_secret'],
- }
- assert_response 422
- end
-
- test "first ping should set ip addr using local_ipv4 when provided" do
- post :ping, params: {
- id: 'zzzzz-7ekkf-nodenoipaddryet',
- instance_id: 'i-0000000',
- local_ipv4: '172.17.2.172',
- ping_secret: 'abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2'
- }
- assert_response :success
- response = JSON.parse(@response.body)
- assert_equal 'zzzzz-7ekkf-nodenoipaddryet', response['uuid']
- assert_equal '172.17.2.172', response['ip_address']
- end
-
- test "first ping should set ip addr using remote_ip when local_ipv4 is not provided" do
- post :ping, params: {
- id: 'zzzzz-7ekkf-nodenoipaddryet',
- instance_id: 'i-0000000',
- ping_secret: 'abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2'
- }
- assert_response :success
- response = JSON.parse(@response.body)
- assert_equal 'zzzzz-7ekkf-nodenoipaddryet', response['uuid']
- assert_equal request.remote_ip, response['ip_address']
- end
-
- test "future pings should not change previous ip address" do
- post :ping, params: {
- id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
- instance_id: 'i-0000000',
- local_ipv4: '172.17.2.175',
- ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
- }
- assert_response :success
- response = JSON.parse(@response.body)
- assert_equal 'zzzzz-7ekkf-2z3mc76g2q73aio', response['uuid']
- assert_equal '172.17.2.174', response['ip_address'] # original ip address is not overwritten
- end
-end
diff --git a/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb
deleted file mode 100644
index e455354c11..0000000000
--- a/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::PipelineInstancesControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb
deleted file mode 100644
index 992749c6f1..0000000000
--- a/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::PipelineTemplatesControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
deleted file mode 100644
index 84bd846c91..0000000000
--- a/services/api/test/functional/arvados/v1/repositories_controller_test.rb
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
- test "should get_all_logins with admin token" do
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- end
-
- test "should get_all_logins with non-admin token" do
- authorize_with :active
- get :get_all_permissions
- assert_response 403
- end
-
- test "get_all_permissions gives RW to repository owner" do
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- ok = false
- json_response['repositories'].each do |repo|
- if repo['uuid'] == repositories(:repository2).uuid
- if repo['user_permissions'][users(:active).uuid]['can_write']
- ok = true
- end
- end
- end
- assert_equal(true, ok,
- "No permission on own repo '@{repositories(:repository2).uuid}'")
- end
-
- test "get_all_permissions takes into account is_admin flag" do
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- json_response['repositories'].each do |repo|
- assert_not_nil(repo['user_permissions'][users(:admin).uuid],
- "Admin user is not listed in perms for #{repo['uuid']}")
- assert_equal(true,
- repo['user_permissions'][users(:admin).uuid]['can_write'],
- "Admin has no perms for #{repo['uuid']}")
- end
- end
-
- test "get_all_permissions takes into account is_active flag" do
- act_as_user users(:active) do
- Repository.create! name: 'active/testrepo'
- end
- act_as_system_user do
- u = users(:active)
- u.unsetup
- u.save!
- end
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- json_response['repositories'].each do |r|
- r['user_permissions'].each do |user_uuid, perms|
- refute_equal user_uuid, users(:active).uuid
- end
- end
- end
-
- test "get_all_permissions does not give any access to user without permission" do
- viewer_uuid = users(:project_viewer).uuid
- assert_equal(authorized_keys(:project_viewer).authorized_user_uuid,
- viewer_uuid,
- "project_viewer must have an authorized_key for this test to work")
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- readable_repos = json_response["repositories"].select do |repo|
- repo["user_permissions"].has_key?(viewer_uuid)
- end
- assert_equal(["arvados"], readable_repos.map { |r| r["name"] },
- "project_viewer should only have permissions on public repos")
- end
-
- test "get_all_permissions gives gitolite R to user with read-only access" do
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- found_it = false
- assert_equal(authorized_keys(:spectator).authorized_user_uuid,
- users(:spectator).uuid,
- "spectator must have an authorized_key for this test to work")
- json_response['repositories'].each do |repo|
- next unless repo['uuid'] == repositories(:foo).uuid
- assert_equal('R',
- repo['user_permissions'][users(:spectator).uuid]['gitolite_permissions'],
- "spectator user should have just R access to #{repo['uuid']}")
- found_it = true
- end
- assert_equal true, found_it, "spectator user does not have R on foo repo"
- end
-
- test "get_all_permissions provides admin and active user keys" do
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- [:active, :admin].each do |u|
- assert_equal(1, json_response['user_keys'][users(u).uuid].andand.count,
- "expected 1 key for #{u} (#{users(u).uuid})")
- assert_equal(json_response['user_keys'][users(u).uuid][0]['public_key'],
- authorized_keys(u).public_key,
- "response public_key does not match fixture #{u}.")
- end
- end
-
- test "get_all_permissions lists all repos regardless of permissions" do
- act_as_system_user do
- # Create repos that could potentially be left out of the
- # permission list by accident.
-
- # No authorized_key, no username (this can't even be done
- # without skipping validations)
- r = Repository.create name: 'root/testrepo'
- assert r.save validate: false
-
- r = Repository.create name: 'invalid username / repo name', owner_uuid: users(:inactive).uuid
- assert r.save validate: false
- end
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- assert_equal(Repository.count, json_response["repositories"].size)
- end
-
- test "get_all_permissions lists user permissions for users with no authorized keys" do
- authorize_with :admin
- AuthorizedKey.destroy_all
- get :get_all_permissions
- assert_response :success
- assert_equal(Repository.count, json_response["repositories"].size)
- repos_with_perms = []
- json_response['repositories'].each do |repo|
- if repo['user_permissions'].any?
- repos_with_perms << repo['uuid']
- end
- end
- assert_not_empty repos_with_perms, 'permissions are missing'
- end
-
- # Ensure get_all_permissions correctly describes what the normal
- # permission system would do.
- test "get_all_permissions obeys group permissions" do
- act_as_user system_user do
- r = Repository.create!(name: 'admin/groupcanwrite', owner_uuid: users(:admin).uuid)
- g = Group.create!(group_class: 'role', name: 'repo-writers')
- u1 = users(:active)
- u2 = users(:spectator)
- Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_manage')
- Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
- Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
-
- r = Repository.create!(name: 'admin/groupreadonly', owner_uuid: users(:admin).uuid)
- g = Group.create!(group_class: 'role', name: 'repo-readers')
- u1 = users(:active)
- u2 = users(:spectator)
- Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_read')
- Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
- Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
- end
- authorize_with :admin
- get :get_all_permissions
- assert_response :success
- json_response['repositories'].each do |repo|
- repo['user_permissions'].each do |user_uuid, perms|
- u = User.find_by_uuid(user_uuid)
- if perms['can_read']
- assert u.can? read: repo['uuid']
- assert_match(/R/, perms['gitolite_permissions'])
- else
- refute_match(/R/, perms['gitolite_permissions'])
- end
- if perms['can_write']
- assert u.can? write: repo['uuid']
- assert_match(/RW\+/, perms['gitolite_permissions'])
- else
- refute_match(/W/, perms['gitolite_permissions'])
- end
- if perms['can_manage']
- assert u.can? manage: repo['uuid']
- assert_match(/RW\+/, perms['gitolite_permissions'])
- end
- end
- end
- end
-
- test "default index includes fetch_url" do
- authorize_with :active
- get(:index)
- assert_response :success
- assert_includes(json_response["items"].map { |r| r["fetch_url"] },
- "git@git.zzzzz.arvadosapi.com:active/foo.git")
- end
-
- [
- {cfg: "GitSSH.ExternalURL", cfgval: URI("ssh://git@example.com"), match: %r"^git@example.com:"},
- {cfg: "GitSSH.ExternalURL", cfgval: URI(""), match: %r"^git@git.zzzzz.arvadosapi.com:"},
- {cfg: "GitSSH", cfgval: false, refute: /^git@/ },
- {cfg: "GitHTTP.ExternalURL", cfgval: URI("https://example.com/"), match: %r"^https://example.com/"},
- {cfg: "GitHTTP.ExternalURL", cfgval: URI(""), match: %r"^https://git.zzzzz.arvadosapi.com/"},
- {cfg: "GitHTTP", cfgval: false, refute: /^http/ },
- ].each do |expect|
- test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
- ConfigLoader.set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
- authorize_with :active
- get :index
- assert_response :success
- assert_not_empty json_response['items']
- json_response['items'].each do |r|
- if expect[:refute]
- r['clone_urls'].each do |u|
- refute_match expect[:refute], u
- end
- else
- assert((r['clone_urls'].any? do |u|
- expect[:match].match u
- end),
- "no match for #{expect[:match]} in #{r['clone_urls'].inspect}")
- end
- end
- end
- end
-
- test "select push_url in index" do
- authorize_with :active
- get(:index, params: {select: ["uuid", "push_url"]})
- assert_response :success
- assert_includes(json_response["items"].map { |r| r["push_url"] },
- "git@git.zzzzz.arvadosapi.com:active/foo.git")
- end
-
- test "select clone_urls in index" do
- authorize_with :active
- get(:index, params: {select: ["uuid", "clone_urls"]})
- assert_response :success
- assert_includes(json_response["items"].map { |r| r["clone_urls"] }.flatten,
- "git@git.zzzzz.arvadosapi.com:active/foo.git")
- end
-end
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 65a2b64b8a..4b46906072 100644
--- a/services/api/test/functional/arvados/v1/schema_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb
@@ -31,7 +31,6 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections.DefaultTrashLifetime
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
- assert_match(/^unknown$/, discovery_doc['packageVersion'])
assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services.Websocket.ExternalURL.to_s
assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services.Workbench1.ExternalURL.to_s
assert_equal('zzzzz', discovery_doc['uuidPrefix'])
@@ -47,6 +46,15 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
assert_equal 'aaa888fff', discovery_doc['sourceVersion']
end
+ ["unknown", "1.0.1-stable"].each do |pkg_version|
+ test "packageVersion #{pkg_version} comes from AppVersion" do
+ AppVersion.stubs(:package_version).returns(pkg_version)
+ get :index
+ assert_response :success
+ assert_equal(pkg_version, json_response["packageVersion"])
+ end
+ end
+
test "discovery document overrides packageVersion with config" do
Rails.configuration.package_version = '1.0.0-stable'
get :index
@@ -60,16 +68,16 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
assert_response :success
discovery_doc = JSON.parse(@response.body)
assert_equal('POST',
- discovery_doc['resources']['jobs']['methods']['create']['httpMethod'])
+ discovery_doc['resources']['collections']['methods']['create']['httpMethod'])
end
test "non-empty disable_api_methods" do
Rails.configuration.API.DisabledAPIs = ConfigLoader.to_OrderedOptions(
- {'jobs.create'=>{}, 'pipeline_instances.create'=>{}, 'pipeline_templates.create'=>{}})
+ {'collections.create'=>{}, 'workflows.create'=>{}})
get :index
assert_response :success
discovery_doc = JSON.parse(@response.body)
- ['jobs', 'pipeline_instances', 'pipeline_templates'].each do |r|
+ ['collections', 'workflows'].each do |r|
refute_includes(discovery_doc['resources'][r]['methods'].keys(), 'create')
end
end
@@ -78,14 +86,16 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
get :index
assert_response :success
- discovery_doc = JSON.parse(@response.body)
-
- group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
- group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
+ groups_methods = JSON.parse(@response.body)['resources']['groups']['methods']
+ group_index_params = groups_methods['list']['parameters'].each_pair.to_a
+ group_contents_params = groups_methods['contents']['parameters'].each_pair.to_a
- assert_equal group_contents_params.keys.sort, (group_index_params.keys + ['uuid', 'recursive', 'include', 'include_old_versions']).sort
+ assert_equal(
+ group_contents_params & group_index_params, group_index_params,
+ "group contents methods does not take all the same parameters index does",
+ )
- recursive_param = group_contents_params['recursive']
+ recursive_param = groups_methods['contents']['parameters']['recursive']
assert_equal 'boolean', recursive_param['type']
assert_equal false, recursive_param['required']
assert_equal 'query', recursive_param['location']
@@ -97,10 +107,10 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
discovery_doc = JSON.parse(@response.body)
- specimens_index_params = discovery_doc['resources']['specimens']['methods']['index']['parameters'] # no changes from super
- coll_index_params = discovery_doc['resources']['collections']['methods']['index']['parameters']
+ workflows_index_params = discovery_doc['resources']['workflows']['methods']['list']['parameters'] # no changes from super
+ coll_index_params = discovery_doc['resources']['collections']['methods']['list']['parameters']
- assert_equal (specimens_index_params.keys + ['include_trash', 'include_old_versions']).sort, coll_index_params.keys.sort
+ assert_equal (workflows_index_params.keys + ['include_trash', 'include_old_versions']).sort, coll_index_params.keys.sort
include_trash_param = coll_index_params['include_trash']
assert_equal 'boolean', include_trash_param['type']
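
The rewritten parameter check earlier in this hunk leans on a small Ruby idiom: for arrays a and b, (a & b) == b holds exactly when every element of b appears in a, in the same relative order, which turns Array#& into a superset assertion. A standalone sketch with hypothetical [name, spec] parameter pairs:

    index_params    = [["limit", "integer"], ["order", "string"]]
    contents_params = index_params + [["recursive", "boolean"]]
    (contents_params & index_params) == index_params   # => true
    missing_one = contents_params - [["order", "string"]]
    (missing_one & index_params) == index_params       # => false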
diff --git a/services/api/test/functional/arvados/v1/specimens_controller_test.rb b/services/api/test/functional/arvados/v1/specimens_controller_test.rb
deleted file mode 100644
index df681e6f5e..0000000000
--- a/services/api/test/functional/arvados/v1/specimens_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::SpecimensControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/traits_controller_test.rb b/services/api/test/functional/arvados/v1/traits_controller_test.rb
deleted file mode 100644
index 3c8d097350..0000000000
--- a/services/api/test/functional/arvados/v1/traits_controller_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class Arvados::V1::TraitsControllerTest < ActionController::TestCase
-end
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index cc0b5e1320..d10258bf7d 100644
--- a/services/api/test/functional/arvados/v1/users_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/users_controller_test.rb
@@ -121,12 +121,10 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_nil updated['username'], 'expected no username'
end
- test "create user with user, vm and repo as input" do
+ test "create user with user and vm as input" do
authorize_with :admin
- repo_name = 'usertestrepo'
post :setup, params: {
- repo_name: repo_name,
user: {
uuid: 'zzzzz-tpzed-abcdefghijklmno',
first_name: "in_create_test_first_name",
@@ -145,11 +143,8 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # repo link and link add user to 'All users' group
- verify_links_added 3
-
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- "foo/#{repo_name}", created['uuid'], 'arvados#repository', true, 'Repository'
+ # added links: vm permission, 'all users' group
+ verify_links_added 2
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
@@ -165,7 +160,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
post :setup, params: {
uuid: 'bogus_uuid',
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid
}
response_body = JSON.parse(@response.body)
@@ -179,7 +173,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
post :setup, params: {
user: {uuid: 'bogus_uuid'},
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid,
}
response_body = JSON.parse(@response.body)
@@ -193,7 +186,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
authorize_with :admin
post :setup, params: {
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid,
}
response_body = JSON.parse(@response.body)
@@ -208,7 +200,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
post :setup, params: {
user: {},
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid,
}
response_body = JSON.parse(@response.body)
@@ -218,13 +209,12 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
'Expected ArgumentError'
end
- test "invoke setup with existing uuid, vm and repo and verify links" do
+ test "invoke setup with existing uuid and vm permission, and verify links" do
authorize_with :admin
inactive_user = users(:inactive)
post :setup, params: {
uuid: users(:inactive).uuid,
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid
}
@@ -238,10 +228,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_equal inactive_user['email'], resp_obj['email'],
'expecting inactive user email'
- # expect repo and vm links
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'inactiveuser/usertestrepo', resp_obj['uuid'], 'arvados#repository', true, 'Repository'
-
+ # expect vm permission link
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@vm_uuid, resp_obj['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
end
@@ -266,7 +253,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
'expecting inactive user email'
end
- test "setup user with valid email and repo as input" do
+ test "setup user with valid email and repo(ignored) as input" do
authorize_with :admin
post :setup, params: {
@@ -280,15 +267,14 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil response_object['uuid'], 'expected uuid for the new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # three extra links; system_group, group and repo perms
- verify_links_added 3
+ # added links: system_group, 'all users' group.
+ verify_links_added 2
end
test "setup user with fake vm and expect error" do
authorize_with :admin
post :setup, params: {
- repo_name: 'usertestrepo',
vm_uuid: 'no_such_vm',
user: {email: 'foo@example.com'},
}
@@ -300,11 +286,10 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
'Expected RuntimeError: No vm found for no_such_vm'
end
- test "setup user with valid email, repo and real vm as input" do
+ test "setup user with valid email and real vm as input" do
authorize_with :admin
post :setup, params: {
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid,
user: {email: 'foo@example.com'}
}
@@ -315,8 +300,8 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil response_object['uuid'], 'expected uuid for the new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # four extra links; system_group, group, vm, repo
- verify_links_added 4
+ # added links: system_group, 'all users' group, vm.
+ verify_links_added 3
end
test "setup user with valid email, no vm and no repo as input" do
@@ -332,24 +317,20 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil response_object['uuid'], 'expected uuid for new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # two extra links; system_group, and group
+ # added links: system_group, 'all users' group.
verify_links_added 2
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', response_object['uuid'], 'arvados#group', true, 'Group'
- verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
- 'foo/usertestrepo', response_object['uuid'], 'arvados#repository', true, 'Repository'
-
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
nil, response_object['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
end
- test "setup user with email, first name, repo name and vm uuid" do
+ test "setup user with email, first name, and vm uuid" do
authorize_with :admin
post :setup, params: {
- repo_name: 'usertestrepo',
vm_uuid: @vm_uuid,
user: {
first_name: 'test_first_name',
@@ -365,8 +346,8 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_equal 'test_first_name', response_object['first_name'],
'expecting first name'
- # four extra links; system_group, group, repo and vm
- verify_links_added 4
+ # added links: system_group, 'all users' group, vm.
+ verify_links_added 3
end
test "setup user with an existing user email and check different object is created" do
@@ -374,7 +355,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
inactive_user = users(:inactive)
post :setup, params: {
- repo_name: 'usertestrepo',
user: {
email: inactive_user['email']
}
@@ -387,15 +367,14 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_equal response_object['uuid'], inactive_user['uuid'],
'expected different uuid after create operation'
assert_equal inactive_user['email'], response_object['email'], 'expected given email'
- # system_group, group, and repo. No vm link.
- verify_links_added 3
+ # added links: system_group, 'all users' group.
+ verify_links_added 2
end
test "setup user with openid prefix" do
authorize_with :admin
post :setup, params: {
- repo_name: 'usertestrepo',
user: {
first_name: "in_create_test_first_name",
last_name: "test_last_name",
@@ -413,12 +392,8 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # verify links
- # three new links: system_group, repo, and 'All users' group.
- verify_links_added 3
-
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+ # added links: system_group, 'all users' group.
+ verify_links_added 2
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
@@ -427,7 +402,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
end
- test "setup user with user, vm and repo and verify links" do
+ test "setup user with user and vm, and verify links" do
authorize_with :admin
post :setup, params: {
@@ -437,7 +412,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
email: "foo@example.com"
},
vm_uuid: @vm_uuid,
- repo_name: 'usertestrepo',
}
assert_response :success
@@ -450,14 +424,11 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # four new links: system_group, repo, vm and 'All users' group link
- verify_links_added 4
+ # added links: system_group, 'all users' group, vm
+ verify_links_added 3
# system_group isn't part of the response. See User#add_system_group_permission_link
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
-
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
@@ -493,13 +464,11 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
'Expected Forbidden error'
end
- test "setup active user with repo and no vm" do
+ test "setup active user with no vm" do
authorize_with :admin
active_user = users(:active)
- # invoke setup with a repository
post :setup, params: {
- repo_name: 'usertestrepo',
uuid: active_user['uuid']
}
@@ -510,13 +479,10 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_equal active_user[:email], created['email'], 'expected input email'
- # verify links
+ # verify links
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'active/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
-
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
end
@@ -524,13 +490,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
test "setup active user with vm and no repo" do
authorize_with :admin
active_user = users(:active)
- repos_query = Repository.where(owner_uuid: active_user.uuid)
- repo_link_query = Link.where(tail_uuid: active_user.uuid,
- link_class: "permission", name: "can_manage")
- repos_count = repos_query.count
- repo_link_count = repo_link_query.count
- # invoke setup with a repository
post :setup, params: {
vm_uuid: @vm_uuid,
uuid: active_user['uuid'],
@@ -548,9 +508,6 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
- assert_equal(repos_count, repos_query.count)
- assert_equal(repo_link_count, repo_link_query.count)
-
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
end
@@ -655,7 +612,6 @@ The Arvados team.
authorize_with :admin
active_user = users(:active)
- # invoke setup with a repository
put :update, params: {
id: active_user['uuid'],
user: {
@@ -682,6 +638,19 @@ The Arvados team.
end
end
+ test "non-bool include_trash param is silently ignored" do
+ authorize_with :spectator
+ Rails.logger.expects(:warn).never
+ get(:index, params: {include_trash: '-1'})
+ end
+
+ test "select only computed field full_name" do
+ authorize_with :active
+ get :show, params: {id: users(:active).uuid, select: ["uuid", "full_name"]}
+ assert_response :success
+ assert_equal("Active User", json_response["full_name"])
+ end
+
test "non-admin user gets only safe attributes from users#show" do
g = act_as_system_user do
create :group, group_class: "role"
@@ -828,7 +797,7 @@ The Arvados team.
authorize_with :admin
remoteuser = User.create!(uuid: "zbbbb-tpzed-remotremotremot")
- tok = ApiClientAuthorization.create!(user: remoteuser, api_client: api_clients(:untrusted)).api_token
+ tok = ApiClientAuthorization.create!(user: remoteuser).api_token
auth = ApiClientAuthorization.validate(token: tok)
assert_not_nil(auth)
@@ -859,19 +828,6 @@ The Arvados team.
assert_response(422)
end
- [[:active, :project_viewer_trustedclient],
- [:active_trustedclient, :project_viewer]].each do |src, dst|
- test "refuse to merge with untrusted token (#{src} -> #{dst})" do
- authorize_with(src)
- post(:merge, params: {
- new_user_token: api_client_authorizations(dst).api_token,
- new_owner_uuid: api_client_authorizations(dst).user.uuid,
- redirect_to_new_user: true,
- })
- assert_response(403)
- end
- end
-
[[:expired_trustedclient, :project_viewer_trustedclient],
[:project_viewer_trustedclient, :expired_trustedclient]].each do |src, dst|
test "refuse to merge with expired token (#{src} -> #{dst})" do
diff --git a/services/api/test/functional/database_controller_test.rb b/services/api/test/functional/database_controller_test.rb
index ef1d0c6d05..ea44cbf453 100644
--- a/services/api/test/functional/database_controller_test.rb
+++ b/services/api/test/functional/database_controller_test.rb
@@ -40,12 +40,12 @@ class DatabaseControllerTest < ActionController::TestCase
test "reset succeeds with admin token" do
new_uuid = nil
act_as_system_user do
- new_uuid = Specimen.create.uuid
+ new_uuid = Collection.create.uuid
end
- assert_not_empty Specimen.where(uuid: new_uuid)
+ assert_not_empty Collection.where(uuid: new_uuid)
authorize_with :admin
post :reset
assert_response 200
- assert_empty Specimen.where(uuid: new_uuid)
+ assert_empty Collection.where(uuid: new_uuid)
end
end
diff --git a/services/api/test/functional/sys_controller_test.rb b/services/api/test/functional/sys_controller_test.rb
index e13d702983..c3f13cf4b8 100644
--- a/services/api/test/functional/sys_controller_test.rb
+++ b/services/api/test/functional/sys_controller_test.rb
@@ -91,12 +91,22 @@ class SysControllerTest < ActionController::TestCase
assert_not_empty Group.where('uuid=? and is_trashed=true', p.uuid)
end
+ test "trash_sweep - role groups are deleted" do
+ p = groups(:trashed_role_on_next_sweep)
+ assert_empty Group.where('uuid=? and is_trashed=true', p.uuid)
+ assert_not_empty Link.where(uuid: links(:foo_file_readable_by_soon_to_be_trashed_role).uuid)
+ authorize_with :admin
+ post :trash_sweep
+ assert_response :success
+ assert_empty Group.where(uuid: p.uuid)
+ assert_empty Link.where(uuid: links(:foo_file_readable_by_soon_to_be_trashed_role).uuid)
+ end
+
test "trash_sweep - delete projects and their contents" do
g_foo = groups(:trashed_project)
g_bar = groups(:trashed_subproject)
g_baz = groups(:trashed_subproject3)
col = collections(:collection_in_trashed_subproject)
- job = jobs(:job_in_trashed_project)
cr = container_requests(:cr_in_trashed_project)
# Save how many objects were before the sweep
user_nr_was = User.all.length
@@ -104,15 +114,15 @@ class SysControllerTest < ActionController::TestCase
group_nr_was = Group.where('group_class<>?', 'project').length
project_nr_was = Group.where(group_class: 'project').length
cr_nr_was = ContainerRequest.all.length
- job_nr_was = Job.all.length
assert_not_empty Group.where(uuid: g_foo.uuid)
assert_not_empty Group.where(uuid: g_bar.uuid)
assert_not_empty Group.where(uuid: g_baz.uuid)
assert_not_empty Collection.where(uuid: col.uuid)
- assert_not_empty Job.where(uuid: job.uuid)
assert_not_empty ContainerRequest.where(uuid: cr.uuid)
authorize_with :admin
+ Group.find_by_uuid(g_foo.uuid).update!(delete_at: Time.now - 1.second)
+
post :trash_sweep
assert_response :success
@@ -120,16 +130,45 @@ class SysControllerTest < ActionController::TestCase
assert_empty Group.where(uuid: g_bar.uuid)
assert_empty Group.where(uuid: g_baz.uuid)
assert_empty Collection.where(uuid: col.uuid)
- assert_empty Job.where(uuid: job.uuid)
assert_empty ContainerRequest.where(uuid: cr.uuid)
# No unwanted deletions should have happened
assert_equal user_nr_was, User.all.length
assert_equal coll_nr_was-2, # collection_in_trashed_subproject
Collection.all.length # & deleted_on_next_sweep collections
- assert_equal group_nr_was, Group.where('group_class<>?', 'project').length
+ assert_equal group_nr_was-1, # trashed_role_on_next_sweep
+ Group.where('group_class<>?', 'project').length
assert_equal project_nr_was-3, Group.where(group_class: 'project').length
assert_equal cr_nr_was-1, ContainerRequest.all.length
- assert_equal job_nr_was-1, Job.all.length
end
+ test "trash_sweep - delete unused uuid_locks" do
+ uuid_active = "zzzzz-zzzzz-uuidlockstest11"
+ uuid_inactive = "zzzzz-zzzzz-uuidlockstest00"
+
+ ready = Queue.new
+ insertsql = "INSERT INTO uuid_locks (uuid) VALUES ($1) ON CONFLICT (uuid) do UPDATE SET n = uuid_locks.n+1"
+ url = ENV["DATABASE_URL"].sub(/\?.*/, '')
+ Thread.new do
+ conn = PG::Connection.new(url)
+ conn.exec_params(insertsql, [uuid_active])
+ conn.exec_params(insertsql, [uuid_inactive])
+ conn.transaction do |conn|
+ conn.exec_params(insertsql, [uuid_active])
+ ready << true
+ # If we keep this transaction open while trash_sweep runs, the
+ # uuid_active row shouldn't get deleted.
+ sleep 10
+ rescue
+ # Unblock main thread
+ ready << false
+ raise
+ end
+ end
+ assert_equal true, ready.pop
+ authorize_with :admin
+ post :trash_sweep
+ rows = ActiveRecord::Base.connection.exec_query("SELECT uuid FROM uuid_locks ORDER BY uuid", "", []).rows
+ assert_includes(rows, [uuid_active], "row with active lock (still held by thread) should not have been deleted")
+ refute_includes(rows, [uuid_inactive], "row with inactive lock should have been deleted")
+ end
end
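
For readers unfamiliar with the uuid_locks mechanism this test exercises, here is a minimal sketch (not the trash_sweep implementation itself) of the pattern, assuming only the uuid_locks(uuid, n) table used above: the upsert takes or re-takes a row lock, and keeping the surrounding transaction open marks the uuid as active so the sweep leaves that row in place.

require 'pg'

conn = PG::Connection.new(ENV["DATABASE_URL"].sub(/\?.*/, ''))
conn.transaction do |txn|
  # same upsert as the test: insert the row, or bump n if it already exists
  txn.exec_params(
    "INSERT INTO uuid_locks (uuid) VALUES ($1) " \
    "ON CONFLICT (uuid) DO UPDATE SET n = uuid_locks.n+1",
    ["zzzzz-zzzzz-uuidlockstest11"])
  # the row stays locked (and safe from the sweep) until this commits
end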
diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb
deleted file mode 100644
index cb30f68015..0000000000
--- a/services/api/test/helpers/git_test_helper.rb
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'fileutils'
-require 'tmpdir'
-
-# Commit log for "foo" repository in test.git.tar
-# main is the main branch
-# b1 is a branch off of main
-# tag1 is a tag
-#
-# 1de84a8 * b1
-# 077ba2a * main
-# 4fe459a * tag1
-# 31ce37f * foo
-
-module GitTestHelper
- def self.included base
- base.setup do
- # Extract the test repository data into the default test
- # environment's Rails.configuration.Git.Repositories. (We
- # don't use that config setting here, though: it doesn't seem
- # worth the risk of stepping on a real git repo root.)
- @tmpdir = Rails.root.join 'tmp', 'git'
- FileUtils.mkdir_p @tmpdir
- system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
- Rails.configuration.Git.Repositories = "#{@tmpdir}/test"
- Rails.configuration.Containers.JobsAPI.GitInternalDir = "#{@tmpdir}/internal.git"
- end
-
- base.teardown do
- FileUtils.remove_entry CommitsHelper.cache_dir_base, true
- FileUtils.mkdir_p @tmpdir
- system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
- end
- end
-
- def internal_tag tag
- IO.read "|git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
- end
-
- # Intercept fetch_remote_repository and fetch from a specified url
- # or local fixture instead of the remote url requested. fakeurl can
- # be a url (probably starting with file:///) or the name of a
- # fixture (as a symbol)
- def fetch_remote_from_local_repo url, fakeurl
- if fakeurl.is_a? Symbol
- fakeurl = 'file://' + repositories(fakeurl).server_path
- end
- CommitsHelper.expects(:fetch_remote_repository).once.with do |gitdir, giturl|
- if giturl == url
- CommitsHelper.unstub(:fetch_remote_repository)
- CommitsHelper.fetch_remote_repository gitdir, fakeurl
- true
- end
- end
- end
-end
diff --git a/services/api/test/helpers/users_test_helper.rb b/services/api/test/helpers/users_test_helper.rb
index e106d994cd..c4dc72d3ba 100644
--- a/services/api/test/helpers/users_test_helper.rb
+++ b/services/api/test/helpers/users_test_helper.rb
@@ -54,14 +54,10 @@ module UsersTestHelper
# these don't get added any more! they shouldn't appear ever.
assert !oid_login_perms.any?, "expected all oid_login_perms deleted"
+ # these don't get added any more! they shouldn't appear ever.
repo_perms = Link.where(tail_uuid: uuid,
- link_class: 'permission',
- name: 'can_manage').where("head_uuid like ?", Repository.uuid_like_pattern)
- if expect_repo_perms
- assert repo_perms.any?, "expected repo_perms"
- else
- assert !repo_perms.any?, "expected all repo_perms deleted"
- end
+ link_class: 'permission').where("head_uuid like ?", '_____-s0uqq-_______________')
+ assert !repo_perms.any?, "expected all repo_perms deleted"
vm_login_perms = Link.
where(tail_uuid: uuid,
diff --git a/services/api/test/integration/api_client_authorizations_api_test.rb b/services/api/test/integration/api_client_authorizations_api_test.rb
index 1b5c563962..09e0395097 100644
--- a/services/api/test/integration/api_client_authorizations_api_test.rb
+++ b/services/api/test/integration/api_client_authorizations_api_test.rb
@@ -12,19 +12,20 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
test "create system auth" do
post "/arvados/v1/api_client_authorizations/create_system_auth",
params: {:format => :json, :scopes => ['test'].to_json},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:admin_trustedclient).api_token}"}
assert_response :success
end
- [:admin_trustedclient, :SystemRootToken].each do |tk|
- test "create token for different user using #{tk}" do
- if tk == :SystemRootToken
- token = "xyzzy-SystemRootToken"
- Rails.configuration.SystemRootToken = token
- else
- token = api_client_authorizations(tk).api_token
- end
-
+ [
+ [true, :active, 403],
+ [true, :admin, 200],
+ [true, :system_user, 200],
+ [false, :active, 403],
+ [false, :admin, 403],
+ [false, :system_user, 200],
+ ].each do |issue_trusted_tokens, tk, expect_response|
+ test "create token for different user using #{tk} with IssueTrustedTokens=#{issue_trusted_tokens}" do
+ Rails.configuration.Login.IssueTrustedTokens = issue_trusted_tokens
post "/arvados/v1/api_client_authorizations",
params: {
:format => :json,
@@ -32,12 +33,14 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
:owner_uuid => users(:spectator).uuid
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
- assert_response :success
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(tk).api_token}"}
+
+ assert_response expect_response
+ return if expect_response >= 300
get "/arvados/v1/users/current",
params: {:format => :json},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{json_response['api_token']}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{json_response['api_token']}"}
@json_response = nil
assert_equal json_response['uuid'], users(:spectator).uuid
end
@@ -48,22 +51,10 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
Rails.configuration.SystemRootToken = token
get "/arvados/v1/users/current",
params: {:format => :json},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{token}"}
assert_equal json_response['uuid'], system_user_uuid
end
- test "refuse to create token for different user if not trusted client" do
- post "/arvados/v1/api_client_authorizations",
- params: {
- :format => :json,
- :api_client_authorization => {
- :owner_uuid => users(:spectator).uuid
- }
- },
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
- assert_response 403
- end
-
test "refuse to create token for different user if not admin" do
post "/arvados/v1/api_client_authorizations",
params: {
@@ -72,7 +63,7 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
:owner_uuid => users(:spectator).uuid
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active_trustedclient).api_token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:active_trustedclient).api_token}"}
assert_response 403
end
@@ -92,7 +83,7 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
:expires_at => desired_expiration,
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{token}"}
assert_response 200
expiration_t = json_response['expires_at'].to_time
if admin && desired_expiration
@@ -112,7 +103,7 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
:expires_at => desired_expiration
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{token}"}
assert_response 200
expiration_t = json_response['expires_at'].to_time
if admin && desired_expiration
diff --git a/services/api/test/integration/api_client_authorizations_scopes_test.rb b/services/api/test/integration/api_client_authorizations_scopes_test.rb
index 3b28a3163f..93e5b42a21 100644
--- a/services/api/test/integration/api_client_authorizations_scopes_test.rb
+++ b/services/api/test/integration/api_client_authorizations_scopes_test.rb
@@ -44,15 +44,15 @@ class ApiTokensScopeTest < ActionDispatch::IntegrationTest
assert_response 403
end
- test "specimens token can see exactly owned specimens" do
- get_args = {params: {}, headers: auth(:active_specimens)}
- get(v1_url('specimens'), **get_args)
+ test "collections token can see exactly owned collections" do
+ get_args = {params: {}, headers: auth(:active_all_collections)}
+ get(v1_url('collections'), **get_args)
assert_response 403
- get(v1_url('specimens', specimens(:owned_by_active_user).uuid), **get_args)
+ get(v1_url('collections', collections(:collection_owned_by_active).uuid), **get_args)
assert_response :success
- head(v1_url('specimens', specimens(:owned_by_active_user).uuid), **get_args)
+ head(v1_url('collections', collections(:collection_owned_by_active).uuid), **get_args)
assert_response :success
- get(v1_url('specimens', specimens(:owned_by_spectator).uuid), **get_args)
+ get(v1_url('collections', collections(:collection_owned_by_foo).uuid), **get_args)
assert_includes(403..404, @response.status)
end
@@ -70,7 +70,7 @@ class ApiTokensScopeTest < ActionDispatch::IntegrationTest
token_count = get_token_count
# Test the POST scope.
post(v1_url('api_client_authorizations'),
- params: {api_client_authorization: {user_id: users(:active).id}},
+ params: {api_client_authorization: {owner_uuid: users(:active).uuid}},
headers: auth(:active_apitokens))
assert_response :success
assert_equal(token_count + 1, get_token_count,
diff --git a/services/api/test/integration/bundler_version_test.rb b/services/api/test/integration/bundler_version_test.rb
new file mode 100644
index 0000000000..fb1634cf90
--- /dev/null
+++ b/services/api/test/integration/bundler_version_test.rb
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class BundlerVersionTest < ActionDispatch::IntegrationTest
+ test "Bundler version matches expectations" do
+ # The expected version range should be the latest that supports all the
+ # versions of Ruby we intend to support. This test checks that a developer
+ # doesn't accidentally update Bundler past that point.
+ expected = Gem::Dependency.new("", "~> 2.4.22")
+ actual = Bundler.gem_version
+ assert(
+ expected.match?("", actual),
+ "Bundler version #{actual} did not match #{expected}",
+ )
+ end
+end
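
As a usage note on the check above: constructing Gem::Dependency with an empty name makes match? ignore the gem name and test only the "~> 2.4.22" requirement, e.g.:

dep = Gem::Dependency.new("", "~> 2.4.22")
dep.match?("", Gem::Version.new("2.4.23"))  # => true, still within 2.4.x
dep.match?("", Gem::Version.new("2.5.0"))   # => false, past the pin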
diff --git a/services/api/test/integration/computed_permissions_test.rb b/services/api/test/integration/computed_permissions_test.rb
new file mode 100644
index 0000000000..7ae6ac02a4
--- /dev/null
+++ b/services/api/test/integration/computed_permissions_test.rb
@@ -0,0 +1,66 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ComputedPermissionsTest < ActionDispatch::IntegrationTest
+ include DbCurrentTime
+ fixtures :users, :groups, :api_client_authorizations, :collections
+
+ test "non-admin forbidden" do
+ get "/arvados/v1/computed_permissions",
+ params: {:format => :json},
+ headers: auth(:active)
+ assert_response 403
+ end
+
+ test "admin get permission for specified user" do
+ get "/arvados/v1/computed_permissions",
+ params: {
+ :format => :json,
+ :filters => [['user_uuid', '=', users(:active).uuid]].to_json,
+ },
+ headers: auth(:admin)
+ assert_response :success
+ assert_equal users(:active).uuid, json_response['items'][0]['user_uuid']
+ assert_nil json_response['count']
+ end
+
+ test "admin get implicit permission for specified user and target" do
+ get "/arvados/v1/computed_permissions",
+ params: {
+ :format => :json,
+ :filters => [
+ ['user_uuid', '=', users(:active).uuid],
+ ['target_uuid', '=', groups(:private).uuid],
+ ].to_json,
+ },
+ headers: auth(:admin)
+ assert_response :success
+ assert_equal 1, json_response['items'].length
+ assert_equal users(:active).uuid, json_response['items'][0]['user_uuid']
+ assert_equal groups(:private).uuid, json_response['items'][0]['target_uuid']
+ assert_equal 'can_manage', json_response['items'][0]['perm_level']
+ end
+
+ test "reject count=exact" do
+ get "/arvados/v1/computed_permissions",
+ params: {
+ :format => :json,
+ :count => 'exact',
+ },
+ headers: auth(:admin)
+ assert_response 422
+ end
+
+ test "reject offset>0" do
+ get "/arvados/v1/computed_permissions",
+ params: {
+ :format => :json,
+ :offset => 7,
+ },
+ headers: auth(:admin)
+ assert_response 422
+ end
+end
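
A hedged sketch, in the same integration-test idiom, of how an admin client is expected to consume this endpoint given the constraints asserted above (no count field in the response; count=exact and nonzero offsets rejected):

get "/arvados/v1/computed_permissions",
    params: {
      format: :json,
      filters: [["user_uuid", "=", users(:active).uuid]].to_json,
    },
    headers: auth(:admin)
json_response["items"].each do |perm|
  # each row pairs a user and a target with the effective permission level
  puts "#{perm['user_uuid']} #{perm['perm_level']} #{perm['target_uuid']}"
end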
diff --git a/services/api/test/integration/container_auth_test.rb b/services/api/test/integration/container_auth_test.rb
index 2a1ebabc0a..7352b93da6 100644
--- a/services/api/test/integration/container_auth_test.rb
+++ b/services/api/test/integration/container_auth_test.rb
@@ -31,7 +31,7 @@ class ContainerAuthTest < ActionDispatch::IntegrationTest
:format => :json,
:container => {:state => "Cancelled"}
},
- headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:dispatch1).token}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:system_user).token}"}
assert_response :success
get "/arvados/v1/containers/current",
params: {:format => :json},
diff --git a/services/api/test/integration/container_request_test.rb b/services/api/test/integration/container_request_test.rb
index 26cc081a65..245a992eb7 100644
--- a/services/api/test/integration/container_request_test.rb
+++ b/services/api/test/integration/container_request_test.rb
@@ -29,7 +29,7 @@ class ContainerRequestIntegrationTest < ActionDispatch::IntegrationTest
}
}.to_json,
headers: {
- 'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}",
+ 'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:active).api_token}",
'CONTENT_TYPE' => 'application/json'
}
assert_response :success
diff --git a/services/api/test/integration/credentials_test.rb b/services/api/test/integration/credentials_test.rb
new file mode 100644
index 0000000000..4d8ee6fc95
--- /dev/null
+++ b/services/api/test/integration/credentials_test.rb
@@ -0,0 +1,305 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CredentialsApiTest < ActionDispatch::IntegrationTest
+ fixtures :all
+
+ def credential_create_helper
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+2.weeks
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ json_response
+ end
+
+ test "credential create and query" do
+ jr = credential_create_helper
+
+ # fields other than secret are returned by the API
+ assert_equal "test credential", jr["name"]
+ assert_equal "the credential for test", jr["description"]
+ assert_equal "basic_auth", jr["credential_class"]
+ assert_equal "my_username", jr["external_id"]
+ assert_nil jr["secret"]
+
+ # secret is not returned by the API
+ get "/arvados/v1/credentials/#{jr['uuid']}", headers: auth(:active)
+ assert_response :success
+ jr = json_response
+ assert_equal "test credential", jr["name"]
+ assert_equal "the credential for test", jr["description"]
+ assert_equal "basic_auth", jr["credential_class"]
+ assert_equal "my_username", jr["external_id"]
+ assert_nil jr["secret"]
+
+ # can get credential from the database and it has the password
+ assert_equal "my_password", Credential.find_by_uuid(jr["uuid"]).secret
+
+ # secret cannot appear in queries
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :filters => [["secret", "=", "my_password"]].to_json,
+ },
+ headers: auth(:active)
+ assert_response 403
+ assert_match(/Cannot filter on 'secret'/, json_response["errors"][0])
+
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :where => {secret: "my_password"}.to_json
+ },
+ headers: auth(:active)
+ assert_response 403
+ assert_match(/Cannot use 'secret' in where clause/, json_response["errors"][0])
+
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :order => ["secret"].to_json
+ },
+ headers: auth(:active)
+ assert_response 403
+ assert_match(/Cannot order by 'secret'/, json_response["errors"][0])
+
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :where => {any: "my_password"}.to_json
+ },
+ headers: auth(:active)
+ assert_response 200
+ assert_equal [], json_response["items"]
+
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :filters => [["any", "=", "my_password"]].to_json
+ },
+ headers: auth(:active)
+ assert_response 200
+ assert_equal [], json_response["items"]
+
+ get "/arvados/v1/credentials",
+ params: {:format => :json,
+ :filters => [["any", "ilike", "my_pass%"]].to_json
+ },
+ headers: auth(:active)
+ assert_response 200
+ assert_equal [], json_response["items"]
+
+ end
+
+ test "credential fetch by container" do
+ jr = credential_create_helper
+
+ # cannot fetch secret using a regular token
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret", headers: auth(:active)
+ assert_response 403
+
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response :success
+ assert_equal "my_password", json_response["secret"]
+
+ lg = Log.where(object_uuid: jr['uuid'], event_type: "secret_access").first
+ assert_equal jr["name"], lg["properties"]["name"]
+ assert_equal jr["credential_class"], lg["properties"]["credential_class"]
+ assert_equal jr["external_id"], lg["properties"]["external_id"]
+ end
+
+ test "credential owned by admin" do
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+2.weeks
+ }
+ },
+ headers: auth(:admin),
+ as: :json
+ assert_response :success
+ jr = json_response
+
+ # cannot fetch secret using a regular token, even by admin
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret", headers: auth(:admin)
+ assert_response 403
+
+ # user 'active' can't see it
+ get "/arvados/v1/credentials/#{jr['uuid']}", headers: auth(:active)
+ assert_response 404
+
+ # a fetch by the container run by the 'active' user returns 404
+ # here, as in the previous check, because the credential itself
+ # isn't visible to that user
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response 404
+ end
+
+ test "credential sharing" do
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+2.weeks
+ }
+ },
+ headers: auth(:admin),
+ as: :json
+ assert_response :success
+ jr = json_response
+
+ # user 'active' can't see it
+ get "/arvados/v1/credentials/#{jr['uuid']}", headers: auth(:active)
+ assert_response 404
+
+ # a fetch by the container run by the 'active' user returns 404
+ # here, as in the previous check, because the credential itself
+ # isn't visible to that user
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response 404
+
+ # active user can't share
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:active).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: jr["uuid"],
+ properties: {}
+ }
+ },
+ headers: auth(:active)
+ assert_response 422
+
+ # admin can share
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:active).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: jr["uuid"],
+ properties: {}
+ }
+ },
+ headers: auth(:admin)
+ assert_response :success
+
+ # now the container run by the 'active' user can fetch the secret
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response :success
+ end
+
+ test "credential expiration" do
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+5.seconds
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ jr = json_response
+
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response :success
+ assert_equal "my_username", json_response["external_id"]
+ assert_equal "my_password", json_response["secret"]
+
+ assert_equal "my_password", Credential.find_by_uuid(jr["uuid"]).secret
+
+ Credential.where(uuid: jr["uuid"]).update_all(expires_at: Time.now)
+
+ get "/arvados/v1/credentials/#{jr['uuid']}/secret",
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+ assert_response 403
+ assert_match(/Credential has expired/, json_response["errors"][0])
+
+ post "/sys/trash_sweep",
+ headers: auth(:admin)
+ assert_response :success
+
+ assert_equal "", Credential.find_by_uuid(jr["uuid"]).secret
+ end
+
+ test "credential names are unique" do
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+2.weeks
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password",
+ expires_at: Time.now+2.weeks
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response 422
+ assert_match(/RecordNotUnique/, json_response["errors"][0])
+ end
+
+ test "credential expires_at must be set" do
+ post "/arvados/v1/credentials",
+ params: {:format => :json,
+ credential: {
+ name: "test credential",
+ description: "the credential for test",
+ credential_class: "basic_auth",
+ external_id: "my_username",
+ secret: "my_password"
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response 422
+ assert_match(/NotNullViolation/, json_response["errors"][0])
+ end
+end
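
A sketch of the access pattern these tests pin down, with credential_uuid standing in for a real credential's UUID: the secret endpoint honors only a container's runtime token (its API token with the container UUID appended), never a plain user or admin token.

token = api_client_authorizations(:running_container_auth).token
container_uuid = containers(:running).uuid
get "/arvados/v1/credentials/#{credential_uuid}/secret",  # credential_uuid is hypothetical
    headers: {"HTTP_AUTHORIZATION" => "Bearer #{token}/#{container_uuid}"}
assert_response :success
json_response["secret"]  # released only to the running container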
diff --git a/services/api/test/integration/database_reset_test.rb b/services/api/test/integration/database_reset_test.rb
index 7015453a9a..aa778dbf9f 100644
--- a/services/api/test/integration/database_reset_test.rb
+++ b/services/api/test/integration/database_reset_test.rb
@@ -31,22 +31,22 @@ class DatabaseResetTest < ActionDispatch::IntegrationTest
post '/database/reset', params: {}, headers: admin_auth
assert_response :success
- post '/arvados/v1/specimens', params: {specimen: '{}'}, headers: active_auth
+ post '/arvados/v1/collections', params: {collection: '{}'}, headers: active_auth
assert_response :success
new_uuid = json_response['uuid']
- get '/arvados/v1/specimens/'+new_uuid, params: {}, headers: active_auth
+ get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth
assert_response :success
- put('/arvados/v1/specimens/'+new_uuid,
- params: {specimen: '{"properties":{}}'},
+ put('/arvados/v1/collections/'+new_uuid,
+ params: {collection: '{"properties":{}}'},
headers: active_auth)
assert_response :success
- delete '/arvados/v1/specimens/'+new_uuid, params: {}, headers: active_auth
+ delete '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth
assert_response :success
- get '/arvados/v1/specimens/'+new_uuid, params: {}, headers: active_auth
+ get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth
assert_response 404
end
@@ -54,14 +54,14 @@ class DatabaseResetTest < ActionDispatch::IntegrationTest
active_auth = auth(:active)
admin_auth = auth(:admin)
- old_uuid = specimens(:owned_by_active_user).uuid
+ old_uuid = collections(:collection_owned_by_active).uuid
authorize_with :admin
post '/database/reset', params: {}, headers: admin_auth
assert_response :success
- delete '/arvados/v1/specimens/' + old_uuid, params: {}, headers: active_auth
+ delete '/arvados/v1/collections/' + old_uuid, params: {}, headers: active_auth
assert_response :success
- post '/arvados/v1/specimens', params: {specimen: '{}'}, headers: active_auth
+ post '/arvados/v1/collections', params: {collection: '{}'}, headers: active_auth
assert_response :success
new_uuid = json_response['uuid']
@@ -69,10 +69,10 @@ class DatabaseResetTest < ActionDispatch::IntegrationTest
post '/database/reset', params: {}, headers: admin_auth
assert_response :success
- # New specimen should disappear. Old specimen should reappear.
- get '/arvados/v1/specimens/'+new_uuid, params: {}, headers: active_auth
+ # New collection should disappear. Old collection should reappear.
+ get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth
assert_response 404
- get '/arvados/v1/specimens/'+old_uuid, params: {}, headers: active_auth
+ get '/arvados/v1/collections/'+old_uuid, params: {}, headers: active_auth
assert_response :success
end
end
diff --git a/services/api/test/integration/discovery_document_test.rb b/services/api/test/integration/discovery_document_test.rb
index 37e7750297..5785fee469 100644
--- a/services/api/test/integration/discovery_document_test.rb
+++ b/services/api/test/integration/discovery_document_test.rb
@@ -32,27 +32,63 @@ class DiscoveryDocumentTest < ActionDispatch::IntegrationTest
missing = canonical.select { |key| canonical[key].nil? }
assert(missing.empty?, "discovery document missing required fields")
actual_json = JSON.pretty_generate(canonical)
+ # Check committed copies of the discovery document that support code or
+ # documentation generation for other Arvados components.
+ bad_copies = [
+ "sdk/python/arvados-v1-discovery.json",
+ "sdk/R/arvados-v1-discovery.json",
+ ].filter_map do |rel_path|
+ src_path = Rails.root.join("..", "..", rel_path)
+ begin
+ expected_json = File.open(src_path) { |f| f.read }
+ rescue Errno::ENOENT
+ expected_json = "(#{src_path} not found)"
+ end
+ if expected_json == actual_json
+ nil
+ else
+ src_path
+ end
+ end.to_a
+ if bad_copies.any?
+ out_path = Rails.root.join("tmp", "test-arvados-v1-discovery.json")
+ File.open(out_path, "w") { |f| f.write(actual_json) }
+ end
+ assert_equal([], bad_copies,
+ "Live discovery document did not match the copies at:\n" +
+ bad_copies.map { |path| " * #{path}\n" }.join("") +
+ "If the live version is correct, copy it to these paths by running:\n" +
+ bad_copies.map { |path| " cp #{out_path} #{path}\n"}.join(""))
+ end
- # Currently the Python SDK is the only component using this copy of the
- # discovery document, and storing it with the source simplifies the build
- # process, so it lives there. If another component wants to use it later,
- # we might consider moving it to a more general subdirectory, but then the
- # Python build process will need to be extended to accommodate that.
- src_path = Rails.root.join("../../sdk/python/arvados-v1-discovery.json")
- begin
- expected_json = File.open(src_path) { |f| f.read }
- rescue Errno::ENOENT
- expected_json = "(#{src_path} not found)"
+ test "all methods have full descriptions" do
+ get "/discovery/v1/apis/arvados/v1/rest"
+ assert_response :success
+ missing = []
+ def missing.check(name, key, spec)
+ self << "#{name} #{key}" if spec[key].blank?
end
- out_path = Rails.root.join("tmp", "test-arvados-v1-discovery.json")
- if expected_json != actual_json
- File.open(out_path, "w") { |f| f.write(actual_json) }
+ Enumerator::Chain.new(
+ *json_response["resources"].map { |_, res| res["methods"].each_value }
+ ).each do |method|
+ method_name = method["id"]
+ missing.check(method_name, "description", method)
+ method["parameters"].andand.each_pair do |param_name, param|
+ missing.check("#{method_name} #{param_name} parameter", "description", param)
+ end
+ end
+
+ json_response["schemas"].each_pair do |schema_name, schema|
+ missing.check(schema_name, "description", schema)
+ schema["properties"].andand.each_pair do |prop_name, prop|
+ missing.check("#{schema_name} #{prop_name} property", "description", prop)
+ end
end
- assert_equal(expected_json, actual_json, [
- "#{src_path} did not match the live discovery document",
- "Current live version saved to #{out_path}",
- "Commit that to #{src_path} to regenerate documentation",
- ].join(". "))
+
+ assert_equal(
+ missing, [],
+ "named methods and schemas are missing documentation",
+ )
end
end
diff --git a/services/api/test/integration/groups_test.rb b/services/api/test/integration/groups_test.rb
index e76f2b5406..22a9ab8e76 100644
--- a/services/api/test/integration/groups_test.rb
+++ b/services/api/test/integration/groups_test.rb
@@ -140,7 +140,7 @@ class GroupsTest < ActionDispatch::IntegrationTest
test 'count none works with offset' do
first_results = nil
- (0..10).each do |offset|
+ (0..5).each do |offset|
get "/arvados/v1/groups/contents", params: {
id: groups(:aproject).uuid,
offset: offset,
@@ -152,11 +152,32 @@ class GroupsTest < ActionDispatch::IntegrationTest
assert_nil json_response['items_available']
if first_results.nil?
first_results = json_response['items']
+ # should get back at least two different kinds of objects, to
+ # test offset paging properly.
+ kinds = first_results.map { |i| i['kind'] }
+ assert_equal 2, kinds.uniq.length
else
assert_equal first_results[offset]['uuid'], json_response['items'][0]['uuid']
end
end
end
+
+ test "group contents with include=array" do
+ get "/arvados/v1/groups/contents",
+ params: {
+ filters: [["uuid", "is_a", "arvados#container_request"]].to_json,
+ include: ["container_uuid"].to_json,
+ select: ["uuid", "state"],
+ limit: 1000,
+ },
+ headers: auth(:active)
+ assert_response 200
+ incl = {}
+ json_response['included'].each { |i| incl[i['uuid']] = i }
+ json_response['items'].each do |c|
+ assert_not_nil incl[c['container_uuid']]['state']
+ end
+ end
end
class NonTransactionalGroupsTest < ActionDispatch::IntegrationTest
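
As a brief usage note on the include=array response shape asserted above: the requested associations arrive in a parallel 'included' array, which a client joins back to 'items' by UUID, e.g.:

incl = json_response["included"].to_h { |obj| [obj["uuid"], obj] }
json_response["items"].each do |cr|
  puts "#{cr['uuid']}: container state #{incl[cr['container_uuid']]['state']}"
end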
diff --git a/services/api/test/integration/jobs_api_test.rb b/services/api/test/integration/jobs_api_test.rb
deleted file mode 100644
index 76d4fff59e..0000000000
--- a/services/api/test/integration/jobs_api_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class JobsApiTest < ActionDispatch::IntegrationTest
-end
diff --git a/services/api/test/integration/logging_test.rb b/services/api/test/integration/logging_test.rb
index cbf9681d47..10dfe0eb23 100644
--- a/services/api/test/integration/logging_test.rb
+++ b/services/api/test/integration/logging_test.rb
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: AGPL-3.0
require 'stringio'
+require 'test_helper'
class LoggingTest < ActionDispatch::IntegrationTest
fixtures :collections
@@ -12,17 +13,14 @@ class LoggingTest < ActionDispatch::IntegrationTest
logcopy = ActiveSupport::Logger.new(buf)
logcopy.level = :info
begin
- Rails.logger.extend(ActiveSupport::Logger.broadcast(logcopy))
+ Rails.logger.broadcast_to(logcopy)
get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
params: {:format => :json},
headers: auth(:active).merge({ 'X-Request-Id' => 'req-aaaaaaaaaaaaaaaaaaaa' })
assert_response :success
assert_match /^{.*"request_id":"req-aaaaaaaaaaaaaaaaaaaa"/, buf.string
ensure
- # We don't seem to have an "unbroadcast" option, so this is how
- # we avoid filling buf with unlimited logs from subsequent
- # tests.
- logcopy.level = :fatal
+ Rails.logger.broadcasts.delete(logcopy)
end
end
end
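
For context, a minimal sketch of the Rails 7.1 BroadcastLogger API adopted above, which replaces the old ActiveSupport::Logger.broadcast extension (that API had no way to detach a destination):

buf = StringIO.new
logcopy = ActiveSupport::Logger.new(buf)
Rails.logger.broadcast_to(logcopy)   # copy log lines into buf as well
begin
  Rails.logger.info("reaches both the normal log and buf")
ensure
  Rails.logger.broadcasts.delete(logcopy)  # detach before the next test runs
end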
diff --git a/services/api/test/integration/login_workflow_test.rb b/services/api/test/integration/login_workflow_test.rb
index ba3b2ac6e3..7ad95ceebf 100644
--- a/services/api/test/integration/login_workflow_test.rb
+++ b/services/api/test/integration/login_workflow_test.rb
@@ -6,8 +6,8 @@ require 'test_helper'
class LoginWorkflowTest < ActionDispatch::IntegrationTest
test "default prompt to login is JSON" do
- post('/arvados/v1/specimens',
- params: {specimen: {}},
+ post('/arvados/v1/collections',
+ params: {collection: {}},
headers: {'HTTP_ACCEPT' => ''})
assert_response 401
json_response['errors'].each do |err|
@@ -16,8 +16,8 @@ class LoginWorkflowTest < ActionDispatch::IntegrationTest
end
test "login prompt respects JSON Accept header" do
- post('/arvados/v1/specimens',
- params: {specimen: {}},
+ post('/arvados/v1/collections',
+ params: {collection: {}},
headers: {'HTTP_ACCEPT' => 'application/json'})
assert_response 401
json_response['errors'].each do |err|
@@ -26,8 +26,8 @@ class LoginWorkflowTest < ActionDispatch::IntegrationTest
end
test "login prompt respects HTML Accept header" do
- post('/arvados/v1/specimens',
- params: {specimen: {}},
+ post('/arvados/v1/collections',
+ params: {collection: {}},
headers: {'HTTP_ACCEPT' => 'text/html'})
assert_response 302
assert_match(%r{http://www.example.com/login$}, @response.headers['Location'],
diff --git a/services/api/test/integration/noop_deep_munge_test.rb b/services/api/test/integration/noop_deep_munge_test.rb
index a94898ba22..822e38e610 100644
--- a/services/api/test/integration/noop_deep_munge_test.rb
+++ b/services/api/test/integration/noop_deep_munge_test.rb
@@ -37,7 +37,7 @@ class NoopDeepMungeTest < ActionDispatch::IntegrationTest
}
}.to_json,
headers: {
- 'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}",
+ 'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:admin).api_token}",
'CONTENT_TYPE' => 'application/json'
}
assert_response :success
diff --git a/services/api/test/integration/passenger_config_test.rb b/services/api/test/integration/passenger_config_test.rb
new file mode 100644
index 0000000000..94627830d0
--- /dev/null
+++ b/services/api/test/integration/passenger_config_test.rb
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PassengerConfigTest < ActionDispatch::IntegrationTest
+ def setup
+ super
+ @passenger_config ||= File.open(Rails.root.join("Passengerfile.json")) do |f|
+ JSON.parse(f.read)
+ end
+ end
+
+ test "Passenger disables exception extension gems" do
+ # For security, consistency, and performance reasons, we do not want these
+ # gems to extend exception messages included in API error responses.
+ begin
+ rubyopt = @passenger_config["envvars"]["RUBYOPT"].split
+ rescue NoMethodError, TypeError
+ rubyopt = [""]
+ end
+ assert_includes(rubyopt, "--disable-did_you_mean")
+ assert_includes(rubyopt, "--disable-error_highlight")
+ assert_includes(rubyopt, "--disable-syntax_suggest")
+ end
+end
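
A hypothetical Passengerfile.json stanza that would satisfy this test, shown as the equivalent Ruby hash (the real file may set other keys too):

passenger_config = {
  "envvars" => {
    # keep exception-extension gems out of API error responses
    "RUBYOPT" => "--disable-did_you_mean --disable-error_highlight --disable-syntax_suggest",
  },
}
passenger_config["envvars"]["RUBYOPT"].split
# => ["--disable-did_you_mean", "--disable-error_highlight", "--disable-syntax_suggest"]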
diff --git a/services/api/test/integration/permissions_test.rb b/services/api/test/integration/permissions_test.rb
index d2dce44f01..a2cfbb6a19 100644
--- a/services/api/test/integration/permissions_test.rb
+++ b/services/api/test/integration/permissions_test.rb
@@ -273,6 +273,119 @@ class PermissionsTest < ActionDispatch::IntegrationTest
assert_response 404
end
+ test "adding can_read links from group to collection, user to group, then trash group" do
+ # try to read collection as spectator
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response 404
+
+ # add permission for group to read collection
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: groups(:private_role).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: collections(:foo_file).uuid,
+ properties: {}
+ }
+ },
+ headers: auth(:admin)
+ assert_response :success
+
+ # try to read collection as spectator
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response 404
+
+ # add permission for spectator to read group
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:spectator).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: groups(:private_role).uuid,
+ properties: {}
+ }
+ },
+ headers: auth(:admin)
+ u = json_response['uuid']
+ assert_response :success
+
+ # try to read collection as spectator
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response :success
+
+ # put the group in the trash; this keeps the group members
+ # but deletes the permissions.
+ post "/arvados/v1/groups/#{groups(:private_role).uuid}/trash",
+ params: {:format => :json},
+ headers: auth(:admin)
+ assert_response :success
+
+ # try to read collection as spectator, should fail now
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response 404
+
+ # should not be able to grant permission to a trashed group
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: groups(:private_role).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: collections(:foo_file).uuid,
+ properties: {}
+ }
+ },
+ headers: auth(:admin)
+ assert_response 422
+
+ # can't take group out of the trash
+ post "/arvados/v1/groups/#{groups(:private_role).uuid}/untrash",
+ params: {:format => :json},
+ headers: auth(:admin)
+ assert_response 422
+
+ # the permissions deleted when the role group was trashed
+ # don't come back automatically
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response 404
+
+ # can't add permission for group to read collection either
+ post "/arvados/v1/links",
+ params: {
+ :format => :json,
+ :link => {
+ tail_uuid: groups(:private_role).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: collections(:foo_file).uuid,
+ properties: {}
+ }
+ },
+ headers: auth(:admin)
+ assert_response 422
+
+ # still can't read foo file
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}",
+ params: {:format => :json},
+ headers: auth(:spectator)
+ assert_response 404
+ end
+
test "read-only group-admin cannot modify administered user" do
put "/arvados/v1/users/#{users(:active).uuid}",
params: {
@@ -302,26 +415,29 @@ class PermissionsTest < ActionDispatch::IntegrationTest
assert_response 404
end
- test "RO group-admin finds user's specimens, RW group-admin can update" do
+ test "RO group-admin finds user's collections, RW group-admin can update" do
+ other_user_collection = act_as_user(users(:user_foo_in_sharing_group)) do
+ Collection.create()
+ end
[[:rominiadmin, false],
[:miniadmin, true]].each do |which_user, update_should_succeed|
- get "/arvados/v1/specimens",
+ get "/arvados/v1/collections",
params: {:format => :json},
headers: auth(which_user)
assert_response :success
resp_uuids = json_response['items'].collect { |i| i['uuid'] }
- [[true, specimens(:owned_by_active_user).uuid],
- [true, specimens(:owned_by_private_group).uuid],
- [false, specimens(:owned_by_spectator).uuid],
+ [[true, collections(:collection_owned_by_active).uuid],
+ [true, collections(:foo_collection_in_aproject).uuid],
+ [false, other_user_collection.uuid],
].each do |should_find, uuid|
assert_equal(should_find, !resp_uuids.index(uuid).nil?,
- "%s should%s see %s in specimen list" %
+ "%s should%s see %s in collection list" %
[which_user.to_s,
- should_find ? '' : 'not ',
+ should_find ? '' : ' not',
uuid])
- put "/arvados/v1/specimens/#{uuid}",
+ put "/arvados/v1/collections/#{uuid}",
params: {
- :specimen => {
+ :collection => {
properties: {
miniadmin_was_here: true
}
diff --git a/services/api/test/integration/reader_tokens_test.rb b/services/api/test/integration/reader_tokens_test.rb
index e8e8c910c7..891bffbb1d 100644
--- a/services/api/test/integration/reader_tokens_test.rb
+++ b/services/api/test/integration/reader_tokens_test.rb
@@ -7,20 +7,20 @@ require 'test_helper'
class ReaderTokensTest < ActionDispatch::IntegrationTest
fixtures :all
- def spectator_specimen
- specimens(:owned_by_spectator).uuid
+ def owned_by_foo
+ collections(:collection_owned_by_foo).uuid
end
- def get_specimens(main_auth, read_auth, formatter=:to_a)
+ def get_collections(main_auth, read_auth, formatter=:to_a)
params = {}
params[:reader_tokens] = [api_token(read_auth)].send(formatter) if read_auth
headers = {}
headers.merge!(auth(main_auth)) if main_auth
- get('/arvados/v1/specimens', params: params, headers: headers)
+ get('/arvados/v1/collections', params: params, headers: headers)
end
- def get_specimen_uuids(main_auth, read_auth, formatter=:to_a)
- get_specimens(main_auth, read_auth, formatter)
+ def get_collection_uuids(main_auth, read_auth, formatter=:to_a)
+ get_collections(main_auth, read_auth, formatter)
assert_response :success
json_response['items'].map { |spec| spec['uuid'] }
end
@@ -33,26 +33,26 @@ class ReaderTokensTest < ActionDispatch::IntegrationTest
headers = {}
expected = 401
end
- post('/arvados/v1/specimens.json',
- params: {specimen: {}, reader_tokens: [api_token(read_auth)].send(formatter)},
+ post('/arvados/v1/collections.json',
+ params: {collection: {}, reader_tokens: [api_token(read_auth)].send(formatter)},
headers: headers)
assert_response expected
end
- test "active user can't see spectator specimen" do
+ test "active user can't see foo-owned collection" do
# Other tests in this suite assume that the active user doesn't
- # have read permission to the owned_by_spectator specimen.
+ # have read permission to the owned_by_foo collection.
# This test checks that this assumption still holds.
- refute_includes(get_specimen_uuids(:active, nil), spectator_specimen,
- ["active user can read the owned_by_spectator specimen",
+ refute_includes(get_collection_uuids(:active, nil), owned_by_foo,
+ ["active user can read the owned_by_foo collection",
"other tests will return false positives"].join(" - "))
end
[nil, :active_noscope].each do |main_auth|
- [:spectator, :spectator_specimens].each do |read_auth|
+ [:foo, :foo_collections].each do |read_auth|
[:to_a, :to_json].each do |formatter|
test "#{main_auth.inspect} auth with #{formatter} reader token #{read_auth} can#{"'t" if main_auth} read" do
- get_specimens(main_auth, read_auth)
+ get_collections(main_auth, read_auth)
assert_response(if main_auth then 403 else 200 end)
end
@@ -65,18 +65,18 @@ class ReaderTokensTest < ActionDispatch::IntegrationTest
test "scopes are still limited with reader tokens" do
get('/arvados/v1/collections',
- params: {reader_tokens: [api_token(:spectator_specimens)]},
+ params: {reader_tokens: [api_token(:foo_collections)]},
headers: auth(:active_noscope))
assert_response 403
end
test "reader tokens grant no permissions when expired" do
- get_specimens(:active_noscope, :expired)
+ get_collections(:active_noscope, :expired)
assert_response 403
end
test "reader tokens grant no permissions outside their scope" do
- refute_includes(get_specimen_uuids(:active, :admin_vm), spectator_specimen,
+ refute_includes(get_collection_uuids(:active, :admin_vm), owned_by_foo,
"scoped reader token granted permissions out of scope")
end
end
diff --git a/services/api/test/integration/remote_user_test.rb b/services/api/test/integration/remote_user_test.rb
index 98250a6242..d3aa8c94ea 100644
--- a/services/api/test/integration/remote_user_test.rb
+++ b/services/api/test/integration/remote_user_test.rb
@@ -77,6 +77,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
body = {
uuid: @stub_token_uuid || api_client_authorizations(:active).uuid.sub('zzzzz', clusterid),
owner_uuid: "#{clusterid}-tpzed-00000000000000z",
+ expires_at: '2067-07-01T00:00:00.000000000Z',
scopes: @stub_token_scopes,
}
if @stub_content.is_a?(Hash) and owner_uuid = @stub_content[:uuid]
@@ -124,7 +125,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
else
tokens = ApiClientAuthorization.where("uuid like ?", "#{src}-%")
end
- tokens.update_all(expires_at: "1995-05-15T01:02:03Z")
+ tokens.update_all(refreshes_at: "1995-05-15T01:02:03Z")
end
test 'authenticate with remote token that has limited scope' do
@@ -135,14 +136,14 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
@stub_token_scopes = ["GET /arvados/v1/users/current"]
- # re-authorize before cache expires
+ # re-authorize before cache refresh time arrives
get '/arvados/v1/collections',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
assert_response :success
uncache_token('zbbbb')
- # re-authorize after cache expires
+ # re-authorize after cache refresh time arrives
get '/arvados/v1/collections',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
@@ -157,6 +158,19 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_response :success
end
+ test 'expires_at is from remote cluster, refreshes_at reflects RemoteTokenRefresh' do
+ 2.times do
+ get '/arvados/v1/api_client_authorizations/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+ assert_equal '2067-07-01T00:00:00.000000000Z', json_response['expires_at']
+ got_refresh = ApiClientAuthorization.find_by_uuid(json_response['uuid']).refreshes_at
+ expect_refresh = (db_current_time + Rails.configuration.Login.RemoteTokenRefresh).to_datetime
+ assert_operator (got_refresh - expect_refresh).to_f.abs, :<, 1.second.to_f
+ end
+ end
+
test 'authenticate with remote token' do
get '/arvados/v1/users/current',
params: {format: 'json'},
@@ -171,14 +185,14 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
# revoke original token
@stub_token_status = 401
- # re-authorize before cache expires
+ # re-authorize before cache refresh time arrives
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
assert_response :success
uncache_token('zbbbb')
- # re-authorize after cache expires
+ # re-authorize after cache refresh time arrives
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
@@ -216,7 +230,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
@stub_content[:is_invited] = false
uncache_token('zbbbb')
- # re-authorize after cache expires
+ # re-authorize after cache refresh time arrives
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
@@ -252,10 +266,11 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
headers: auth(remote: 'zbbbb')
assert_response :success
- # Expire the cached token.
+ # Update refreshes_at to a time in the past, to induce a re-fetch
+ # from the stub cluster.
@cached_token_uuid = json_response['uuid']
act_as_system_user do
- ApiClientAuthorization.where(uuid: @cached_token_uuid).update_all(expires_at: db_current_time() - 1.day)
+ ApiClientAuthorization.where(uuid: @cached_token_uuid).update_all(refreshes_at: db_current_time() - 1.day)
end
# Now use the same bare token, but set up the remote cluster to
@@ -406,6 +421,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
Rails.configuration.Login.LoginCluster = 'zbbbb'
email_dest = ActiveSupport::OrderedOptions.new
email_dest[:'arvados-admin@example.com'] = ActiveSupport::OrderedOptions.new
+ Rails.configuration.Users.SendUserSetupNotificationEmail = true
Rails.configuration.Users.UserNotifierEmailBcc = email_dest
Rails.configuration.Users.NewUserNotificationRecipients = email_dest
Rails.configuration.Users.NewInactiveUserNotificationRecipients = email_dest
@@ -540,7 +556,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
"is_admin" => false
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(:admin)}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_token(:admin)}"}
assert_response :success
get '/arvados/v1/users/current',
@@ -586,12 +602,23 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal(users(:active).uuid, json_response['uuid'])
end
- test 'container request with runtime_token' do
- [["valid local", "v2/#{api_client_authorizations(:active).uuid}/#{api_client_authorizations(:active).api_token}"],
- ["valid remote", "v2/zbbbb-gj3su-000000000000000/abc"],
- ["invalid local", "v2/#{api_client_authorizations(:active).uuid}/fakefakefake"],
- ["invalid remote", "v2/zbork-gj3su-000000000000000/abc"],
- ].each do |label, runtime_token|
+ [["valid local", :active, nil],
+ ["valid remote", "zbbbb-gj3su-000000000000000", nil],
+ ["invalid local", :active, "fakeactivetoken"],
+ ["invalid remote", "zbork-gj3su-000000000000000", nil],
+ ].each do |label, auth_uuid, auth_token|
+ test "container request with #{label} runtime_token" do
+ case auth_uuid
+ when Symbol
+ aca = api_client_authorizations(auth_uuid)
+ auth_uuid = aca.uuid
+ auth_token ||= aca.api_token
+ when String
+ auth_token ||= "fakeremotetoken"
+ else
+ flunk "test case uses an unsupported auth identifier: #{auth_uuid}"
+ end
+ runtime_token = "v2/#{auth_uuid}/#{auth_token}"
post '/arvados/v1/container_requests',
params: {
"container_request" => {
@@ -603,7 +630,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
}
},
headers: {"HTTP_AUTHORIZATION" => "Bearer #{api_client_authorizations(:active).api_token}"}
- if label.include? "invalid"
+ if label.start_with? "invalid"
assert_response 422
else
assert_response :success
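
A sketch of the bookkeeping the refreshes_at assertions above describe, with cached_uuid standing in for a cached remote token's UUID: expires_at is whatever the remote cluster reported, while refreshes_at is a purely local revalidation deadline.

auth = ApiClientAuthorization.find_by_uuid(cached_uuid)  # cached_uuid is hypothetical
auth.expires_at    # copied from the remote cluster's token record
auth.refreshes_at  # roughly db_current_time + Rails.configuration.Login.RemoteTokenRefresh
# force an early re-validation against the remote cluster:
ApiClientAuthorization.where(uuid: cached_uuid).
  update_all(refreshes_at: db_current_time - 1.day)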
diff --git a/services/api/test/integration/serialized_encoding_test.rb b/services/api/test/integration/serialized_encoding_test.rb
index f41c033b39..bf2d0062f2 100644
--- a/services/api/test/integration/serialized_encoding_test.rb
+++ b/services/api/test/integration/serialized_encoding_test.rb
@@ -3,26 +3,13 @@
# SPDX-License-Identifier: AGPL-3.0
require 'test_helper'
-require 'helpers/git_test_helper'
class SerializedEncodingTest < ActionDispatch::IntegrationTest
- include GitTestHelper
-
fixtures :all
{
api_client_authorization: {scopes: []},
-
- human: {properties: {eye_color: 'gray'}},
-
link: {link_class: 'test', name: 'test', properties: {foo: :bar}},
-
- node: {info: {uptime: 1234}},
-
- specimen: {properties: {eye_color: 'meringue'}},
-
- trait: {properties: {eye_color: 'brown'}},
-
user: {prefs: {cookies: 'thin mint'}},
}.each_pair do |resource, postdata|
test "create json-encoded #{resource.to_s}" do
diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb
index eb49cf832e..2b37454218 100644
--- a/services/api/test/integration/user_sessions_test.rb
+++ b/services/api/test/integration/user_sessions_test.rb
@@ -87,22 +87,20 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
# Test various combinations of auto_setup configuration and email
# address provided during a new user's first session setup.
- [{result: :nope, email: nil, cfg: {auto: true, repo: true, vm: true}},
+ [{result: :nope, email: nil, cfg: {auto: true, vm: true}},
{result: :yup, email: nil, cfg: {auto: true}},
- {result: :nope, email: '@example.com', cfg: {auto: true, repo: true, vm: true}},
+ {result: :nope, email: '@example.com', cfg: {auto: true, vm: true}},
{result: :yup, email: '@example.com', cfg: {auto: true}},
- {result: :nope, email: 'root@', cfg: {auto: true, repo: true, vm: true}},
- {result: :nope, email: 'root@', cfg: {auto: true, repo: true}},
{result: :nope, email: 'root@', cfg: {auto: true, vm: true}},
- {result: :yup, email: 'root@', cfg: {auto: true}},
- {result: :nope, email: 'gitolite@', cfg: {auto: true, repo: true}},
+ {result: :nope, email: 'root@', cfg: {auto: true}},
+ {result: :nope, email: 'gitolite@', cfg: {auto: true}},
{result: :nope, email: '*_*@', cfg: {auto: true, vm: true}},
- {result: :yup, email: 'toor@', cfg: {auto: true, vm: true, repo: true}},
+ {result: :yup, email: 'toor@', cfg: {auto: true, vm: true}},
{result: :yup, email: 'foo@', cfg: {auto: true, vm: true},
uniqprefix: 'foo'},
- {result: :yup, email: 'foo@', cfg: {auto: true, repo: true},
+ {result: :yup, email: 'foo@', cfg: {auto: true},
uniqprefix: 'foo'},
- {result: :yup, email: 'auto_setup_vm_login@', cfg: {auto: true, repo: true},
+ {result: :yup, email: 'auto_setup_vm_login@', cfg: {auto: true},
uniqprefix: 'auto_setup_vm_login'},
].each do |testcase|
test "user auto-activate #{testcase.inspect}" do
@@ -111,23 +109,16 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
Rails.configuration.Users.AutoSetupNewUsers = testcase[:cfg][:auto]
Rails.configuration.Users.AutoSetupNewUsersWithVmUUID =
(testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : "")
- Rails.configuration.Users.AutoSetupNewUsersWithRepository =
- testcase[:cfg][:repo]
mock_auth_with(email: testcase[:email])
u = assigns(:user)
vm_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
'permission', u.uuid,
'%-' + VirtualMachine.uuid_prefix + '-%')
- repo_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
- 'permission', u.uuid,
- '%-' + Repository.uuid_prefix + '-%')
- repos = Repository.where('uuid in (?)', repo_links.collect(&:head_uuid))
case u[:result]
when :nope
assert_equal false, u.is_invited, "should not have been set up"
assert_empty vm_links, "should not have VM login permission"
- assert_empty repo_links, "should not have repo permission"
when :yup
assert_equal true, u.is_invited
if testcase[:cfg][:vm]
@@ -135,21 +126,11 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
else
assert_empty vm_links, "should not have VM login permission"
end
- if testcase[:cfg][:repo]
- assert_equal 1, repo_links.count, "wrong number of repo perm links"
- assert_equal 1, repos.count, "wrong number of repos"
- assert_equal 'can_manage', repo_links.first.name, "wrong perm type"
- else
- assert_empty repo_links, "should not have repo permission"
- end
end
if (prefix = testcase[:uniqprefix])
# This email address conflicts with a test fixture. Make sure
- # every VM login and repository name got digits added to make
- # it unique.
- (repos.collect(&:name) +
- vm_links.collect { |link| link.properties['username'] }
- ).each do |name|
+ # every VM login got digits added to make it unique.
+ vm_links.collect { |link| link.properties['username'] }.each do |name|
r = name.match(/^(.{#{prefix.length}})(\d+)$/)
assert_not_nil r, "#{name.inspect} does not match {prefix}\\d+"
assert_equal(prefix, r[1],
diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
index f8956b21e2..cbe651ceb6 100644
--- a/services/api/test/integration/users_test.rb
+++ b/services/api/test/integration/users_test.rb
@@ -9,11 +9,8 @@ class UsersTest < ActionDispatch::IntegrationTest
include UsersTestHelper
test "setup user multiple times" do
- repo_name = 'usertestrepo'
-
post "/arvados/v1/users/setup",
params: {
- repo_name: repo_name,
user: {
uuid: 'zzzzz-tpzed-abcdefghijklmno',
first_name: "in_create_test_first_name",
@@ -35,10 +32,7 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # repo link and link add user to 'All users' group
-
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+ # link to add user to 'All users' group
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
@@ -51,7 +45,6 @@ class UsersTest < ActionDispatch::IntegrationTest
# invoke setup again with the same data
post "/arvados/v1/users/setup",
params: {
- repo_name: repo_name,
vm_uuid: virtual_machines(:testvm).uuid,
user: {
uuid: 'zzzzz-tpzed-abcdefghijklmno',
@@ -66,7 +59,6 @@ class UsersTest < ActionDispatch::IntegrationTest
# invoke setup on the same user
post "/arvados/v1/users/setup",
params: {
- repo_name: repo_name,
vm_uuid: virtual_machines(:testvm).uuid,
uuid: 'zzzzz-tpzed-abcdefghijklmno',
},
@@ -81,10 +73,7 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # arvados#user, repo link and link add user to 'All users' group
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
-
+ # arvados#user, and link to add user to 'All users' group
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
@@ -119,31 +108,6 @@ class UsersTest < ActionDispatch::IntegrationTest
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
- # invoke setup with a repository
- post "/arvados/v1/users/setup",
- params: {
- repo_name: 'newusertestrepo',
- uuid: created['uuid']
- },
- headers: auth(:admin)
-
- assert_response :success
-
- response_items = json_response['items']
- created = find_obj_in_resp response_items, 'arvados#user', nil
-
- assert_equal 'foo@example.com', created['email'], 'expected input email'
-
- # verify links
- verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
- 'All users', created['uuid'], 'arvados#group', true, 'Group'
-
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/newusertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
-
- verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
- nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
-
# invoke setup with a vm_uuid
post "/arvados/v1/users/setup",
params: {
@@ -173,7 +137,6 @@ class UsersTest < ActionDispatch::IntegrationTest
test "setup and unsetup user" do
post "/arvados/v1/users/setup",
params: {
- repo_name: 'newusertestrepo',
vm_uuid: virtual_machines(:testvm).uuid,
user: {email: 'foo@example.com'},
},
@@ -185,14 +148,11 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_not_nil created['uuid'], 'expected uuid for the new user'
assert_equal created['email'], 'foo@example.com', 'expected given email'
- # four extra links: system_group, login, group, repo and vm
+ # three extra links: system_group, login, group and vm
verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
- verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
- 'foo/newusertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
-
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
@@ -200,7 +160,7 @@ class UsersTest < ActionDispatch::IntegrationTest
# create a token
token = act_as_system_user do
- ApiClientAuthorization.create!(user: User.find_by_uuid(created['uuid']), api_client: ApiClient.all.first).api_token
+ ApiClientAuthorization.create!(user: User.find_by_uuid(created['uuid'])).api_token
end
# share project and collections with the new user
@@ -276,13 +236,6 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
assert_equal(users(:project_viewer).uuid, json_response['authorized_user_uuid'])
- get('/arvados/v1/repositories/' + repositories(:foo).uuid,
- params: {},
- headers: auth(:active))
- assert_response(:success)
- assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
- assert_equal("#{users(:project_viewer).username}/foo", json_response['name'])
-
get('/arvados/v1/groups/' + groups(:aproject).uuid,
params: {},
headers: auth(:active))
@@ -299,7 +252,7 @@ class UsersTest < ActionDispatch::IntegrationTest
"username" => "barney"
}
},
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(:admin)}"}
+ headers: {'HTTP_AUTHORIZATION' => "Bearer #{api_token(:admin)}"}
assert_response :success
rp = json_response
assert_not_nil rp["uuid"]
@@ -317,41 +270,6 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_equal 'barney', json_response['username']
end
- test 'merge with repository name conflict' do
- post('/arvados/v1/groups',
- params: {
- group: {
- group_class: 'project',
- name: "active user's stuff",
- },
- },
- headers: auth(:project_viewer))
- assert_response(:success)
- project_uuid = json_response['uuid']
-
- post('/arvados/v1/repositories/',
- params: { :repository => { :name => "#{users(:project_viewer).username}/foo", :owner_uuid => users(:project_viewer).uuid } },
- headers: auth(:project_viewer))
- assert_response(:success)
-
- post('/arvados/v1/users/merge',
- params: {
- new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,
- new_owner_uuid: project_uuid,
- redirect_to_new_user: true,
- },
- headers: auth(:active_trustedclient))
- assert_response(:success)
-
- get('/arvados/v1/repositories/' + repositories(:foo).uuid,
- params: {},
- headers: auth(:active))
- assert_response(:success)
- assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
- assert_equal("#{users(:project_viewer).username}/migratedfoo", json_response['name'])
-
- end
-
test "cannot set is_active to false directly" do
post('/arvados/v1/users',
params: {
@@ -366,7 +284,7 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_equal false, user['is_active']
token = act_as_system_user do
- ApiClientAuthorization.create!(user: User.find_by_uuid(user['uuid']), api_client: ApiClient.all.first).api_token
+ ApiClientAuthorization.create!(user: User.find_by_uuid(user['uuid'])).api_token
end
post("/arvados/v1/user_agreements/sign",
params: {uuid: 'zzzzz-4zz18-t68oksiu9m80s4y'},
@@ -396,7 +314,7 @@ class UsersTest < ActionDispatch::IntegrationTest
token = nil
act_as_system_user do
user = User.create!(email: "bob@example.com", username: "bobby")
- ap = ApiClientAuthorization.create!(user: user, api_client: ApiClient.all.first)
+ ap = ApiClientAuthorization.create!(user: user)
token = ap.api_token
end
@@ -423,7 +341,7 @@ class UsersTest < ActionDispatch::IntegrationTest
token = nil
act_as_system_user do
user = User.create!(email: "bob@example.com", username: "bobby")
- ap = ApiClientAuthorization.create!(user: user, api_client_id: 0)
+ ap = ApiClientAuthorization.create!(user: user)
token = ap.api_token
end
@@ -462,7 +380,7 @@ class UsersTest < ActionDispatch::IntegrationTest
# Need to get a new token, the old one was invalidated by the unsetup call
act_as_system_user do
- ap = ApiClientAuthorization.create!(user: user, api_client_id: 0)
+ ap = ApiClientAuthorization.create!(user: user)
token = ap.api_token
end
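
Several hunks in this section also move request headers from the legacy OAuth2 authorization scheme to the standard Bearer scheme; the Arvados token itself is unchanged, only the scheme word differs. A hedged sketch (token value hypothetical):

    token = "v2/zzzzz-gj3su-000000000000000/examplesecret"  # hypothetical

    # Legacy form being removed throughout these tests:
    legacy_headers = {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
    # Standard form the tests now use:
    bearer_headers = {'HTTP_AUTHORIZATION' => "Bearer #{token}"}
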
diff --git a/services/api/test/integration/valid_links_test.rb b/services/api/test/integration/valid_links_test.rb
index 1a98a65744..4873b8a9c3 100644
--- a/services/api/test/integration/valid_links_test.rb
+++ b/services/api/test/integration/valid_links_test.rb
@@ -8,7 +8,7 @@ class ValidLinksTest < ActionDispatch::IntegrationTest
fixtures :all
test "tail must exist on update" do
- admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+ admin_auth = {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:admin).api_token}"}
post "/arvados/v1/links",
params: {
diff --git a/services/api/test/integration/workflows_test.rb b/services/api/test/integration/workflows_test.rb
new file mode 100644
index 0000000000..3785f54e83
--- /dev/null
+++ b/services/api/test/integration/workflows_test.rb
@@ -0,0 +1,498 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class WorkflowsApiTest < ActionDispatch::IntegrationTest
+ fixtures :all
+
+ def create_workflow_collection_helper
+ post "/arvados/v1/collections",
+ params: {:format => :json,
+ collection: {
+ name: "test workflow",
+ description: "the workflow that tests linking collection and workflow records",
+ properties: {
+ "type": "workflow",
+ "arv:workflowMain": "foo.cwl",
+ "arv:cwl_inputs": [{
+ "id": "#main/x",
+ "type": "int",
+ }
+ ],
+ "arv:cwl_outputs": [{
+ "id": "#main/y",
+ "type": "File",
+ }],
+ "arv:cwl_requirements": [
+ ],
+ "arv:cwl_hints": [
+ ],
+ }
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ json_response
+ end
+
+ test "link a workflow to a collection" do
+
+ collection_response = create_workflow_collection_helper
+ assert_equal(collection_response["name"], "test workflow")
+ assert_equal(collection_response["description"], "the workflow that tests linking collection and workflow records")
+ assert_equal(collection_response["owner_uuid"], users(:active).uuid)
+
+ # Now create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+ assert_equal(collection_response["name"], workflow_response["name"])
+ assert_equal(collection_response["description"], workflow_response["description"])
+ assert_equal(collection_response["owner_uuid"], workflow_response["owner_uuid"])
+ assert_equal({"cwlVersion"=>"v1.2",
+ "$graph"=>[
+ {"class"=>"Workflow",
+ "id"=>"#main",
+ "inputs"=>[{"id"=>"#main/x", "type"=>"int"}],
+ "outputs"=>[{"id"=>"#main/y", "type"=>"File", "outputSource"=>"#main/step/y"}],
+ "steps"=>[{"id"=>"#main/foo.cwl",
+ "in"=>[{"id"=>"#main/step/x", "source"=>"#main/x"}],
+ "out"=>[{"id"=>"#main/step/y"}],
+ "run"=>"keep:d41d8cd98f00b204e9800998ecf8427e+0/foo.cwl",
+ "label"=>"test workflow"}],
+ "requirements"=>[{"class"=>"SubworkflowFeatureRequirement"}],
+ "hints"=>[]}]},
+ JSON.parse(workflow_response["definition"]))
+
+ # Now update the collection and check that the linked workflow record was also updated.
+ patch "/arvados/v1/collections/#{collection_response['uuid']}",
+ params: {:format => :json,
+ collection: {
+ name: "test workflow v2",
+ description: "the second version of the workflow that tests linking collection and workflow records",
+ owner_uuid: groups(:private).uuid,
+ properties: {
+ "type": "workflow",
+ "arv:workflowMain": "foo.cwl",
+ "arv:cwl_inputs": [{
+ "id": "#main/w",
+ "type": "int",
+ },
+ {
+ "id": "#main/x",
+ "type": "int",
+ }
+ ],
+ "arv:cwl_outputs": [{
+ "id": "#main/y",
+ "type": "File",
+ },
+ {
+ "id": "#main/z",
+ "type": "File",
+ }],
+ "arv:cwl_requirements": [
+ ],
+ "arv:cwl_hints": [
+ ],
+ }
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ collection_response = json_response
+ assert_equal(collection_response["name"], "test workflow v2")
+ assert_equal(collection_response["description"], "the second version of the workflow that tests linking collection and workflow records")
+ assert_equal(collection_response["owner_uuid"], groups(:private).uuid)
+
+ get "/arvados/v1/workflows/#{workflow_response['uuid']}", headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+ assert_equal(collection_response["name"], workflow_response["name"])
+ assert_equal(collection_response["description"], workflow_response["description"])
+ assert_equal(collection_response["owner_uuid"], workflow_response["owner_uuid"])
+ assert_equal({"cwlVersion"=>"v1.2",
+ "$graph"=>[
+ {"class"=>"Workflow",
+ "id"=>"#main",
+ "inputs"=>[{"id"=>"#main/w", "type"=>"int"},
+ {"id"=>"#main/x", "type"=>"int"}
+ ],
+ "outputs"=>[{"id"=>"#main/y", "type"=>"File", "outputSource"=>"#main/step/y"},
+ {"id"=>"#main/z", "type"=>"File", "outputSource"=>"#main/step/z"}],
+ "steps"=>[{"id"=>"#main/foo.cwl",
+ "in"=>[{"id"=>"#main/step/w", "source"=>"#main/w"},
+ {"id"=>"#main/step/x", "source"=>"#main/x"}],
+ "out"=>[{"id"=>"#main/step/y"}, {"id"=>"#main/step/z"}],
+ "run"=>"keep:d41d8cd98f00b204e9800998ecf8427e+0/foo.cwl",
+ "label"=>"test workflow v2"}],
+ "requirements"=>[{"class"=>"SubworkflowFeatureRequirement"}],
+ "hints"=>[]}]},
+
+ JSON.parse(workflow_response["definition"]))
+ end
+
+ test "workflow cannot be modified after it is linked" do
+ # First create a legacy workflow record that is not linked to a collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ name: "legacy"
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+ assert_equal("legacy", workflow_response["name"])
+
+ patch "/arvados/v1/workflows/#{workflow_response['uuid']}",
+ params: {:format => :json,
+ :workflow => {
+ name: "legacy v2"
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ workflow_response = json_response
+ assert_equal("legacy v2", workflow_response["name"])
+
+ collection_response = create_workflow_collection_helper
+ patch "/arvados/v1/workflows/#{workflow_response['uuid']}",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response['uuid']
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ workflow_response = json_response
+ assert_equal(collection_response['name'], workflow_response["name"])
+
+ patch "/arvados/v1/workflows/#{workflow_response['uuid']}",
+ params: {:format => :json,
+ :workflow => {
+ name: "legacy v2"
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response 403
+
+ end
+
+ test "trashing collection also hides workflow" do
+
+ collection_response = create_workflow_collection_helper
+
+ # Now create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ get "/arvados/v1/workflows/#{workflow_response['uuid']}", headers: auth(:active)
+ assert_response :success
+
+ # Now trash the collection
+ post "/arvados/v1/collections/#{collection_response['uuid']}/trash", headers: auth(:active)
+ assert_response :success
+
+ get "/arvados/v1/collections/#{collection_response['uuid']}", headers: auth(:active)
+ assert_response 404
+
+ get "/arvados/v1/workflows/#{workflow_response['uuid']}", headers: auth(:active)
+ assert_response 404
+
+ # Now untrash the collection
+ post "/arvados/v1/collections/#{collection_response['uuid']}/untrash", headers: auth(:active)
+ assert_response :success
+
+ get "/arvados/v1/collections/#{collection_response['uuid']}", headers: auth(:active)
+ assert_response :success
+
+ get "/arvados/v1/workflows/#{workflow_response['uuid']}", headers: auth(:active)
+ assert_response :success
+ end
+
+ test "collection is missing cwl_inputs" do
+ # The following is allowed, because it isn't linked.
+ # This is what legacy arvados-cwl-runner instances
+ # have been creating, so we want to make sure we can still
+ # create them, but not link them.
+ post "/arvados/v1/collections",
+ params: {:format => :json,
+ collection: {
+ name: "test workflow",
+ description: "the workflow that tests linking collection and workflow records",
+ properties: {
+ "type": "workflow",
+ "arv:workflowMain": "foo.cwl"
+ }
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ collection_response = json_response
+
+ # But it can't be linked because it doesn't have all the fields
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response 422
+ assert_match(/missing field 'arv:cwl_inputs' in collection properties/, json_response["errors"][0])
+ end
+
+ test "collection cwl_inputs wrong type" do
+ post "/arvados/v1/collections",
+ params: {:format => :json,
+ collection: {
+ name: "test workflow",
+ description: "the workflow that tests linking collection and workflow records",
+ properties: {
+ "type": "workflow",
+ "arv:workflowMain": "foo.cwl",
+ "arv:cwl_inputs": { "#main/x": {
+ "type": "int"
+ }
+ },
+ "arv:cwl_outputs": [{
+ "id": "#main/y",
+ "type": "File",
+ }],
+ "arv:cwl_requirements": [
+ ],
+ "arv:cwl_hints": [
+ ],
+
+ }
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+ collection_response = json_response
+
+ # But it can't be linked because one of the fields is invalid
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response 422
+ assert_match(/expected field 'arv:cwl_inputs' in collection properties to be a Array/, json_response["errors"][0])
+ end
+
+ test "cannot change collection type as long as there is a linked workflow" do
+ collection_response = create_workflow_collection_helper
+
+ # create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ # now try to change the type property, should fail
+ properties = collection_response["properties"]
+ properties["type"] = "something else"
+
+ patch "/arvados/v1/collections/#{collection_response['uuid']}",
+ params: {:format => :json,
+ collection: {
+ properties: properties,
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response 422
+ assert_match(/cannot change 'type' property when there are linked workflows/, json_response["errors"][0])
+
+ # Delete the linked workflow
+ delete "/arvados/v1/workflows/#{workflow_response['uuid']}",
+ params: {:format => :json},
+ headers: auth(:active)
+ assert_response :success
+
+ # Now we can change the type property
+ patch "/arvados/v1/collections/#{collection_response['uuid']}",
+ params: {:format => :json,
+ collection: {
+ properties: properties,
+ }
+ },
+ headers: auth(:active),
+ as: :json
+ assert_response :success
+
+ # But we can't make a new linked workflow, because the type is wrong
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response 422
+ assert_match(/properties does not have type: workflow/, json_response["errors"][0])
+ end
+
+ test "destroying collection destroys linked workflow" do
+ collection_response = create_workflow_collection_helper
+
+ # Now create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ assert_not_nil Collection.find_by_uuid(collection_response['uuid'])
+ assert_not_nil Workflow.find_by_uuid(workflow_response['uuid'])
+
+ delete "/arvados/v1/workflows/#{workflow_response['uuid']}",
+ params: {:format => :json},
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ assert_not_nil Collection.find_by_uuid(collection_response['uuid'])
+ assert_nil Workflow.find_by_uuid(workflow_response['uuid'])
+ end
+
+ test "workflow can be deleted without deleting collection" do
+ collection_response = create_workflow_collection_helper
+
+ # Now create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ assert_not_nil Collection.find_by_uuid(collection_response['uuid'])
+ assert_not_nil Workflow.find_by_uuid(workflow_response['uuid'])
+
+ Collection.find_by_uuid(collection_response['uuid']).destroy
+
+ assert_nil Collection.find_by_uuid(collection_response['uuid'])
+ assert_nil Workflow.find_by_uuid(workflow_response['uuid'])
+ end
+
+ test "group contents endpoint supports include=collection_uuid and query on collection.properties" do
+ collection_response = create_workflow_collection_helper
+
+ # Now create a workflow linked to the collection.
+ post "/arvados/v1/workflows",
+ params: {:format => :json,
+ :workflow => {
+ collection_uuid: collection_response["uuid"]
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ workflow_response = json_response
+
+ # no manifest text by default
+ get '/arvados/v1/groups/contents',
+ params: {
+ filters: [["uuid", "is_a", "arvados#workflow"], ["collection.properties.arv:workflowMain", "=", "foo.cwl"]].to_json,
+ include: '["collection_uuid"]',
+ format: :json,
+ },
+ headers: auth(:active)
+ assert_response :success
+ assert_equal workflow_response["uuid"], json_response["items"][0]["uuid"]
+ assert_equal collection_response["uuid"], json_response["included"][0]["uuid"]
+ assert_nil json_response["included"][0]["manifest_text"]
+ assert_nil json_response["included"][0]["unsigned_manifest_text"]
+ assert_equal collection_response["properties"]["arv:workflowMain"], json_response["included"][0]["properties"]["arv:workflowMain"]
+
+ # select didn't include manifest text, so still shouldn't get it
+ get '/arvados/v1/groups/contents',
+ params: {
+ filters: [["uuid", "is_a", "arvados#workflow"], ["collection.properties.arv:workflowMain", "=", "foo.cwl"]].to_json,
+ include: '["collection_uuid"]',
+ select: '["uuid", "collection_uuid", "properties"]',
+ format: :json,
+ },
+ headers: auth(:active)
+ assert_response :success
+ assert_equal workflow_response["uuid"], json_response["items"][0]["uuid"]
+ assert_equal collection_response["uuid"], json_response["included"][0]["uuid"]
+ assert_nil json_response["included"][0]["manifest_text"]
+ assert_nil json_response["included"][0]["unsigned_manifest_text"]
+ assert_equal collection_response["properties"]["arv:workflowMain"], json_response["included"][0]["properties"]["arv:workflowMain"]
+
+ # Currently, with the group contents API, you won't get
+ # manifest_text even if you ask for it, because it won't be signed
+ # by the controller.
+ get '/arvados/v1/groups/contents',
+ params: {
+ filters: [["uuid", "is_a", "arvados#workflow"], ["collection.properties.arv:workflowMain", "=", "foo.cwl"]].to_json,
+ include: '["collection_uuid"]',
+ select: '["uuid", "collection_uuid", "properties", "manifest_text"]',
+ format: :json,
+ },
+ headers: auth(:active)
+ assert_response :success
+ assert_equal workflow_response["uuid"], json_response["items"][0]["uuid"]
+ assert_equal collection_response["uuid"], json_response["included"][0]["uuid"]
+ assert_nil json_response["included"][0]["manifest_text"]
+ assert_nil json_response["included"][0]["unsigned_manifest_text"]
+ assert_equal collection_response["properties"]["arv:workflowMain"], json_response["included"][0]["properties"]["arv:workflowMain"]
+
+ # However, you can get unsigned_manifest_text
+ get '/arvados/v1/groups/contents',
+ params: {
+ filters: [["uuid", "is_a", "arvados#workflow"], ["collection.properties.arv:workflowMain", "=", "foo.cwl"]].to_json,
+ include: '["collection_uuid"]',
+ select: '["uuid", "collection_uuid", "properties", "unsigned_manifest_text"]',
+ format: :json,
+ },
+ headers: auth(:active)
+ assert_response :success
+ assert_equal workflow_response["uuid"], json_response["items"][0]["uuid"]
+ assert_equal collection_response["uuid"], json_response["included"][0]["uuid"]
+ assert_nil json_response["included"][0]["manifest_text"]
+ assert_equal "", json_response["included"][0]["unsigned_manifest_text"]
+ assert_equal collection_response["properties"]["arv:workflowMain"], json_response["included"][0]["properties"]["arv:workflowMain"]
+
+ end
+
+end
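
The helper at the top of this new file pins down the collection properties that make a collection linkable as a workflow record. A hedged summary of that shape, using only the property names the tests above exercise (the values are illustrative):

    # Illustrative only; these are the properties create_workflow_collection_helper sets.
    linkable_workflow_properties = {
      "type" => "workflow",            # must stay "workflow" while links exist
      "arv:workflowMain" => "foo.cwl", # workflow entry point in the collection
      "arv:cwl_inputs" => [{"id" => "#main/x", "type" => "int"}],   # must be an Array
      "arv:cwl_outputs" => [{"id" => "#main/y", "type" => "File"}], # must be an Array
      "arv:cwl_requirements" => [],
      "arv:cwl_hints" => [],
    }
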
diff --git a/services/api/test/test.git.tar b/services/api/test/test.git.tar
deleted file mode 100644
index 7af80b0774..0000000000
Binary files a/services/api/test/test.git.tar and /dev/null differ
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
index 0255d8907d..a93f59cd64 100644
--- a/services/api/test/test_helper.rb
+++ b/services/api/test/test_helper.rb
@@ -49,6 +49,45 @@ module ArvadosTestSupport
{'HTTP_AUTHORIZATION' => "Bearer #{api_token(api_client_auth_name)}"}
end
+ def full_text_excluded_columns
+ [
+ # All the columns that contain a UUID or PDH as of June 2024/Arvados 3.0.
+ # It's okay if this list gets out of date; it just needs to be complete
+ # enough to test that full text indexes exclude the right columns.
+ "authorized_user_uuid",
+ "auth_uuid",
+ "cancelled_by_client_uuid",
+ "cancelled_by_user_uuid",
+ "container_image",
+ "container_uuid",
+ "current_version_uuid",
+ "for_container_uuid",
+ "frozen_by_uuid",
+ "group_uuid",
+ "head_uuid",
+ "is_locked_by_uuid",
+ "locked_by_uuid",
+ "log_uuid",
+ "modified_by_client_uuid",
+ "modified_by_user_uuid",
+ "node_uuid",
+ "object_owner_uuid",
+ "object_uuid",
+ "output_uuid",
+ "owner_uuid",
+ "perm_origin_uuid",
+ "portable_data_hash",
+ "pri_container_uuid",
+ "redirect_to_user_uuid",
+ "requesting_container_uuid",
+ "starting_uuid",
+ "tail_uuid",
+ "target_uuid",
+ "user_uuid",
+ "uuid",
+ ]
+ end
+
def show_errors model
return lambda { model.errors.full_messages.inspect }
end
@@ -64,8 +103,6 @@ class ActiveSupport::TestCase
setup do
Thread.current[:api_client_ip_address] = nil
Thread.current[:api_client_authorization] = nil
- Thread.current[:api_client_uuid] = nil
- Thread.current[:api_client] = nil
Thread.current[:token] = nil
Thread.current[:user] = nil
restore_configuration
@@ -124,7 +161,6 @@ class ActiveSupport::TestCase
client_auth = api_client_authorizations(auth_name)
client_auth.user.forget_cached_group_perms
Thread.current[:api_client_authorization] = client_auth
- Thread.current[:api_client] = client_auth.api_client
Thread.current[:user] = client_auth.user
Thread.current[:token] = client_auth.token
end
@@ -217,8 +253,6 @@ class ActionDispatch::IntegrationTest
teardown do
Thread.current[:api_client_ip_address] = nil
Thread.current[:api_client_authorization] = nil
- Thread.current[:api_client_uuid] = nil
- Thread.current[:api_client] = nil
Thread.current[:token] = nil
Thread.current[:user] = nil
end
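
The new full_text_excluded_columns helper is consumed by the model tests later in this diff; the check it enables is a plain set intersection. A minimal sketch, assuming it runs inside a test case that includes the ArvadosTestSupport module shown above:

    # Any overlap between a model's full-text columns and the UUID/PDH
    # column list is a bug; this mirrors the assertion in the model tests.
    leaked = Collection.full_text_searchable_columns & full_text_excluded_columns
    assert_equal([], leaked, "UUID/hash columns leaked into full text index")
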
diff --git a/services/api/test/unit/api_client_authorization_test.rb b/services/api/test/unit/api_client_authorization_test.rb
index e043f8914a..fe02c3b6cc 100644
--- a/services/api/test/unit/api_client_authorization_test.rb
+++ b/services/api/test/unit/api_client_authorization_test.rb
@@ -11,7 +11,6 @@ class ApiClientAuthorizationTest < ActiveSupport::TestCase
test "ApiClientAuthorization can be created then deleted by #{token}" do
set_user_from_auth token
x = ApiClientAuthorization.create!(user_id: current_user.id,
- api_client_id: 0,
scopes: [])
newtoken = x.api_token
assert x.destroy, "Failed to destroy new ApiClientAuth"
@@ -28,7 +27,6 @@ class ApiClientAuthorizationTest < ActiveSupport::TestCase
auth = ApiClientAuthorization.validate(token: "xxxSystemRootTokenxxx")
assert_equal "xxxSystemRootTokenxxx", auth.api_token
assert_equal User.find_by_uuid(system_user_uuid).id, auth.user_id
- assert auth.api_client.is_trusted
# now change the token and try to use the old one first
Rails.configuration.SystemRootToken = "newxxxSystemRootTokenxxx"
diff --git a/services/api/test/unit/api_client_test.rb b/services/api/test/unit/api_client_test.rb
deleted file mode 100644
index dbe9c86367..0000000000
--- a/services/api/test/unit/api_client_test.rb
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class ApiClientTest < ActiveSupport::TestCase
- include CurrentApiClient
-
- [true, false].each do |token_lifetime_enabled|
- test "configured workbench is trusted when token lifetime is#{token_lifetime_enabled ? '': ' not'} enabled" do
- Rails.configuration.Login.TokenLifetime = token_lifetime_enabled ? 8.hours : 0
- Rails.configuration.Login.IssueTrustedTokens = !token_lifetime_enabled;
- Rails.configuration.Services.Workbench1.ExternalURL = URI("http://wb1.example.com")
- Rails.configuration.Services.Workbench2.ExternalURL = URI("https://wb2.example.com:443")
- Rails.configuration.Login.TrustedClients = ActiveSupport::OrderedOptions.new
- Rails.configuration.Login.TrustedClients[:"https://wb3.example.com"] = ActiveSupport::OrderedOptions.new
-
- act_as_system_user do
- [["http://wb0.example.com", false],
- ["http://wb1.example.com", true],
- ["http://wb2.example.com", false],
- ["https://wb2.example.com", true],
- ["https://wb2.example.com/", true],
- ["https://wb3.example.com/", true],
- ["https://wb4.example.com/", false],
- ].each do |pfx, result|
- a = ApiClient.create(url_prefix: pfx, is_trusted: false)
- if token_lifetime_enabled
- assert_equal false, a.is_trusted, "API client with url prefix '#{pfx}' shouldn't be trusted"
- else
- assert_equal result, a.is_trusted
- end
- end
-
- a = ApiClient.create(url_prefix: "http://example.com", is_trusted: true)
- a.save!
- a.reload
- assert a.is_trusted
- end
- end
- end
-
- [
- [true, "https://ok.example", "https://ok.example"],
- [true, "https://ok.example:443/", "https://ok.example"],
- [true, "https://ok.example", "https://ok.example:443/"],
- [true, "https://ok.example", "https://ok.example/foo/bar"],
- [true, "https://ok.example", "https://ok.example?foo/bar"],
- [true, "https://ok.example/waz?quux", "https://ok.example/foo?bar#baz"],
- [false, "https://ok.example", "http://ok.example"],
- [false, "https://ok.example", "http://ok.example:443"],
-
- [true, "https://*.wildcard.example", "https://ok.wildcard.example"],
- [true, "https://*.wildcard.example", "https://ok.ok.ok.wildcard.example"],
- [false, "https://*.wildcard.example", "http://wrongscheme.wildcard.example"],
- [false, "https://*.wildcard.example", "https://wrongport.wildcard.example:80"],
- [false, "https://*.wildcard.example", "https://ok.wildcard.example.attacker.example/"],
- [false, "https://*.wildcard.example", "https://attacker.example/https://ok.wildcard.example/"],
- [false, "https://*.wildcard.example", "https://attacker.example/?https://ok.wildcard.example/"],
- [false, "https://*.wildcard.example", "https://attacker.example/#https://ok.wildcard.example/"],
- [false, "https://*-wildcard.example", "https://notsupported-wildcard.example"],
- ].each do |pass, trusted, current|
- test "is_trusted(#{current}) returns #{pass} based on #{trusted} in TrustedClients" do
- Rails.configuration.Login.TrustedClients = ActiveSupport::OrderedOptions.new
- Rails.configuration.Login.TrustedClients[trusted.to_sym] = ActiveSupport::OrderedOptions.new
- assert_equal pass, ApiClient.new(url_prefix: current).is_trusted
- end
- end
-end
diff --git a/services/api/test/unit/arvados_model_test.rb b/services/api/test/unit/arvados_model_test.rb
index 69a2710bb9..cdca8290c9 100644
--- a/services/api/test/unit/arvados_model_test.rb
+++ b/services/api/test/unit/arvados_model_test.rb
@@ -8,20 +8,20 @@ class ArvadosModelTest < ActiveSupport::TestCase
fixtures :all
def create_with_attrs attrs
- a = Specimen.create({material: 'caloric'}.merge(attrs))
+ a = Collection.create({properties: {'foo' => 'bar'}}.merge(attrs))
a if a.valid?
end
test 'non-admin cannot assign uuid' do
set_user_from_auth :active_trustedclient
- want_uuid = Specimen.generate_uuid
+ want_uuid = Collection.generate_uuid
a = create_with_attrs(uuid: want_uuid)
assert_nil a, "Non-admin should not assign uuid."
end
test 'admin can assign valid uuid' do
set_user_from_auth :admin_trustedclient
- want_uuid = Specimen.generate_uuid
+ want_uuid = Collection.generate_uuid
a = create_with_attrs(uuid: want_uuid)
assert_equal want_uuid, a.uuid, "Admin should assign valid uuid."
assert a.uuid.length==27, "Auto assigned uuid length is wrong."
@@ -29,7 +29,7 @@ class ArvadosModelTest < ActiveSupport::TestCase
test 'admin cannot assign uuid with wrong object type' do
set_user_from_auth :admin_trustedclient
- want_uuid = Human.generate_uuid
+ want_uuid = Group.generate_uuid
a = create_with_attrs(uuid: want_uuid)
assert_nil a, "Admin should not be able to assign invalid uuid."
end
@@ -126,79 +126,75 @@ class ArvadosModelTest < ActiveSupport::TestCase
end
test "search index exists on models that go into projects" do
- all_tables = ActiveRecord::Base.connection.tables
- all_tables.delete 'schema_migrations'
- all_tables.delete 'permission_refresh_lock'
- all_tables.delete 'ar_internal_metadata'
-
- all_tables.each do |table|
- table_class = table.classify.constantize
- if table_class.respond_to?('searchable_columns')
- search_index_columns = table_class.searchable_columns('ilike')
- # Disappointing, but text columns aren't indexed yet.
- search_index_columns -= table_class.columns.select { |c|
- c.type == :text or c.name == 'description' or c.name == 'file_names'
- }.collect(&:name)
-
- indexes = ActiveRecord::Base.connection.indexes(table)
- search_index_by_columns = indexes.select do |index|
- # After rails 5.0 upgrade, AR::Base.connection.indexes() started to include
- # GIN indexes, with its 'columns' attribute being a String like
- # 'to_tsvector(...)'
- index.columns.is_a?(Array) ? index.columns.sort == search_index_columns.sort : false
- end
- search_index_by_name = indexes.select do |index|
- index.name == "#{table}_search_index"
- end
- assert !search_index_by_columns.empty?, "#{table} has no search index with columns #{search_index_columns}. Instead found search index with columns #{search_index_by_name.first.andand.columns}"
+ ActiveRecord::Base.descendants.each do |model_class|
+ next if model_class.abstract_class?
+ next if !model_class.respond_to?('searchable_columns')
+
+ search_index_columns = model_class.searchable_columns('ilike')
+ # Disappointing, but text columns aren't indexed yet.
+ search_index_columns -= model_class.columns.select { |c|
+ c.type == :text or c.name == 'description' or c.name == 'file_names'
+ }.collect(&:name)
+ next if search_index_columns.empty?
+
+ indexes = ActiveRecord::Base.connection.indexes(model_class.table_name)
+ search_index_by_columns = indexes.select do |index|
+ # After rails 5.0 upgrade, AR::Base.connection.indexes() started to include
+ # GIN indexes, with its 'columns' attribute being a String like
+ # 'to_tsvector(...)'
+ index.columns.is_a?(Array) ? index.columns.sort == search_index_columns.sort : false
+ end
+ search_index_by_name = indexes.select do |index|
+ index.name == "#{model_class.table_name}_search_index"
end
+ assert !search_index_by_columns.empty?, "#{model_class.table_name} (#{model_class.to_s}) has no search index with columns #{search_index_columns}. Instead found search index with columns #{search_index_by_name.first.andand.columns}"
end
end
- [
- %w[collections collections_trgm_text_search_idx],
- %w[container_requests container_requests_trgm_text_search_idx],
- %w[groups groups_trgm_text_search_idx],
- %w[jobs jobs_trgm_text_search_idx],
- %w[pipeline_instances pipeline_instances_trgm_text_search_idx],
- %w[pipeline_templates pipeline_templates_trgm_text_search_idx],
- %w[workflows workflows_trgm_text_search_idx]
- ].each do |model|
- table = model[0]
- indexname = model[1]
- test "trigram index exists on #{table} model" do
- table_class = table.classify.constantize
- expect = table_class.full_text_searchable_columns
- ok = false
+ [Collection, ContainerRequest, Group, Workflow].each do |model|
+ test "trigram index exists on #{model} model" do
+ expect = model.full_text_searchable_columns
conn = ActiveRecord::Base.connection
- conn.exec_query("SELECT indexdef FROM pg_indexes WHERE tablename = '#{table}' AND indexname = '#{indexname}'").each do |res|
+ index_name = "#{model.table_name}_trgm_text_search_idx"
+ indexes = conn.exec_query("SELECT indexdef FROM pg_indexes WHERE tablename = '#{model.table_name}' AND indexname = '#{index_name}'")
+ assert_not_equal(indexes.length, 0)
+ indexes.each do |res|
searchable = res['indexdef'].scan(/COALESCE\(+([A-Za-z_]+)/).flatten
- ok = (expect == searchable)
- assert ok, "Invalid or no trigram index on #{table} named #{indexname}\nexpect: #{expect.inspect}\nfound: #{searchable}"
+ assert_equal(
+ searchable, expect,
+ "Invalid or no trigram index for #{model} named #{index_name}\nexpect: #{expect.inspect}\nfound: #{searchable}",
+ )
end
end
+
+ test "UUID and hash columns are excluded from #{model} full text index" do
+ assert_equal(
+ model.full_text_searchable_columns & full_text_excluded_columns, [],
+ "UUID/hash columns returned by #{model}.full_text_searchable_columns",
+ )
+ end
end
test "selectable_attributes includes database attributes" do
- assert_includes(Job.selectable_attributes, "success")
+ assert_includes(Collection.selectable_attributes, "name")
end
test "selectable_attributes includes non-database attributes" do
- assert_includes(Job.selectable_attributes, "node_uuids")
+ assert_includes(Collection.selectable_attributes, "unsigned_manifest_text")
end
test "selectable_attributes includes common attributes in extensions" do
- assert_includes(Job.selectable_attributes, "uuid")
+ assert_includes(Collection.selectable_attributes, "uuid")
end
test "selectable_attributes does not include unexposed attributes" do
- refute_includes(Job.selectable_attributes, "nodes")
+ refute_includes(Collection.selectable_attributes, "id")
end
test "selectable_attributes on a non-default template" do
- attr_a = Job.selectable_attributes(:common)
+ attr_a = Collection.selectable_attributes(:common)
assert_includes(attr_a, "uuid")
- refute_includes(attr_a, "success")
+ refute_includes(attr_a, "name")
end
test 'create and retrieve using created_at time' do
@@ -220,15 +216,15 @@ class ArvadosModelTest < ActiveSupport::TestCase
group.update!(name: "test create and update name 1")
results = Group.where(uuid: group.uuid)
assert_equal "test create and update name 1", results.first.name, "Expected name to be updated to 1"
- updated_at_1 = results.first.updated_at.to_f
+ modified_at_1 = results.first.modified_at.to_f
# update 2
group.update!(name: "test create and update name 2")
results = Group.where(uuid: group.uuid)
assert_equal "test create and update name 2", results.first.name, "Expected name to be updated to 2"
- updated_at_2 = results.first.updated_at.to_f
+ modified_at_2 = results.first.modified_at.to_f
- assert_equal true, (updated_at_2 > updated_at_1), "Expected updated time 2 to be newer than 1"
+ assert_equal true, (modified_at_2 > modified_at_1), "Expected modified time 2 to be newer than 1"
end
test 'jsonb column' do
@@ -251,6 +247,48 @@ class ArvadosModelTest < ActiveSupport::TestCase
assert_equal({'foo' => 'bar'}, c.properties)
end
+ {
+ Collection => ["description", "manifest_text"],
+ Container => [
+ "command",
+ "environment",
+ "output_properties",
+ "runtime_constraints",
+ "secret_mounts",
+ ],
+ ContainerRequest => [
+ "command",
+ "environment",
+ "mounts",
+ "output_glob",
+ "output_properties",
+ "properties",
+ "runtime_constraints",
+ "secret_mounts",
+ ],
+ Group => ["description", "properties"],
+ Log => ["properties", "summary"],
+ }.each_pair do |model, expect|
+ test "#{model.name} limits expected columns on index" do
+ assert_equal(
+ (model.limit_index_columns_read & expect).sort,
+ expect.sort,
+ )
+ end
+ end
+
+ {
+ Collection => ["delete_at", "preserve_version", "trash_at", "version"],
+ Container => ["cost", "progress", "state", "subrequests_cost"],
+ ContainerRequest => ["container_uuid", "cwd", "requesting_container_uuid"],
+ Group => ["group_class", "is_trashed", "trashed_at"],
+ Log => ["event_at", "event_type"],
+ }.each_pair do |model, colnames|
+ test "#{model.name} does not limit expected columns on index" do
+ assert_equal(model.limit_index_columns_read & colnames, [])
+ end
+ end
+
test 'serialized attributes dirty tracking with audit log settings' do
Rails.configuration.AuditLogs.MaxDeleteBatch = 1000
set_user_from_auth :admin
@@ -260,19 +298,21 @@ class ArvadosModelTest < ActiveSupport::TestCase
else
Rails.configuration.AuditLogs.MaxAge = 0
end
+ tested_serialized = false
[
User.find_by_uuid(users(:active).uuid),
ContainerRequest.find_by_uuid(container_requests(:queued).uuid),
Container.find_by_uuid(containers(:queued).uuid),
- PipelineInstance.find_by_uuid(pipeline_instances(:has_component_with_completed_jobs).uuid),
- PipelineTemplate.find_by_uuid(pipeline_templates(:two_part).uuid),
- Job.find_by_uuid(jobs(:running).uuid)
+ Group.find_by_uuid(groups(:afiltergroup).uuid),
+ Collection.find_by_uuid(collections(:collection_with_one_property).uuid),
].each do |obj|
- assert_not(obj.class.serialized_attributes.empty?,
- "#{obj.class} model doesn't have serialized attributes")
+ if !obj.class.serialized_attributes.empty?
+ tested_serialized = true
+ end
# obj shouldn't have changed since it's just retrieved from the database
assert_not(obj.changed?, "#{obj.class} model's attribute(s) appear as changed: '#{obj.changes.keys.join(',')}' with audit logs #{auditlogs_enabled ? '': 'not '}enabled.")
end
+ assert(tested_serialized, "did not test any models with serialized attributes")
end
end
end
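
The rewritten trigram test reads index definitions straight from PostgreSQL's pg_indexes catalog and scans the COALESCE()ed column names out of the definition. A condensed sketch of that lookup, assuming an active Rails database connection and the "<table>_trgm_text_search_idx" naming convention asserted above:

    conn = ActiveRecord::Base.connection
    res = conn.exec_query(
      "SELECT indexdef FROM pg_indexes " \
      "WHERE tablename = 'collections' " \
      "AND indexname = 'collections_trgm_text_search_idx'")
    # Each COALESCE(col, ...) in the index definition names one indexed column.
    indexed_columns = res.first['indexdef'].scan(/COALESCE\(+([A-Za-z_]+)/).flatten
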
diff --git a/services/api/test/unit/commit_ancestor_test.rb b/services/api/test/unit/commit_ancestor_test.rb
deleted file mode 100644
index 46041211bb..0000000000
--- a/services/api/test/unit/commit_ancestor_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class CommitAncestorTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
-end
diff --git a/services/api/test/unit/commit_test.rb b/services/api/test/unit/commit_test.rb
deleted file mode 100644
index e83061f61a..0000000000
--- a/services/api/test/unit/commit_test.rb
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'helpers/git_test_helper'
-
-# NOTE: calling Commit.find_commit_range(nil, nil, 'rev')
-# produces an error message "fatal: bad object 'rev'" on stderr if
-# 'rev' does not exist in a given repository. Many of these tests
-# report such errors; their presence does not represent a fatal
-# condition.
-
-class CommitTest < ActiveSupport::TestCase
- # See git_setup.rb for the commit log for test.git.tar
- include GitTestHelper
-
- setup do
- authorize_with :active
- end
-
- test 'find_commit_range does not bypass permissions' do
- authorize_with :inactive
- assert_raises ArgumentError do
- CommitsHelper::find_commit_range 'foo', nil, 'main', []
- end
- end
-
- def must_pipe(cmd)
- begin
- return IO.read("|#{cmd}")
- ensure
- assert $?.success?
- end
- end
-
- [
- 'https://github.com/arvados/arvados.git',
- 'http://github.com/arvados/arvados.git',
- 'git://github.com/arvados/arvados.git',
- ].each do |url|
- test "find_commit_range uses fetch_remote_repository to get #{url}" do
- fake_gitdir = repositories(:foo).server_path
- CommitsHelper::expects(:cache_dir_for).once.with(url).returns fake_gitdir
- CommitsHelper::expects(:fetch_remote_repository).once.with(fake_gitdir, url).returns true
- c = CommitsHelper::find_commit_range url, nil, 'main', []
- refute_empty c
- end
- end
-
- [
- 'bogus/repo',
- '/bogus/repo',
- '/not/allowed/.git',
- 'file:///not/allowed.git',
- 'git.arvados.org/arvados.git',
- 'github.com/arvados/arvados.git',
- ].each do |url|
- test "find_commit_range skips fetch_remote_repository for #{url}" do
- CommitsHelper::expects(:fetch_remote_repository).never
- assert_raises ArgumentError do
- CommitsHelper::find_commit_range url, nil, 'main', []
- end
- end
- end
-
- test 'fetch_remote_repository does not leak commits across repositories' do
- url = "http://localhost:1/fake/fake.git"
- fetch_remote_from_local_repo url, :foo
- c = CommitsHelper::find_commit_range url, nil, 'main', []
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57'], c
-
- url = "http://localhost:2/fake/fake.git"
- fetch_remote_from_local_repo url, 'file://' + File.expand_path('../../.git', Rails.root)
- c = CommitsHelper::find_commit_range url, nil, '077ba2ad3ea24a929091a9e6ce545c93199b8e57', []
- assert_equal [], c
- end
-
- test 'tag_in_internal_repository creates and updates tags in internal.git' do
- authorize_with :active
- gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir}"
- IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
- assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
- refute $?.success?
- CommitsHelper::tag_in_internal_repository 'active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', 'testtag'
- assert_match(/^commit 31ce37f/, IO.read("|#{gitint} show testtag"))
- assert $?.success?
- end
-
- def with_foo_repository
- Dir.chdir("#{Rails.configuration.Git.Repositories}/#{repositories(:foo).uuid}") do
- must_pipe("git checkout main 2>&1")
- yield
- end
- end
-
- test 'tag_in_internal_repository, new non-tip sha1 in local repo' do
- tag = "tag#{rand(10**10)}"
- sha1 = nil
- with_foo_repository do
- must_pipe("git checkout -b branch-#{rand(10**10)} 2>&1")
- must_pipe("echo -n #{tag.shellescape} >bar")
- must_pipe("git add bar")
- must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
- sha1 = must_pipe("git log -n1 --format=%H").strip
- must_pipe("git rm bar")
- must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
- end
- CommitsHelper::tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
- assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
- assert $?.success?
- end
-
- test 'tag_in_internal_repository, new unreferenced sha1 in local repo' do
- tag = "tag#{rand(10**10)}"
- sha1 = nil
- with_foo_repository do
- must_pipe("echo -n #{tag.shellescape} >bar")
- must_pipe("git add bar")
- must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
- sha1 = must_pipe("git log -n1 --format=%H").strip
- must_pipe("git reset --hard HEAD^")
- end
- CommitsHelper::tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
- assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
- assert $?.success?
- end
-
- # In active/shabranchnames, "7387838c69a21827834586cc42b467ff6c63293b" is
- # both a commit hash, and the name of a branch that begins from that same
- # commit.
- COMMIT_BRANCH_NAME = "7387838c69a21827834586cc42b467ff6c63293b"
- # A commit that appears in the branch after 7387838c.
- COMMIT_BRANCH_COMMIT_2 = "abec49829bf1758413509b7ffcab32a771b71e81"
- # "738783" is another branch that starts from the above commit.
- SHORT_COMMIT_BRANCH_NAME = COMMIT_BRANCH_NAME[0, 6]
- # A commit that appears in branch 738783 after 7387838c.
- SHORT_BRANCH_COMMIT_2 = "77e1a93093663705a63bb4d505698047e109dedd"
-
- test "find_commit_range min_version prefers commits over branch names" do
- assert_equal([COMMIT_BRANCH_NAME],
- CommitsHelper::find_commit_range("active/shabranchnames",
- COMMIT_BRANCH_NAME, nil, nil))
- end
-
- test "find_commit_range max_version prefers commits over branch names" do
- assert_equal([COMMIT_BRANCH_NAME],
- CommitsHelper::find_commit_range("active/shabranchnames",
- nil, COMMIT_BRANCH_NAME, nil))
- end
-
- test "find_commit_range min_version with short branch name" do
- assert_equal([SHORT_BRANCH_COMMIT_2],
- CommitsHelper::find_commit_range("active/shabranchnames",
- SHORT_COMMIT_BRANCH_NAME, nil, nil))
- end
-
- test "find_commit_range max_version with short branch name" do
- assert_equal([SHORT_BRANCH_COMMIT_2],
- CommitsHelper::find_commit_range("active/shabranchnames",
- nil, SHORT_COMMIT_BRANCH_NAME, nil))
- end
-
- test "find_commit_range min_version with disambiguated branch name" do
- assert_equal([COMMIT_BRANCH_COMMIT_2],
- CommitsHelper::find_commit_range("active/shabranchnames",
- "heads/#{COMMIT_BRANCH_NAME}",
- nil, nil))
- end
-
- test "find_commit_range max_version with disambiguated branch name" do
- assert_equal([COMMIT_BRANCH_COMMIT_2],
- CommitsHelper::find_commit_range("active/shabranchnames", nil,
- "heads/#{COMMIT_BRANCH_NAME}", nil))
- end
-
- test "find_commit_range min_version with unambiguous short name" do
- assert_equal([COMMIT_BRANCH_NAME],
- CommitsHelper::find_commit_range("active/shabranchnames",
- COMMIT_BRANCH_NAME[0..-2], nil, nil))
- end
-
- test "find_commit_range max_version with unambiguous short name" do
- assert_equal([COMMIT_BRANCH_NAME],
- CommitsHelper::find_commit_range("active/shabranchnames", nil,
- COMMIT_BRANCH_NAME[0..-2], nil))
- end
-
- test "find_commit_range laundry list" do
- authorize_with :active
-
- # single
- a = CommitsHelper::find_commit_range('active/foo', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
- assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
-
- #test "test_branch1" do
- a = CommitsHelper::find_commit_range('active/foo', nil, 'main', nil)
- assert_includes(a, '077ba2ad3ea24a929091a9e6ce545c93199b8e57')
-
- #test "test_branch2" do
- a = CommitsHelper::find_commit_range('active/foo', nil, 'b1', nil)
- assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
-
- #test "test_branch3" do
- a = CommitsHelper::find_commit_range('active/foo', nil, 'HEAD', nil)
- assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
-
- #test "test_single_revision_repo" do
- a = CommitsHelper::find_commit_range('active/foo', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
- assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
- a = CommitsHelper::find_commit_range('arvados', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
- assert_equal [], a
-
- #test "test_multi_revision" do
- # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
- a = CommitsHelper::find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', nil)
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
-
- #test "test_tag" do
- # complains "fatal: ambiguous argument 'tag1': unknown revision or path
- # not in the working tree."
- a = CommitsHelper::find_commit_range('active/foo', 'tag1', 'main', nil)
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577'], a
-
- #test "test_multi_revision_exclude" do
- a = CommitsHelper::find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['4fe459abe02d9b365932b8f5dc419439ab4e2577'])
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
-
- #test "test_multi_revision_tagged_exclude" do
- # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
- a = CommitsHelper::find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['tag1'])
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
-
- Dir.mktmpdir do |touchdir|
- # invalid input to maximum
- a = CommitsHelper::find_commit_range('active/foo', nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", nil)
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
- assert_equal [], a
-
- # invalid input to maximum
- a = CommitsHelper::find_commit_range('active/foo', nil, "$(uname>#{touchdir}/uh_oh)", nil)
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
- assert_equal [], a
-
- # invalid input to minimum
- a = CommitsHelper::find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
- assert_equal [], a
-
- # invalid input to minimum
- a = CommitsHelper::find_commit_range('active/foo', "$(uname>#{touchdir}/uh_oh)", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
- assert_equal [], a
-
- # invalid input to 'excludes'
- # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
- a = CommitsHelper::find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
- assert_equal [], a
-
- # invalid input to 'excludes'
- # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
- a = CommitsHelper::find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
- assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
- assert_equal [], a
- end
- end
-end
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index d25c08a579..4ae914f4ad 100644
--- a/services/api/test/unit/container_request_test.rb
+++ b/services/api/test/unit/container_request_test.rb
@@ -14,19 +14,16 @@ class ContainerRequestTest < ActiveSupport::TestCase
def with_container_auth(ctr)
auth_was = Thread.current[:api_client_authorization]
- client_was = Thread.current[:api_client]
token_was = Thread.current[:token]
user_was = Thread.current[:user]
auth = ApiClientAuthorization.find_by_uuid(ctr.auth_uuid)
Thread.current[:api_client_authorization] = auth
- Thread.current[:api_client] = auth.api_client
Thread.current[:token] = auth.token
Thread.current[:user] = auth.user
begin
yield
ensure
Thread.current[:api_client_authorization] = auth_was
- Thread.current[:api_client] = client_was
Thread.current[:token] = token_was
Thread.current[:user] = user_was
end
@@ -112,11 +109,15 @@ class ContainerRequestTest < ActiveSupport::TestCase
{"mounts" => {"FOO" => {}}},
{"mounts" => {"FOO" => {"kind" => "tmp", "capacity" => 42.222}}},
{"command" => ["echo", 55]},
- {"environment" => {"FOO" => 55}}
+ {"environment" => {"FOO" => 55}},
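+      # output_glob must be a flat array of strings; each of the
+      # following malformed values should be rejected: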
+ {"output_glob" => [false]},
+ {"output_glob" => [["bad"]]},
+ {"output_glob" => "bad"},
+ {"output_glob" => ["nope", -1]},
].each do |value|
test "Create with invalid #{value}" do
set_user_from_auth :active
- assert_raises(ActiveRecord::RecordInvalid) do
+ assert_raises(ActiveRecord::RecordInvalid, Serializer::TypeMismatch) do
cr = create_minimal_req!({state: "Committed",
priority: 1}.merge(value))
cr.save!
@@ -127,7 +128,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
set_user_from_auth :active
cr = create_minimal_req!(state: "Uncommitted", priority: 1)
cr.save!
- assert_raises(ActiveRecord::RecordInvalid) do
+ assert_raises(ActiveRecord::RecordInvalid, Serializer::TypeMismatch) do
cr = ContainerRequest.find_by_uuid cr.uuid
cr.update!({state: "Committed",
priority: 1}.merge(value))
@@ -1827,4 +1828,158 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_equal 3+7+9, cr.cumulative_cost
end
+ test "Service cannot use existing container" do
+ set_user_from_auth :active
+ cr = create_minimal_req!
+ cr.service = true
+ cr.use_existing = true
+ cr.state = "Committed"
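+    # A service container request must not reuse an existing
+    # container, so service=true with use_existing=true is invalid.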
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+ end
+
+ test "published_ports validation" do
+ set_user_from_auth :active
+ cr = create_minimal_req!
+ cr.use_existing = false
+
+ # Bad port number
+ cr.service = true
+ cr.published_ports = {
+ "9000000" => {
+ "access" => "public",
+ "label" => "stuff",
+ "initial_path" => "",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+ # Not a hash
+ cr.published_ports = {
+ "9000" => ""
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+    # Empty hash
+ cr.published_ports = {
+ "9000" => {
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+    # Missing access
+ cr.published_ports = {
+ "9000" => {
+ "label" => "stuff",
+ "initial_path" => "",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+    # Invalid access
+ cr.published_ports = {
+ "9000" => {
+ "access" => "peanuts",
+ "label" => "stuff",
+ "initial_path" => "",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+    # Missing label
+ cr.published_ports = {
+ "9000" => {
+ "access" => "public",
+ "initial_path" => "",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+    # Empty label
+ cr.published_ports = {
+ "9000" => {
+ "access" => "public",
+ "label" => "",
+ "initial_path" => "",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+ # Missing initial_path
+ cr.published_ports = {
+ "9000" => {
+ "access" => "public",
+ "label" => "stuff",
+ }
+ }
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.save!
+ end
+
+ # All good!
+ cr.published_ports = {
+ "9000" => {
+ "access" => "public",
+ "label" => "stuff",
+ "initial_path" => "",
+ }
+ }
+ cr.save!
+ end
+
+ test "container request in a project with trash_at in the future" do
+    # Tests the edge case where a container request is created in a
+    # project that has trash_at set in the future.
+    #
+    # A user actually encountered this in the wild: they created a
+    # temporary project to run some tests and set it to expire
+    # automatically as a cleanup step. However, because of bug
+    # #22768, the containers were assigned priority 0.
+    #
+    # This tests that the behavior now works as intended: the
+    # container has nonzero priority while the project remains live,
+    # and the priority drops to zero once trash_at has passed.
+
+ set_user_from_auth :active
+
+ project = Group.create!(group_class: "project", name: "trashed_project", trash_at: Time.now+5.minutes)
+
+ cr = create_minimal_req!({state: "Committed", priority: 500, owner_uuid: project.uuid})
+
+ assert_equal 500, cr.priority
+
+ c = Container.find_by_uuid cr.container_uuid
+
+ # Nonzero priority, which means runnable, because the project
+ # isn't trashed yet
+ assert_operator c.priority, :>, 0
+
+ project.trash_at = Time.now
+ project.save!
+
+ c.reload
+
+ # Project is now trashed, so the container has zero priority,
+ # which means it won't run and will be cancelled if it was already
+ # running.
+ assert_equal 0, c.priority
+ end
+
end
diff --git a/services/api/test/unit/container_test.rb b/services/api/test/unit/container_test.rb
index 09b885b391..494aa3b791 100644
--- a/services/api/test/unit/container_test.rb
+++ b/services/api/test/unit/container_test.rb
@@ -22,6 +22,7 @@ class ContainerTest < ActiveSupport::TestCase
cwd: "test",
command: ["echo", "hello"],
output_path: "test",
+ output_glob: [],
runtime_constraints: {
"API" => false,
"keep_cache_disk" => 0,
@@ -48,6 +49,7 @@ class ContainerTest < ActiveSupport::TestCase
environment: {},
mounts: {},
output_path: "test",
+ output_glob: [],
runtime_auth_scopes: ["all"],
runtime_constraints: {
"API" => false,
@@ -89,8 +91,8 @@ class ContainerTest < ActiveSupport::TestCase
{environment: {"FOO" => "BAR"}},
{mounts: {"FOO" => "BAR"}},
{output_path: "/tmp3"},
- {locked_by_uuid: "zzzzz-gj3su-027z32aux8dg2s1"},
- {auth_uuid: "zzzzz-gj3su-017z32aux8dg2s1"},
+ {locked_by_uuid: api_client_authorizations(:admin).uuid},
+ {auth_uuid: api_client_authorizations(:system_user).uuid},
{runtime_constraints: {"FOO" => "BAR"}}]
end
@@ -172,7 +174,7 @@ class ContainerTest < ActiveSupport::TestCase
assert_equal c.runtime_status, {}
assert_equal Container::Queued, c.state
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.update! state: Container::Locked
c.update! state: Container::Running
@@ -212,7 +214,7 @@ class ContainerTest < ActiveSupport::TestCase
c1.update! runtime_status: {'error' => 'Oops!'}
end
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
# Allow updates when state = Locked
c1.update! state: Container::Locked
@@ -238,7 +240,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c2, _ = minimal_new(attrs)
assert_equal c2.runtime_status, {}
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c2.update! state: Container::Locked
c2.update! state: Container::Running
c2.update! state: Container::Cancelled
@@ -251,7 +253,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
env = {"C" => "3", "B" => "2", "A" => "1"}
m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
- rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1, "keep_cache_disk" => 0, "API" => true, "cuda" => {"device_count":0, "driver_version": "", "hardware_capability": ""}}
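+    # The "gpu" runtime constraint supersedes the deprecated "cuda" constraint.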
+ rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1, "keep_cache_disk" => 0, "API" => true, "gpu" => {"stack": "", "device_count":0, "driver_version": "", "hardware_target": [], "vram": 0}}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
c.reload
assert_equal Container.deep_sort_hash(env).to_json, c.environment.to_json
@@ -293,7 +295,7 @@ class ContainerTest < ActiveSupport::TestCase
c_recent, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_older.uuid, c_recent.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_older.update!({state: Container::Locked})
c_older.update!({state: Container::Running})
c_older.update!(completed_attrs)
@@ -330,10 +332,10 @@ class ContainerTest < ActiveSupport::TestCase
assert_not_equal c_output1.uuid, c_output2.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
out1 = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
- log1 = collections(:real_log_collection).portable_data_hash
+ log1 = collections(:log_collection).portable_data_hash
c_output1.update!({state: Container::Locked})
c_output1.update!({state: Container::Running})
c_output1.update!(completed_attrs.merge({log: log1, output: out1}))
@@ -356,7 +358,7 @@ class ContainerTest < ActiveSupport::TestCase
c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_slower.update!({state: Container::Locked})
c_slower.update!({state: Container::Running,
progress: 0.1})
@@ -380,7 +382,7 @@ class ContainerTest < ActiveSupport::TestCase
c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_slower.update!({state: Container::Locked})
c_slower.update!({state: Container::Running,
progress: 0.1})
@@ -404,7 +406,7 @@ class ContainerTest < ActiveSupport::TestCase
c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_slower.update!({state: Container::Locked})
c_slower.update!({state: Container::Running,
progress: 0.1})
@@ -432,7 +434,7 @@ class ContainerTest < ActiveSupport::TestCase
c_high_priority_newer, _ = minimal_new(common_attrs.merge({use_existing: false}))
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_low_priority.uuid, c_high_priority_older.uuid, c_high_priority_newer.uuid].uniq.length
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_low_priority.update!({state: Container::Locked,
priority: 1})
c_high_priority_older.update!({state: Container::Locked,
@@ -450,7 +452,7 @@ class ContainerTest < ActiveSupport::TestCase
c_failed, _ = minimal_new(common_attrs.merge({use_existing: false}))
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_failed.uuid, c_running.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_failed.update!({state: Container::Locked})
c_failed.update!({state: Container::Running})
c_failed.update!({state: Container::Complete,
@@ -471,7 +473,7 @@ class ContainerTest < ActiveSupport::TestCase
c_completed, _ = minimal_new(common_attrs.merge({use_existing: false}))
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_completed.uuid, c_running.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_completed.update!({state: Container::Locked})
c_completed.update!({state: Container::Running})
c_completed.update!({state: Container::Complete,
@@ -492,7 +494,7 @@ class ContainerTest < ActiveSupport::TestCase
c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_running.uuid, c_locked.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_locked.update!({state: Container::Locked})
c_running.update!({state: Container::Locked})
c_running.update!({state: Container::Running,
@@ -508,7 +510,7 @@ class ContainerTest < ActiveSupport::TestCase
c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
c_queued, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_queued.uuid, c_locked.uuid
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c_locked.update!({state: Container::Locked})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -519,7 +521,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed"}})
c, _ = minimal_new(attrs)
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.update!({state: Container::Locked})
c.update!({state: Container::Running})
c.update!({state: Container::Complete,
@@ -542,7 +544,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c1_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"test" => name, "state" => c1_state}, scheduling_parameters: {"preemptible" => c1_preemptible}})
c1, _ = minimal_new(c1_attrs)
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c1.update!({state: Container::Locked}) if c1_state != Container::Queued
c1.update!({state: Container::Running, priority: c1_priority}) if c1_state == Container::Running
c2_attrs = c1_attrs.merge({scheduling_parameters: {"preemptible" => c2_preemptible}})
@@ -645,17 +647,20 @@ class ContainerTest < ActiveSupport::TestCase
# No cuda
no_cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
- "cuda" => {"device_count":0, "driver_version": "", "hardware_capability": ""}},})
+ "cuda" => {"device_count" => 0, "driver_version" => "", "hardware_capability" => ""}},})
c1, _ = minimal_new(no_cuda_attrs)
assert_equal Container::Queued, c1.state
# has cuda
cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
- "cuda" => {"device_count":1, "driver_version": "11.0", "hardware_capability": "9.0"}},})
+ "cuda" => {"device_count" => 1, "driver_version" => "11.0", "hardware_capability" => "9.0"}},})
c2, _ = minimal_new(cuda_attrs)
assert_equal Container::Queued, c2.state
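+
+    # find_reusable matches against fully resolved runtime constraints,
+    # so fill in the defaults before looking up each container.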
+ no_cuda_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(no_cuda_attrs[:runtime_constraints])
+ cuda_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(cuda_attrs[:runtime_constraints])
+
# should find the no cuda one
reused = Container.find_reusable(no_cuda_attrs)
assert_not_nil reused
@@ -667,11 +672,78 @@ class ContainerTest < ActiveSupport::TestCase
assert_equal reused.uuid, c2.uuid
end
+ test "find_reusable with legacy cuda" do
+ set_user_from_auth :active
+
+ # has cuda
+
+ cuda_attrs = {
+ command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"],
+ cwd: "test",
+ environment: {},
+ output_path: "test",
+ output_glob: [],
+ container_image: "fa3c1a9cb6783f85f2ecda037e07b8c3+167",
+ mounts: {},
+ runtime_constraints: Container.resolve_runtime_constraints({
+ "cuda" => {
+ "device_count" => 1,
+ "driver_version" => "11.0",
+ "hardware_capability" => "9.0",
+ },
+ "ram" => 12000000000,
+ "vcpus" => 4,
+ }),
+ scheduling_parameters: {},
+ secret_mounts: {},
+ }
+
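+    # Log the reuse decision process to make a failed lookup easier to debug.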
+ Rails.configuration.Containers.LogReuseDecisions = true
+    # should find the legacy cuda container
+ reused = Container.find_reusable(cuda_attrs)
+ assert_not_nil reused
+ assert_equal reused.uuid, containers(:legacy_cuda_container).uuid
+ end
+
+ test "find_reusable method with gpu" do
+ set_user_from_auth :active
+ # No gpu
+ no_gpu_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
+ runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
+ "gpu" => {"device_count" => 0, "driver_version" => "",
+ "hardware_target" => [], "stack" => "", "vram" => 0}},})
+ c1, _ = minimal_new(no_gpu_attrs)
+ assert_equal Container::Queued, c1.state
+
+    # Wants gpu
+ gpu_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
+ runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
+ "gpu" => {"device_count" => 1, "driver_version" => "11.0",
+ "hardware_target" => ["9.0"], "stack" => "cuda",
+ "vram" => 2000000000}},})
+ c2, _ = minimal_new(gpu_attrs)
+ assert_equal Container::Queued, c2.state
+
+ no_gpu_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(no_gpu_attrs[:runtime_constraints])
+ gpu_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(gpu_attrs[:runtime_constraints])
+
+ # should find the no gpu one
+ reused = Container.find_reusable(no_gpu_attrs)
+ assert_not_nil reused
+ assert_equal reused.uuid, c1.uuid
+
+ # should find the gpu one
+ reused = Container.find_reusable(gpu_attrs)
+ assert_not_nil reused
+ assert_equal reused.uuid, c2.uuid
+ end
+
test "Container running" do
set_user_from_auth :active
c, _ = minimal_new priority: 1
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
check_illegal_updates c, [{state: Container::Running},
{state: Container::Complete}]
@@ -691,7 +763,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, cr = minimal_new priority: 0
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert_equal Container::Queued, c.state
assert_raise(ArvadosModel::LockFailedError) do
@@ -756,7 +828,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, cr = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert_equal Container::Queued, c.state
assert_equal 0, c.lock_count
@@ -799,7 +871,7 @@ class ContainerTest < ActiveSupport::TestCase
test "Container queued cancel" do
set_user_from_auth :active
c, cr = minimal_new({container_count_max: 1})
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert c.update(state: Container::Cancelled), show_errors(c)
check_no_change_from_cancelled c
cr.reload
@@ -821,7 +893,7 @@ class ContainerTest < ActiveSupport::TestCase
test "Container locked cancel" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert c.lock, show_errors(c)
assert c.update(state: Container::Cancelled), show_errors(c)
check_no_change_from_cancelled c
@@ -831,7 +903,7 @@ class ContainerTest < ActiveSupport::TestCase
Rails.configuration.API.TokenMaxLifetime = 1.hour
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert c.lock, show_errors(c)
refute c.auth.nil?
assert c.auth.expires_at.nil?
@@ -841,11 +913,11 @@ class ContainerTest < ActiveSupport::TestCase
test "Container locked cancel with log" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
assert c.lock, show_errors(c)
assert c.update(
state: Container::Cancelled,
- log: collections(:real_log_collection).portable_data_hash,
+ log: collections(:log_collection).portable_data_hash,
), show_errors(c)
check_no_change_from_cancelled c
end
@@ -853,7 +925,7 @@ class ContainerTest < ActiveSupport::TestCase
test "Container running cancel" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
c.update! state: Container::Cancelled
@@ -903,7 +975,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, _ = minimal_new
if start_state != Container::Queued
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
if start_state != Container::Locked
c.update! state: Container::Running
@@ -923,7 +995,7 @@ class ContainerTest < ActiveSupport::TestCase
test "can only change exit code while running and at completion" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
check_illegal_updates c, [{exit_code: 1}]
c.update! state: Container::Running
@@ -933,7 +1005,7 @@ class ContainerTest < ActiveSupport::TestCase
test "locked_by_uuid can update log when locked/running, and output when running" do
set_user_from_auth :active
- logcoll = collections(:real_log_collection)
+ logcoll = collections(:container_log_collection)
c, cr1 = minimal_new
cr2 = ContainerRequest.new(DEFAULT_ATTRS)
cr2.state = ContainerRequest::Committed
@@ -944,7 +1016,7 @@ class ContainerTest < ActiveSupport::TestCase
logpdh_time1 = logcoll.portable_data_hash
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid
c.update!(log: logpdh_time1)
@@ -975,8 +1047,8 @@ class ContainerTest < ActiveSupport::TestCase
assert_equal cr1log_uuid, cr1.log_uuid
assert_equal cr2log_uuid, cr2.log_uuid
assert_equal 1, Collection.where(uuid: [cr1log_uuid, cr2log_uuid]).to_a.collect(&:portable_data_hash).uniq.length
- assert_equal ". acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
-./log\\040for\\040container\\040#{cr1.container_uuid} acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
+ assert_equal ". 8c12f5f5297b7337598170c6f531fcee+7882 acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 7882:3:foo.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt
+./log\\040for\\040container\\040#{cr1.container_uuid} 8c12f5f5297b7337598170c6f531fcee+7882 acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 7882:3:foo.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt
", Collection.find_by_uuid(cr1log_uuid).manifest_text
end
@@ -990,20 +1062,18 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, _ = minimal_new
end
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
if tok == "runtime_token"
auth = ApiClientAuthorization.validate(token: c.runtime_token)
Thread.current[:api_client_authorization] = auth
- Thread.current[:api_client] = auth.api_client
Thread.current[:token] = auth.token
Thread.current[:user] = auth.user
else
auth = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
Thread.current[:api_client_authorization] = auth
- Thread.current[:api_client] = auth.api_client
Thread.current[:token] = auth.token
Thread.current[:user] = auth.user
end
@@ -1013,7 +1083,7 @@ class ContainerTest < ActiveSupport::TestCase
assert c.update(runtime_status: {'warning' => 'something happened'})
assert c.update(progress: 0.5)
assert c.update(exit_code: 0)
- refute c.update(log: collections(:real_log_collection).portable_data_hash)
+ refute c.update(log: collections(:log_collection).portable_data_hash)
c.reload
assert c.update(state: Container::Complete, exit_code: 0)
end
@@ -1022,7 +1092,7 @@ class ContainerTest < ActiveSupport::TestCase
test "not allowed to set output that is not readable by current user" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
@@ -1037,7 +1107,7 @@ class ContainerTest < ActiveSupport::TestCase
test "other token cannot set output on running container" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
@@ -1050,7 +1120,7 @@ class ContainerTest < ActiveSupport::TestCase
test "can set trashed output on running container" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
@@ -1064,7 +1134,7 @@ class ContainerTest < ActiveSupport::TestCase
test "not allowed to set trashed output that is not readable by current user" do
set_user_from_auth :active
c, _ = minimal_new
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update! state: Container::Running
@@ -1095,7 +1165,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, cr = minimal_new(secret_mounts: {'/secret' => {'kind' => 'text', 'content' => 'foo'}},
container_count_max: 1, runtime_token: api_client_authorizations(:active).token)
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
c.lock
c.update!(state: Container::Running)
c.reload
@@ -1374,4 +1444,62 @@ class ContainerTest < ActiveSupport::TestCase
)
end
end
+
+ test "published_ports base_url when ExternalURL is wildcard" do
+ Rails.configuration.Services.ContainerWebServices.ExternalURL = URI.parse("https://*.example.com/")
+ set_user_from_auth :active
+ c, _ = minimal_new(
+ published_ports:
+ {"1234" => {
+ "access": "public",
+ "label": "example",
+ "initial_path": "initial_path"}})
+ set_user_from_auth :system_user
+ c.lock
+ c.update! state: Container::Running
+
+ c.reload
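+    # With a wildcard ExternalURL, each published port is exposed on
+    # its own "<container uuid>-<port>" hostname.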
+ assert_equal "https://#{c.uuid}-1234.example.com/", c.published_ports["1234"]["base_url"]
+ assert_equal "https://#{c.uuid}-1234.example.com/initial_path", c.published_ports["1234"]["initial_url"]
+ end
+
+ test "published_ports base_url when ExternalURL has port range" do
+ Rails.configuration.Services.ContainerWebServices.ExternalURL = URI.parse("https://example.com/")
+ Rails.configuration.Services.ContainerWebServices.ExternalPortMin = 2000
+ Rails.configuration.Services.ContainerWebServices.ExternalPortMax = 3000
+ set_user_from_auth :active
+ c, _ = minimal_new(
+ published_ports:
+ {"1234" => {
+ "access": "public",
+ "label": "example",
+ "initial_path": "/initial_path"},
+ "9999" => {
+ "access": "private",
+ "label": "label",
+ "initial_path": ""}})
+ set_user_from_auth :system_user
+ c.lock
+ c.update! state: Container::Running
+
+ c.reload
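+    # With a fixed hostname, external ports are assigned sequentially
+    # starting from ExternalPortMin.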
+ assert_equal "https://example.com:2000/", c.published_ports["1234"]["base_url"]
+ assert_equal "https://example.com:2000/initial_path", c.published_ports["1234"]["initial_url"]
+ assert_equal "https://example.com:2001/", c.published_ports["9999"]["base_url"]
+ assert_equal "https://example.com:2001/", c.published_ports["9999"]["initial_url"]
+ assert_equal [[1234,2000], [9999,2001]], assigned_ports_for_container(c.uuid)
+
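+    # Cancelling the container should release its assigned external ports.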
+ c.update! state: Container::Cancelled
+
+ assert_equal [], assigned_ports_for_container(c.uuid)
+ end
+
+ def assigned_ports_for_container(uuid)
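+    # Query the container_ports table directly for the
+    # [container_port, external_port] pairs assigned to this container.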
+ ActiveRecord::Base.connection.exec_query(
+ 'select * from container_ports where container_uuid=$1',
+ '',
+ [uuid]).map do |row|
+ [row['container_port'], row['external_port']]
+ end
+ end
end
diff --git a/services/api/test/unit/create_superuser_token_test.rb b/services/api/test/unit/create_superuser_token_test.rb
deleted file mode 100644
index 86ba78cb99..0000000000
--- a/services/api/test/unit/create_superuser_token_test.rb
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'safe_json'
-require 'test_helper'
-require 'create_superuser_token'
-
-class CreateSuperUserTokenTest < ActiveSupport::TestCase
- include CreateSuperUserToken
-
- test "create superuser token twice and expect same results" do
- # Create a token with some string
- token1 = create_superuser_token 'atesttoken'
- assert_not_nil token1
- assert_match(/atesttoken$/, token1)
-
- # Create token again; this time, we should get the one created earlier
- token2 = create_superuser_token
- assert_not_nil token2
- assert_equal token1, token2
- end
-
- test "create superuser token with two different inputs and expect the first both times" do
- # Create a token with some string
- token1 = create_superuser_token 'atesttoken'
- assert_not_nil token1
- assert_match(/\/atesttoken$/, token1)
-
- # Create token again with some other string and expect the existing superuser token back
- token2 = create_superuser_token 'someothertokenstring'
- assert_not_nil token2
- assert_equal token1, token2
- end
-
- test "create superuser token and invoke again with some other valid token" do
- # Create a token with some string
- token1 = create_superuser_token 'atesttoken'
- assert_not_nil token1
- assert_match(/\/atesttoken$/, token1)
-
- su_token = api_client_authorizations("system_user").api_token
- token2 = create_superuser_token su_token
- assert_equal token2.split('/')[2], su_token
- end
-
- test "create superuser token, expire it, and create again" do
- # Create a token with some string
- token1 = create_superuser_token 'atesttoken'
- assert_not_nil token1
- assert_match(/\/atesttoken$/, token1)
-
- # Expire this token and call create again; expect a new token created
- apiClientAuth = ApiClientAuthorization.where(api_token: 'atesttoken').first
- refute_nil apiClientAuth
- Thread.current[:user] = users(:admin)
- apiClientAuth.update expires_at: '2000-10-10'
-
- token2 = create_superuser_token
- assert_not_nil token2
- assert_not_equal token1, token2
- end
-
- test "invoke create superuser token with an invalid non-superuser token and expect error" do
- active_user_token = api_client_authorizations("active").api_token
- e = assert_raises RuntimeError do
- create_superuser_token active_user_token
- end
- assert_not_nil e
- assert_equal "Token exists but is not a superuser token.", e.message
- end
-
- test "specified token has limited scope" do
- active_user_token = api_client_authorizations("data_manager").api_token
- e = assert_raises RuntimeError do
- create_superuser_token active_user_token
- end
- assert_not_nil e
- assert_match /^Token exists but has limited scope/, e.message
- end
-
- test "existing token has limited scope" do
- active_user_token = api_client_authorizations("admin_vm").api_token
- ApiClientAuthorization.
- where(user_id: system_user.id).
- update_all(scopes: ["GET /"])
- fixture_tokens = ApiClientAuthorization.all.collect(&:api_token)
- new_token = create_superuser_token
- refute_includes(fixture_tokens, new_token)
- end
-end
diff --git a/services/api/test/unit/group_test.rb b/services/api/test/unit/group_test.rb
index 36f42006ff..4ffa5ff10f 100644
--- a/services/api/test/unit/group_test.rb
+++ b/services/api/test/unit/group_test.rb
@@ -18,13 +18,13 @@ class GroupTest < ActiveSupport::TestCase
assert g.save, "active user should be able to modify group #{g.uuid}"
# Use the group as the owner of a new object
- s = Specimen.
+ s = Collection.
create(owner_uuid: groups(:bad_group_has_ownership_cycle_b).uuid)
assert s.valid?, "ownership should pass validation #{s.errors.messages}"
assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
# Use the group as the new owner of an existing object
- s = specimens(:in_aproject)
+ s = collections(:collection_owned_by_active)
s.owner_uuid = groups(:bad_group_has_ownership_cycle_b).uuid
assert s.valid?, "ownership should pass validation"
assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
@@ -257,10 +257,10 @@ class GroupTest < ActiveSupport::TestCase
def insert_group uuid, owner_uuid, name, group_class
q = ActiveRecord::Base.connection.exec_query %{
-insert into groups (uuid, owner_uuid, name, group_class, created_at, updated_at)
+insert into groups (uuid, owner_uuid, name, group_class, created_at, updated_at, modified_at)
values ('#{uuid}', '#{owner_uuid}',
'#{name}', #{if group_class then "'"+group_class+"'" else 'NULL' end},
- statement_timestamp(), statement_timestamp())
+ statement_timestamp(), statement_timestamp(), statement_timestamp())
}
uuid
end
diff --git a/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb b/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb
deleted file mode 100644
index 01ed4302da..0000000000
--- a/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class ApiClientAuthorizationsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/api_clients_helper_test.rb b/services/api/test/unit/helpers/api_clients_helper_test.rb
deleted file mode 100644
index 4901fb45df..0000000000
--- a/services/api/test/unit/helpers/api_clients_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class ApiClientsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/authorized_keys_helper_test.rb b/services/api/test/unit/helpers/authorized_keys_helper_test.rb
deleted file mode 100644
index 010a0fe453..0000000000
--- a/services/api/test/unit/helpers/authorized_keys_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class AuthorizedKeysHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/collections_helper_test.rb b/services/api/test/unit/helpers/collections_helper_test.rb
deleted file mode 100644
index dd01ca7b82..0000000000
--- a/services/api/test/unit/helpers/collections_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class CollectionsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/commit_ancestors_helper_test.rb b/services/api/test/unit/helpers/commit_ancestors_helper_test.rb
deleted file mode 100644
index 423dbf6769..0000000000
--- a/services/api/test/unit/helpers/commit_ancestors_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class CommitAncestorsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/commits_helper_test.rb b/services/api/test/unit/helpers/commits_helper_test.rb
deleted file mode 100644
index fd960a86f3..0000000000
--- a/services/api/test/unit/helpers/commits_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class CommitsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/groups_helper_test.rb b/services/api/test/unit/helpers/groups_helper_test.rb
deleted file mode 100644
index ce7a3fad2b..0000000000
--- a/services/api/test/unit/helpers/groups_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class GroupsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/humans_helper_test.rb b/services/api/test/unit/helpers/humans_helper_test.rb
deleted file mode 100644
index 22f9e819ce..0000000000
--- a/services/api/test/unit/helpers/humans_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class HumansHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/job_tasks_helper_test.rb b/services/api/test/unit/helpers/job_tasks_helper_test.rb
deleted file mode 100644
index af0302ccf3..0000000000
--- a/services/api/test/unit/helpers/job_tasks_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class JobTasksHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/jobs_helper_test.rb b/services/api/test/unit/helpers/jobs_helper_test.rb
deleted file mode 100644
index 9d64b7d832..0000000000
--- a/services/api/test/unit/helpers/jobs_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class JobsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/keep_disks_helper_test.rb b/services/api/test/unit/helpers/keep_disks_helper_test.rb
deleted file mode 100644
index 9dcc619df5..0000000000
--- a/services/api/test/unit/helpers/keep_disks_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class KeepDisksHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/links_helper_test.rb b/services/api/test/unit/helpers/links_helper_test.rb
deleted file mode 100644
index 918f145ff6..0000000000
--- a/services/api/test/unit/helpers/links_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class LinksHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/logs_helper_test.rb b/services/api/test/unit/helpers/logs_helper_test.rb
deleted file mode 100644
index 616f6e664b..0000000000
--- a/services/api/test/unit/helpers/logs_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class LogsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/nodes_helper_test.rb b/services/api/test/unit/helpers/nodes_helper_test.rb
deleted file mode 100644
index 8a92eb990d..0000000000
--- a/services/api/test/unit/helpers/nodes_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class NodesHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/pipeline_instances_helper_test.rb b/services/api/test/unit/helpers/pipeline_instances_helper_test.rb
deleted file mode 100644
index 9d3b5c48f1..0000000000
--- a/services/api/test/unit/helpers/pipeline_instances_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class PipelineInstancesHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/pipeline_templates_helper_test.rb b/services/api/test/unit/helpers/pipeline_templates_helper_test.rb
deleted file mode 100644
index 9a9a4179d6..0000000000
--- a/services/api/test/unit/helpers/pipeline_templates_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class PipelinesHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/repositories_helper_test.rb b/services/api/test/unit/helpers/repositories_helper_test.rb
deleted file mode 100644
index 33cb590513..0000000000
--- a/services/api/test/unit/helpers/repositories_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class RepositoriesHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/specimens_helper_test.rb b/services/api/test/unit/helpers/specimens_helper_test.rb
deleted file mode 100644
index 3709198065..0000000000
--- a/services/api/test/unit/helpers/specimens_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class SpecimensHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/traits_helper_test.rb b/services/api/test/unit/helpers/traits_helper_test.rb
deleted file mode 100644
index 03b6a97f41..0000000000
--- a/services/api/test/unit/helpers/traits_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class TraitsHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/helpers/virtual_machines_helper_test.rb b/services/api/test/unit/helpers/virtual_machines_helper_test.rb
deleted file mode 100644
index 99fc258cb4..0000000000
--- a/services/api/test/unit/helpers/virtual_machines_helper_test.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class VirtualMachinesHelperTest < ActionView::TestCase
-end
diff --git a/services/api/test/unit/human_test.rb b/services/api/test/unit/human_test.rb
deleted file mode 100644
index 83cc40e686..0000000000
--- a/services/api/test/unit/human_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class HumanTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
-end
diff --git a/services/api/test/unit/job_task_test.rb b/services/api/test/unit/job_task_test.rb
deleted file mode 100644
index 36a0e723f2..0000000000
--- a/services/api/test/unit/job_task_test.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class JobTaskTest < ActiveSupport::TestCase
-
-end
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
deleted file mode 100644
index 815079f8af..0000000000
--- a/services/api/test/unit/job_test.rb
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'helpers/git_test_helper'
-require 'helpers/docker_migration_helper'
-
-class JobTest < ActiveSupport::TestCase
- include DockerMigrationHelper
- include GitTestHelper
-
- BAD_COLLECTION = "#{'f' * 32}+0"
-
- setup do
- set_user_from_auth :active
- end
-
- def job_attrs merge_me={}
- # Default (valid) set of attributes, with given overrides
- {
- script: "hash",
- script_version: "main",
- repository: "active/foo",
- }.merge(merge_me)
- end
-
- test "Job without Docker image doesn't get locator" do
- job = Job.new job_attrs
- assert job.valid?, job.errors.full_messages.to_s
- assert_nil job.docker_image_locator
- end
-
- { 'name' => [:links, :docker_image_collection_tag, :name],
- 'hash' => [:links, :docker_image_collection_hash, :name],
- 'locator' => [:collections, :docker_image, :portable_data_hash],
- }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
- test "Job initialized with Docker image #{spec_type} gets locator" do
- image_spec = send(fixture_type, fixture_name).send(fixture_attr)
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_spec})
- assert job.valid?, job.errors.full_messages.to_s
- assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
- end
-
- test "Job modified with Docker image #{spec_type} gets locator" do
- job = Job.new job_attrs
- assert job.valid?, job.errors.full_messages.to_s
- assert_nil job.docker_image_locator
- image_spec = send(fixture_type, fixture_name).send(fixture_attr)
- job.runtime_constraints['docker_image'] = image_spec
- assert job.valid?, job.errors.full_messages.to_s
- assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
- end
- end
-
- test "removing a Docker runtime constraint removes the locator" do
- image_locator = collections(:docker_image).portable_data_hash
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_locator})
- assert job.valid?, job.errors.full_messages.to_s
- assert_equal(image_locator, job.docker_image_locator)
- job.runtime_constraints = {}
- assert job.valid?, job.errors.full_messages.to_s + "after clearing runtime constraints"
- assert_nil job.docker_image_locator
- end
-
- test "locate a Docker image with a repository + tag" do
- image_repo, image_tag =
- links(:docker_image_collection_tag2).name.split(':', 2)
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_repo,
- 'docker_image_tag' => image_tag})
- assert job.valid?, job.errors.full_messages.to_s
- assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
- end
-
- test "can't locate a Docker image with a nonexistent tag" do
- image_repo = links(:docker_image_collection_tag).name
- image_tag = '__nonexistent tag__'
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_repo,
- 'docker_image_tag' => image_tag})
- assert(job.invalid?, "Job with bad Docker tag valid")
- end
-
- [
- false,
- true
- ].each do |use_config|
- test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do
- default_docker_image = collections(:docker_image)[:portable_data_hash]
- Rails.configuration.Containers.JobsAPI.DefaultDockerImage = default_docker_image if use_config
-
- job = Job.new job_attrs
- assert job.valid?, job.errors.full_messages.to_s
-
- if use_config
- refute_nil job.docker_image_locator
- assert_equal default_docker_image, job.docker_image_locator
- else
- assert_nil job.docker_image_locator
- end
- end
- end
-
- test "locate a Docker image with a partial hash" do
- image_hash = links(:docker_image_collection_hash).name[0..24]
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_hash})
- assert job.valid?, job.errors.full_messages.to_s + " with partial hash #{image_hash}"
- assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
- end
-
- { 'name' => 'arvados_test_nonexistent',
- 'hash' => 'f' * 64,
- 'locator' => BAD_COLLECTION,
- }.each_pair do |spec_type, image_spec|
- test "Job validation fails with nonexistent Docker image #{spec_type}" do
- Rails.configuration.RemoteClusters = ConfigLoader.to_OrderedOptions({})
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => image_spec})
- assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid")
- end
- end
-
- test "Job validation fails with non-Docker Collection constraint" do
- job = Job.new job_attrs(runtime_constraints:
- {'docker_image' => collections(:foo_file).uuid})
- assert(job.invalid?, "non-Docker Collection constraint was valid")
- end
-
- test "can create Job with Docker image Collection without Docker links" do
- image_uuid = collections(:unlinked_docker_image).portable_data_hash
- job = Job.new job_attrs(runtime_constraints: {"docker_image" => image_uuid})
- assert(job.valid?, "Job created with unlinked Docker image was invalid")
- assert_equal(image_uuid, job.docker_image_locator)
- end
-
- def check_attrs_unset(job, attrs)
- assert_empty(attrs.each_key.map { |key| job.send(key) }.compact,
- "job has values for #{attrs.keys}")
- end
-
- def check_creation_prohibited(attrs)
- begin
- job = Job.new(job_attrs(attrs))
- rescue ActiveModel::MassAssignmentSecurity::Error
- # Test passes - expected attribute protection
- else
- check_attrs_unset(job, attrs)
- end
- end
-
- def check_modification_prohibited(attrs)
- job = Job.new(job_attrs)
- attrs.each_pair do |key, value|
- assert_raises(NoMethodError) { job.send("{key}=".to_sym, value) }
- end
- check_attrs_unset(job, attrs)
- end
-
- test "can't create Job with Docker image locator" do
- check_creation_prohibited(docker_image_locator: BAD_COLLECTION)
- end
-
- test "can't assign Docker image locator to Job" do
- check_modification_prohibited(docker_image_locator: BAD_COLLECTION)
- end
-
- SDK_MASTER = "ca68b24e51992e790f29df5cc4bc54ce1da4a1c2"
- SDK_TAGGED = "00634b2b8a492d6f121e3cf1d6587b821136a9a7"
-
- def sdk_constraint(version)
- {runtime_constraints: {
- "arvados_sdk_version" => version,
- "docker_image" => links(:docker_image_collection_tag).name,
- }}
- end
-
- def check_job_sdk_version(expected)
- job = yield
- if expected.nil?
- refute(job.valid?, "job valid with bad Arvados SDK version")
- else
- assert(job.valid?, "job not valid with good Arvados SDK version")
- assert_equal(expected, job.arvados_sdk_version)
- end
- end
-
- test "can't create job with SDK version assigned directly" do
- check_creation_prohibited(arvados_sdk_version: SDK_MASTER)
- end
-
- test "can't modify job to assign SDK version directly" do
- check_modification_prohibited(arvados_sdk_version: SDK_MASTER)
- end
-
- test 'script_parameters_digest is independent of key order' do
- j1 = Job.new(job_attrs(script_parameters: {'a' => 'a', 'ddee' => {'d' => 'd', 'e' => 'e'}}))
- j2 = Job.new(job_attrs(script_parameters: {'ddee' => {'e' => 'e', 'd' => 'd'}, 'a' => 'a'}))
- assert j1.valid?
- assert j2.valid?
- assert_equal(j1.script_parameters_digest, j2.script_parameters_digest)
- end
-
- test 'job fixtures have correct script_parameters_digest' do
- Job.all.each do |j|
- d = j.script_parameters_digest
- assert_equal(j.update_script_parameters_digest, d,
- "wrong script_parameters_digest for #{j.uuid}")
- end
- end
-
- test 'deep_sort_hash on array of hashes' do
- a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}
- b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}
- assert_equal Job.deep_sort_hash(a).to_json, Job.deep_sort_hash(b).to_json
- end
-
- def try_find_reusable
- foobar = jobs(:foobar)
- example_attrs = {
- script_version: foobar.script_version,
- script: foobar.script,
- script_parameters: foobar.script_parameters,
- repository: foobar.repository,
- }
-
- # Two matching jobs exist with identical outputs. The older one
- # should be reused.
- j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
- assert j
- assert_equal foobar.uuid, j.uuid
-
- # Two matching jobs exist with different outputs. Neither should
- # be reused.
- Job.where(uuid: jobs(:job_with_latest_version).uuid).
- update_all(output: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1')
- assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
-
- # ...unless config says to reuse the earlier job in such cases.
- Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer = true
- j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
- assert_equal foobar.uuid, j.uuid
- end
-
- test 'enable legacy api configuration option = true' do
- Rails.configuration.Containers.JobsAPI.Enable = "true"
- check_enable_legacy_jobs_api
- assert_equal(Disable_update_jobs_api_method_list, Rails.configuration.API.DisabledAPIs)
- end
-
- test 'enable legacy api configuration option = false' do
- Rails.configuration.Containers.JobsAPI.Enable = "false"
- check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
- end
-
- test 'enable legacy api configuration option = auto, has jobs' do
- Rails.configuration.Containers.JobsAPI.Enable = "auto"
- assert Job.count > 0
- check_enable_legacy_jobs_api
- assert_equal(Disable_update_jobs_api_method_list, Rails.configuration.API.DisabledAPIs)
- end
-
- test 'enable legacy api configuration option = auto, no jobs' do
- Rails.configuration.Containers.JobsAPI.Enable = "auto"
- act_as_system_user do
- Job.destroy_all
- end
- assert_equal 0, Job.count
- assert_equal({}, Rails.configuration.API.DisabledAPIs)
- check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
- end
-end
diff --git a/services/api/test/unit/link_test.rb b/services/api/test/unit/link_test.rb
index b9806486ad..55f8009043 100644
--- a/services/api/test/unit/link_test.rb
+++ b/services/api/test/unit/link_test.rb
@@ -13,7 +13,7 @@ class LinkTest < ActiveSupport::TestCase
test "cannot delete an object referenced by unwritable links" do
ob = act_as_user users(:active) do
- Specimen.create
+ Collection.create
end
link = act_as_user users(:admin) do
Link.create(tail_uuid: users(:active).uuid,
@@ -131,4 +131,42 @@ class LinkTest < ActiveSupport::TestCase
Link.find_by_uuid(link1).destroy
assert_empty Link.where(uuid: link2)
end
+
+ ['zzzzz-dz642-runningcontainr', ''].each do |head_uuid|
+ test "published_port link is invalid because head_uuid #{head_uuid.inspect} is not a container request UUID" do
+ act_as_user users(:active) do
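+        # 'zzzzz-dz642-...' is a container UUID rather than a container
+        # request UUID, and '' is empty; both must fail validation.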
+ link = Link.create(head_uuid: head_uuid,
+ link_class: 'published_port',
+ name: 'service1',
+ properties: {"port" => 80})
+ assert_equal(false, link.valid?)
+ assert_equal("must be a container request UUID", link.errors.messages[:head_uuid].first)
+ end
+ end
+ end
+
+ test "Cannot create two published_port links with the same name" do
+ act_as_user users(:active) do
+ Link.create!(head_uuid: container_requests(:running).uuid,
+ link_class: 'published_port',
+ name: 'service1',
+ properties: {"port" => 80})
+
+      # Not ok: reuses the name of an existing published_port link
+ assert_raises(ActiveRecord::RecordNotUnique,
+ "should not be able to create two published_port links with the same name") do
+ Link.create!(head_uuid: container_requests(:running_older).uuid,
+ link_class: 'published_port',
+ name: 'service1',
+ properties: {"port" => 80})
+ end
+
+      # Ok: a different name is allowed
+ Link.create!(head_uuid: container_requests(:running_older).uuid,
+ link_class: 'published_port',
+ name: 'service2',
+ properties: {"port" => 80})
+
+ end
+ end
end
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
index d3a1b618d5..ec2033b60e 100644
--- a/services/api/test/unit/log_test.rb
+++ b/services/api/test/unit/log_test.rb
@@ -32,6 +32,10 @@ class LogTest < ActiveSupport::TestCase
Log.where(object_uuid: thing.uuid).order("created_at ASC").all
end
+ def clear_logs_about(thing)
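+    # Remove any pre-existing logs for this object so assertions count
+    # only the logs created by the test itself.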
+ Log.where(object_uuid: thing.uuid).delete_all
+ end
+
def assert_logged(thing, event_type)
logs = get_logs_about(thing)
assert_equal(@log_count, logs.size, "log count mismatch")
@@ -42,8 +46,6 @@ class LogTest < ActiveSupport::TestCase
"log is not owned by current user")
assert_equal(current_user.andand.uuid, log.modified_by_user_uuid,
"log is not 'modified by' current user")
- assert_equal(current_api_client.andand.uuid, log.modified_by_client_uuid,
- "log is not 'modified by' current client")
assert_equal(thing.uuid, log.object_uuid, "log UUID mismatch")
assert_equal(event_type.to_s, log.event_type, "log event type mismatch")
time_method, old_props_test, new_props_test = EVENT_TEST_METHODS[event_type]
@@ -106,10 +108,11 @@ class LogTest < ActiveSupport::TestCase
test "old_attributes preserves values deep inside a hash" do
set_user_from_auth :active
- it = specimens(:owned_by_active_user)
+ it = collections(:collection_owned_by_active)
+ clear_logs_about it
it.properties = {'foo' => {'bar' => ['baz', 'qux', {'quux' => 'bleat'}]}}
it.save!
- @log_count += 1
+ assert_logged it, :update
it.properties['foo']['bar'][2]['quux'] = 'blert'
it.save!
assert_logged it, :update do |props|
@@ -132,21 +135,6 @@ class LogTest < ActiveSupport::TestCase
end
end
- test "saving an unchanged client still makes a log" do
- set_user_from_auth :admin_trustedclient
- client = api_clients(:untrusted)
- client.is_trusted = client.is_trusted
- client.save!
- assert_logged(client, :update) do |props|
- ['old', 'new'].each do |age|
- assert_equal(client.etag, props["#{age}_etag"],
- "unchanged client #{age} etag mismatch")
- assert_equal(client.attributes, props["#{age}_attributes"],
- "unchanged client #{age} attributes mismatch")
- end
- end
- end
-
test "updating a group twice makes two logs" do
set_user_from_auth :admin_trustedclient
group = groups(:empty_lonely_group)
@@ -231,6 +219,7 @@ class LogTest < ActiveSupport::TestCase
test "don't log changes only to Collection.preserve_version" do
set_user_from_auth :admin_trustedclient
col = collections(:collection_owned_by_active)
+ clear_logs_about col
start_log_count = get_logs_about(col).size
assert_equal false, col.preserve_version
col.preserve_version = true
@@ -246,7 +235,6 @@ class LogTest < ActiveSupport::TestCase
set_user_from_auth :admin_trustedclient
auth = ApiClientAuthorization.new
auth.user = users(:spectator)
- auth.api_client = api_clients(:untrusted)
auth.save!
assert_logged_with_clean_properties(auth, :create, 'api_token')
auth.expires_at = Time.now
@@ -258,27 +246,29 @@ class LogTest < ActiveSupport::TestCase
test "use ownership and permission links to determine which logs a user can see" do
known_logs = [:noop,
- :admin_changes_repository2,
- :admin_changes_specimen,
+ :admin_changes_collection_owned_by_active,
+ :admin_changes_collection_owned_by_foo,
:system_adds_foo_file,
:system_adds_baz,
:log_owned_by_active,
- :crunchstat_for_running_job]
+ :crunchstat_for_running_container]
c = Log.readable_by(users(:admin)).order("id asc").each.to_a
assert_log_result c, known_logs, known_logs
c = Log.readable_by(users(:active)).order("id asc").each.to_a
- assert_log_result c, known_logs, [:admin_changes_repository2, # owned by active
- :system_adds_foo_file, # readable via link
- :system_adds_baz, # readable via 'all users' group
- :log_owned_by_active, # log owned by active
- :crunchstat_for_running_job] # log & job owned by active
+ assert_log_result c, known_logs, [:admin_changes_collection_owned_by_active,
+ :system_adds_foo_file, # readable via link
+ :system_adds_baz, # readable via 'all users' group
+ :log_owned_by_active, # log owned by active
+                                      :crunchstat_for_running_container] # log & container owned by active
c = Log.readable_by(users(:spectator)).order("id asc").each.to_a
- assert_log_result c, known_logs, [:noop, # object_uuid is spectator
- :admin_changes_specimen, # object_uuid is a specimen owned by spectator
- :system_adds_baz] # readable via 'all users' group
+ assert_log_result c, known_logs, [:noop, # object_uuid is spectator
+ :system_adds_baz] # readable via 'all users' group
+
+ c = Log.readable_by(users(:user_foo_in_sharing_group)).order("id asc").each.to_a
+ assert_log_result c, known_logs, [:admin_changes_collection_owned_by_foo] # collection's parent is readable via role group
end
def assert_log_result result, known_logs, expected_logs
diff --git a/services/api/test/unit/owner_test.rb b/services/api/test/unit/owner_test.rb
index 1c1bd93b81..a96170c716 100644
--- a/services/api/test/unit/owner_test.rb
+++ b/services/api/test/unit/owner_test.rb
@@ -11,7 +11,7 @@ require 'test_helper'
# "i" is an item.
class OwnerTest < ActiveSupport::TestCase
- fixtures :users, :groups, :specimens
+ fixtures :users, :groups
setup do
set_user_from_auth :admin_trustedclient
@@ -26,22 +26,22 @@ class OwnerTest < ActiveSupport::TestCase
else
o = o_class.create!
end
- i = Specimen.create(owner_uuid: o.uuid)
+ i = Collection.create(owner_uuid: o.uuid)
assert i.valid?, "new item should pass validation"
assert i.uuid, "new item should have an ID"
- assert Specimen.where(uuid: i.uuid).any?, "new item should really be in DB"
+ assert Collection.where(uuid: i.uuid).any?, "new item should really be in DB"
end
test "create object with non-existent #{o_class} owner" do
assert_raises(ActiveRecord::RecordInvalid,
"create should fail with random owner_uuid") do
- Specimen.create!(owner_uuid: o_class.generate_uuid)
+ Collection.create!(owner_uuid: o_class.generate_uuid)
end
- i = Specimen.create(owner_uuid: o_class.generate_uuid)
+ i = Collection.create(owner_uuid: o_class.generate_uuid)
assert !i.valid?, "object with random owner_uuid should not be valid?"
- i = Specimen.new(owner_uuid: o_class.generate_uuid)
+ i = Collection.new(owner_uuid: o_class.generate_uuid)
assert !i.valid?, "new item should not pass validation"
assert !i.uuid, "new item should not have an ID"
end
@@ -53,7 +53,7 @@ class OwnerTest < ActiveSupport::TestCase
else
o_class.create!
end
- i = Specimen.create!(owner_uuid: o.uuid)
+ i = Collection.create!(owner_uuid: o.uuid)
new_o = if new_o_class == Group
new_o_class.create! group_class: "project"
@@ -61,7 +61,7 @@ class OwnerTest < ActiveSupport::TestCase
new_o_class.create!
end
- assert(Specimen.where(uuid: i.uuid).any?,
+ assert(Collection.where(uuid: i.uuid).any?,
"new item should really be in DB")
assert(i.update(owner_uuid: new_o.uuid),
"should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
@@ -102,7 +102,7 @@ class OwnerTest < ActiveSupport::TestCase
['users(:active)', 'groups(:aproject)'].each do |ofixt|
test "delete #{ofixt} that owns other objects" do
o = eval ofixt
- assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+ assert_equal(true, Collection.where(owner_uuid: o.uuid).any?,
"need something to be owned by #{o.uuid} for this test")
skip_check_permissions_against_full_refresh do
@@ -115,7 +115,7 @@ class OwnerTest < ActiveSupport::TestCase
test "change uuid of #{ofixt} that owns other objects" do
o = eval ofixt
- assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+ assert_equal(true, Collection.where(owner_uuid: o.uuid).any?,
"need something to be owned by #{o.uuid} for this test")
new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
assert(!o.update(uuid: new_uuid),
diff --git a/services/api/test/unit/permission_test.rb b/services/api/test/unit/permission_test.rb
index 14c810d81a..0196c5ccbe 100644
--- a/services/api/test/unit/permission_test.rb
+++ b/services/api/test/unit/permission_test.rb
@@ -222,7 +222,7 @@ class PermissionTest < ActiveSupport::TestCase
Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
manager = create :active_user, first_name: "Manage", last_name: "Er"
minion = create :active_user, first_name: "Min", last_name: "Ion"
- minions_specimen = act_as_user minion do
+ minions_collection = act_as_user minion do
g = Group.create! name: "minon project", group_class: "project"
Collection.create! owner_uuid: g.uuid
end
@@ -289,11 +289,11 @@ class PermissionTest < ActiveSupport::TestCase
end
assert_empty(Collection
.readable_by(manager)
- .where(uuid: minions_specimen.uuid),
+ .where(uuid: minions_collection.uuid),
"manager saw the minion's private stuff")
assert_raises(ArvadosModel::PermissionDeniedError,
"manager could update minion's private stuff") do
- minions_specimen.update(properties: {'x' => 'y'})
+ minions_collection.update(properties: {'x' => 'y'})
end
end
@@ -307,11 +307,11 @@ class PermissionTest < ActiveSupport::TestCase
# Now, manager can read and write Minion's stuff.
assert_not_empty(Collection
.readable_by(manager)
- .where(uuid: minions_specimen.uuid),
- "manager could not find minion's specimen by uuid")
+ .where(uuid: minions_collection.uuid),
+ "manager could not find minion's collection by uuid")
assert_equal(true,
- minions_specimen.update(properties: {'x' => 'y'}),
- "manager could not update minion's specimen object")
+ minions_collection.update(properties: {'x' => 'y'}),
+ "manager could not update minion's collection object")
end
end
@@ -341,12 +341,12 @@ class PermissionTest < ActiveSupport::TestCase
assert_not_empty(User.readable_by(a).where(uuid: b.uuid),
"#{a.first_name} should be able to see 'b' in the user list")
- a_specimen = act_as_user a do
+ a_collection = act_as_user a do
Collection.create!
end
- assert_not_empty(Collection.readable_by(a).where(uuid: a_specimen.uuid),
+ assert_not_empty(Collection.readable_by(a).where(uuid: a_collection.uuid),
"A cannot read own Collection, following test probably useless.")
- assert_empty(Collection.readable_by(b).where(uuid: a_specimen.uuid),
+ assert_empty(Collection.readable_by(b).where(uuid: a_collection.uuid),
"B can read A's Collection")
[a,b].each do |u|
assert_empty(User.readable_by(u).where(uuid: other.uuid),
@@ -407,7 +407,7 @@ class PermissionTest < ActiveSupport::TestCase
end
test "container logs created by dispatch are visible to container requestor" do
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
Log.create!(object_uuid: containers(:running).uuid,
event_type: "test")
@@ -417,7 +417,7 @@ class PermissionTest < ActiveSupport::TestCase
end
test "container logs created by dispatch are public if container request is public" do
- set_user_from_auth :dispatch1
+ set_user_from_auth :system_user
Log.create!(object_uuid: containers(:running_older).uuid,
event_type: "test")
diff --git a/services/api/test/unit/pipeline_instance_test.rb b/services/api/test/unit/pipeline_instance_test.rb
deleted file mode 100644
index 614c169e85..0000000000
--- a/services/api/test/unit/pipeline_instance_test.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class PipelineInstanceTest < ActiveSupport::TestCase
-
- [:has_component_with_no_script_parameters,
- :has_component_with_empty_script_parameters].each do |pi_name|
- test "update pipeline that #{pi_name}" do
- pi = pipeline_instances pi_name
-
- Thread.current[:user] = users(:active)
- assert_equal PipelineInstance::Ready, pi.state
- end
- end
-end
diff --git a/services/api/test/unit/pipeline_template_test.rb b/services/api/test/unit/pipeline_template_test.rb
deleted file mode 100644
index 8ead613b80..0000000000
--- a/services/api/test/unit/pipeline_template_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class PipelineTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
-end
diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb
deleted file mode 100644
index 674a34ffd8..0000000000
--- a/services/api/test/unit/repository_test.rb
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'helpers/git_test_helper'
-
-class RepositoryTest < ActiveSupport::TestCase
- include GitTestHelper
-
- def new_repo(owner_key, attrs={})
- set_user_from_auth owner_key
- owner = users(owner_key)
- Repository.new({owner_uuid: owner.uuid}.merge(attrs))
- end
-
- def changed_repo(repo_key, changes)
- repo = repositories(repo_key)
- changes.each_pair { |attr, value| repo.send("#{attr}=".to_sym, value) }
- repo
- end
-
- def default_git_url(repo_name, user_name=nil)
- if user_name
- "git@git.%s.arvadosapi.com:%s/%s.git" %
- [Rails.configuration.ClusterID, user_name, repo_name]
- else
- "git@git.%s.arvadosapi.com:%s.git" %
- [Rails.configuration.ClusterID, repo_name]
- end
- end
-
- def assert_server_path(path_tail, repo_sym)
- assert_equal(File.join(Rails.configuration.Git.Repositories, path_tail),
- repositories(repo_sym).server_path)
- end
-
- ### name validation
-
- {active: "active/", admin: "admin/", system_user: ""}.
- each_pair do |user_sym, name_prefix|
- test "valid names for #{user_sym} repo" do
- %w(a aa a0 aA Aa AA A0).each do |name|
- repo = new_repo(user_sym, name: name_prefix + name)
- assert(repo.valid?)
- end
- end
-
- test "name is required for #{user_sym} repo" do
- refute(new_repo(user_sym).valid?)
- end
-
- test "repo name beginning with numeral is invalid for #{user_sym}" do
- repo = new_repo(user_sym, name: "#{name_prefix}0a")
- refute(repo.valid?)
- end
-
- test "name containing bad char is invalid for #{user_sym}" do
- "\\.-_/!@#$%^&*()[]{}".each_char do |bad_char|
- repo = new_repo(user_sym, name: "#{name_prefix}bad#{bad_char}reponame")
- refute(repo.valid?)
- end
- end
- end
-
- test "admin can create valid repo for other user with correct name prefix" do
- owner = users(:active)
- repo = new_repo(:admin, name: "#{owner.username}/validnametest",
- owner_uuid: owner.uuid)
- assert(repo.valid?)
- end
-
- test "admin can create valid system repo without name prefix" do
- repo = new_repo(:admin, name: "validnametest",
- owner_uuid: users(:system_user).uuid)
- assert(repo.valid?)
- end
-
- test "repo name prefix must match owner_uuid username" do
- repo = new_repo(:admin, name: "admin/badusernametest",
- owner_uuid: users(:active).uuid)
- refute(repo.valid?)
- end
-
- test "repo name prefix must be empty for system repo" do
- repo = new_repo(:admin, name: "root/badprefixtest",
- owner_uuid: users(:system_user).uuid)
- refute(repo.valid?)
- end
-
- ### owner validation
-
- test "name must be unique per user" do
- repo = new_repo(:active, name: repositories(:foo).name)
- refute(repo.valid?)
- end
-
- test "name can be duplicated across users" do
- repo = new_repo(:active, name: "active/#{repositories(:arvados).name}")
- assert(repo.valid?)
- end
-
- test "repository cannot be owned by a group" do
- set_user_from_auth :active
- repo = Repository.new(owner_uuid: groups(:all_users).uuid,
- name: "ownedbygroup")
- refute(repo.valid?)
- refute_empty(repo.errors[:owner_uuid] || [])
- end
-
- ### URL generation
-
- test "fetch_url" do
- repo = new_repo(:active, name: "active/fetchtest")
- repo.save
- assert_equal(default_git_url("fetchtest", "active"), repo.fetch_url)
- end
-
- test "fetch_url owned by system user" do
- set_user_from_auth :admin
- repo = Repository.new(owner_uuid: users(:system_user).uuid,
- name: "fetchtest")
- repo.save
- assert_equal(default_git_url("fetchtest"), repo.fetch_url)
- end
-
- test "push_url" do
- repo = new_repo(:active, name: "active/pushtest")
- repo.save
- assert_equal(default_git_url("pushtest", "active"), repo.push_url)
- end
-
- test "push_url owned by system user" do
- set_user_from_auth :admin
- repo = Repository.new(owner_uuid: users(:system_user).uuid,
- name: "pushtest")
- repo.save
- assert_equal(default_git_url("pushtest"), repo.push_url)
- end
-
- ### Path generation
-
- test "disk path stored by UUID" do
- assert_server_path("zzzzz-s0uqq-382brsig8rp3666/.git", :foo)
- end
-
- test "disk path stored by name" do
- assert_server_path("arvados/.git", :arvados)
- end
-
- test "disk path for repository not on disk" do
- assert_nil(Repository.new.server_path)
- end
-
- ### Repository creation
-
- test "non-admin can create a repository for themselves" do
- repo = new_repo(:active, name: "active/newtestrepo")
- assert(repo.save)
- end
-
- test "non-admin can't create a repository for another visible user" do
- repo = new_repo(:active, name: "repoforanon",
- owner_uuid: users(:anonymous).uuid)
- assert_not_allowed { repo.save }
- end
-
- test "admin can create a repository for themselves" do
- repo = new_repo(:admin, name: "admin/newtestrepo")
- assert(repo.save)
- end
-
- test "admin can create a repository for others" do
- repo = new_repo(:admin, name: "active/repoforactive",
- owner_uuid: users(:active).uuid)
- assert(repo.save)
- end
-
- test "admin can create a system repository" do
- repo = new_repo(:admin, name: "repoforsystem",
- owner_uuid: users(:system_user).uuid)
- assert(repo.save)
- end
-
- ### Repository destruction
-
- test "non-admin can destroy their own repository" do
- set_user_from_auth :active
- assert(repositories(:foo).destroy)
- end
-
- test "non-admin can't destroy others' repository" do
- set_user_from_auth :active
- assert_not_allowed { repositories(:repository3).destroy }
- end
-
- test "non-admin can't destroy system repository" do
- set_user_from_auth :active
- assert_not_allowed { repositories(:arvados).destroy }
- end
-
- test "admin can destroy their own repository" do
- set_user_from_auth :admin
- assert(repositories(:repository3).destroy)
- end
-
- test "admin can destroy others' repository" do
- set_user_from_auth :admin
- assert(repositories(:foo).destroy)
- end
-
- test "admin can destroy system repository" do
- set_user_from_auth :admin
- assert(repositories(:arvados).destroy)
- end
-
- ### Changing ownership
-
- test "non-admin can't make their repository a system repository" do
- set_user_from_auth :active
- repo = changed_repo(:foo, owner_uuid: users(:system_user).uuid)
- assert_not_allowed { repo.save }
- end
-
- test "admin can give their repository to someone else" do
- set_user_from_auth :admin
- repo = changed_repo(:repository3, owner_uuid: users(:active).uuid,
- name: "active/foo3")
- assert(repo.save)
- end
-
- test "admin can make their repository a system repository" do
- set_user_from_auth :admin
- repo = changed_repo(:repository3, owner_uuid: users(:system_user).uuid,
- name: "foo3")
- assert(repo.save)
- end
-
- test 'write permission allows changing modified_at' do
- act_as_user users(:active) do
- r = repositories(:foo)
- modtime_was = r.modified_at
- r.modified_at = Time.now
- assert r.save
- assert_operator modtime_was, :<, r.modified_at
- end
- end
-
- test 'write permission necessary for changing modified_at' do
- act_as_user users(:spectator) do
- r = repositories(:foo)
- modtime_was = r.modified_at
- r.modified_at = Time.now
- assert_raises ArvadosModel::PermissionDeniedError do
- r.save!
- end
- r.reload
- assert_equal modtime_was, r.modified_at
- end
- end
-
- ### Renaming
-
- test "non-admin can rename own repo" do
- act_as_user users(:active) do
- assert repositories(:foo).update(name: 'active/foo12345')
- end
- end
-
- test "top level repo can be touched by non-admin with can_manage" do
- add_permission_link users(:active), repositories(:arvados), 'can_manage'
- act_as_user users(:active) do
- assert changed_repo(:arvados, modified_at: Time.now).save
- end
- end
-
- test "top level repo cannot be renamed by non-admin with can_manage" do
- add_permission_link users(:active), repositories(:arvados), 'can_manage'
- act_as_user users(:active) do
- assert_not_allowed { changed_repo(:arvados, name: 'xarvados').save }
- end
- end
-end
diff --git a/services/api/test/unit/specimen_test.rb b/services/api/test/unit/specimen_test.rb
deleted file mode 100644
index 5b2eda2f0f..0000000000
--- a/services/api/test/unit/specimen_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class SpecimenTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
-end
diff --git a/services/api/test/unit/trait_test.rb b/services/api/test/unit/trait_test.rb
deleted file mode 100644
index fe63f161f8..0000000000
--- a/services/api/test/unit/trait_test.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-
-class TraitTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
-end
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index 810e5b45ec..71b5769be8 100644
--- a/services/api/test/unit/user_test.rb
+++ b/services/api/test/unit/user_test.rb
@@ -118,18 +118,7 @@ class UserTest < ActiveSupport::TestCase
check_new_username_setting("_", nil)
end
- test "updating username updates repository names" do
- set_user_from_auth :admin
- user = users(:active)
- user.username = "newtestname"
- assert(user.save, "username update failed")
- {foo: "newtestname/foo", repository2: "newtestname/foo2"}.
- each_pair do |repo_sym, expect_name|
- assert_equal(expect_name, repositories(repo_sym).name)
- end
- end
-
- test "admin can clear username when user owns no repositories" do
+ test "admin can clear username" do
set_user_from_auth :admin
user = users(:spectator)
user.username = nil
@@ -137,22 +126,6 @@ class UserTest < ActiveSupport::TestCase
assert_nil(user.username)
end
- test "admin can't clear username when user owns repositories" do
- set_user_from_auth :admin
- user = users(:active)
- user.username = nil
- assert_not_allowed { user.save }
- refute_empty(user.errors[:username])
- end
-
- test "failed username update doesn't change repository names" do
- set_user_from_auth :admin
- user = users(:active)
- user.username = users(:fuse).username
- assert_not_allowed { user.save }
- assert_equal("active/foo", repositories(:foo).name)
- end
-
[[false, 'foo@example.com', true, false],
[false, 'bar@example.com', false, true],
[true, 'foo@example.com', true, false],
@@ -359,37 +332,33 @@ class UserTest < ActiveSupport::TestCase
[
# Easy inactive user tests.
- [false, empty_notify_list, empty_notify_list, "inactive-none@example.com", false, false, "inactivenone"],
- [false, empty_notify_list, empty_notify_list, "inactive-vm@example.com", true, false, "inactivevm"],
- [false, empty_notify_list, empty_notify_list, "inactive-repo@example.com", false, true, "inactiverepo"],
- [false, empty_notify_list, empty_notify_list, "inactive-both@example.com", true, true, "inactiveboth"],
+ [false, empty_notify_list, empty_notify_list, "inactive-none@example.com", false, "inactivenone"],
+ [false, empty_notify_list, empty_notify_list, "inactive-vm@example.com", true, "inactivevm"],
# Easy active user tests.
- [true, active_notify_list, inactive_notify_list, "active-none@example.com", false, false, "activenone"],
- [true, active_notify_list, inactive_notify_list, "active-vm@example.com", true, false, "activevm"],
- [true, active_notify_list, inactive_notify_list, "active-repo@example.com", false, true, "activerepo"],
- [true, active_notify_list, inactive_notify_list, "active-both@example.com", true, true, "activeboth"],
+ [true, active_notify_list, inactive_notify_list, "active-none@example.com", false, "activenone"],
+ [true, active_notify_list, inactive_notify_list, "active-vm@example.com", true, "activevm"],
# Test users with malformed e-mail addresses.
- [false, empty_notify_list, empty_notify_list, nil, true, true, nil],
- [false, empty_notify_list, empty_notify_list, "arvados", true, true, nil],
- [false, empty_notify_list, empty_notify_list, "@example.com", true, true, nil],
- [true, active_notify_list, inactive_notify_list, "*!*@example.com", true, false, nil],
- [true, active_notify_list, inactive_notify_list, "*!*@example.com", false, false, nil],
+ [false, empty_notify_list, empty_notify_list, nil, true, nil],
+ [false, empty_notify_list, empty_notify_list, "arvados", true, nil],
+ [false, empty_notify_list, empty_notify_list, "@example.com", true, nil],
+ [true, active_notify_list, inactive_notify_list, "*!*@example.com", true, nil],
+ [true, active_notify_list, inactive_notify_list, "*!*@example.com", false, nil],
# Test users with various username transformations.
- [false, empty_notify_list, empty_notify_list, "arvados@example.com", false, false, "arvados2"],
- [true, active_notify_list, inactive_notify_list, "arvados@example.com", false, false, "arvados2"],
- [true, active_notify_list, inactive_notify_list, "root@example.com", true, false, "root2"],
- [false, active_notify_list, empty_notify_list, "root@example.com", true, false, "root2"],
- [true, active_notify_list, inactive_notify_list, "roo_t@example.com", false, true, "root2"],
- [false, empty_notify_list, empty_notify_list, "^^incorrect_format@example.com", true, true, "incorrectformat"],
- [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
- [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
- [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
- [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
- ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
- test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm} repo=#{auto_setup_repo}" do
+ [false, empty_notify_list, empty_notify_list, "arvados@example.com", false, "arvados2"],
+ [true, active_notify_list, inactive_notify_list, "arvados@example.com", false, "arvados2"],
+ [true, active_notify_list, inactive_notify_list, "root@example.com", true, "root2"],
+ [false, active_notify_list, empty_notify_list, "root@example.com", true, "root2"],
+ [true, active_notify_list, inactive_notify_list, "roo_t@example.com", false, "root2"],
+ [false, empty_notify_list, empty_notify_list, "^^incorrect_format@example.com", true, "incorrectformat"],
+ [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, "ad9"],
+ [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, "ad9"],
+ [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", true, "ad9"],
+ [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", false, "ad9"],
+ ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, expect_username|
+ test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm}" do
set_user_from_auth :admin
Rails.configuration.Users.AutoSetupNewUsers = true
@@ -400,8 +369,6 @@ class UserTest < ActiveSupport::TestCase
Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = ""
end
- Rails.configuration.Users.AutoSetupNewUsersWithRepository = auto_setup_repo
-
create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username
end
end
@@ -460,8 +427,7 @@ class UserTest < ActiveSupport::TestCase
vm = VirtualMachine.create
- response = user.setup(repo_name: 'foo/testrepo',
- vm_uuid: vm.uuid)
+ response = user.setup(vm_uuid: vm.uuid)
resp_user = find_obj_in_resp response, 'User'
verify_user resp_user, email
@@ -476,9 +442,6 @@ class UserTest < ActiveSupport::TestCase
assert_nil group_perm2
end
- repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
- verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
-
vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
assert_equal("foo", vm_perm.properties["username"])
@@ -494,8 +457,7 @@ class UserTest < ActiveSupport::TestCase
vm = VirtualMachine.create
- response = user.setup(repo_name: 'foo/testrepo',
- vm_uuid: vm.uuid)
+ response = user.setup(vm_uuid: vm.uuid)
resp_user = find_obj_in_resp response, 'User'
verify_user resp_user, email
@@ -503,9 +465,6 @@ class UserTest < ActiveSupport::TestCase
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
- repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
- verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
-
vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
assert_equal("foo", vm_perm.properties["username"])
@@ -529,23 +488,10 @@ class UserTest < ActiveSupport::TestCase
group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
- # invoke setup again with repo_name
- response = user.setup(repo_name: 'foo/testrepo')
- resp_user = find_obj_in_resp response, 'User', nil
- verify_user resp_user, email
- assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
-
- group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
-
- repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
- verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
-
# invoke setup again with a vm_uuid
vm = VirtualMachine.create
- response = user.setup(repo_name: 'foo/testrepo',
- vm_uuid: vm.uuid)
+ response = user.setup(vm_uuid: vm.uuid)
resp_user = find_obj_in_resp response, 'User', nil
verify_user resp_user, email
@@ -554,9 +500,6 @@ class UserTest < ActiveSupport::TestCase
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
- repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
- verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
-
vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
assert_equal("foo", vm_perm.properties["username"])
@@ -614,8 +557,6 @@ class UserTest < ActiveSupport::TestCase
can_setup = (Rails.configuration.Users.AutoSetupNewUsers and
(not expect_username.nil?))
- expect_repo_name = "#{expect_username}/#{expect_username}"
- prior_repo = Repository.where(name: expect_repo_name).first
user = User.new
user.first_name = "first_name_for_newly_created_user"
@@ -629,14 +570,6 @@ class UserTest < ActiveSupport::TestCase
groups(:all_users).uuid, user.uuid,
"permission", "can_write")
- # Check for repository.
- if named_repo = (prior_repo or
- Repository.where(name: expect_repo_name).first)
- verify_link_exists((can_setup and prior_repo.nil? and
- Rails.configuration.Users.AutoSetupNewUsersWithRepository),
- named_repo.uuid, user.uuid, "permission", "can_manage")
- end
-
# Check for VM login.
if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.go b/services/crunch-dispatch-local/crunch-dispatch-local.go
index e455981891..604ae31fce 100644
--- a/services/crunch-dispatch-local/crunch-dispatch-local.go
+++ b/services/crunch-dispatch-local/crunch-dispatch-local.go
@@ -8,11 +8,15 @@ package main
import (
"context"
+ "crypto/hmac"
+ "crypto/sha256"
"flag"
"fmt"
"os"
"os/exec"
"os/signal"
+ "runtime"
+ "strings"
"sync"
"syscall"
"time"
@@ -22,6 +26,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/dispatch"
+ "github.com/pbnjay/memory"
"github.com/sirupsen/logrus"
)
@@ -52,7 +57,7 @@ func main() {
flags.StringVar(&crunchRunCommand,
"crunch-run-command",
- "/usr/bin/crunch-run",
+ "",
"Crunch command to run container")
getVersion := flags.Bool(
@@ -82,6 +87,10 @@ func main() {
os.Exit(1)
}
+ if crunchRunCommand == "" {
+ crunchRunCommand = cluster.Containers.CrunchRunCommand
+ }
+
logger := baseLogger.WithField("ClusterID", cluster.ClusterID)
logger.Printf("crunch-dispatch-local %s started", version)
@@ -115,10 +124,14 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
+ localRun := LocalRun{startFunc, make(chan ResourceRequest), make(chan ResourceAlloc), ctx, cluster}
+
+ go localRun.throttle(logger)
+
dispatcher := dispatch.Dispatcher{
Logger: logger,
Arv: arv,
- RunContainer: (&LocalRun{startFunc, make(chan bool, 8), ctx, cluster}).run,
+ RunContainer: localRun.run,
PollPeriod: time.Duration(*pollInterval) * time.Second,
}
@@ -151,13 +164,136 @@ func startFunc(container arvados.Container, cmd *exec.Cmd) error {
return cmd.Start()
}
+type ResourceAlloc struct {
+ uuid string
+ vcpus int
+ ram int64
+ gpuStack string
+ gpus []string
+}
+
+type ResourceRequest struct {
+ uuid string
+ vcpus int
+ ram int64
+ gpuStack string
+ gpus int
+ ready chan ResourceAlloc
+}
+
type LocalRun struct {
startCmd func(container arvados.Container, cmd *exec.Cmd) error
- concurrencyLimit chan bool
+ requestResources chan ResourceRequest
+ releaseResources chan ResourceAlloc
ctx context.Context
cluster *arvados.Cluster
}
+func (lr *LocalRun) throttle(logger logrus.FieldLogger) {
+ maxVcpus := runtime.NumCPU()
+ var maxRam int64 = int64(memory.TotalMemory())
+
+ logger.Infof("AMD_VISIBLE_DEVICES=%v", os.Getenv("AMD_VISIBLE_DEVICES"))
+ logger.Infof("CUDA_VISIBLE_DEVICES=%v", os.Getenv("CUDA_VISIBLE_DEVICES"))
+
+ availableCUDAGpus := strings.Split(os.Getenv("CUDA_VISIBLE_DEVICES"), ",")
+ availableROCmGpus := strings.Split(os.Getenv("AMD_VISIBLE_DEVICES"), ",")
+
+ gpuStack := ""
+ maxGpus := 0
+ availableGpus := []string{}
+
+ if maxGpus = len(availableCUDAGpus); maxGpus > 0 && availableCUDAGpus[0] != "" {
+ gpuStack = "cuda"
+ availableGpus = availableCUDAGpus
+ } else if maxGpus = len(availableROCmGpus); maxGpus > 0 && availableROCmGpus[0] != "" {
+ gpuStack = "rocm"
+ availableGpus = availableROCmGpus
+ }
+
+ availableVcpus := maxVcpus
+ availableRam := maxRam
+
+ pending := []ResourceRequest{}
+
+NextEvent:
+ for {
+ select {
+ case rr := <-lr.requestResources:
+ pending = append(pending, rr)
+
+ case rr := <-lr.releaseResources:
+ availableVcpus += rr.vcpus
+ availableRam += rr.ram
+ for _, gpu := range rr.gpus {
+ availableGpus = append(availableGpus, gpu)
+ }
+
+ logger.Infof("%v released allocation (cpus: %v ram: %v gpus: %v); now available (cpus: %v ram: %v gpus: %v)",
+ rr.uuid, rr.vcpus, rr.ram, rr.gpus,
+ availableVcpus, availableRam, availableGpus)
+
+ case <-lr.ctx.Done():
+ return
+ }
+
+ for len(pending) > 0 {
+ rr := pending[0]
+ if rr.vcpus < 1 || rr.vcpus > maxVcpus {
+ logger.Infof("%v requested vcpus %v but maxVcpus is %v", rr.uuid, rr.vcpus, maxVcpus)
+				// resource request can never be fulfilled:
+				// answer with a zero struct and drop it
+				rr.ready <- ResourceAlloc{}
+				pending = pending[1:]
+				continue
+ }
+ if rr.ram < 1 || rr.ram > maxRam {
+ logger.Infof("%v requested ram %v but maxRam is %v", rr.uuid, rr.ram, maxRam)
+				// resource request can never be fulfilled:
+				// answer with a zero struct and drop it
+				rr.ready <- ResourceAlloc{}
+				pending = pending[1:]
+				continue
+ }
+ if rr.gpus > maxGpus || (rr.gpus > 0 && rr.gpuStack != gpuStack) {
+ logger.Infof("%v requested %v gpus with stack %v but maxGpus is %v and gpuStack is %q", rr.uuid, rr.gpus, rr.gpuStack, maxGpus, gpuStack)
+				// resource request can never be fulfilled:
+				// answer with a zero struct and drop it
+				rr.ready <- ResourceAlloc{}
+				pending = pending[1:]
+				continue
+ }
+
+ if rr.vcpus > availableVcpus || rr.ram > availableRam || rr.gpus > len(availableGpus) {
+ logger.Infof("Insufficient resources to start %v, waiting for next event", rr.uuid)
+ // can't be scheduled yet, go up to
+ // the top and wait for the next event
+ continue NextEvent
+ }
+
+ alloc := ResourceAlloc{uuid: rr.uuid, vcpus: rr.vcpus, ram: rr.ram}
+
+ availableVcpus -= rr.vcpus
+ availableRam -= rr.ram
+ alloc.gpuStack = rr.gpuStack
+
+ for i := 0; i < rr.gpus; i++ {
+ alloc.gpus = append(alloc.gpus, availableGpus[len(availableGpus)-1])
+ availableGpus = availableGpus[0 : len(availableGpus)-1]
+ }
+ rr.ready <- alloc
+
+ logger.Infof("%v added allocation (cpus: %v ram: %v gpus: %v); now available (cpus: %v ram: %v gpus: %v)",
+ rr.uuid, rr.vcpus, rr.ram, rr.gpus,
+ availableVcpus, availableRam, availableGpus)
+
+ // shift array down
+ for i := 0; i < len(pending)-1; i++ {
+ pending[i] = pending[i+1]
+ }
+ pending = pending[0 : len(pending)-1]
+ }
+
+ }
+}
+
// Run a container.
//
// If the container is Locked, start a new crunch-run process and wait until
@@ -174,14 +310,42 @@ func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
if container.State == dispatch.Locked {
+ gpuStack := container.RuntimeConstraints.GPU.Stack
+ gpus := container.RuntimeConstraints.GPU.DeviceCount
+
+ resourceRequest := ResourceRequest{
+ uuid: container.UUID,
+ vcpus: container.RuntimeConstraints.VCPUs,
+ ram: (container.RuntimeConstraints.RAM +
+ container.RuntimeConstraints.KeepCacheRAM +
+ int64(lr.cluster.Containers.ReserveExtraRAM)),
+ gpuStack: gpuStack,
+ gpus: gpus,
+ ready: make(chan ResourceAlloc)}
+
select {
- case lr.concurrencyLimit <- true:
+ case lr.requestResources <- resourceRequest:
break
case <-lr.ctx.Done():
return lr.ctx.Err()
}
- defer func() { <-lr.concurrencyLimit }()
+ var resourceAlloc ResourceAlloc
+ select {
+ case resourceAlloc = <-resourceRequest.ready:
+ case <-lr.ctx.Done():
+ return lr.ctx.Err()
+ }
+
+ if resourceAlloc.vcpus == 0 {
+ dispatcher.Logger.Warnf("Container resource request %v cannot be fulfilled.", uuid)
+ dispatcher.UpdateState(uuid, dispatch.Cancelled)
+ return nil
+ }
+
+ defer func() {
+ lr.releaseResources <- resourceAlloc
+ }()
select {
case c := <-status:
@@ -197,11 +361,31 @@ func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
waitGroup.Add(1)
defer waitGroup.Done()
- cmd := exec.Command(crunchRunCommand, "--runtime-engine="+lr.cluster.Containers.RuntimeEngine, uuid)
+ args := []string{"--runtime-engine=" + lr.cluster.Containers.RuntimeEngine}
+ args = append(args, lr.cluster.Containers.CrunchRunArgumentsList...)
+ args = append(args, uuid)
+
+ cmd := exec.Command(crunchRunCommand, args...)
cmd.Stdin = nil
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stderr
+ cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%v", os.Getenv("PATH")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("TMPDIR=%v", os.Getenv("TMPDIR")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("ARVADOS_API_HOST=%v", os.Getenv("ARVADOS_API_HOST")))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("ARVADOS_API_TOKEN=%v", os.Getenv("ARVADOS_API_TOKEN")))
+
+ h := hmac.New(sha256.New, []byte(lr.cluster.SystemRootToken))
+ fmt.Fprint(h, container.UUID)
+ cmd.Env = append(cmd.Env, fmt.Sprintf("GatewayAuthSecret=%x", h.Sum(nil)))
+
+ if resourceAlloc.gpuStack == "rocm" {
+ cmd.Env = append(cmd.Env, fmt.Sprintf("AMD_VISIBLE_DEVICES=%v", strings.Join(resourceAlloc.gpus, ",")))
+ }
+ if resourceAlloc.gpuStack == "cuda" {
+ cmd.Env = append(cmd.Env, fmt.Sprintf("CUDA_VISIBLE_DEVICES=%v", strings.Join(resourceAlloc.gpus, ",")))
+ }
+
dispatcher.Logger.Printf("starting container %v", uuid)
// Add this crunch job to the list of runningCmds only if we
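
The new LocalRun.throttle loop above replaces the old fixed-size concurrency channel with a single scheduler goroutine: run() sends a ResourceRequest, blocks on its ready channel, and returns the allocation via releaseResources when the container finishes. A condensed Python sketch of the same request/release pattern (vcpus and RAM only; all names here are illustrative, not the patch's API):

import queue
import threading
from dataclasses import dataclass, field

@dataclass
class Request:
    uuid: str
    vcpus: int
    ram: int
    # the requester blocks on this until the scheduler answers
    ready: queue.Queue = field(default_factory=lambda: queue.Queue(maxsize=1))

class Throttle:
    def __init__(self, max_vcpus, max_ram):
        self.max_vcpus, self.max_ram = max_vcpus, max_ram
        self.events = queue.Queue()  # carries ('request', r) and ('release', r)
        threading.Thread(target=self._loop, daemon=True).start()

    def request(self, r):
        self.events.put(('request', r))
        return r.ready.get()         # None means "can never be fulfilled"

    def release(self, r):
        self.events.put(('release', r))

    def _loop(self):
        avail_vcpus, avail_ram = self.max_vcpus, self.max_ram
        pending = []
        while True:
            kind, r = self.events.get()
            if kind == 'release':
                avail_vcpus += r.vcpus
                avail_ram += r.ram
            else:
                pending.append(r)
            still_waiting = []
            for r in pending:
                if r.vcpus > self.max_vcpus or r.ram > self.max_ram:
                    r.ready.put(None)          # impossible request: reject it
                elif r.vcpus <= avail_vcpus and r.ram <= avail_ram:
                    avail_vcpus -= r.vcpus     # grant the allocation
                    avail_ram -= r.ram
                    r.ready.put(r)
                else:
                    still_waiting.append(r)    # wait for a future release
            pending = still_waiting

Unlike the Go loop, which serves pending requests strictly in FIFO order and stops at the first one that does not fit, this sketch scans the whole queue on every event, so it may grant allocations out of order.
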
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.service b/services/crunch-dispatch-local/crunch-dispatch-local.service
index b4fc10f83e..f40359de2e 100644
--- a/services/crunch-dispatch-local/crunch-dispatch-local.service
+++ b/services/crunch-dispatch-local/crunch-dispatch-local.service
@@ -11,11 +11,8 @@ StartLimitIntervalSec=0
Type=simple
EnvironmentFile=-/etc/arvados/crunch-dispatch-local-credentials
ExecStart=/usr/bin/crunch-dispatch-local -poll-interval=1 -crunch-run-command=/usr/bin/crunch-run
-# Set a reasonable default for the open file limit
-LimitNOFILE=65536
Restart=always
RestartSec=1
-LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target
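
The GatewayAuthSecret passed to crunch-run in the dispatcher hunk above is an HMAC-SHA256 digest of the container UUID, keyed with the cluster's SystemRootToken and hex-encoded. The same computation, sketched in Python:

import hashlib
import hmac

def gateway_auth_secret(system_root_token: str, container_uuid: str) -> str:
    # Equivalent to the Go side: hmac.New(sha256.New, []byte(SystemRootToken)),
    # fed the container UUID, then formatted with %x.
    return hmac.new(system_root_token.encode(),
                    container_uuid.encode(),
                    hashlib.sha256).hexdigest()
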
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local_test.go b/services/crunch-dispatch-local/crunch-dispatch-local_test.go
index e5ce5c66c5..b74ae00791 100644
--- a/services/crunch-dispatch-local/crunch-dispatch-local_test.go
+++ b/services/crunch-dispatch-local/crunch-dispatch-local_test.go
@@ -21,6 +21,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/dispatch"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
)
@@ -82,7 +83,9 @@ func (s *TestSuite) TestIntegration(c *C) {
dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) error {
defer cancel()
- return (&LocalRun{startCmd, make(chan bool, 8), ctx, &cl}).run(d, c, s)
+ lr := LocalRun{startCmd, make(chan ResourceRequest), make(chan ResourceAlloc), ctx, &cl}
+ go lr.throttle(logrus.StandardLogger())
+ return lr.run(d, c, s)
}
err = dispatcher.Run(ctx)
@@ -127,11 +130,34 @@ func (s *MockArvadosServerSuite) Test_APIErrorUpdatingContainerState(c *C) {
func (s *MockArvadosServerSuite) Test_ContainerStillInRunningAfterRun(c *C) {
apiStubResponses := make(map[string]arvadostest.StubResponse)
apiStubResponses["/arvados/v1/containers"] =
- arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2","State":"Queued","Priority":1}]}`)}
+ arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{
+"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2",
+"state":"Queued",
+"priority":1,
+"runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1000000
+}}]}`)}
apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2/lock"] =
- arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Locked", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
+ arvadostest.StubResponse{200, string(`{
+"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2",
+"state":"Locked",
+"priority":1,
+"locked_by_uuid": "zzzzz-gj3su-000000000000000",
+"runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1000000
+}}`)}
apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2"] =
- arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Running", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
+ arvadostest.StubResponse{200, string(`{
+"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2",
+"state":"Running",
+"priority":1,
+"locked_by_uuid": "zzzzz-gj3su-000000000000000",
+"runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1000000
+}}`)}
testWithServerStub(c, apiStubResponses, "echo",
`after \\"echo\\" process termination, container state for zzzzz-dz642-xxxxxxxxxxxxxx2 is \\"Running\\"; updating it to \\"Cancelled\\"`)
@@ -140,17 +166,32 @@ func (s *MockArvadosServerSuite) Test_ContainerStillInRunningAfterRun(c *C) {
func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
apiStubResponses := make(map[string]arvadostest.StubResponse)
apiStubResponses["/arvados/v1/containers"] =
- arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3","State":"Queued","Priority":1}]}`)}
+ arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{
+"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3",
+"state":"Queued",
+"priority":1,
+"runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1000000
+}}]}`)}
apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3/lock"] =
- arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Locked", "priority":1}`)}
+ arvadostest.StubResponse{200, string(`{
+"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3",
+"state":"Locked",
+"priority":1,
+"runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1000000
+}
+}`)}
testWithServerStub(c, apiStubResponses, "nosuchcommand", `error starting \\"nosuchcommand\\" for zzzzz-dz642-xxxxxxxxxxxxxx3`)
}
func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
apiStubResponses["/arvados/v1/api_client_authorizations/current"] =
- arvadostest.StubResponse{200, string(`{"uuid": "` + arvadostest.Dispatch1AuthUUID + `", "api_token": "xyz"}`)}
+ arvadostest.StubResponse{200, string(`{"uuid": "zzzzz-gj3su-000000000000000", "api_token": "xyz"}`)}
apiStub := arvadostest.ServerStub{apiStubResponses}
@@ -186,10 +227,13 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
}
cl := arvados.Cluster{Containers: arvados.ContainersConfig{RuntimeEngine: "docker"}}
+ runningCmds = make(map[string]*exec.Cmd)
dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) error {
defer cancel()
- return (&LocalRun{startCmd, make(chan bool, 8), ctx, &cl}).run(d, c, s)
+ lr := LocalRun{startCmd, make(chan ResourceRequest), make(chan ResourceAlloc), ctx, &cl}
+ go lr.throttle(logrus.StandardLogger())
+ return lr.run(d, c, s)
}
re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
index fb433e65cd..057751aec0 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
@@ -48,7 +48,7 @@ type IntegrationSuite struct {
func (s *IntegrationSuite) SetUpTest(c *C) {
arvadostest.ResetEnv()
arvadostest.ResetDB(c)
- os.Setenv("ARVADOS_API_TOKEN", arvadostest.Dispatch1Token)
+ os.Setenv("ARVADOS_API_TOKEN", arvadostest.SystemRootToken)
s.disp = Dispatcher{}
s.disp.cluster = &arvados.Cluster{}
s.disp.setup()
@@ -249,7 +249,7 @@ func (s *StubbedSuite) SetUpTest(c *C) {
func (s *StubbedSuite) TestAPIErrorGettingContainers(c *C) {
apiStubResponses := make(map[string]arvadostest.StubResponse)
- apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
+ apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"zzzzz-gj3su-000000000000000"}`}
apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
s.testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
diff --git a/services/crunch/crunch-job b/services/crunch/crunch-job
deleted file mode 120000
index ff0e7022bf..0000000000
--- a/services/crunch/crunch-job
+++ /dev/null
@@ -1 +0,0 @@
-../../sdk/cli/bin/arv-crunch-job
\ No newline at end of file
diff --git a/services/dockercleaner/arvados_version.py b/services/dockercleaner/arvados_version.py
index 794b6afe42..cafc7391b4 100644
--- a/services/dockercleaner/arvados_version.py
+++ b/services/dockercleaner/arvados_version.py
@@ -26,6 +26,7 @@ PACKAGE_DEPENDENCY_MAP = {
'arvados-user-activity': ['arvados-python-client'],
'arvados_fuse': ['arvados-python-client'],
'crunchstat_summary': ['arvados-python-client'],
+ 'arvados_cluster_activity': ['arvados-python-client'],
}
PACKAGE_MODULE_MAP = {
'arvados-cwl-runner': 'arvados_cwl',
@@ -34,6 +35,7 @@ PACKAGE_MODULE_MAP = {
'arvados-user-activity': 'arvados_user_activity',
'arvados_fuse': 'arvados_fuse',
'crunchstat_summary': 'crunchstat_summary',
+ 'arvados_cluster_activity': 'arvados_cluster_activity',
}
PACKAGE_SRCPATH_MAP = {
'arvados-cwl-runner': Path('sdk', 'cwl'),
@@ -42,6 +44,7 @@ PACKAGE_SRCPATH_MAP = {
'arvados-user-activity': Path('tools', 'user-activity'),
'arvados_fuse': Path('services', 'fuse'),
'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+ 'arvados_cluster_activity': Path('tools', 'cluster-activity'),
}
ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -72,14 +75,6 @@ if REPO_PATH is None:
if (SETUP_DIR / mod_name).is_dir()
)
-def short_tests_only(arglist=sys.argv):
- try:
- arglist.remove('--short-tests-only')
- except ValueError:
- return False
- else:
- return True
-
def git_log_output(path, *args):
return subprocess.check_output(
['git', '-C', str(REPO_PATH),
@@ -120,7 +115,7 @@ def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
return read_version(setup_dir, module)
else:
version = git_version_at_commit()
- version = version.replace("~dev", ".dev").replace("~rc", "rc")
+        version = version.replace("~dev", ".dev").replace("~rc", "rc")
+        if version.startswith("development-"):
+            version = version[len("development-"):]
save_version(setup_dir, module, version)
return version
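
One subtlety in the version cleanup above: str.lstrip takes a set of characters, not a prefix, so lstrip("development-") would also eat legitimate leading characters drawn from that set, which is why the startswith guard strips only the literal prefix (str.removeprefix does the same on Python 3.9+, but Ubuntu 20.04 ships Python 3.8):

>>> 'development-2.7.0'.lstrip('development-')
'2.7.0'
>>> 'dev2.7.0'.lstrip('development-')        # 'd', 'e', 'v' are all in the set
'2.7.0'
>>> 'dev2.7.0'.removeprefix('development-')  # literal prefix only: no change
'dev2.7.0'
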
diff --git a/services/dockercleaner/bin/arvados-docker-cleaner b/services/dockercleaner/bin/arvados-docker-cleaner
index b9dcd79500..abc723fcf3 100755
--- a/services/dockercleaner/bin/arvados-docker-cleaner
+++ b/services/dockercleaner/bin/arvados-docker-cleaner
@@ -3,7 +3,5 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import, print_function
-
from arvados_docker.cleaner import main
main()
diff --git a/services/dockercleaner/pytest.ini b/services/dockercleaner/pytest.ini
new file mode 120000
index 0000000000..05a82dbfef
--- /dev/null
+++ b/services/dockercleaner/pytest.ini
@@ -0,0 +1 @@
+../../sdk/python/pytest.ini
\ No newline at end of file
diff --git a/services/dockercleaner/setup.py b/services/dockercleaner/setup.py
index 9c69879b45..cb0fc8d88a 100644
--- a/services/dockercleaner/setup.py
+++ b/services/dockercleaner/setup.py
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
import os
import sys
import re
@@ -12,7 +11,6 @@ from setuptools import setup, find_packages
import arvados_version
version = arvados_version.get_version()
-short_tests_only = arvados_version.short_tests_only()
README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
setup(name="arvados-docker-cleaner",
diff --git a/services/fuse/README.rst b/services/fuse/README.rst
index 12c6ae6ca1..01a6a9b098 100644
--- a/services/fuse/README.rst
+++ b/services/fuse/README.rst
@@ -45,24 +45,43 @@ You can test the change by running::
arv-mount --version
-Installing on Debian systems
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-1. Add this Arvados repository to your sources list::
-
- deb http://apt.arvados.org/buster buster main
-
-2. Update your package list.
-
-3. Install the ``python3-arvados-fuse`` package.
+Installing on Debian and Ubuntu systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Arvados publishes packages for Debian 11 "bullseye," Debian 12 "bookworm," Ubuntu 20.04 "focal," and Ubuntu 22.04 "jammy." You can install the FUSE driver package on any of these distributions by running the following commands::
+
+  sudo install -d /etc/apt/keyrings
+  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
+  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF
+  Types: deb
+  URIs: https://apt.arvados.org/$(lsb_release -cs)
+  Suites: $(lsb_release -cs)
+  Components: main
+  Signed-By: /etc/apt/keyrings/arvados.asc
+  EOF
+  sudo apt update
+  sudo apt install python3-arvados-fuse
Configuration
-------------
This driver needs two pieces of information to connect to
Arvados: the DNS name of the API server, and an API authorization
-token. You can set these in environment variables, or the file
-``$HOME/.config/arvados/settings.conf``. `The Arvados user
+token. `The Arvados user
 documentation
 <https://doc.arvados.org/user/reference/api-tokens.html>`_ describes
how to find this information in the Arvados Workbench, and install it
@@ -71,9 +90,9 @@ on your system.
Testing and Development
-----------------------
-Debian packages you need to build llfuse:
+Debian packages you need to build llfuse::
-$ apt-get install python-dev pkg-config libfuse-dev libattr1-dev
+  sudo apt install python3-dev pkg-config libfuse-dev
This package is one part of the Arvados source package, and it has
integration tests to check interoperability with other Arvados
diff --git a/services/fuse/arvados_fuse/__init__.py b/services/fuse/arvados_fuse/__init__.py
index d827aefab7..3ef087a07b 100644
--- a/services/fuse/arvados_fuse/__init__.py
+++ b/services/fuse/arvados_fuse/__init__.py
@@ -54,11 +54,6 @@ inode assigned to it and appears in the Inodes._entries dictionary.
"""
-from __future__ import absolute_import
-from __future__ import division
-from builtins import next
-from builtins import str
-from builtins import object
import os
import llfuse
import errno
@@ -101,7 +96,7 @@ class Handle(object):
def release(self):
self.obj.dec_use()
- def flush(self):
+ def flush(self, force):
pass
@@ -109,10 +104,24 @@ class FileHandle(Handle):
"""Connects a numeric file handle to a File object that has
been opened by the client."""
- def flush(self):
- if self.obj.writable():
- return self.obj.flush()
+ def __init__(self, fh, obj, parent_obj, open_for_writing):
+ super(FileHandle, self).__init__(fh, obj)
+ self.parent_obj = parent_obj
+ if self.parent_obj is not None:
+ self.parent_obj.inc_use()
+ self.open_for_writing = open_for_writing
+ def release(self):
+ super(FileHandle, self).release()
+ if self.parent_obj is not None:
+ self.parent_obj.dec_use()
+
+ def flush(self, force):
+ if not self.open_for_writing and not force:
+ return
+ self.obj.flush()
+ if self.parent_obj is not None:
+ self.parent_obj.flush()
class DirectoryHandle(Handle):
"""Connects a numeric file handle to a Directory object that has
@@ -137,6 +146,9 @@ class DirectoryHandle(Handle):
ent[1].dec_use()
super(DirectoryHandle, self).release()
+ def flush(self, force):
+ self.obj.flush()
+
class InodeCache(object):
"""Records the memory footprint of objects and when they are last used.
@@ -304,7 +316,6 @@ class Inodes(object):
self._inode_remove_thread.daemon = True
self._inode_remove_thread.start()
- self.cap_cache_event = threading.Event()
self._by_uuid = collections.defaultdict(list)
def __getitem__(self, item):
@@ -334,8 +345,7 @@ class Inodes(object):
def cap_cache(self):
"""Notify the _inode_remove thread to recheck the cache."""
- if not self.cap_cache_event.is_set():
- self.cap_cache_event.set()
+ if self._inode_remove_queue.empty():
self._inode_remove_queue.put(EvictCandidates())
def update_uuid(self, entry):
@@ -390,35 +400,40 @@ class Inodes(object):
"""
locked_ops = collections.deque()
- while True:
+ shutting_down = False
+ while not shutting_down:
+ tasks_done = 0
blocking_get = True
while True:
try:
qentry = self._inode_remove_queue.get(blocking_get)
except queue.Empty:
break
+
blocking_get = False
if qentry is None:
- return
-
- if self._shutdown_started.is_set():
+ shutting_down = True
continue
- # Process this entry
- if qentry.inode_op(self, locked_ops):
- self._inode_remove_queue.task_done()
+ # Process (or defer) this entry
+ qentry.inode_op(self, locked_ops)
+ tasks_done += 1
# Give up the reference
qentry = None
with llfuse.lock:
while locked_ops:
- if locked_ops.popleft().inode_op(self, None):
- self._inode_remove_queue.task_done()
- self.cap_cache_event.clear()
+ locked_ops.popleft().inode_op(self, None)
for entry in self.inode_cache.evict_candidates():
self._remove(entry)
+ # Unblock _inode_remove_queue.join() only when all of the
+ # deferred work is done, i.e., after calling inode_op()
+ # and then evict_candidates().
+ for _ in range(tasks_done):
+ self._inode_remove_queue.task_done()
+
def wait_remove_queue_empty(self):
# used by tests
self._inode_remove_queue.join()
@@ -622,10 +637,10 @@ class Operations(llfuse.Operations):
self.num_retries = num_retries
- self.read_counter = arvados.keep.Counter()
- self.write_counter = arvados.keep.Counter()
- self.read_ops_counter = arvados.keep.Counter()
- self.write_ops_counter = arvados.keep.Counter()
+ self.read_counter = arvados.keep._Counter()
+ self.write_counter = arvados.keep._Counter()
+ self.read_ops_counter = arvados.keep._Counter()
+ self.write_ops_counter = arvados.keep._Counter()
self.events = None
@@ -837,11 +852,18 @@ class Operations(llfuse.Operations):
if isinstance(p, Directory):
raise llfuse.FUSEError(errno.EISDIR)
- if ((flags & os.O_WRONLY) or (flags & os.O_RDWR)) and not p.writable():
+ open_for_writing = (flags & os.O_WRONLY) or (flags & os.O_RDWR)
+ if open_for_writing and not p.writable():
raise llfuse.FUSEError(errno.EPERM)
fh = next(self._filehandles_counter)
- self._filehandles[fh] = FileHandle(fh, p)
+
+ if p.stale():
+ p.checkupdate()
+ self.inodes.invalidate_inode(p)
+
+ parent_inode = self.inodes[p.parent_inode] if p.parent_inode in self.inodes else None
+ self._filehandles[fh] = FileHandle(fh, p, parent_inode, open_for_writing)
self.inodes.touch(p)
# Normally, we will have received an "update" event if the
@@ -907,7 +929,7 @@ class Operations(llfuse.Operations):
if fh in self._filehandles:
_logger.debug("arv-mount release fh %i", fh)
try:
- self._filehandles[fh].flush()
+ self._filehandles[fh].flush(False)
except Exception:
raise
finally:
@@ -1011,7 +1033,7 @@ class Operations(llfuse.Operations):
# The file entry should have been implicitly created by callback.
f = p[name]
fh = next(self._filehandles_counter)
- self._filehandles[fh] = FileHandle(fh, f)
+ self._filehandles[fh] = FileHandle(fh, f, p, True)
self.inodes.touch(p)
f.inc_ref()
@@ -1062,10 +1084,32 @@ class Operations(llfuse.Operations):
@catch_exceptions
def flush(self, fh):
if fh in self._filehandles:
- self._filehandles[fh].flush()
+ self._filehandles[fh].flush(False)
def fsync(self, fh, datasync):
- self.flush(fh)
+ if fh in self._filehandles:
+ self._filehandles[fh].flush(True)
+ self.inodes.invalidate_inode(self._filehandles[fh].obj)
def fsyncdir(self, fh, datasync):
- self.flush(fh)
+ if fh in self._filehandles:
+ self._filehandles[fh].flush(True)
+
+ @catch_exceptions
+ def mknod(self, parent_inode, name, mode, rdev, ctx=None):
+ if not stat.S_ISREG(mode):
+ # Can only be used to create regular files.
+ raise NotImplementedError()
+
+ name = name.decode(encoding=self.inodes.encoding)
+ _logger.debug("arv-mount mknod: parent_inode %i '%s' %o", parent_inode, name, mode)
+
+ p = self._check_writable(parent_inode)
+ p.create(name)
+
+ # The file entry should have been implicitly created by callback.
+ f = p[name]
+ self.inodes.touch(p)
+
+ f.inc_ref()
+ return self.getattr(f.inode)
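
For reference, this new mknod handler is what services a plain mknod(2) on the mount: it supports only regular files, delegates creation to the parent directory, and returns fresh attributes. A hypothetical caller's view (the mount path below is illustrative):

```python
import os
import stat

# Hypothetical path inside an arv-mount mountpoint.
path = "/mnt/arvados/home/my-project/newfile.txt"

# mknod with S_IFREG creates an empty regular file; any other file type
# makes the handler above raise NotImplementedError, which the FUSE
# layer surfaces to the caller as an OSError.
os.mknod(path, mode=stat.S_IFREG | 0o644)
assert os.path.isfile(path)
```
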
diff --git a/services/fuse/arvados_fuse/command.py b/services/fuse/arvados_fuse/command.py
index f52121d862..6b28cf4ba5 100644
--- a/services/fuse/arvados_fuse/command.py
+++ b/services/fuse/arvados_fuse/command.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from future.utils import native_str
-from builtins import range
-from builtins import object
import argparse
import arvados
import daemon
@@ -308,7 +305,7 @@ After this time, the mount will be forcefully unmounted.
cache.add_argument(
'--disk-cache-dir',
metavar="DIRECTORY",
- help="Filesystem cache location (default `~/.cache/arvados/keep`)",
+ help="Set custom filesystem cache location",
)
cache.add_argument(
'--directory-cache',
@@ -349,6 +346,13 @@ Filesystem character encoding
metavar='CLASSES',
help="Comma-separated list of storage classes to request for new collections",
)
+ plumbing.add_argument(
+ '--refresh-time',
+ metavar='SECONDS',
+ default=15,
+ type=int,
+ help="Upper limit on how long mount contents may be out of date with upstream Arvados before being refreshed on next access (default 15 seconds)",
+ )
# This is a hidden argument used by tests. Normally this
# value will be extracted from the cluster config, but mocking
# the cluster config under the presence of multiple threads
@@ -410,7 +414,7 @@ class Mount(object):
if self.args.replace:
unmount(path=self.args.mountpoint,
timeout=self.args.unmount_timeout)
- llfuse.init(self.operations, native_str(self.args.mountpoint), self._fuse_options())
+ llfuse.init(self.operations, str(self.args.mountpoint), self._fuse_options())
if self.daemon:
daemon.DaemonContext(
working_directory=os.path.dirname(self.args.mountpoint),
@@ -430,6 +434,7 @@ class Mount(object):
self.operations.events.close(timeout=self.args.unmount_timeout)
subprocess.call(["fusermount", "-u", "-z", self.args.mountpoint])
self.llfuse_thread.join(timeout=self.args.unmount_timeout)
+ self.api.keep.block_cache.clear()
if self.llfuse_thread.is_alive():
self.logger.warning("Mount.__exit__:"
" llfuse thread still alive %fs after umount"
@@ -584,7 +589,6 @@ class Mount(object):
elif self.args.mode == 'home':
dir_class = ProjectDirectory
dir_args.append(usr)
- dir_args.append(True)
elif self.args.mode == 'all':
self.args.mount_by_id = ['by_id']
self.args.mount_by_tag = ['by_tag']
@@ -594,9 +598,9 @@ class Mount(object):
if dir_class is not None:
if dir_class in [TagsDirectory, CollectionDirectory]:
- ent = dir_class(*dir_args)
+ ent = dir_class(*dir_args, poll_time=self.args.refresh_time)
else:
- ent = dir_class(*dir_args, storage_classes=storage_classes)
+ ent = dir_class(*dir_args, storage_classes=storage_classes, poll_time=self.args.refresh_time)
self.operations.inodes.add_entry(ent)
self.listen_for_events = ent.want_event_subscribe()
return
@@ -610,17 +614,25 @@ class Mount(object):
dir_args[0] = e.inode
for name in self.args.mount_by_id:
- self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=False, storage_classes=storage_classes))
+ self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=False,
+ storage_classes=storage_classes,
+ poll_time=self.args.refresh_time))
for name in self.args.mount_by_pdh:
- self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=True))
+ self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=True,
+ poll_time=self.args.refresh_time))
for name in self.args.mount_by_tag:
self._add_mount(e, name, TagsDirectory(*dir_args))
for name in self.args.mount_home:
- self._add_mount(e, name, ProjectDirectory(*dir_args, project_object=usr, poll=True, storage_classes=storage_classes))
+ self._add_mount(e, name, ProjectDirectory(*dir_args, project_object=usr,
+ storage_classes=storage_classes,
+ poll_time=self.args.refresh_time))
for name in self.args.mount_shared:
- self._add_mount(e, name, SharedDirectory(*dir_args, exclude=usr, poll=True, storage_classes=storage_classes))
+ self._add_mount(e, name, SharedDirectory(*dir_args, exclude=usr,
+ storage_classes=storage_classes,
+ poll_time=self.args.refresh_time))
for name in self.args.mount_tmp:
- self._add_mount(e, name, TmpCollectionDirectory(*dir_args, storage_classes=storage_classes))
+ self._add_mount(e, name, TmpCollectionDirectory(*dir_args,
+ storage_classes=storage_classes))
if mount_readme:
text = self._readme_text(
diff --git a/services/fuse/arvados_fuse/crunchstat.py b/services/fuse/arvados_fuse/crunchstat.py
index 0cb585a6ff..313c34971f 100644
--- a/services/fuse/arvados_fuse/crunchstat.py
+++ b/services/fuse/arvados_fuse/crunchstat.py
@@ -2,10 +2,9 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from builtins import str
-from builtins import object
import sys
import time
+
from collections import namedtuple
Stat = namedtuple("Stat", ['name', 'get'])
diff --git a/services/fuse/arvados_fuse/fresh.py b/services/fuse/arvados_fuse/fresh.py
index 508ee7fb73..472e0fd0cc 100644
--- a/services/fuse/arvados_fuse/fresh.py
+++ b/services/fuse/arvados_fuse/fresh.py
@@ -2,11 +2,10 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from builtins import object
-import time
import ciso8601
import calendar
import functools
+import time
def convertTime(t):
"""Parse Arvados timestamp to unix time."""
@@ -151,3 +150,10 @@ class FreshBase(object):
return t
else:
return self._poll_time
+
+ def update(self):
+ pass
+
+ def checkupdate(self):
+ if self.stale():
+ self.update()
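
The new update()/checkupdate() hooks round out FreshBase's freshness protocol: stale entries refresh themselves lazily on access. A toy model of the pattern, assuming stale() compares the last refresh against a per-object poll interval (a simplification of the real class):

```python
import time

class FreshLike:
    def __init__(self, poll_time=15):
        self._poll_time = poll_time
        self._updated_at = 0.0

    def stale(self):
        # Out of date once poll_time seconds have passed since the
        # last refresh.
        return time.time() - self._updated_at > self._poll_time

    def update(self):
        # Subclasses re-fetch their backing Arvados object here.
        self._updated_at = time.time()

    def checkupdate(self):
        if self.stale():
            self.update()
```
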
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index 9c78805107..a22638207f 100644
--- a/services/fuse/arvados_fuse/fusedir.py
+++ b/services/fuse/arvados_fuse/fusedir.py
@@ -291,11 +291,12 @@ class CollectionDirectoryBase(Directory):
__slots__ = ("collection", "collection_root", "collection_record_file")
- def __init__(self, parent_inode, inodes, enable_write, filters, collection, collection_root):
+ def __init__(self, parent_inode, inodes, enable_write, filters, collection, collection_root, poll_time=15):
super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, enable_write, filters)
self.collection = collection
self.collection_root = collection_root
self.collection_record_file = None
+ self._poll_time = poll_time
def new_entry(self, name, item, mtime):
name = self.sanitize_filename(name)
@@ -314,69 +315,88 @@ class CollectionDirectoryBase(Directory):
self._filters,
item,
self.collection_root,
+ poll_time=self._poll_time
))
self._entries[name].populate(mtime)
else:
- self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
+ self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime,
+ self._enable_write,
+ self._poll, self._poll_time))
item.fuse_entry = self._entries[name]
def on_event(self, event, collection, name, item):
+
# These are events from the Collection object (ADD/DEL/MOD)
# emitted by operations on the Collection object (like
# "mkdirs" or "remove"), and by "update", which we need to
# synchronize with our FUSE objects that are assigned inodes.
- if collection == self.collection:
- name = self.sanitize_filename(name)
-
- #
- # It's possible for another thread to have llfuse.lock and
- # be waiting on collection.lock. Meanwhile, we released
- # llfuse.lock earlier in the stack, but are still holding
- # on to the collection lock, and now we need to re-acquire
- # llfuse.lock. If we don't release the collection lock,
- # we'll deadlock where we're holding the collection lock
- # waiting for llfuse.lock and the other thread is holding
- # llfuse.lock and waiting for the collection lock.
- #
- # The correct locking order here is to take llfuse.lock
- # first, then the collection lock.
- #
- # Since collection.lock is an RLock, it might be locked
- # multiple times, so we need to release it multiple times,
- # keep a count, then re-lock it the correct number of
- # times.
- #
- lockcount = 0
- try:
- while True:
- self.collection.lock.release()
- lockcount += 1
- except RuntimeError:
- pass
+ if collection != self.collection:
+ return
- try:
- with llfuse.lock:
- with self.collection.lock:
- if event == arvados.collection.ADD:
- self.new_entry(name, item, self.mtime())
- elif event == arvados.collection.DEL:
- ent = self._entries[name]
- del self._entries[name]
+ name = self.sanitize_filename(name)
+
+ #
+ # It's possible for another thread to have llfuse.lock and
+ # be waiting on collection.lock. Meanwhile, we released
+ # llfuse.lock earlier in the stack, but are still holding
+ # on to the collection lock, and now we need to re-acquire
+ # llfuse.lock. If we don't release the collection lock,
+ # we'll deadlock where we're holding the collection lock
+ # waiting for llfuse.lock and the other thread is holding
+ # llfuse.lock and waiting for the collection lock.
+ #
+ # The correct locking order here is to take llfuse.lock
+ # first, then the collection lock.
+ #
+ # Since collection.lock is an RLock, it might be locked
+ # multiple times, so we need to release it multiple times,
+ # keep a count, then re-lock it the correct number of
+ # times.
+ #
+ lockcount = 0
+ try:
+ while True:
+ self.collection.lock.release()
+ lockcount += 1
+ except RuntimeError:
+ pass
+
+ try:
+ with llfuse.lock:
+ with self.collection.lock:
+ if event == arvados.collection.ADD:
+ self.new_entry(name, item, self.mtime())
+ elif event == arvados.collection.DEL:
+ ent = self._entries[name]
+ del self._entries[name]
+ self.inodes.invalidate_entry(self, name)
+ self.inodes.del_entry(ent)
+ elif event == arvados.collection.MOD:
+ # MOD events have (modified_from, newitem)
+ newitem = item[1]
+ entry = None
+ if hasattr(newitem, "fuse_entry") and newitem.fuse_entry is not None:
+ entry = newitem.fuse_entry
+ elif name in self._entries:
+ entry = self._entries[name]
+
+ if entry is not None:
+ entry.invalidate()
+ self.inodes.invalidate_inode(entry)
+
+ if name in self._entries:
self.inodes.invalidate_entry(self, name)
- self.inodes.del_entry(ent)
- elif event == arvados.collection.MOD:
- if hasattr(item, "fuse_entry") and item.fuse_entry is not None:
- self.inodes.invalidate_inode(item.fuse_entry)
- elif name in self._entries:
- self.inodes.invalidate_inode(self._entries[name])
-
- if self.collection_record_file is not None:
- self.collection_record_file.invalidate()
- self.inodes.invalidate_inode(self.collection_record_file)
- finally:
- while lockcount > 0:
- self.collection.lock.acquire()
- lockcount -= 1
+
+ # TOK and WRITE events just invalidate the
+ # collection record file.
+
+ if self.collection_record_file is not None:
+ self.collection_record_file.invalidate()
+ self.inodes.invalidate_inode(self.collection_record_file)
+ finally:
+ while lockcount > 0:
+ self.collection.lock.acquire()
+ lockcount -= 1
def populate(self, mtime):
self._mtime = mtime
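
The comment block above describes the deadlock-avoidance dance this rewrite preserves: fully release the re-entrant collection lock (counting its recursion depth), take llfuse.lock first, retake the collection lock, then restore the original depth afterwards. A self-contained sketch of that pattern, using stand-in locks rather than the real llfuse objects:

```python
import threading

llfuse_lock = threading.Lock()       # stand-in for llfuse.lock
collection_lock = threading.RLock()  # stand-in for collection.lock

def relock_in_correct_order(do_work):
    # Fully release the RLock, counting how many times this thread held it.
    lockcount = 0
    try:
        while True:
            collection_lock.release()
            lockcount += 1
    except RuntimeError:  # raised once we no longer hold the lock
        pass
    try:
        with llfuse_lock:            # correct order: llfuse lock first,
            with collection_lock:    # then the collection lock
                do_work()
    finally:
        for _ in range(lockcount):   # restore the original recursion depth
            collection_lock.acquire()
```
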
@@ -470,16 +490,13 @@ class CollectionDirectory(CollectionDirectoryBase):
__slots__ = ("api", "num_retries", "collection_locator",
"_manifest_size", "_writable", "_updating_lock")
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters=None, collection_record=None, explicit_collection=None):
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write,
+ filters=None, collection_record=None,
+ poll_time=15):
super(CollectionDirectory, self).__init__(parent_inode, inodes, enable_write, filters, None, self)
self.api = api
self.num_retries = num_retries
self._poll = True
- try:
- self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)
- except:
- _logger.debug("Error getting blobSignatureTtl from discovery document: %s", sys.exc_info()[0])
- self._poll_time = 60*60
if isinstance(collection_record, dict):
self.collection_locator = collection_record['uuid']
@@ -487,9 +504,25 @@ class CollectionDirectory(CollectionDirectoryBase):
else:
self.collection_locator = collection_record
self._mtime = 0
+
+ is_uuid = (self.collection_locator is not None) and (uuid_pattern.match(self.collection_locator) is not None)
+
+ if is_uuid:
+            # It is a UUID, so it may be updated upstream; recheck it periodically.
+ self._poll_time = poll_time
+ else:
+            # It is not a UUID, so the collection is immutable. An
+            # immutable collection only needs to be refreshed if the
+            # mount is very long lived (long enough that there's a
+            # risk of the blob signatures expiring).
+ try:
+ self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)
+ except:
+ _logger.debug("Error getting blobSignatureTtl from discovery document: %s", sys.exc_info()[0])
+ self._poll_time = 60*60
+
+ self._writable = is_uuid and enable_write
self._manifest_size = 0
- if self.collection_locator:
- self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
self._updating_lock = threading.Lock()
def same(self, i):
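
The constructor above now distinguishes mutable collections (addressed by UUID) from immutable ones (addressed by portable data hash) when picking a refresh interval. The rule, restated as a standalone sketch -- the regex and the default TTL are illustrative:

```python
import re

# Illustrative collection-UUID pattern (zzzzz-4zz18-...).
uuid_pattern = re.compile(r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')

def choose_poll_time(locator, refresh_time=15, blob_signature_ttl=2*60*60):
    if locator and uuid_pattern.match(locator):
        # Mutable: may change upstream, so recheck every
        # --refresh-time seconds.
        return refresh_time
    # Immutable: only needs refreshing often enough that cached blob
    # signatures never expire.
    return blob_signature_ttl // 2
```
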
@@ -500,8 +533,6 @@ class CollectionDirectory(CollectionDirectoryBase):
@use_counter
def flush(self):
- if not self.writable():
- return
with llfuse.lock_released:
with self._updating_lock:
if self.collection.committed():
@@ -541,10 +572,6 @@ class CollectionDirectory(CollectionDirectoryBase):
@use_counter
def update(self):
try:
- if self.collection is not None and portable_data_hash_pattern.match(self.collection_locator):
- # It's immutable, nothing to update
- return True
-
if self.collection_locator is None:
# No collection locator to retrieve from
self.fresh()
@@ -697,33 +724,9 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
# save to the backend
super(TmpCollectionDirectory, self).__init__(
parent_inode, inodes, True, filters, collection, self)
+ self._poll = False
self.populate(self.mtime())
- def on_event(self, *args, **kwargs):
- super(TmpCollectionDirectory, self).on_event(*args, **kwargs)
- if self.collection_record_file is None:
- return
-
- # See discussion in CollectionDirectoryBase.on_event
- lockcount = 0
- try:
- while True:
- self.collection.lock.release()
- lockcount += 1
- except RuntimeError:
- pass
-
- try:
- with llfuse.lock:
- with self.collection.lock:
- self.collection_record_file.invalidate()
- self.inodes.invalidate_inode(self.collection_record_file)
- _logger.debug("%s invalidated collection record", self.inode)
- finally:
- while lockcount > 0:
- self.collection.lock.acquire()
- lockcount -= 1
-
def collection_record(self):
with llfuse.lock_released:
return {
@@ -792,12 +795,15 @@ and the directory will appear if it exists.
""".lstrip()
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, pdh_only=False, storage_classes=None):
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+ pdh_only=False, storage_classes=None, poll_time=15):
super(MagicDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.pdh_only = pdh_only
self.storage_classes = storage_classes
+ self._poll = False
+ self._poll_time = poll_time
def __setattr__(self, name, value):
super(MagicDirectory, self).__setattr__(name, value)
@@ -815,7 +821,9 @@ and the directory will appear if it exists.
self.num_retries,
self._enable_write,
self._filters,
- self.pdh_only,
+ pdh_only=self.pdh_only,
+ storage_classes=self.storage_classes,
+ poll_time=self._poll_time
))
def __contains__(self, k):
@@ -847,6 +855,7 @@ and the directory will appear if it exists.
self._filters,
project[u'items'][0],
storage_classes=self.storage_classes,
+ poll_time=self._poll_time
))
else:
e = self.inodes.add_entry(CollectionDirectory(
@@ -857,6 +866,7 @@ and the directory will appear if it exists.
self._enable_write,
self._filters,
k,
+ poll_time=self._poll_time
))
if e.update():
@@ -1018,14 +1028,14 @@ class ProjectDirectory(Directory):
"_current_user", "_full_listing", "storage_classes", "recursively_contained")
def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
- project_object, poll=True, poll_time=3, storage_classes=None):
+ project_object, poll_time=15, storage_classes=None):
super(ProjectDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.project_object = project_object
self.project_object_file = None
self.project_uuid = project_object['uuid']
- self._poll = poll
+ self._poll = True
self._poll_time = poll_time
self._updating_lock = threading.Lock()
self._current_user = None
@@ -1051,12 +1061,13 @@ class ProjectDirectory(Directory):
def createDirectory(self, i):
common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)
if collection_uuid_pattern.match(i['uuid']):
- return CollectionDirectory(*common_args, i)
+ return CollectionDirectory(*common_args, i, poll_time=self._poll_time)
elif group_uuid_pattern.match(i['uuid']):
- return ProjectDirectory(*common_args, i, self._poll, self._poll_time, self.storage_classes)
+ return ProjectDirectory(*common_args, i, poll_time=self._poll_time,
+ storage_classes=self.storage_classes)
elif link_uuid_pattern.match(i['uuid']):
if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
- return CollectionDirectory(*common_args, i['head_uuid'])
+ return CollectionDirectory(*common_args, i['head_uuid'], poll_time=self._poll_time)
else:
return None
elif uuid_pattern.match(i['uuid']):
@@ -1212,10 +1223,7 @@ class ProjectDirectory(Directory):
def writable(self):
if not self._enable_write:
return False
- with llfuse.lock_released:
- if not self._current_user:
- self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
- return self._current_user["uuid"] in self.project_object.get("writable_by", [])
+ return self.project_object.get("can_write") is True
def persisted(self):
return True
@@ -1344,7 +1352,7 @@ class SharedDirectory(Directory):
"""A special directory that represents users or groups who have shared projects with me."""
def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
- exclude, poll=False, poll_time=60, storage_classes=None):
+ exclude, poll_time=60, storage_classes=None):
super(SharedDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
@@ -1462,7 +1470,6 @@ class SharedDirectory(Directory):
self._enable_write,
self._filters,
i[1],
- poll=self._poll,
poll_time=self._poll_time,
storage_classes=self.storage_classes,
),
diff --git a/services/fuse/arvados_fuse/fusefile.py b/services/fuse/arvados_fuse/fusefile.py
index 9279f7d99d..ee51a73d5e 100644
--- a/services/fuse/arvados_fuse/fusefile.py
+++ b/services/fuse/arvados_fuse/fusefile.py
@@ -2,15 +2,13 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from builtins import bytes
import json
import llfuse
import logging
import re
import time
-from .fresh import FreshBase, convertTime
+from .fresh import FreshBase, convertTime, check_update
_logger = logging.getLogger('arvados.arvados_fuse')
@@ -19,11 +17,13 @@ class File(FreshBase):
__slots__ = ("inode", "parent_inode", "_mtime")
- def __init__(self, parent_inode, _mtime=0):
+ def __init__(self, parent_inode, _mtime=0, poll=False, poll_time=0):
super(File, self).__init__()
self.inode = None
self.parent_inode = parent_inode
self._mtime = _mtime
+ self._poll = poll
+ self._poll_time = poll_time
def size(self):
return 0
@@ -52,8 +52,8 @@ class FuseArvadosFile(File):
__slots__ = ('arvfile', '_enable_write')
- def __init__(self, parent_inode, arvfile, _mtime, enable_write):
- super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
+ def __init__(self, parent_inode, arvfile, _mtime, enable_write, poll, poll_time):
+ super(FuseArvadosFile, self).__init__(parent_inode, _mtime, poll=poll, poll_time=poll_time)
self.arvfile = arvfile
self._enable_write = enable_write
@@ -63,15 +63,12 @@ class FuseArvadosFile(File):
def readfrom(self, off, size, num_retries=0):
with llfuse.lock_released:
- return self.arvfile.readfrom(off, size, num_retries, exact=True)
+ return self.arvfile.readfrom(off, size, num_retries, exact=True, return_memoryview=True)
def writeto(self, off, buf, num_retries=0):
with llfuse.lock_released:
return self.arvfile.writeto(off, buf, num_retries)
- def stale(self):
- return False
-
def writable(self):
return self._enable_write and self.arvfile.writable()
@@ -148,17 +145,15 @@ class FuncToJSONFile(StringFile):
# caching entirely.
self.allow_attr_cache = False
+ @check_update
def size(self):
- self._update()
return super(FuncToJSONFile, self).size()
+ @check_update
def readfrom(self, *args, **kwargs):
- self._update()
return super(FuncToJSONFile, self).readfrom(*args, **kwargs)
- def _update(self):
- if not self.stale():
- return
+ def update(self):
self._mtime = time.time()
obj = self.func()
self.contents = json.dumps(obj, indent=4, sort_keys=True) + "\n"
diff --git a/services/fuse/arvados_version.py b/services/fuse/arvados_version.py
index 794b6afe42..cafc7391b4 100644
--- a/services/fuse/arvados_version.py
+++ b/services/fuse/arvados_version.py
@@ -26,6 +26,7 @@ PACKAGE_DEPENDENCY_MAP = {
'arvados-user-activity': ['arvados-python-client'],
'arvados_fuse': ['arvados-python-client'],
'crunchstat_summary': ['arvados-python-client'],
+ 'arvados_cluster_activity': ['arvados-python-client'],
}
PACKAGE_MODULE_MAP = {
'arvados-cwl-runner': 'arvados_cwl',
@@ -34,6 +35,7 @@ PACKAGE_MODULE_MAP = {
'arvados-user-activity': 'arvados_user_activity',
'arvados_fuse': 'arvados_fuse',
'crunchstat_summary': 'crunchstat_summary',
+ 'arvados_cluster_activity': 'arvados_cluster_activity',
}
PACKAGE_SRCPATH_MAP = {
'arvados-cwl-runner': Path('sdk', 'cwl'),
@@ -42,6 +44,7 @@ PACKAGE_SRCPATH_MAP = {
'arvados-user-activity': Path('tools', 'user-activity'),
'arvados_fuse': Path('services', 'fuse'),
'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+ 'arvados_cluster_activity': Path('tools', 'cluster-activity'),
}
ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -72,14 +75,6 @@ if REPO_PATH is None:
if (SETUP_DIR / mod_name).is_dir()
)
-def short_tests_only(arglist=sys.argv):
- try:
- arglist.remove('--short-tests-only')
- except ValueError:
- return False
- else:
- return True
-
def git_log_output(path, *args):
return subprocess.check_output(
['git', '-C', str(REPO_PATH),
@@ -120,7 +115,7 @@ def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
return read_version(setup_dir, module)
else:
version = git_version_at_commit()
- version = version.replace("~dev", ".dev").replace("~rc", "rc")
+    version = version.replace("~dev", ".dev").replace("~rc", "rc")
+    if version.startswith("development-"):
+        version = version[len("development-"):]
save_version(setup_dir, module, version)
return version
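
A quick check of the normalization above: Debian-style "~dev"/"~rc" markers become PEP 440 pre-release tags, and a leading "development-" prefix (presumably from a development-* git tag) is dropped. Note that str.lstrip() strips a character set rather than a prefix, so the explicit startswith() test is used instead:

```python
def normalize(v):
    v = v.replace("~dev", ".dev").replace("~rc", "rc")
    if v.startswith("development-"):
        v = v[len("development-"):]
    return v

assert normalize("development-2.7.0~dev20240101") == "2.7.0.dev20240101"
assert normalize("2.7.0~rc2") == "2.7.0rc2"
```
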
diff --git a/services/fuse/fpm-info.sh b/services/fuse/fpm-info.sh
index 4d98172f8d..39d65bf189 100644
--- a/services/fuse/fpm-info.sh
+++ b/services/fuse/fpm-info.sh
@@ -2,13 +2,19 @@
#
# SPDX-License-Identifier: AGPL-3.0
+# We depend on the fuse package because arv-mount may run the `fusermount` tool.
fpm_depends+=(fuse)
case "$TARGET" in
centos*|rocky*)
+ # We depend on libfuse for llfuse.
+ # We should declare a libcurl dependency, but it's a little academic
+ # because rpm itself depends on it, so we can be pretty sure it's installed.
fpm_depends+=(fuse-libs)
;;
debian* | ubuntu*)
- fpm_depends+=(libcurl3-gnutls)
+ # We depend on libfuse2 for llfuse.
+ # We depend on libcurl because the Python SDK does for its Keep client.
+ fpm_depends+=(libfuse2 libcurl4)
;;
esac
diff --git a/services/fuse/pytest.ini b/services/fuse/pytest.ini
new file mode 120000
index 0000000000..05a82dbfef
--- /dev/null
+++ b/services/fuse/pytest.ini
@@ -0,0 +1 @@
+../../sdk/python/pytest.ini
\ No newline at end of file
diff --git a/services/fuse/setup.py b/services/fuse/setup.py
index 77dbd036d0..f9b0fcf91d 100644
--- a/services/fuse/setup.py
+++ b/services/fuse/setup.py
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
import os
import sys
import re
@@ -12,7 +11,6 @@ from setuptools import setup, find_packages
import arvados_version
version = arvados_version.get_version()
-short_tests_only = arvados_version.short_tests_only()
README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
setup(name='arvados_fuse',
@@ -34,7 +32,6 @@ setup(name='arvados_fuse',
install_requires=[
*arvados_version.iter_dependencies(version),
'arvados-llfuse >= 1.5.1',
- 'future',
'python-daemon',
'ciso8601 >= 2.0.0',
'setuptools',
@@ -45,6 +42,6 @@ setup(name='arvados_fuse',
'Programming Language :: Python :: 3',
],
test_suite='tests',
- tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML', 'parameterized',],
+ tests_require=['PyYAML', 'parameterized',],
zip_safe=False
)
diff --git a/services/fuse/tests/fstest.py b/services/fuse/tests/fstest.py
index 51e3f311ab..296f23919c 100644
--- a/services/fuse/tests/fstest.py
+++ b/services/fuse/tests/fstest.py
@@ -2,14 +2,11 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import print_function
-from __future__ import absolute_import
-from builtins import str
-from builtins import range
-from multiprocessing import Process
import os
import subprocess
import sys
+
+from multiprocessing import Process
from . import prof
def fn(n):
diff --git a/services/fuse/tests/integration_test.py b/services/fuse/tests/integration_test.py
index e80b6983a1..24ac7baf04 100644
--- a/services/fuse/tests/integration_test.py
+++ b/services/fuse/tests/integration_test.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
import arvados
import arvados_fuse
import arvados_fuse.command
@@ -12,12 +11,15 @@ import inspect
import logging
import multiprocessing
import os
-from . import run_test_server
import signal
import sys
import tempfile
import unittest
+import pytest
+
+from . import run_test_server
+
@atexit.register
def _pool_cleanup():
if _pool is None:
@@ -91,9 +93,15 @@ class IntegrationTest(unittest.TestCase):
return func(self, *args, **kwargs)
finally:
if self.mount and self.mount.llfuse_thread.is_alive():
- logging.warning("IntegrationTest.mount:"
- " llfuse thread still alive after umount"
- " -- killing test suite to avoid deadlock")
- os.kill(os.getpid(), signal.SIGKILL)
+            # pytest uses exit status 2 when test collection fails.
+            # A unittest TestCase failing in setup/teardown counts as a
+            # collection failure, so pytest will exit with status 2
+ # no matter what status you specify here. run-tests.sh
+ # looks for this status, so specify 2 just to keep
+ # everything as consistent as possible.
+ # TODO: If we refactor these tests so they're not built
+ # on unittest, consider using a dedicated, non-pytest
+ # exit code like TEMPFAIL.
+ pytest.exit("llfuse thread outlived test - aborting test suite to avoid deadlock", 2)
return wrapper
return decorator
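
The rationale in the comment above boils down to: a leaked llfuse thread would deadlock every later test that touches the same mountpoint, so it is safer to abort the whole session, and pytest.exit() with status 2 matches what run-tests.sh already expects from a collection failure. Schematically:

```python
import pytest

def abort_if_thread_leaked(thread):
    # A surviving FUSE thread would deadlock subsequent tests, so stop
    # the whole pytest session rather than limp along.
    if thread.is_alive():
        pytest.exit("llfuse thread outlived test", 2)
```
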
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
index 02f4009724..d69cdf1c1a 100644
--- a/services/fuse/tests/mount_test_base.py
+++ b/services/fuse/tests/mount_test_base.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
import arvados
import arvados.keep
import arvados_fuse as fuse
@@ -11,7 +10,6 @@ import llfuse
import logging
import multiprocessing
import os
-from . import run_test_server
import shutil
import signal
import subprocess
@@ -21,20 +19,27 @@ import threading
import time
import unittest
-logger = logging.getLogger('arvados.arv-mount')
+import pytest
+from . import run_test_server
from .integration_test import workerPool
-def make_block_cache(disk_cache):
- if disk_cache:
- disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
- shutil.rmtree(disk_cache_dir, ignore_errors=True)
- block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache)
- return block_cache
+logger = logging.getLogger('arvados.arv-mount')
class MountTestBase(unittest.TestCase):
disk_cache = False
+ @classmethod
+ def setUpClass(cls):
+ if cls.disk_cache:
+ cls._disk_cache_dir = tempfile.mkdtemp(prefix='MountTest-')
+ else:
+ cls._disk_cache_dir = None
+ cls._keep_block_cache = arvados.keep.KeepBlockCache(
+ disk_cache=cls.disk_cache,
+ disk_cache_dir=cls._disk_cache_dir,
+ )
+
def setUp(self, api=None, local_store=True):
# The underlying C implementation of open() makes a fstat() syscall
# with the GIL still held. When the GETATTR message comes back to
@@ -56,11 +61,16 @@ class MountTestBase(unittest.TestCase):
self.api = api if api else arvados.safeapi.ThreadSafeApiCache(
arvados.config.settings(),
- keep_params={"block_cache": make_block_cache(self.disk_cache)},
+ keep_params={"block_cache": self._keep_block_cache},
version='v1',
)
self.llfuse_thread = None
+ @classmethod
+ def tearDownClass(cls):
+ if cls._disk_cache_dir:
+ shutil.rmtree(cls._disk_cache_dir)
+
# This is a copy of Mount's method. TODO: Refactor MountTestBase
# to use a Mount instead of copying its code.
def _llfuse_main(self):
@@ -104,10 +114,16 @@ class MountTestBase(unittest.TestCase):
t0 = time.time()
self.llfuse_thread.join(timeout=60)
if self.llfuse_thread.is_alive():
- logger.warning("MountTestBase.tearDown():"
- " llfuse thread still alive 60s after umount"
- " -- exiting with SIGKILL")
- os.kill(os.getpid(), signal.SIGKILL)
+            # pytest uses exit status 2 when test collection fails.
+            # A unittest TestCase failing in setup/teardown counts as a
+            # collection failure, so pytest will exit with status 2
+ # no matter what status you specify here. run-tests.sh
+ # looks for this status, so specify 2 just to keep
+ # everything as consistent as possible.
+ # TODO: If we refactor these tests so they're not built
+ # on unittest, consider using a dedicated, non-pytest
+ # exit code like TEMPFAIL.
+ pytest.exit("llfuse thread outlived test - aborting test suite to avoid deadlock", 2)
waited = time.time() - t0
if waited > 0.1:
logger.warning("MountTestBase.tearDown(): waited %f s for llfuse thread to end", waited)
diff --git a/services/fuse/tests/performance/__init__.py b/services/fuse/tests/performance/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/services/fuse/tests/performance/performance_profiler.py b/services/fuse/tests/performance/performance_profiler.py
deleted file mode 120000
index 01a6805b32..0000000000
--- a/services/fuse/tests/performance/performance_profiler.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../../sdk/python/tests/performance/performance_profiler.py
\ No newline at end of file
diff --git a/services/fuse/tests/performance/test_collection_performance.py b/services/fuse/tests/performance/test_collection_performance.py
deleted file mode 100644
index 98bc98abd4..0000000000
--- a/services/fuse/tests/performance/test_collection_performance.py
+++ /dev/null
@@ -1,491 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-from __future__ import absolute_import
-from future.utils import viewitems
-from builtins import str
-from builtins import range
-import arvados
-import arvados_fuse as fuse
-import llfuse
-import logging
-import os
-import sys
-import unittest
-from .. import run_test_server
-from ..mount_test_base import MountTestBase
-from ..slow_test import slow_test
-
-logger = logging.getLogger('arvados.arv-mount')
-
-from .performance_profiler import profiled
-
-def fuse_createCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
- class Test(unittest.TestCase):
- def runTest(self):
- self.createCollectionWithMultipleBlocks()
-
- @profiled
- def createCollectionWithMultipleBlocks(self):
- for i in range(0, streams):
- os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
-
- # Create files
- for j in range(0, files_per_stream):
- with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
- f.write(data)
-
- Test().runTest()
-
-def fuse_readContentsFromCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
- class Test(unittest.TestCase):
- def runTest(self):
- self.readContentsFromCollectionWithMultipleBlocks()
-
- @profiled
- def readContentsFromCollectionWithMultipleBlocks(self):
- for i in range(0, streams):
- d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
- for j in range(0, files_per_stream):
- with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(i)+'.txt')) as f:
- self.assertEqual(data, f.read())
-
- Test().runTest()
-
-def fuse_moveFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.moveFileFromCollectionWithMultipleBlocks()
-
- @profiled
- def moveFileFromCollectionWithMultipleBlocks(self):
- d1 = llfuse.listdir(os.path.join(mounttmp, stream))
- self.assertIn(filename, d1)
-
- os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
-
- d1 = llfuse.listdir(os.path.join(mounttmp))
- self.assertIn('moved_from_'+stream+'_'+filename, d1)
-
- d1 = llfuse.listdir(os.path.join(mounttmp, stream))
- self.assertNotIn(filename, d1)
-
- Test().runTest()
-
-def fuse_deleteFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.deleteFileFromCollectionWithMultipleBlocks()
-
- @profiled
- def deleteFileFromCollectionWithMultipleBlocks(self):
- os.remove(os.path.join(mounttmp, stream, filename))
-
- Test().runTest()
-
-# Create a collection with 2 streams, 3 files_per_stream, 2 blocks_per_file, 2**26 bytes_per_block
-class CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile(MountTestBase):
- def setUp(self):
- super(CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile, self).setUp()
-
- @slow_test
- def test_CreateCollectionWithManyBlocksAndMoveAndDeleteFile(self):
- collection = arvados.collection.Collection(api_client=self.api)
- collection.save_new()
-
- m = self.make_mount(fuse.CollectionDirectory)
- with llfuse.lock:
- m.new_collection(collection.api_response(), collection)
- self.assertTrue(m.writable())
-
- streams = 2
- files_per_stream = 3
- blocks_per_file = 2
- bytes_per_block = 2**26
-
- data = 'x' * blocks_per_file * bytes_per_block
-
- self.pool.apply(fuse_createCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- for i in range(0, streams):
- self.assertIn('./stream' + str(i), collection2["manifest_text"])
-
- for i in range(0, files_per_stream):
- self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
-
- # Read file contents
- self.pool.apply(fuse_readContentsFromCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
-
- # Move file0.txt out of the streams into .
- for i in range(0, streams):
- self.pool.apply(fuse_moveFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- manifest_streams = collection2['manifest_text'].split('\n')
- self.assertEqual(4, len(manifest_streams))
-
- for i in range(0, streams):
- self.assertIn('file0.txt', manifest_streams[0])
-
- for i in range(0, streams):
- self.assertNotIn('file0.txt', manifest_streams[i+1])
-
- for i in range(0, streams):
- for j in range(1, files_per_stream):
- self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
-
- # Delete 'file1.txt' from all the streams
- for i in range(0, streams):
- self.pool.apply(fuse_deleteFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- manifest_streams = collection2['manifest_text'].split('\n')
- self.assertEqual(4, len(manifest_streams))
-
- for i in range(0, streams):
- self.assertIn('file0.txt', manifest_streams[0])
-
- self.assertNotIn('file1.txt', collection2['manifest_text'])
-
- for i in range(0, streams):
- for j in range(2, files_per_stream):
- self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
-
-
-def fuse_createCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
- class Test(unittest.TestCase):
- def runTest(self):
- self.createCollectionWithManyFiles()
-
- @profiled
- def createCollectionWithManyFiles(self):
- for i in range(0, streams):
- os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
-
- # Create files
- for j in range(0, files_per_stream):
- with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
- f.write(data)
-
- Test().runTest()
-
-def fuse_readContentsFromCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
- class Test(unittest.TestCase):
- def runTest(self):
- self.readContentsFromCollectionWithManyFiles()
-
- @profiled
- def readContentsFromCollectionWithManyFiles(self):
- for i in range(0, streams):
- d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
- for j in range(0, files_per_stream):
- with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(i)+'.txt')) as f:
- self.assertEqual(data, f.read())
-
- Test().runTest()
-
-def fuse_moveFileFromCollectionWithManyFiles(mounttmp, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.moveFileFromCollectionWithManyFiles()
-
- @profiled
- def moveFileFromCollectionWithManyFiles(self):
- d1 = llfuse.listdir(os.path.join(mounttmp, stream))
- self.assertIn(filename, d1)
-
- os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
-
- d1 = llfuse.listdir(os.path.join(mounttmp))
- self.assertIn('moved_from_'+stream+'_'+filename, d1)
-
- d1 = llfuse.listdir(os.path.join(mounttmp, stream))
- self.assertNotIn(filename, d1)
-
- Test().runTest()
-
-def fuse_deleteFileFromCollectionWithManyFiles(mounttmp, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.deleteFileFromCollectionWithManyFiles()
-
- @profiled
- def deleteFileFromCollectionWithManyFiles(self):
- os.remove(os.path.join(mounttmp, stream, filename))
-
- Test().runTest()
-
-# Create a collection with two streams, each with 200 files
-class CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
- def setUp(self):
- super(CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
-
- @slow_test
- def test_CreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
- collection = arvados.collection.Collection(api_client=self.api)
- collection.save_new()
-
- m = self.make_mount(fuse.CollectionDirectory)
- with llfuse.lock:
- m.new_collection(collection.api_response(), collection)
- self.assertTrue(m.writable())
-
- streams = 2
- files_per_stream = 200
- data = 'x'
-
- self.pool.apply(fuse_createCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- for i in range(0, streams):
- self.assertIn('./stream' + str(i), collection2["manifest_text"])
-
- for i in range(0, files_per_stream):
- self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
-
- # Read file contents
- self.pool.apply(fuse_readContentsFromCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
-
- # Move file0.txt out of the streams into .
- for i in range(0, streams):
- self.pool.apply(fuse_moveFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- manifest_streams = collection2['manifest_text'].split('\n')
- self.assertEqual(4, len(manifest_streams))
-
- for i in range(0, streams):
- self.assertIn('file0.txt', manifest_streams[0])
-
- for i in range(0, streams):
- self.assertNotIn('file0.txt', manifest_streams[i+1])
-
- for i in range(0, streams):
- for j in range(1, files_per_stream):
- self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
-
- # Delete 'file1.txt' from all the streams
- for i in range(0, streams):
- self.pool.apply(fuse_deleteFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
-
- collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
-
- manifest_streams = collection2['manifest_text'].split('\n')
- self.assertEqual(4, len(manifest_streams))
-
- for i in range(0, streams):
- self.assertIn('file0.txt', manifest_streams[0])
-
- self.assertNotIn('file1.txt', collection2['manifest_text'])
-
- for i in range(0, streams):
- for j in range(2, files_per_stream):
- self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
-
-
-def magicDirTest_MoveFileFromCollection(mounttmp, collection1, collection2, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.magicDirTest_moveFileFromCollection()
-
- @profiled
- def magicDirTest_moveFileFromCollection(self):
- os.rename(os.path.join(mounttmp, collection1, filename), os.path.join(mounttmp, collection2, filename))
-
- Test().runTest()
-
-def magicDirTest_RemoveFileFromCollection(mounttmp, collection1, stream, filename):
- class Test(unittest.TestCase):
- def runTest(self):
- self.magicDirTest_removeFileFromCollection()
-
- @profiled
- def magicDirTest_removeFileFromCollection(self):
- os.remove(os.path.join(mounttmp, collection1, filename))
-
- Test().runTest()
-
-class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
- def setUp(self):
- super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
-
- @profiled
- def magicDirTest_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
- # Create collection
- collection = arvados.collection.Collection(api_client=self.api)
- for j in range(0, files_per_stream):
- with collection.open("file"+str(j)+".txt", "w") as f:
- f.write(data)
- collection.save_new()
- return collection
-
- @profiled
- def magicDirTest_readCollectionContents(self, collection, streams=1, files_per_stream=1, data='x'):
- mount_ls = os.listdir(os.path.join(self.mounttmp, collection))
-
- files = {}
- for j in range(0, files_per_stream):
- files[os.path.join(self.mounttmp, collection, 'file'+str(j)+'.txt')] = data
-
- for k, v in viewItems(files):
- with open(os.path.join(self.mounttmp, collection, k)) as f:
- self.assertEqual(v, f.read())
-
- @slow_test
- def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
- streams = 2
- files_per_stream = 200
- data = 'x'
-
- collection1 = self.magicDirTest_createCollectionWithManyFiles()
- # Create collection with multiple files
- collection2 = self.magicDirTest_createCollectionWithManyFiles(streams, files_per_stream, data)
-
- # Mount FuseMagicDir
- self.make_mount(fuse.MagicDirectory)
-
- self.magicDirTest_readCollectionContents(collection2.manifest_locator(), streams, files_per_stream, data)
-
- # Move file0.txt out of the collection2 into collection1
- self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(),
- collection1.manifest_locator(), 'stream0', 'file0.txt',))
- updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
- self.assertFalse('file0.txt' in updated_collection['manifest_text'])
- self.assertTrue('file1.txt' in updated_collection['manifest_text'])
-
- # Delete file1.txt from collection2
- self.pool.apply(magicDirTest_RemoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(), 'stream0', 'file1.txt',))
- updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
- self.assertFalse('file1.txt' in updated_collection['manifest_text'])
- self.assertTrue('file2.txt' in updated_collection['manifest_text'])
-
-
-def magicDirTest_MoveAllFilesFromCollection(mounttmp, from_collection, to_collection, stream, files_per_stream):
- class Test(unittest.TestCase):
- def runTest(self):
- self.magicDirTest_moveAllFilesFromCollection()
-
- @profiled
- def magicDirTest_moveAllFilesFromCollection(self):
- for j in range(0, files_per_stream):
- os.rename(os.path.join(mounttmp, from_collection, 'file'+str(j)+'.txt'), os.path.join(mounttmp, to_collection, 'file'+str(j)+'.txt'))
-
- Test().runTest()
-
-class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(MountTestBase):
- def setUp(self):
- super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother, self).setUp()
-
- @profiled
- def magicDirTestMoveAllFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0,
- blocks_per_file=0, bytes_per_block=0, data='x'):
- # Create collection
- collection = arvados.collection.Collection(api_client=self.api)
- for j in range(0, files_per_stream):
- with collection.open("file"+str(j)+".txt", "w") as f:
- f.write(data)
- collection.save_new()
- return collection
-
- @slow_test
- def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(self):
- streams = 2
- files_per_stream = 200
- data = 'x'
-
- collection1 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles()
- # Create collection with multiple files
- collection2 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
-
- # Mount FuseMagicDir
- self.make_mount(fuse.MagicDirectory)
-
- # Move all files from collection2 into collection1
- self.pool.apply(magicDirTest_MoveAllFilesFromCollection, (self.mounttmp, collection2.manifest_locator(),
- collection1.manifest_locator(), 'stream0', files_per_stream,))
-
- updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
- file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
- for name in file_names:
- self.assertFalse(name in updated_collection['manifest_text'])
-
- updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
- for name in file_names:
- self.assertTrue(name in updated_collection['manifest_text'])
-
-
-# Move one file at a time from one collection into another
-class UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother(MountTestBase):
- def setUp(self):
- super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother, self).setUp()
-
- @profiled
- def magicDirTestMoveFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
- # Create collection
- collection = arvados.collection.Collection(api_client=self.api)
- for j in range(0, files_per_stream):
- with collection.open("file"+str(j)+".txt", "w") as f:
- f.write(data)
- collection.save_new()
- return collection
-
- def magicDirTestMoveFiles_oneEachIntoAnother(self, from_collection, to_collection, files_per_stream):
- for j in range(0, files_per_stream):
- self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, from_collection.manifest_locator(),
- to_collection.manifest_locator(), 'stream0', 'file'+str(j)+'.txt',))
-
- @slow_test
- def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveEachFileIntoAnother(self):
- streams = 2
- files_per_stream = 200
- data = 'x'
-
- collection1 = self.magicDirTestMoveFiles_createCollectionWithManyFiles()
- # Create collection with multiple files
- collection2 = self.magicDirTestMoveFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
-
- # Mount FuseMagicDir
- self.make_mount(fuse.MagicDirectory)
-
- # Move all files from collection2 into collection1
- self.magicDirTestMoveFiles_oneEachIntoAnother(collection2, collection1, files_per_stream)
-
- updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
- file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
- for name in file_names:
- self.assertFalse(name in updated_collection['manifest_text'])
-
- updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
- for name in file_names:
- self.assertTrue(name in updated_collection['manifest_text'])
-
-class FuseListLargeProjectContents(MountTestBase):
- @profiled
- def getProjectWithManyCollections(self):
- project_contents = llfuse.listdir(self.mounttmp)
- self.assertEqual(201, len(project_contents))
- self.assertIn('Collection_1', project_contents)
-
- @profiled
- def listContentsInProjectWithManyCollections(self):
- project_contents = llfuse.listdir(self.mounttmp)
- self.assertEqual(201, len(project_contents))
- self.assertIn('Collection_1', project_contents)
-
- for collection_name in project_contents:
- collection_contents = llfuse.listdir(os.path.join(self.mounttmp, collection_name))
- self.assertIn('baz', collection_contents)
-
- @slow_test
- def test_listLargeProjectContents(self):
- self.make_mount(fuse.ProjectDirectory,
- project_object=run_test_server.fixture('groups')['project_with_201_collections'])
- self.getProjectWithManyCollections()
- self.listContentsInProjectWithManyCollections()
diff --git a/services/fuse/tests/prof.py b/services/fuse/tests/prof.py
index f9ce1881de..5bdb1b2e7b 100644
--- a/services/fuse/tests/prof.py
+++ b/services/fuse/tests/prof.py
@@ -2,8 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import print_function
-from builtins import object
import time
class CountTime(object):
diff --git a/services/fuse/tests/slow_test.py b/services/fuse/tests/slow_test.py
deleted file mode 120000
index c7e1f7fada..0000000000
--- a/services/fuse/tests/slow_test.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../sdk/python/tests/slow_test.py
\ No newline at end of file
diff --git a/services/fuse/tests/test_cache.py b/services/fuse/tests/test_cache.py
index 46ed0be411..3f6b804b92 100644
--- a/services/fuse/tests/test_cache.py
+++ b/services/fuse/tests/test_cache.py
@@ -2,7 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from builtins import range
import arvados
import arvados.collection
import arvados_fuse
diff --git a/services/fuse/tests/test_command_args.py b/services/fuse/tests/test_command_args.py
index b08ab19335..cc6ffc2580 100644
--- a/services/fuse/tests/test_command_args.py
+++ b/services/fuse/tests/test_command_args.py
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from __future__ import print_function
-from six import assertRegex
import arvados
import arvados_fuse
import arvados_fuse.command
@@ -14,14 +11,16 @@ import io
import json
import llfuse
import logging
-import mock
import os
-from . import run_test_server
import sys
import tempfile
import unittest
import resource
+from unittest import mock
+
+from . import run_test_server
+
def noexit(func):
"""If argparse or arvados_fuse tries to exit, fail the test instead"""
class SystemExitCaught(Exception):
@@ -74,23 +73,28 @@ class MountArgsTest(unittest.TestCase):
@noexit
def test_default_all(self):
args = arvados_fuse.command.ArgumentParser().parse_args([
+ '--refresh-time=27',
'--foreground', self.mntdir])
self.assertEqual(args.mode, None)
self.mnt = arvados_fuse.command.Mount(args)
+
e = self.check_ent_type(arvados_fuse.ProjectDirectory, 'home')
self.assertEqual(e.project_object['uuid'],
run_test_server.fixture('users')['active']['uuid'])
+ self.assertEqual(e._poll_time, 27)
+
e = self.check_ent_type(arvados_fuse.MagicDirectory, 'by_id')
+ self.assertEqual(e._poll_time, 27)
e = self.check_ent_type(arvados_fuse.StringFile, 'README')
readme = e.readfrom(0, -1).decode()
- assertRegex(self, readme, r'active-user@arvados\.local')
- assertRegex(self, readme, r'\n$')
+ self.assertRegex(readme, r'active-user@arvados\.local')
+ self.assertRegex(readme, r'\n$')
e = self.check_ent_type(arvados_fuse.StringFile, 'by_id', 'README')
txt = e.readfrom(0, -1).decode()
- assertRegex(self, txt, r'portable data hash')
- assertRegex(self, txt, r'\n$')
+ self.assertRegex(txt, r'portable data hash')
+ self.assertRegex(txt, r'\n$')
@noexit
def test_by_id(self):
@@ -130,11 +134,16 @@ class MountArgsTest(unittest.TestCase):
cid = c[id_type]
args = arvados_fuse.command.ArgumentParser().parse_args([
'--collection', cid,
+ '--refresh-time=27',
'--foreground', self.mntdir])
self.mnt = arvados_fuse.command.Mount(args)
e = self.check_ent_type(arvados_fuse.CollectionDirectory)
self.assertEqual(e.collection_locator, cid)
self.assertEqual(id_type == 'uuid', self.mnt.listen_for_events)
+ if id_type == 'uuid':
+ self.assertEqual(e._poll_time, 27)
+ else:
+ self.assertGreaterEqual(e._poll_time, 60*60)
def test_collection_pdh(self):
self.test_collection('portable_data_hash')
@@ -150,6 +159,7 @@ class MountArgsTest(unittest.TestCase):
self.assertEqual(e.project_object['uuid'],
run_test_server.fixture('users')['active']['uuid'])
self.assertEqual(True, self.mnt.listen_for_events)
+ self.assertEqual(e._poll_time, 15)
def test_mutually_exclusive_args(self):
cid = run_test_server.fixture('collections')['public_text_file']['uuid']
@@ -175,6 +185,7 @@ class MountArgsTest(unittest.TestCase):
self.mnt = arvados_fuse.command.Mount(args)
e = self.check_ent_type(arvados_fuse.ProjectDirectory)
self.assertEqual(e.project_object['uuid'], uuid)
+ self.assertEqual(e._poll_time, 15)
@noexit
def test_shared(self):
@@ -199,7 +210,7 @@ class MountArgsTest(unittest.TestCase):
with self.assertRaises(SystemExit):
args = arvados_fuse.command.ArgumentParser().parse_args(['--version'])
- assertRegex(self, sys.stdout.getvalue(), "[0-9]+\.[0-9]+\.[0-9]+")
+ self.assertRegex(sys.stdout.getvalue(), r'[0-9]+\.[0-9]+\.[0-9]+')
sys.stderr.close()
sys.stderr = origerr
sys.stdout = origout
diff --git a/services/fuse/tests/test_crunchstat.py b/services/fuse/tests/test_crunchstat.py
index 3cf15fe113..32272a83c4 100644
--- a/services/fuse/tests/test_crunchstat.py
+++ b/services/fuse/tests/test_crunchstat.py
@@ -2,12 +2,10 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
import subprocess
from .integration_test import IntegrationTest
-
class CrunchstatTest(IntegrationTest):
def test_crunchstat(self):
output = subprocess.check_output(
diff --git a/services/fuse/tests/test_exec.py b/services/fuse/tests/test_exec.py
index f977990026..c67cc55f34 100644
--- a/services/fuse/tests/test_exec.py
+++ b/services/fuse/tests/test_exec.py
@@ -2,17 +2,15 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from six import assertRegex
import arvados_fuse.command
import json
import multiprocessing
import os
-from . import run_test_server
import shlex
import tempfile
import unittest
+from . import run_test_server
from .integration_test import workerPool
def try_exec(mnt, cmd):
@@ -58,7 +56,4 @@ class ExecMode(unittest.TestCase):
shlex.quote(os.path.join(self.okfile)),
)]))
with open(self.okfile) as f:
- assertRegex(
- self,
- json.load(f)['manifest_text'],
- r' 0:3:foo.txt\n')
+ self.assertRegex(json.load(f)['manifest_text'], r' 0:3:foo.txt\n')
diff --git a/services/fuse/tests/test_inodes.py b/services/fuse/tests/test_inodes.py
index c5c92a9b3f..cc22f521e0 100644
--- a/services/fuse/tests/test_inodes.py
+++ b/services/fuse/tests/test_inodes.py
@@ -3,11 +3,12 @@
# SPDX-License-Identifier: AGPL-3.0
import arvados_fuse
-import mock
import unittest
import llfuse
import logging
+from unittest import mock
+
class InodeTests(unittest.TestCase):
# The following tests call next(inodes._counter) because inode 1
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
index b3bec39cc5..7c62fc60ff 100644
--- a/services/fuse/tests/test_mount.py
+++ b/services/fuse/tests/test_mount.py
@@ -2,36 +2,33 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from future.utils import viewitems
-from builtins import str
-from builtins import object
-from pathlib import Path
-from six import assertRegex
import errno
import json
import llfuse
import logging
-import mock
import os
import subprocess
import time
import unittest
import tempfile
-import parameterized
+import stat
+
+from pathlib import Path
+from unittest import mock
import arvados
import arvados_fuse as fuse
+import parameterized
+
from arvados_fuse import fusedir
-from . import run_test_server
+from . import run_test_server
from .integration_test import IntegrationTest
from .mount_test_base import MountTestBase
from .test_tmp_collection import storage_classes_desired
logger = logging.getLogger('arvados.arv-mount')
-
class AssertWithTimeout(object):
"""Allow some time for an assertion to pass."""
@@ -63,44 +60,38 @@ class FuseMountTest(MountTestBase):
def setUp(self):
super(FuseMountTest, self).setUp()
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
- cw.start_new_file('thing2.txt')
- cw.write("data 2")
-
- cw.start_new_stream('dir1')
- cw.start_new_file('thing3.txt')
- cw.write("data 3")
- cw.start_new_file('thing4.txt')
- cw.write("data 4")
-
- cw.start_new_stream('dir2')
- cw.start_new_file('thing5.txt')
- cw.write("data 5")
- cw.start_new_file('thing6.txt')
- cw.write("data 6")
-
- cw.start_new_stream('dir2/dir3')
- cw.start_new_file('thing7.txt')
- cw.write("data 7")
-
- cw.start_new_file('thing8.txt')
- cw.write("data 8")
-
- cw.start_new_stream('edgecases')
- for f in ":/.../-/*/ ".split("/"):
- cw.start_new_file(f)
- cw.write('x')
-
- for f in ":/.../-/*/ ".split("/"):
- cw.start_new_stream('edgecases/dirs/' + f)
- cw.start_new_file('x/x')
- cw.write('x')
-
- self.testcollection = cw.finish()
- self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+ cw = arvados.collection.Collection()
+ with cw.open('thing1.txt', 'w') as f:
+ f.write('data 1')
+ with cw.open('thing2.txt', 'w') as f:
+ f.write('data 2')
+
+ with cw.open('dir1/thing3.txt', 'w') as f:
+ f.write('data 3')
+ with cw.open('dir1/thing4.txt', 'w') as f:
+ f.write('data 4')
+
+ with cw.open('dir2/thing5.txt', 'w') as f:
+ f.write('data 5')
+ with cw.open('dir2/thing6.txt', 'w') as f:
+ f.write('data 6')
+
+ with cw.open('dir2/dir3/thing7.txt', 'w') as f:
+ f.write('data 7')
+ with cw.open('dir2/dir3/thing8.txt', 'w') as f:
+ f.write('data 8')
+
+ for fnm in ":/.../-/*/ ".split("/"):
+ with cw.open('edgecases/'+fnm, 'w') as f:
+ f.write('x')
+
+ for fnm in ":/.../-/*/ ".split("/"):
+ with cw.open('edgecases/dirs/'+fnm+'/x/x', 'w') as f:
+ f.write('x')
+
+ self.testcollection = cw.portable_data_hash()
+ self.test_manifest = cw.manifest_text()
+ self.api.collections().create(body={"manifest_text": self.test_manifest}).execute()
def runTest(self):
self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection)
@@ -124,7 +115,7 @@ class FuseMountTest(MountTestBase):
'dir2/dir3/thing7.txt': 'data 7',
'dir2/dir3/thing8.txt': 'data 8'}
- for k, v in viewitems(files):
+ for k, v in files.items():
with open(os.path.join(self.mounttmp, k), 'rb') as f:
self.assertEqual(v, f.read().decode())
@@ -140,12 +131,11 @@ class FuseMagicTest(MountTestBase):
self.collection_in_test_project = run_test_server.fixture('collections')['foo_collection_in_aproject']['name']
self.collection_in_filter_group = run_test_server.fixture('collections')['baz_file']['name']
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
+ cw = arvados.collection.Collection()
+ with cw.open('thing1.txt', 'w') as f:
+ f.write('data 1')
- self.testcollection = cw.finish()
+ self.testcollection = cw.portable_data_hash()
self.test_manifest = cw.manifest_text()
coll = self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
self.test_manifest_pdh = coll['portable_data_hash']
@@ -189,7 +179,7 @@ class FuseMagicTest(MountTestBase):
files = {}
files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
- for k, v in viewitems(files):
+ for k, v in files.items():
with open(os.path.join(self.mounttmp, k), 'rb') as f:
self.assertEqual(v, f.read().decode())
@@ -312,7 +302,7 @@ class FuseHomeTest(MountTestBase):
'anonymously_accessible_project']
found_in = 0
found_not_in = 0
- for name, item in viewitems(run_test_server.fixture('collections')):
+ for name, item in run_test_server.fixture('collections').items():
if 'name' not in item:
pass
elif item['owner_uuid'] == public_project['uuid']:
@@ -451,7 +441,7 @@ class FuseCreateFileTest(MountTestBase):
self.assertEqual(["file1.txt"], d1)
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\. d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:file1\.txt$')
@@ -494,9 +484,60 @@ class FuseWriteFileTest(MountTestBase):
self.assertEqual(12, self.operations.read_counter.get())
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+def fuseMknodTestHelperReadFile(mounttmp):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ with open(os.path.join(mounttmp, "file1.txt"), "r") as f:
+ self.assertEqual(f.read(), "")
+ Test().runTest()
+
+class FuseMknodTest(MountTestBase):
+ def runTest(self):
+ # Check that os.mknod() can be used to create normal files.
+ collection = arvados.collection.Collection(api_client=self.api)
+ collection.save_new()
+
+ m = self.make_mount(fuse.CollectionDirectory)
+ with llfuse.lock:
+ m.new_collection(collection.api_response(), collection)
+ self.assertTrue(m.writable())
+
+ self.assertNotIn("file1.txt", collection)
+
+ self.assertEqual(0, self.operations.write_counter.get())
+ self.pool.apply(os.mknod, (os.path.join(self.mounttmp, "file1.txt"),))
+
+ with collection.open("file1.txt") as f:
+ self.assertEqual(f.read(), "")
+
+ self.pool.apply(fuseMknodTestHelperReadFile, (self.mounttmp,))
+
+ # Fail trying to create a FIFO
+ with self.assertRaises(OSError) as exc_check:
+ self.pool.apply(os.mknod, (os.path.join(self.mounttmp, "file2.txt"), stat.S_IFIFO))
+
+class FuseMknodReadOnlyTest(MountTestBase):
+ def runTest(self):
+ collection = arvados.collection.Collection(api_client=self.api)
+ collection.save_new()
+
+ m = self.make_mount(fuse.CollectionDirectory, enable_write=False)
+ with llfuse.lock:
+ m.new_collection(collection.api_response(), collection)
+ self.assertFalse(m.writable())
+ with self.assertRaises(OSError) as exc_check:
+ self.pool.apply(os.mknod, (os.path.join(self.mounttmp, "file1.txt"),))
+
+class FuseMknodProjectTest(MountTestBase):
+ def runTest(self):
+ self.make_mount(fuse.ProjectDirectory,
+ project_object=self.api.users().current().execute())
+ with self.assertRaises(OSError) as exc_check:
+ self.pool.apply(os.mknod, (os.path.join(self.mounttmp, "file1.txt"),))
+
def fuseUpdateFileTestHelper(mounttmp):
class Test(unittest.TestCase):
@@ -533,7 +574,7 @@ class FuseUpdateFileTest(MountTestBase):
self.pool.apply(fuseUpdateFileTestHelper, (self.mounttmp,))
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\. daaef200ebb921e011e3ae922dd3266b\+11\+A\S+ 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:11:file1\.txt 22:1:file1\.txt$')
@@ -573,7 +614,7 @@ class FuseMkdirTest(MountTestBase):
self.pool.apply(fuseMkdirTestHelper, (self.mounttmp,))
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
@@ -640,13 +681,13 @@ class FuseRmTest(MountTestBase):
# Starting manifest
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
self.pool.apply(fuseRmTestHelperDeleteFile, (self.mounttmp,))
# Empty directories are represented by an empty file named "."
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
self.pool.apply(fuseRmTestHelperRmdir, (self.mounttmp,))
@@ -697,13 +738,13 @@ class FuseMvFileTest(MountTestBase):
# Starting manifest
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
self.pool.apply(fuseMvFileTestHelperMoveFile, (self.mounttmp,))
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt\n\./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
@@ -731,7 +772,7 @@ class FuseRenameTest(MountTestBase):
# Starting manifest
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
d1 = llfuse.listdir(os.path.join(self.mounttmp))
@@ -747,7 +788,7 @@ class FuseRenameTest(MountTestBase):
self.assertEqual(["file1.txt"], d1)
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- assertRegex(self, collection2["manifest_text"],
+ self.assertRegex(collection2["manifest_text"],
r'\./testdir2 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
@@ -818,7 +859,7 @@ def fuseFileConflictTestHelper(mounttmp, uuid, keeptmp, settings):
with open(os.path.join(mounttmp, "file1.txt"), "r") as f:
self.assertEqual(f.read(), "bar")
- assertRegex(self, d1[1],
+ self.assertRegex(d1[1],
r'file1\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~')
with open(os.path.join(mounttmp, d1[1]), "r") as f:
@@ -923,7 +964,7 @@ class FuseMvFileBetweenCollectionsTest(MountTestBase):
collection1.update()
collection2.update()
- assertRegex(self, collection1.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+ self.assertRegex(collection1.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
self.assertEqual(collection2.manifest_text(), "")
self.pool.apply(fuseMvFileBetweenCollectionsTest2, (self.mounttmp,
@@ -934,7 +975,7 @@ class FuseMvFileBetweenCollectionsTest(MountTestBase):
collection2.update()
self.assertEqual(collection1.manifest_text(), "")
- assertRegex(self, collection2.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file2\.txt$")
+ self.assertRegex(collection2.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file2\.txt$")
collection1.stop_threads()
collection2.stop_threads()
@@ -994,7 +1035,7 @@ class FuseMvDirBetweenCollectionsTest(MountTestBase):
collection1.update()
collection2.update()
- assertRegex(self, collection1.manifest_text(), r"\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+ self.assertRegex(collection1.manifest_text(), r"\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
self.assertEqual(collection2.manifest_text(), "")
self.pool.apply(fuseMvDirBetweenCollectionsTest2, (self.mounttmp,
@@ -1005,7 +1046,7 @@ class FuseMvDirBetweenCollectionsTest(MountTestBase):
collection2.update()
self.assertEqual(collection1.manifest_text(), "")
- assertRegex(self, collection2.manifest_text(), r"\./testdir2 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+ self.assertRegex(collection2.manifest_text(), r"\./testdir2 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
collection1.stop_threads()
collection2.stop_threads()
@@ -1164,12 +1205,11 @@ class FuseMagicTestPDHOnly(MountTestBase):
def setUp(self, api=None):
super(FuseMagicTestPDHOnly, self).setUp(api=api)
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
+ cw = arvados.collection.Collection()
+ with cw.open('thing1.txt', 'w') as f:
+ f.write('data 1')
- self.testcollection = cw.finish()
+ self.testcollection = cw.portable_data_hash()
self.test_manifest = cw.manifest_text()
created = self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
self.testcollectionuuid = str(created['uuid'])
@@ -1200,7 +1240,7 @@ class FuseMagicTestPDHOnly(MountTestBase):
files = {}
files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
- for k, v in viewitems(files):
+ for k, v in files.items():
with open(os.path.join(self.mounttmp, k), 'rb') as f:
self.assertEqual(v, f.read().decode())
@@ -1267,8 +1307,8 @@ class SlashSubstitutionTest(IntegrationTest):
f.write('foo')
def checkContents(self):
- self.assertRegexpMatches(self.api.collections().get(uuid=self.testcoll['uuid']).execute()['manifest_text'], ' acbd18db') # md5(foo)
- self.assertRegexpMatches(self.api.collections().get(uuid=self.testcolleasy['uuid']).execute()['manifest_text'], ' f561aaf6') # md5(xxx)
+ self.assertRegex(self.api.collections().get(uuid=self.testcoll['uuid']).execute()['manifest_text'], r' acbd18db') # md5(foo)
+ self.assertRegex(self.api.collections().get(uuid=self.testcolleasy['uuid']).execute()['manifest_text'], r' f561aaf6') # md5(xxx)
@IntegrationTest.mount(argv=mnt_args)
@mock.patch('arvados.util.get_config_once')
@@ -1276,7 +1316,7 @@ class SlashSubstitutionTest(IntegrationTest):
self.testcollconflict = self.api.collections().create(body={"name": self.fusename}).execute()
get_config_once.return_value = {"Collections": {"ForwardSlashNameSubstitution": "[SLASH]"}}
self.pool_test(os.path.join(self.mnt, 'zzz'), self.fusename)
- self.assertRegexpMatches(self.api.collections().get(uuid=self.testcollconflict['uuid']).execute()['manifest_text'], ' acbd18db') # md5(foo)
+ self.assertRegex(self.api.collections().get(uuid=self.testcollconflict['uuid']).execute()['manifest_text'], r' acbd18db') # md5(foo)
# foo/bar/baz collection unchanged, because it is masked by foo[SLASH]bar[SLASH]baz
self.assertEqual(self.api.collections().get(uuid=self.testcoll['uuid']).execute()['manifest_text'], '')
@staticmethod
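
The setUp rewrites above replace the legacy CollectionWriter stream API (start_new_stream/start_new_file/finish) with the file-like Collection API; nested paths create intermediate directories implicitly. A condensed sketch of the pattern as used in this patch, with illustrative file names:

    import arvados

    coll = arvados.collection.Collection()
    with coll.open('dir1/thing.txt', 'w') as f:   # creates dir1/ implicitly
        f.write('data')

    pdh = coll.portable_data_hash()   # stands in for CollectionWriter.finish()
    manifest = coll.manifest_text()
    # persisting is unchanged:
    # api.collections().create(body={'manifest_text': manifest}).execute()
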
diff --git a/services/fuse/tests/test_retry.py b/services/fuse/tests/test_retry.py
index 44ab5cce91..2fc7ae7c3c 100644
--- a/services/fuse/tests/test_retry.py
+++ b/services/fuse/tests/test_retry.py
@@ -2,22 +2,19 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
import arvados
import arvados_fuse.command
import json
-import mock
import os
import pycurl
import queue
-from . import run_test_server
import tempfile
import unittest
-from .integration_test import IntegrationTest
+from unittest import mock
+from . import run_test_server
+from .integration_test import IntegrationTest
class KeepClientRetry(unittest.TestCase):
origKeepClient = arvados.keep.KeepClient
@@ -57,7 +54,7 @@ class RetryPUT(IntegrationTest):
q.put(mockedCurl)
q.put(pycurl.Curl())
q.put(pycurl.Curl())
- with mock.patch('arvados.keep.KeepClient.KeepService._get_user_agent', side_effect=q.get_nowait):
+ with mock.patch('arvados.keep.KeepClient._KeepService._get_user_agent', side_effect=q.get_nowait):
self.pool_test(os.path.join(self.mnt, 'zzz'))
self.assertTrue(mockedCurl.perform.called)
@staticmethod
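
The new patch target tracks an SDK rename (KeepClient.KeepService became the private KeepClient._KeepService). mock.patch resolves the dotted path when the patch is started, so a stale target fails fast with AttributeError rather than silently patching nothing. A generic sketch of both behaviors (class names are hypothetical, not the SDK's):

    from unittest import mock

    class Client:
        class _Service:                 # renamed from Service
            def agent(self):
                return 'real'

    with mock.patch(f'{__name__}.Client._Service.agent', return_value='fake'):
        print(Client._Service().agent())   # -> fake

    try:
        mock.patch(f'{__name__}.Client.Service.agent').start()
    except AttributeError as err:
        print('stale patch target:', err)
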
diff --git a/services/fuse/tests/test_tmp_collection.py b/services/fuse/tests/test_tmp_collection.py
index c59024267a..d6993750f7 100644
--- a/services/fuse/tests/test_tmp_collection.py
+++ b/services/fuse/tests/test_tmp_collection.py
@@ -2,8 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from builtins import range
-from six import assertRegex
import arvados
import arvados_fuse
import arvados_fuse.command
@@ -18,7 +16,6 @@ from .mount_test_base import MountTestBase
logger = logging.getLogger('arvados.arv-mount')
-
class TmpCollectionArgsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
@@ -107,8 +104,7 @@ class TmpCollectionTest(IntegrationTest):
def _test_tmp_onefile(self, tmpdir):
with open(os.path.join(tmpdir, 'foo'), 'w') as f:
f.write('foo')
- assertRegex(
- self,
+ self.assertRegex(
current_manifest(tmpdir),
r'^\. acbd18db4cc2f85cedef654fccc4a4d8\+3(\+\S+)? 0:3:foo\n$')
@@ -137,7 +133,7 @@ class TmpCollectionTest(IntegrationTest):
else:
with open(path, 'w') as f:
f.write(content)
- assertRegex(self, current_manifest(tmpdir), expect)
+ self.assertRegex(current_manifest(tmpdir), expect)
@IntegrationTest.mount(argv=mnt_args)
def test_tmp_rewrite(self):
@@ -150,4 +146,4 @@ class TmpCollectionTest(IntegrationTest):
f.write("b2")
with open(os.path.join(tmpdir, "b1"), 'w') as f:
f.write("1b")
- assertRegex(self, current_manifest(tmpdir), "^\. ed4f3f67c70b02b29c50ce1ea26666bd\+4(\+\S+)? 0:2:b1 2:2:b2\n$")
+ self.assertRegex(current_manifest(tmpdir), r'^\. ed4f3f67c70b02b29c50ce1ea26666bd\+4(\+\S+)? 0:2:b1 2:2:b2\n$')
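
The last hunk is more than a style fix: in a plain string literal, sequences like \. and \S are invalid escapes that Python 3.6+ reports as a DeprecationWarning (upgraded to SyntaxWarning in newer releases), so regexes belong in raw strings. For example:

    import re

    pattern = r'^\. [0-9a-f]{32}\+4(\+\S+)? 0:2:b1 2:2:b2\n$'
    assert re.match(pattern, '. ed4f3f67c70b02b29c50ce1ea26666bd+4 0:2:b1 2:2:b2\n')
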
diff --git a/services/fuse/tests/test_token_expiry.py b/services/fuse/tests/test_token_expiry.py
index 040db2e096..89076d72cb 100644
--- a/services/fuse/tests/test_token_expiry.py
+++ b/services/fuse/tests/test_token_expiry.py
@@ -2,12 +2,10 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from builtins import range
import apiclient
import arvados
import arvados_fuse
import logging
-import mock
import multiprocessing
import os
import re
@@ -15,6 +13,8 @@ import sys
import time
import unittest
+from unittest import mock
+
from .integration_test import IntegrationTest
logger = logging.getLogger('arvados.arv-mount')
@@ -65,8 +65,8 @@ class TokenExpiryTest(IntegrationTest):
@staticmethod
def _test_refresh_old_manifest(self, zzz):
- uuid = 'zzzzz-4zz18-op4e2lbej01tcvu'
- fnm = 'zzzzz-8i9sb-0vsrcqi7whchuil.log.txt'
+ uuid = 'zzzzz-4zz18-logcollection02'
+ fnm = 'crunch-run.txt'
os.listdir(os.path.join(zzz, uuid))
for _ in range(8):
with open(os.path.join(zzz, uuid, fnm)) as f:
diff --git a/services/fuse/tests/test_unmount.py b/services/fuse/tests/test_unmount.py
index 6a19b33454..3949fd5de4 100644
--- a/services/fuse/tests/test_unmount.py
+++ b/services/fuse/tests/test_unmount.py
@@ -2,8 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-from __future__ import absolute_import
-from builtins import bytes
import arvados_fuse.unmount
import os
import subprocess
diff --git a/services/githttpd/auth_handler.go b/services/githttpd/auth_handler.go
deleted file mode 100644
index c6b23fd4c8..0000000000
--- a/services/githttpd/auth_handler.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "errors"
- "log"
- "net/http"
- "os"
- "regexp"
- "strings"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/httpserver"
- "github.com/sirupsen/logrus"
-)
-
-type authHandler struct {
- handler http.Handler
- clientPool *arvadosclient.ClientPool
- cluster *arvados.Cluster
-}
-
-func (h *authHandler) CheckHealth() error {
- return nil
-}
-
-func (h *authHandler) Done() <-chan struct{} {
- return nil
-}
-
-func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
- var statusCode int
- var statusText string
- var apiToken string
-
- w := httpserver.WrapResponseWriter(wOrig)
-
- if r.Method == "OPTIONS" {
- method := r.Header.Get("Access-Control-Request-Method")
- if method != "GET" && method != "POST" {
- w.WriteHeader(http.StatusMethodNotAllowed)
- return
- }
- w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
- w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Header().Set("Access-Control-Max-Age", "86400")
- w.WriteHeader(http.StatusOK)
- return
- }
-
- if r.Header.Get("Origin") != "" {
- // Allow simple cross-origin requests without user
- // credentials ("user credentials" as defined by CORS,
- // i.e., cookies, HTTP authentication, and client-side
- // SSL certificates. See
- // http://www.w3.org/TR/cors/#user-credentials).
- w.Header().Set("Access-Control-Allow-Origin", "*")
- }
-
- defer func() {
- if w.WroteStatus() == 0 {
- // Nobody has called WriteHeader yet: that
- // must be our job.
- w.WriteHeader(statusCode)
- if statusCode >= 400 {
- w.Write([]byte(statusText))
- }
- }
- }()
-
- creds := auth.CredentialsFromRequest(r)
- if len(creds.Tokens) == 0 {
- statusCode, statusText = http.StatusUnauthorized, "no credentials provided"
- w.Header().Add("WWW-Authenticate", "Basic realm=\"git\"")
- return
- }
- apiToken = creds.Tokens[0]
-
- // Access to paths "/foo/bar.git/*" and "/foo/bar/.git/*" are
- // protected by the permissions on the repository named
- // "foo/bar".
- pathParts := strings.SplitN(r.URL.Path[1:], ".git/", 2)
- if len(pathParts) != 2 {
- statusCode, statusText = http.StatusNotFound, "not found"
- return
- }
- repoName := pathParts[0]
- repoName = strings.TrimRight(repoName, "/")
- httpserver.SetResponseLogFields(r.Context(), logrus.Fields{
- "repoName": repoName,
- })
-
- arv := h.clientPool.Get()
- if arv == nil {
- statusCode, statusText = http.StatusInternalServerError, "connection pool failed: "+h.clientPool.Err().Error()
- return
- }
- defer h.clientPool.Put(arv)
-
- // Log the UUID if the supplied token is a v2 token, otherwise
- // just the last five characters.
- httpserver.SetResponseLogFields(r.Context(), logrus.Fields{
- "tokenUUID": func() string {
- if strings.HasPrefix(apiToken, "v2/") && strings.IndexRune(apiToken[3:], '/') == 27 {
- // UUID part of v2 token
- return apiToken[3:30]
- } else if len(apiToken) > 5 {
- return "[...]" + apiToken[len(apiToken)-5:]
- } else {
- return apiToken
- }
- }(),
- })
-
- // Ask API server whether the repository is readable using
- // this token (by trying to read it!)
- arv.ApiToken = apiToken
- repoUUID, err := h.lookupRepo(arv, repoName)
- if err != nil {
- statusCode, statusText = http.StatusInternalServerError, err.Error()
- return
- }
- if repoUUID == "" {
- statusCode, statusText = http.StatusNotFound, "not found"
- return
- }
-
- isWrite := strings.HasSuffix(r.URL.Path, "/git-receive-pack")
- if !isWrite {
- statusText = "read"
- } else {
- err := arv.Update("repositories", repoUUID, arvadosclient.Dict{
- "repository": arvadosclient.Dict{
- "modified_at": time.Now().String(),
- },
- }, &arvadosclient.Dict{})
- if err != nil {
- statusCode, statusText = http.StatusForbidden, err.Error()
- return
- }
- statusText = "write"
- }
-
- // Regardless of whether the client asked for "/foo.git" or
- // "/foo/.git", we choose whichever variant exists in our repo
- // root, and we try {uuid}.git and {uuid}/.git first. If none
- // of these exist, we 404 even though the API told us the repo
- // _should_ exist (presumably this means the repo was just
- // created, and gitolite sync hasn't run yet).
- rewrittenPath := ""
- tryDirs := []string{
- "/" + repoUUID + ".git",
- "/" + repoUUID + "/.git",
- "/" + repoName + ".git",
- "/" + repoName + "/.git",
- }
- for _, dir := range tryDirs {
- if fileInfo, err := os.Stat(h.cluster.Git.Repositories + dir); err != nil {
- if !os.IsNotExist(err) {
- statusCode, statusText = http.StatusInternalServerError, err.Error()
- return
- }
- } else if fileInfo.IsDir() {
- rewrittenPath = dir + "/" + pathParts[1]
- break
- }
- }
- if rewrittenPath == "" {
- log.Println("WARNING:", repoUUID,
- "git directory not found in", h.cluster.Git.Repositories, tryDirs)
- // We say "content not found" to disambiguate from the
- // earlier "API says that repo does not exist" error.
- statusCode, statusText = http.StatusNotFound, "content not found"
- return
- }
- r.URL.Path = rewrittenPath
-
- h.handler.ServeHTTP(w, r)
-}
-
-var uuidRegexp = regexp.MustCompile(`^[0-9a-z]{5}-s0uqq-[0-9a-z]{15}$`)
-
-func (h *authHandler) lookupRepo(arv *arvadosclient.ArvadosClient, repoName string) (string, error) {
- reposFound := arvadosclient.Dict{}
- var column string
- if uuidRegexp.MatchString(repoName) {
- column = "uuid"
- } else {
- column = "name"
- }
- err := arv.List("repositories", arvadosclient.Dict{
- "filters": [][]string{{column, "=", repoName}},
- }, &reposFound)
- if err != nil {
- return "", err
- } else if avail, ok := reposFound["items_available"].(float64); !ok {
- return "", errors.New("bad list response from API")
- } else if avail < 1 {
- return "", nil
- } else if avail > 1 {
- return "", errors.New("name collision")
- }
- return reposFound["items"].([]interface{})[0].(map[string]interface{})["uuid"].(string), nil
-}
diff --git a/services/githttpd/auth_handler_test.go b/services/githttpd/auth_handler_test.go
deleted file mode 100644
index 2d1ec966a4..0000000000
--- a/services/githttpd/auth_handler_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "io"
- "log"
- "net/http"
- "net/http/httptest"
- "net/url"
- "path/filepath"
- "strings"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&AuthHandlerSuite{})
-
-type AuthHandlerSuite struct {
- cluster *arvados.Cluster
-}
-
-func (s *AuthHandlerSuite) SetUpTest(c *check.C) {
- arvadostest.ResetEnv()
- repoRoot, err := filepath.Abs("../api/tmp/git/test")
- c.Assert(err, check.IsNil)
-
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.Equals, nil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.Equals, nil)
-
- s.cluster.Services.GitHTTP.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: "localhost:0"}: {}}
- s.cluster.TLS.Insecure = true
- s.cluster.Git.GitCommand = "/usr/bin/git"
- s.cluster.Git.Repositories = repoRoot
-}
-
-func (s *AuthHandlerSuite) TestPermission(c *check.C) {
- client, err := arvados.NewClientFromConfig(s.cluster)
- c.Assert(err, check.IsNil)
- ac, err := arvadosclient.New(client)
- c.Assert(err, check.IsNil)
- h := &authHandler{
- cluster: s.cluster,
- clientPool: &arvadosclient.ClientPool{Prototype: ac},
- handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- log.Printf("%v", r.URL)
- io.WriteString(w, r.URL.Path)
- }),
- }
- baseURL, err := url.Parse("http://git.example/")
- c.Assert(err, check.IsNil)
- for _, trial := range []struct {
- label string
- token string
- pathIn string
- pathOut string
- status int
- }{
- {
- label: "read repo by name",
- token: arvadostest.ActiveToken,
- pathIn: arvadostest.Repository2Name + ".git/git-upload-pack",
- pathOut: arvadostest.Repository2UUID + ".git/git-upload-pack",
- },
- {
- label: "read repo by uuid",
- token: arvadostest.ActiveToken,
- pathIn: arvadostest.Repository2UUID + ".git/git-upload-pack",
- pathOut: arvadostest.Repository2UUID + ".git/git-upload-pack",
- },
- {
- label: "write repo by name",
- token: arvadostest.ActiveToken,
- pathIn: arvadostest.Repository2Name + ".git/git-receive-pack",
- pathOut: arvadostest.Repository2UUID + ".git/git-receive-pack",
- },
- {
- label: "write repo by uuid",
- token: arvadostest.ActiveToken,
- pathIn: arvadostest.Repository2UUID + ".git/git-receive-pack",
- pathOut: arvadostest.Repository2UUID + ".git/git-receive-pack",
- },
- {
- label: "uuid not found",
- token: arvadostest.ActiveToken,
- pathIn: strings.Replace(arvadostest.Repository2UUID, "6", "z", -1) + ".git/git-upload-pack",
- status: http.StatusNotFound,
- },
- {
- label: "name not found",
- token: arvadostest.ActiveToken,
- pathIn: "nonexistent-bogus.git/git-upload-pack",
- status: http.StatusNotFound,
- },
- {
- label: "read read-only repo",
- token: arvadostest.SpectatorToken,
- pathIn: arvadostest.FooRepoName + ".git/git-upload-pack",
- pathOut: arvadostest.FooRepoUUID + "/.git/git-upload-pack",
- },
- {
- label: "write read-only repo",
- token: arvadostest.SpectatorToken,
- pathIn: arvadostest.FooRepoName + ".git/git-receive-pack",
- status: http.StatusForbidden,
- },
- } {
- c.Logf("trial label: %q", trial.label)
- u, err := baseURL.Parse(trial.pathIn)
- c.Assert(err, check.IsNil)
- resp := httptest.NewRecorder()
- req := &http.Request{
- Method: "POST",
- URL: u,
- Header: http.Header{
- "Authorization": {"Bearer " + trial.token}}}
- h.ServeHTTP(resp, req)
- if trial.status == 0 {
- trial.status = http.StatusOK
- }
- c.Check(resp.Code, check.Equals, trial.status)
- if trial.status < 400 {
- if trial.pathOut != "" && !strings.HasPrefix(trial.pathOut, "/") {
- trial.pathOut = "/" + trial.pathOut
- }
- c.Check(resp.Body.String(), check.Equals, trial.pathOut)
- }
- }
-}
-
-func (s *AuthHandlerSuite) TestCORS(c *check.C) {
- h := &authHandler{cluster: s.cluster}
-
- // CORS preflight
- resp := httptest.NewRecorder()
- req := &http.Request{
- Method: "OPTIONS",
- Header: http.Header{
- "Origin": {"*"},
- "Access-Control-Request-Method": {"GET"},
- },
- }
- h.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "GET, POST")
- c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type")
- c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
- c.Check(resp.Body.String(), check.Equals, "")
-
- // CORS actual request. Bogus token and path ensure
- // authHandler responds 4xx without calling our wrapped (nil)
- // handler.
- u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
- c.Assert(err, check.Equals, nil)
- resp = httptest.NewRecorder()
- req = &http.Request{
- Method: "GET",
- URL: u,
- Header: http.Header{
- "Origin": {"*"},
- "Authorization": {"OAuth2 foobar"},
- },
- }
- h.ServeHTTP(resp, req)
- c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
-}
diff --git a/services/githttpd/cmd.go b/services/githttpd/cmd.go
deleted file mode 100644
index e6ca3c0743..0000000000
--- a/services/githttpd/cmd.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "context"
-
- "git.arvados.org/arvados.git/lib/service"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var Command = service.Command(arvados.ServiceNameGitHTTP, newHandler)
-
-func newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
- client, err := arvados.NewClientFromConfig(cluster)
- if err != nil {
- return service.ErrorHandler(ctx, cluster, err)
- }
- ac, err := arvadosclient.New(client)
- if err != nil {
- return service.ErrorHandler(ctx, cluster, err)
- }
- return &authHandler{
- clientPool: &arvadosclient.ClientPool{Prototype: ac},
- cluster: cluster,
- handler: newGitHandler(ctx, cluster),
- }
-}
diff --git a/services/githttpd/git_handler.go b/services/githttpd/git_handler.go
deleted file mode 100644
index 7c94294c04..0000000000
--- a/services/githttpd/git_handler.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "context"
- "net"
- "net/http"
- "net/http/cgi"
- "os"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
-)
-
-// gitHandler is an http.Handler that invokes git-http-backend (or
-// whatever backend is configured) via CGI, with appropriate
-// environment variables in place for git-http-backend or
-// gitolite-shell.
-type gitHandler struct {
- cgi.Handler
-}
-
-func newGitHandler(ctx context.Context, cluster *arvados.Cluster) http.Handler {
- const glBypass = "GL_BYPASS_ACCESS_CHECKS"
- const glHome = "GITOLITE_HTTP_HOME"
- var env []string
- path := os.Getenv("PATH")
- if cluster.Git.GitoliteHome != "" {
- env = append(env,
- glHome+"="+cluster.Git.GitoliteHome,
- glBypass+"=1")
- path = path + ":" + cluster.Git.GitoliteHome + "/bin"
- } else if home, bypass := os.Getenv(glHome), os.Getenv(glBypass); home != "" || bypass != "" {
- env = append(env, glHome+"="+home, glBypass+"="+bypass)
- ctxlog.FromContext(ctx).Printf("DEPRECATED: Passing through %s and %s environment variables. Use GitoliteHome configuration instead.", glHome, glBypass)
- }
-
- var listen arvados.URL
- for listen = range cluster.Services.GitHTTP.InternalURLs {
- break
- }
- env = append(env,
- "GIT_PROJECT_ROOT="+cluster.Git.Repositories,
- "GIT_HTTP_EXPORT_ALL=",
- "SERVER_ADDR="+listen.Host,
- "PATH="+path)
- return &gitHandler{
- Handler: cgi.Handler{
- Path: cluster.Git.GitCommand,
- Dir: cluster.Git.Repositories,
- Env: env,
- Args: []string{"http-backend"},
- },
- }
-}
-
-func (h *gitHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- remoteHost, remotePort, err := net.SplitHostPort(r.RemoteAddr)
- if err != nil {
- ctxlog.FromContext(r.Context()).Errorf("Internal error: SplitHostPort(r.RemoteAddr==%q): %s", r.RemoteAddr, err)
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
-
- // Copy the wrapped cgi.Handler, so these request-specific
- // variables don't leak into the next request.
- handlerCopy := h.Handler
- handlerCopy.Env = append(handlerCopy.Env,
- // In Go1.5 we can skip this, net/http/cgi will do it for us:
- "REMOTE_HOST="+remoteHost,
- "REMOTE_ADDR="+remoteHost,
- "REMOTE_PORT="+remotePort,
- // Ideally this would be a real username:
- "REMOTE_USER="+r.RemoteAddr,
- )
- handlerCopy.ServeHTTP(w, r)
-}
diff --git a/services/githttpd/git_handler_test.go b/services/githttpd/git_handler_test.go
deleted file mode 100644
index ef2ee28e79..0000000000
--- a/services/githttpd/git_handler_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "context"
- "net/http"
- "net/http/httptest"
- "net/url"
- "regexp"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&GitHandlerSuite{})
-
-type GitHandlerSuite struct {
- cluster *arvados.Cluster
-}
-
-func (s *GitHandlerSuite) SetUpTest(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.Equals, nil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.Equals, nil)
-
- s.cluster.Services.GitHTTP.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: "localhost:80"}: {}}
- s.cluster.Git.GitoliteHome = "/test/ghh"
- s.cluster.Git.Repositories = "/"
-}
-
-func (s *GitHandlerSuite) TestEnvVars(c *check.C) {
- u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
- c.Check(err, check.Equals, nil)
- resp := httptest.NewRecorder()
- req := &http.Request{
- Method: "GET",
- URL: u,
- RemoteAddr: "[::1]:12345",
- }
- h := newGitHandler(context.Background(), s.cluster)
- h.(*gitHandler).Path = "/bin/sh"
- h.(*gitHandler).Args = []string{"-c", "printf 'Content-Type: text/plain\r\n\r\n'; env"}
-
- h.ServeHTTP(resp, req)
-
- c.Check(resp.Code, check.Equals, http.StatusOK)
- body := resp.Body.String()
- c.Check(body, check.Matches, `(?ms).*^PATH=.*:/test/ghh/bin$.*`)
- c.Check(body, check.Matches, `(?ms).*^GITOLITE_HTTP_HOME=/test/ghh$.*`)
- c.Check(body, check.Matches, `(?ms).*^GL_BYPASS_ACCESS_CHECKS=1$.*`)
- c.Check(body, check.Matches, `(?ms).*^REMOTE_HOST=::1$.*`)
- c.Check(body, check.Matches, `(?ms).*^REMOTE_PORT=12345$.*`)
- c.Check(body, check.Matches, `(?ms).*^SERVER_ADDR=`+regexp.QuoteMeta("localhost:80")+`$.*`)
-}
-
-func (s *GitHandlerSuite) TestCGIErrorOnSplitHostPortError(c *check.C) {
- u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
- c.Check(err, check.Equals, nil)
- resp := httptest.NewRecorder()
- req := &http.Request{
- Method: "GET",
- URL: u,
- RemoteAddr: "test.bad.address.missing.port",
- }
- h := newGitHandler(context.Background(), s.cluster)
- h.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusInternalServerError)
- c.Check(resp.Body.String(), check.Equals, "")
-}
diff --git a/services/githttpd/gitolite_test.go b/services/githttpd/gitolite_test.go
deleted file mode 100644
index d34c413c1b..0000000000
--- a/services/githttpd/gitolite_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&GitoliteSuite{})
-
-// GitoliteSuite tests need an API server, an arvados-git-httpd
-// server, and a repository hosted by gitolite.
-type GitoliteSuite struct {
- IntegrationSuite
- gitoliteHome string
-}
-
-func (s *GitoliteSuite) SetUpTest(c *check.C) {
- var err error
- s.gitoliteHome, err = ioutil.TempDir("", "githttp")
- c.Assert(err, check.Equals, nil)
-
- runGitolite := func(prog string, args ...string) {
- c.Log(prog, " ", args)
- cmd := exec.Command(prog, args...)
- cmd.Dir = s.gitoliteHome
- cmd.Env = []string{"HOME=" + s.gitoliteHome}
- for _, e := range os.Environ() {
- if !strings.HasPrefix(e, "HOME=") {
- cmd.Env = append(cmd.Env, e)
- }
- }
- diags, err := cmd.CombinedOutput()
- c.Log(string(diags))
- c.Assert(err, check.Equals, nil)
- }
-
- runGitolite("gitolite", "setup", "--admin", "root")
-
- s.tmpRepoRoot = s.gitoliteHome + "/repositories"
-
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.Equals, nil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.Equals, nil)
-
- s.cluster.Services.GitHTTP.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: "localhost:0"}: {}}
- s.cluster.TLS.Insecure = true
- s.cluster.Git.GitCommand = "/usr/share/gitolite3/gitolite-shell"
- s.cluster.Git.GitoliteHome = s.gitoliteHome
- s.cluster.Git.Repositories = s.tmpRepoRoot
-
- s.IntegrationSuite.SetUpTest(c)
-
- // Install the gitolite hooks in the bare repo we made in
- // (*IntegrationTest)SetUpTest() -- see 2.2.4 at
- // http://gitolite.com/gitolite/gitolite.html
- runGitolite("gitolite", "setup")
-}
-
-func (s *GitoliteSuite) TearDownTest(c *check.C) {
- // We really want Unsetenv here, but it's not worth forcing an
- // upgrade to Go 1.4.
- os.Setenv("GITOLITE_HTTP_HOME", "")
- os.Setenv("GL_BYPASS_ACCESS_CHECKS", "")
- if s.gitoliteHome != "" {
- err := os.RemoveAll(s.gitoliteHome)
- c.Check(err, check.Equals, nil)
- }
- s.IntegrationSuite.TearDownTest(c)
-}
-
-func (s *GitoliteSuite) TestFetch(c *check.C) {
- err := s.RunGit(c, activeToken, "fetch", "active/foo.git", "refs/heads/main")
- c.Check(err, check.Equals, nil)
-}
-
-func (s *GitoliteSuite) TestFetchUnreadable(c *check.C) {
- err := s.RunGit(c, anonymousToken, "fetch", "active/foo.git")
- c.Check(err, check.ErrorMatches, `.* not found.*`)
-}
-
-func (s *GitoliteSuite) TestPush(c *check.C) {
- err := s.RunGit(c, activeToken, "push", "active/foo.git", "main:gitolite-push")
- c.Check(err, check.Equals, nil)
-
- // Check that the commit hash appears in the gitolite log, as
- // assurance that the gitolite hooks really did run.
-
- sha1, err := exec.Command("git", "--git-dir", s.tmpWorkdir+"/.git",
- "log", "-n1", "--format=%H").CombinedOutput()
- c.Logf("git-log in workdir: %q", string(sha1))
- c.Assert(err, check.Equals, nil)
- c.Assert(len(sha1), check.Equals, 41)
-
- gitoliteLog, err := exec.Command("grep", "-r", string(sha1[:40]), s.gitoliteHome+"/.gitolite/logs").CombinedOutput()
- c.Check(err, check.Equals, nil)
- c.Logf("gitolite log message: %q", string(gitoliteLog))
-}
-
-func (s *GitoliteSuite) TestPushUnwritable(c *check.C) {
- err := s.RunGit(c, spectatorToken, "push", "active/foo.git", "main:gitolite-push-fail")
- c.Check(err, check.ErrorMatches, `.*HTTP (code = )?403.*`)
-}
diff --git a/services/githttpd/integration_test.go b/services/githttpd/integration_test.go
deleted file mode 100644
index c819272d3e..0000000000
--- a/services/githttpd/integration_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "context"
- "errors"
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
- "testing"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/httpserver"
- check "gopkg.in/check.v1"
-)
-
-// Gocheck boilerplate
-func Test(t *testing.T) {
- check.TestingT(t)
-}
-
-// IntegrationSuite tests need an API server and an arvados-git-httpd
-// server. See GitSuite and GitoliteSuite.
-type IntegrationSuite struct {
- tmpRepoRoot string
- tmpWorkdir string
- testServer *httpserver.Server
- cluster *arvados.Cluster
-}
-
-func (s *IntegrationSuite) SetUpTest(c *check.C) {
- arvadostest.ResetEnv()
-
- var err error
- if s.tmpRepoRoot == "" {
- s.tmpRepoRoot, err = ioutil.TempDir("", "githttp")
- c.Assert(err, check.Equals, nil)
- }
- s.tmpWorkdir, err = ioutil.TempDir("", "githttp")
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("git", "init", "--bare", s.tmpRepoRoot+"/zzzzz-s0uqq-382brsig8rp3666.git").Output()
- c.Assert(err, check.Equals, nil)
- // we need git 2.28 to specify the initial branch with -b; Buster only has 2.20; so we do it in 2 steps
- _, err = exec.Command("git", "init", s.tmpWorkdir).Output()
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && git checkout -b main").Output()
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && echo initial >initial && git add initial && git -c user.name=Initial -c user.email=Initial commit -am 'foo: initial commit'").CombinedOutput()
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && git push "+s.tmpRepoRoot+"/zzzzz-s0uqq-382brsig8rp3666.git main:main").CombinedOutput()
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && echo work >work && git add work && git -c user.name=Foo -c user.email=Foo commit -am 'workdir: test'").CombinedOutput()
- c.Assert(err, check.Equals, nil)
-
- if s.cluster == nil {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.Equals, nil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.Equals, nil)
-
- s.cluster.Services.GitHTTP.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: "localhost:0"}: {}}
- s.cluster.TLS.Insecure = true
- s.cluster.Git.GitCommand = "/usr/bin/git"
- s.cluster.Git.Repositories = s.tmpRepoRoot
- s.cluster.ManagementToken = arvadostest.ManagementToken
- }
-
- s.testServer = &httpserver.Server{}
- s.testServer.Handler = httpserver.LogRequests(newHandler(context.Background(), s.cluster, "", nil))
- err = s.testServer.Start()
- c.Assert(err, check.Equals, nil)
-
- _, err = exec.Command("git", "config",
- "--file", s.tmpWorkdir+"/.git/config",
- "credential.http://"+s.testServer.Addr+"/.helper",
- "!cred(){ cat >/dev/null; if [ \"$1\" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred").Output()
- c.Assert(err, check.Equals, nil)
- _, err = exec.Command("git", "config",
- "--file", s.tmpWorkdir+"/.git/config",
- "credential.http://"+s.testServer.Addr+"/.username",
- "none").Output()
- c.Assert(err, check.Equals, nil)
-
- // Clear ARVADOS_API_* env vars before starting up the server,
- // to make sure arvados-git-httpd doesn't use them or complain
- // about them being missing.
- os.Unsetenv("ARVADOS_API_HOST")
- os.Unsetenv("ARVADOS_API_HOST_INSECURE")
- os.Unsetenv("ARVADOS_API_TOKEN")
-}
-
-func (s *IntegrationSuite) TearDownTest(c *check.C) {
- var err error
- if s.testServer != nil {
- err = s.testServer.Close()
- }
- c.Check(err, check.Equals, nil)
- s.testServer = nil
-
- if s.tmpRepoRoot != "" {
- err = os.RemoveAll(s.tmpRepoRoot)
- c.Check(err, check.Equals, nil)
- }
- s.tmpRepoRoot = ""
-
- if s.tmpWorkdir != "" {
- err = os.RemoveAll(s.tmpWorkdir)
- c.Check(err, check.Equals, nil)
- }
- s.tmpWorkdir = ""
-
- s.cluster = nil
-}
-
-func (s *IntegrationSuite) RunGit(c *check.C, token, gitCmd, repo string, args ...string) error {
- cwd, err := os.Getwd()
- c.Assert(err, check.Equals, nil)
- defer os.Chdir(cwd)
- os.Chdir(s.tmpWorkdir)
-
- gitargs := append([]string{
- gitCmd, "http://" + s.testServer.Addr + "/" + repo,
- }, args...)
- cmd := exec.Command("git", gitargs...)
- cmd.Env = append(os.Environ(), "ARVADOS_API_TOKEN="+token)
- w, err := cmd.StdinPipe()
- c.Assert(err, check.Equals, nil)
- w.Close()
- output, err := cmd.CombinedOutput()
- c.Log("git ", gitargs, " => ", err)
- c.Log(string(output))
- if err != nil && len(output) > 0 {
- // If messages appeared on stderr, they are more
- // helpful than the err returned by CombinedOutput().
- //
- // Easier to match error strings without newlines:
- err = errors.New(strings.Replace(string(output), "\n", " // ", -1))
- }
- return err
-}
diff --git a/services/githttpd/server_test.go b/services/githttpd/server_test.go
deleted file mode 100644
index 02c13a3112..0000000000
--- a/services/githttpd/server_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package githttpd
-
-import (
- "os"
- "os/exec"
-
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&GitSuite{})
-
-const (
- spectatorToken = "zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu"
- activeToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
- anonymousToken = "4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi"
- expiredToken = "2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx"
-)
-
-type GitSuite struct {
- IntegrationSuite
-}
-
-func (s *GitSuite) TestPathVariants(c *check.C) {
- s.makeArvadosRepo(c)
- for _, repo := range []string{"active/foo.git", "active/foo/.git", "arvados.git", "arvados/.git"} {
- err := s.RunGit(c, spectatorToken, "fetch", repo, "refs/heads/main")
- c.Assert(err, check.Equals, nil)
- }
-}
-
-func (s *GitSuite) TestReadonly(c *check.C) {
- err := s.RunGit(c, spectatorToken, "fetch", "active/foo.git", "refs/heads/main")
- c.Assert(err, check.Equals, nil)
- err = s.RunGit(c, spectatorToken, "push", "active/foo.git", "main:newbranchfail")
- c.Assert(err, check.ErrorMatches, `.*HTTP (code = )?403.*`)
- _, err = os.Stat(s.tmpRepoRoot + "/zzzzz-s0uqq-382brsig8rp3666.git/refs/heads/newbranchfail")
- c.Assert(err, check.FitsTypeOf, &os.PathError{})
-}
-
-func (s *GitSuite) TestReadwrite(c *check.C) {
- err := s.RunGit(c, activeToken, "fetch", "active/foo.git", "refs/heads/main")
- c.Assert(err, check.Equals, nil)
- err = s.RunGit(c, activeToken, "push", "active/foo.git", "main:newbranch")
- c.Assert(err, check.Equals, nil)
- _, err = os.Stat(s.tmpRepoRoot + "/zzzzz-s0uqq-382brsig8rp3666.git/refs/heads/newbranch")
- c.Assert(err, check.Equals, nil)
-}
-
-func (s *GitSuite) TestNonexistent(c *check.C) {
- err := s.RunGit(c, spectatorToken, "fetch", "thisrepodoesnotexist.git", "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* not found.*`)
-}
-
-func (s *GitSuite) TestMissingGitdirReadableRepository(c *check.C) {
- err := s.RunGit(c, activeToken, "fetch", "active/foo2.git", "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* not found.*`)
-}
-
-func (s *GitSuite) TestNoPermission(c *check.C) {
- for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
- err := s.RunGit(c, anonymousToken, "fetch", repo, "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* not found.*`)
- }
-}
-
-func (s *GitSuite) TestExpiredToken(c *check.C) {
- for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
- err := s.RunGit(c, expiredToken, "fetch", repo, "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* (500 while accessing|requested URL returned error: 500).*`)
- }
-}
-
-func (s *GitSuite) TestInvalidToken(c *check.C) {
- for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
- err := s.RunGit(c, "s3cr3tp@ssw0rd", "fetch", repo, "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* requested URL returned error.*`)
- }
-}
-
-func (s *GitSuite) TestShortToken(c *check.C) {
- for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
- err := s.RunGit(c, "s3cr3t", "fetch", repo, "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.* (500 while accessing|requested URL returned error: 500).*`)
- }
-}
-
-func (s *GitSuite) TestShortTokenBadReq(c *check.C) {
- for _, repo := range []string{"bogus"} {
- err := s.RunGit(c, "s3cr3t", "fetch", repo, "refs/heads/main")
- c.Assert(err, check.ErrorMatches, `.*not found.*`)
- }
-}
-
-// Make a bare arvados repo at {tmpRepoRoot}/arvados.git
-func (s *GitSuite) makeArvadosRepo(c *check.C) {
- msg, err := exec.Command("git", "init", "--bare", s.tmpRepoRoot+"/zzzzz-s0uqq-arvadosrepo0123.git").CombinedOutput()
- c.Log(string(msg))
- c.Assert(err, check.Equals, nil)
- msg, err = exec.Command("git", "--git-dir", s.tmpRepoRoot+"/zzzzz-s0uqq-arvadosrepo0123.git", "fetch", "../../.git", "HEAD:main").CombinedOutput()
- c.Log(string(msg))
- c.Assert(err, check.Equals, nil)
-}
diff --git a/services/keep-balance/integration_test.go b/services/keep-balance/integration_test.go
index 20d0040b1f..b4bf423cd7 100644
--- a/services/keep-balance/integration_test.go
+++ b/services/keep-balance/integration_test.go
@@ -86,7 +86,7 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
for iter := 0; iter < 20; iter++ {
logBuf.Reset()
logger := logrus.New()
- logger.Out = io.MultiWriter(&logBuf, os.Stderr)
+ logger.Out = io.MultiWriter(&logBuf, ctxlog.LogWriter(c.Log))
opts := RunOptions{
CommitConfirmedFields: true,
Logger: logger,
diff --git a/services/keep-web/cache.go b/services/keep-web/cache.go
index d443bc0829..b5b6cc4fa5 100644
--- a/services/keep-web/cache.go
+++ b/services/keep-web/cache.go
@@ -179,6 +179,7 @@ func (c *cache) checkout(token string) (*cachedSession, error) {
}
client.AuthToken = token
client.Timeout = time.Minute
+ client.Logger = c.logger
// A non-empty origin header tells controller to
// prioritize our traffic as interactive, which is
// true most of the time.
@@ -188,11 +189,13 @@ func (c *cache) checkout(token string) (*cachedSession, error) {
if err != nil {
return nil, err
}
+ kc := keepclient.New(arvadosclient)
+ kc.DiskCacheSize = c.cluster.Collections.WebDAVCache.DiskCacheSize
sess = &cachedSession{
cache: c,
client: client,
arvadosclient: arvadosclient,
- keepclient: keepclient.New(arvadosclient),
+ keepclient: kc,
}
c.sessions[token] = sess
}
diff --git a/services/keep-web/cache_test.go b/services/keep-web/cache_test.go
index e95ebcf846..76d88e7c36 100644
--- a/services/keep-web/cache_test.go
+++ b/services/keep-web/cache_test.go
@@ -78,11 +78,11 @@ func (s *IntegrationSuite) TestCache(c *check.C) {
for i := 0; i < 7; i++ {
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusOK)
+ c.Check(resp.Result().StatusCode, check.Equals, http.StatusOK)
resp2 := httptest.NewRecorder()
s.handler.ServeHTTP(resp2, req2)
- c.Check(resp2.Code, check.Equals, http.StatusOK)
+ c.Check(resp2.Result().StatusCode, check.Equals, http.StatusOK)
}
s.checkCacheMetrics(c,
"hits 20",
@@ -97,8 +97,8 @@ func (s *IntegrationSuite) TestForceReloadPDH(c *check.C) {
client := arvados.NewClientFromEnv()
client.AuthToken = arvadostest.ActiveToken
- _, resp := s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/"+filename, arvadostest.ActiveToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ _, resp := s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/"+filename, arvadostest.ActiveToken, nil, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
var coll arvados.Collection
err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
@@ -113,14 +113,14 @@ func (s *IntegrationSuite) TestForceReloadPDH(c *check.C) {
_, resp = s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/"+filename, "", http.Header{
"Authorization": {"Bearer " + arvadostest.ActiveToken},
"Cache-Control": {"must-revalidate"},
- })
- c.Check(resp.Code, check.Equals, http.StatusOK)
+ }, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
_, resp = s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/missingfile", "", http.Header{
"Authorization": {"Bearer " + arvadostest.ActiveToken},
"Cache-Control": {"must-revalidate"},
- })
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ }, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
func (s *IntegrationSuite) TestForceReloadUUID(c *check.C) {
@@ -129,29 +129,31 @@ func (s *IntegrationSuite) TestForceReloadUUID(c *check.C) {
var coll arvados.Collection
err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
"collection": map[string]string{
- "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:oldfile\n",
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty_file\n",
},
})
c.Assert(err, check.IsNil)
defer client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil)
- _, resp := s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
- _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/oldfile", arvadostest.ActiveToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ _, resp := s.do("GET", "http://"+coll.UUID+".keep-web.example/different_empty_file", arvadostest.ActiveToken, nil, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/empty_file", arvadostest.ActiveToken, nil, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/different_empty_file", arvadostest.ActiveToken, nil, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
err = client.RequestAndDecode(&coll, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
"collection": map[string]string{
- "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:oldfile 0:0:newfile\n",
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:different_empty_file\n",
},
})
c.Assert(err, check.IsNil)
- _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
- _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", "", http.Header{
+ // If we set the force-reload header, we get the latest
+ // version and empty_file is gone.
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/empty_file", "", http.Header{
"Authorization": {"Bearer " + arvadostest.ActiveToken},
"Cache-Control": {"must-revalidate"},
- })
- c.Check(resp.Code, check.Equals, http.StatusOK)
+ }, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/different_empty_file", arvadostest.ActiveToken, nil, nil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
}
diff --git a/services/keep-web/cadaver_test.go b/services/keep-web/cadaver_test.go
index 026deeb5ee..789df72035 100644
--- a/services/keep-web/cadaver_test.go
+++ b/services/keep-web/cadaver_test.go
@@ -52,7 +52,7 @@ func (s *IntegrationSuite) TestCadaverUserProject(c *check.C) {
}
func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc func(arvados.Collection) (string, string, string), skip func(string) bool) {
- testdata := []byte("the human tragedy consists in the necessity of living with the consequences of actions performed under the pressure of compulsions we do not understand")
+ testdata := "the human tragedy consists in the necessity of living with the consequences of actions performed under the pressure of compulsions we do not understand"
tempdir, err := ioutil.TempDir("", "keep-web-test-")
c.Assert(err, check.IsNil)
@@ -60,7 +60,7 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
localfile, err := ioutil.TempFile(tempdir, "localfile")
c.Assert(err, check.IsNil)
- localfile.Write(testdata)
+ localfile.Write([]byte(testdata))
emptyfile, err := ioutil.TempFile(tempdir, "emptyfile")
c.Assert(err, check.IsNil)
@@ -79,10 +79,11 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
matchToday := time.Now().Format("Jan +2")
type testcase struct {
- path string
- cmd string
- match string
- data []byte
+ path string
+ cmd string
+ match string
+ data string
+ checkemptydata bool
}
for _, trial := range []testcase{
{
@@ -116,10 +117,10 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
match: `(?ms).*Uploading .* succeeded.*`,
},
{
- path: writePath,
- cmd: "get emptyfile '" + checkfile.Name() + "'\n",
- match: `(?ms).*Downloading .* succeeded.*`,
- data: []byte{},
+ path: writePath,
+ cmd: "get emptyfile '" + checkfile.Name() + "'\n",
+ match: `(?ms).*Downloading .* succeeded.*`,
+ checkemptydata: true,
},
{
path: writePath,
@@ -281,7 +282,7 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
match: `(?ms).*Locking .* failed:.*405 Method Not Allowed.*`,
},
} {
- c.Logf("%s %+v", s.testServer.URL, trial)
+ c.Logf("=== %s trial %+v", s.testServer.URL, trial)
if skip != nil && skip(trial.path) {
c.Log("(skip)")
continue
@@ -292,14 +293,14 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
stdout := s.runCadaver(c, password, trial.path, trial.cmd)
c.Check(stdout, check.Matches, trial.match)
- if trial.data == nil {
+ if trial.data == "" && !trial.checkemptydata {
continue
}
checkfile, err = os.Open(checkfile.Name())
c.Assert(err, check.IsNil)
checkfile.Seek(0, os.SEEK_SET)
got, err := ioutil.ReadAll(checkfile)
- c.Check(got, check.DeepEquals, trial.data)
+ c.Check(string(got), check.Equals, trial.data)
c.Check(err, check.IsNil)
}
}
diff --git a/services/keep-web/handler.go b/services/keep-web/handler.go
index cdd51f0bb7..cb81f681c5 100644
--- a/services/keep-web/handler.go
+++ b/services/keep-web/handler.go
@@ -5,15 +5,20 @@
package keepweb
import (
+ "context"
"encoding/json"
"errors"
"fmt"
"html"
"html/template"
"io"
+ "mime"
+ "net"
"net/http"
"net/url"
"os"
+ "path"
+ "slices"
"sort"
"strconv"
"strings"
@@ -21,12 +26,14 @@ import (
"time"
"git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/lib/webdavfs"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/gotd/contrib/http_range"
"github.com/sirupsen/logrus"
"golang.org/x/net/webdav"
)
@@ -36,9 +43,18 @@ type handler struct {
Cluster *arvados.Cluster
metrics *metrics
- lockMtx sync.Mutex
- lock map[string]*sync.RWMutex
- lockTidied time.Time
+ fileEventLogs map[fileEventLog]time.Time
+ fileEventLogsMtx sync.Mutex
+ fileEventLogsNextTidy time.Time
+
+ s3SecretCache map[string]*cachedS3Secret
+ s3SecretCacheMtx sync.Mutex
+ s3SecretCacheNextTidy time.Time
+
+ dbConnector *ctrlctx.DBConnector
+ dbConnectorMtx sync.Mutex
+
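+ // repacking tracks the UUIDs of collections that are
+ // currently being repacked, so concurrent uploads don't
+ // start concurrent repack operations on the same collection
+ // (see (*handler).repack).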
+ repacking sync.Map
}
var urlPDHDecoder = strings.NewReplacer(" ", "+", "-", "+")
@@ -123,7 +139,6 @@ var (
"MOVE": true,
"PROPPATCH": true,
"PUT": true,
- "RMCOL": true,
"UNLOCK": true,
}
webdavMethod = map[string]bool{
@@ -172,12 +187,37 @@ func (h *handler) Done() <-chan struct{} {
return nil
}
+// Close releases the active database connection, if any.
+//
+// Currently Close() is not part of the service.Handler interface.
+// However, it is used by the test suite to avoid accumulating
+// database connections when starting up lots of keep-web
+// servers/handlers.
+func (h *handler) Close() {
+ h.getDBConnector().Close()
+}
+
+func (h *handler) getDBConnector() *ctrlctx.DBConnector {
+ h.dbConnectorMtx.Lock()
+ defer h.dbConnectorMtx.Unlock()
+ if h.dbConnector == nil {
+ h.dbConnector = &ctrlctx.DBConnector{PostgreSQL: h.Cluster.PostgreSQL}
+ }
+ return h.dbConnector
+}
+
// ServeHTTP implements http.Handler.
func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
r.URL.Scheme = xfp
}
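+ // Attach webdav-specific request headers to this request's
+ // response log entry; they are useful when debugging
+ // COPY/MOVE and Depth-sensitive requests.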
+ httpserver.SetResponseLogFields(r.Context(), logrus.Fields{
+ "webdavDepth": r.Header.Get("Depth"),
+ "webdavDestination": r.Header.Get("Destination"),
+ "webdavOverwrite": r.Header.Get("Overwrite"),
+ })
+
wbuffer := newWriteBuffer(wOrig, int(h.Cluster.Collections.WebDAVOutputBuffer))
defer wbuffer.Close()
w := httpserver.WrapResponseWriter(responseWriter{
@@ -208,6 +248,16 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
return
}
+ // webdavPrefix is the leading portion of r.URL.Path that
+ // should be ignored by the webdav handler, if any.
+ //
+ // req "/c={id}/..." -> webdavPrefix "/c={id}"
+ // req "/by_id/..." -> webdavPrefix ""
+ //
+ // Note: in the code immediately below, we set webdavPrefix
+ // only if it was explicitly set by the client. Otherwise, it
+ // gets set later, after checking the request path for cases
+ // like "/c={id}/...".
webdavPrefix := ""
arvPath := r.URL.Path
if prefix := r.Header.Get("X-Webdav-Prefix"); prefix != "" {
@@ -344,6 +394,12 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
stripParts++
}
+ // fsprefix is the path from sitefs root to the sitefs
+ // directory (implicitly or explicitly) indicated by the
+ // leading / in the request path.
+ //
+ // Request "/by_id/..." -> fsprefix ""
+ // Request "/c={id}/..." -> fsprefix "/by_id/{id}/"
fsprefix := ""
if useSiteFS {
if writeMethod[r.Method] {
@@ -364,6 +420,19 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
}
if src := r.Header.Get("X-Webdav-Source"); strings.HasPrefix(src, "/") && !strings.Contains(src, "//") && !strings.Contains(src, "/../") {
+ // Clients (specifically, the container log gateway)
+ // use X-Webdav-Source to specify that although the
+ // request path (and other webdav fields in the
+ // request) refer to target "/abc", the intended
+ // target is actually
+ // "{x-webdav-source-value}/abc".
+ //
+ // This, combined with X-Webdav-Prefix, enables the
+ // container log gateway to effectively alter the
+ // target path when proxying a request, without
+ // needing to rewrite all the other webdav
+ // request/response fields that might mention the
+ // target path.
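+ //
+ // Example (hypothetical values): a request for
+ // "/stderr.txt" with "X-Webdav-Source: /foo/bar" is served
+ // as if the requested path were "/foo/bar/stderr.txt".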
fsprefix += src[1:]
}
@@ -395,6 +464,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
var token string
var tokenUser *arvados.User
var sessionFS arvados.CustomFileSystem
+ var targetFS arvados.FileSystem
var session *cachedSession
var collectionDir arvados.File
for _, token = range tokens {
@@ -430,22 +500,11 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
return
}
defer f.Close()
- defer sess.Release()
collectionDir, sessionFS, session, tokenUser = f, fs, sess, user
break
}
- if forceReload && collectionDir != nil {
- err := collectionDir.Sync()
- if err != nil {
- if he := errorWithHTTPStatus(nil); errors.As(err, &he) {
- http.Error(w, err.Error(), he.HTTPStatus())
- } else {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
- return
- }
- }
+
if session == nil {
if pathToken {
// The URL is a "secret sharing link" that
@@ -494,14 +553,27 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
redirkey = "redirectToDownload"
}
callback := "/c=" + collectionID + "/" + strings.Join(targetPath, "/")
- // target.RawQuery = url.Values{redirkey:
- // {target}}.Encode() would be the obvious
- // thing to do here, but wb2 doesn't decode
- // this as a query param -- it takes
- // everything after "${redirkey}=" as the
- // target URL. If we encode "/" as "%2F" etc.,
- // the redirect won't work.
- target.RawQuery = redirkey + "=" + callback
+ query := url.Values{redirkey: {callback}}
+ queryString := query.Encode()
+ // Note: Encode (and the QueryEscape function) turns a
+ // space into a plus sign (+) rather than %20 (and the plus
+ // sign itself becomes %2B); that is the rule for web form
+ // data sent in the URL query part via GET, but we're not
+ // emulating forms here. Client-side JS APIs
+ // (URLSearchParams#get, decodeURIComponent) will decode
+ // %20, but while the former also expects the form-specific
+ // encoding, the latter doesn't. Encode() escapes almost
+ // everything; RFC 3986 section 3.4 says "it is sometimes
+ // better for usability" not to encode / and ? when passing
+ // a URI reference in a query. That is also legal according
+ // to the WHATWG URL spec, and can be desirable when
+ // debugging a web app. So we let the slash (/) appear
+ // unescaped in the encoded query, and the equality sign (=)
+ // too, but exempting ? is not very useful. Plus sign, hash,
+ // and ampersand are never exempt.
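+ // Example (hypothetical collection ID): callback
+ // "/c=zzzzz-4zz18-zzzzzzzzzzzzzzz/foo bar" ends up encoded
+ // as
+ // "redirectToDownload=/c=zzzzz-4zz18-zzzzzzzzzzzzzzz/foo%20bar".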
+ replacer := strings.NewReplacer("+", "%20", "%2F", "/", "%3D", "=")
+ target.RawQuery = replacer.Replace(queryString)
w.Header().Add("Location", target.String())
w.WriteHeader(http.StatusSeeOther)
return
@@ -519,13 +591,61 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
return
}
+ // The first call to releaseSession() calls session.Release(),
+ // then subsequent calls are no-ops. This lets us use a defer
+ // call here to ensure it gets called in all code paths, and
+ // also call it inline (see below) in the cases where we want
+ // to release the lock before returning.
+ var releaseSessionOnce sync.Once
+ releaseSession := func() { releaseSessionOnce.Do(func() { session.Release() }) }
+ defer releaseSession()
+
+ colltarget := strings.Join(targetPath, "/")
+ colltarget = strings.TrimSuffix(colltarget, "/")
+ fstarget := fsprefix + colltarget
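+ // Even without a force-reload header from the client,
+ // reload our cached copy of the collection if needSync
+ // reports that it may be out of date (e.g., it was changed
+ // by another client).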
+ if !forceReload {
+ need, err := h.needSync(r.Context(), sessionFS, fstarget)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ forceReload = need
+ }
+ if forceReload {
+ err := collectionDir.Sync()
+ if err != nil {
+ if he := errorWithHTTPStatus(nil); errors.As(err, &he) {
+ http.Error(w, err.Error(), he.HTTPStatus())
+ } else {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ return
+ }
+ }
+
+ accept := r.Header.Get("Accept")
+ if acceptq := r.FormValue("accept"); acceptq != "" && attachment {
+ // For the convenience of web frontend code, we accept
+ // "?accept=X" in the query as an override of the
+ // "Accept: X" header.
+ accept = acceptq
+ }
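+ // Example (hypothetical request, assuming the attachment
+ // flag is set): "GET /c={id}/?accept=application/zip" is
+ // treated as if the client had sent "Accept:
+ // application/zip" and is served as a zip archive by
+ // serveZip below.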
+ if acceptlist := strings.Split(accept, ","); len(acceptlist) == 1 {
+ mediatype, _, err := mime.ParseMediaType(acceptlist[0])
+ if err == nil && mediatype == "application/zip" {
+ releaseSession()
+ h.serveZip(w, r, session, sessionFS, fstarget, tokenUser)
+ return
+ }
+ }
+
if r.Method == http.MethodGet || r.Method == http.MethodHead {
- targetfnm := fsprefix + strings.Join(pathParts[stripParts:], "/")
- if fi, err := sessionFS.Stat(targetfnm); err == nil && fi.IsDir() {
+ if fi, err := sessionFS.Stat(fstarget); err == nil && fi.IsDir() {
+ releaseSession() // because we won't be writing anything
if !strings.HasSuffix(r.URL.Path, "/") {
h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
} else {
- h.serveDirectory(w, r, fi.Name(), sessionFS, targetfnm, !useSiteFS)
+ h.serveDirectory(w, r, fi.Name(), sessionFS, fstarget, !useSiteFS)
}
return
}
@@ -543,38 +663,142 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
http.Error(w, "Not permitted", http.StatusForbidden)
return
}
- h.logUploadOrDownload(r, session.arvadosclient, sessionFS, fsprefix+strings.Join(targetPath, "/"), nil, tokenUser)
+ h.logUploadOrDownload(r, session.arvadosclient, sessionFS, fstarget, 1, nil, tokenUser)
- writing := writeMethod[r.Method]
- locker := h.collectionLock(collectionID, writing)
- defer locker.Unlock()
+ if webdavPrefix == "" && stripParts > 0 {
+ webdavPrefix = "/" + strings.Join(pathParts[:stripParts], "/")
+ }
+ writing := writeMethod[r.Method]
if writing {
- // Save the collection only if/when all
- // webdav->filesystem operations succeed --
- // and send a 500 error if the modified
- // collection can't be saved.
+ // We implement write operations by writing to a
+ // temporary collection, then applying the change to
+ // the real collection using the replace_files option
+ // in a collection update request. This lets us do
+ // the slow part (i.e., receive the file data from the
+ // client and write it to Keep) without worrying about
+ // side effects of other read/write operations.
//
- // Perform the write in a separate sitefs, so
- // concurrent read operations on the same
- // collection see the previous saved
- // state. After the write succeeds and the
- // collection record is updated, we reset the
- // session so the updates are visible in
- // subsequent read requests.
+ // Collection update requests for a given collection
+ // are serialized by the controller, so we don't need
+ // to do any locking for that part either.
+
+ // collprefix is the subdirectory in the target
+ // collection which (according to X-Webdav-Source) we
+ // should pretend is "/" for this request.
+ collprefix := strings.TrimPrefix(fsprefix, "by_id/"+collectionID+"/")
+ if len(collprefix) == len(fsprefix) {
+ http.Error(w, "internal error: writing to anything other than /by_id/{collectionID}", http.StatusInternalServerError)
+ return
+ }
+
+ // Create a temporary collection filesystem for webdav
+ // to operate on.
+ var tmpcoll arvados.Collection
client := session.client.WithRequestID(r.Header.Get("X-Request-Id"))
- sessionFS = client.SiteFileSystem(session.keepclient)
- writingDir, err := sessionFS.OpenFile(fsprefix, os.O_RDONLY, 0)
+ tmpfs, err := tmpcoll.FileSystem(client, session.keepclient)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- defer writingDir.Close()
+ snap, err := arvados.Snapshot(sessionFS, "by_id/"+collectionID+"/")
+ if err != nil {
+ http.Error(w, "snapshot: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ err = arvados.Splice(tmpfs, "/", snap)
+ if err != nil {
+ http.Error(w, "splice: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ targetFS = tmpfs
+ fsprefix = collprefix
+ replace := make(map[string]string)
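+ // Each relevant webdav method is translated into a
+ // replace_files mapping. Example: "MOVE a.txt b.txt"
+ // yields {"/b.txt": "current/a.txt", "/a.txt": ""}.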
+
+ switch r.Method {
+ case "COPY", "MOVE":
+ dsttarget, err := copyMoveDestination(r, webdavPrefix)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ srcspec := "current/" + colltarget
+ // RFC 4918 9.8.3: A COPY of "Depth: 0" only
+ // instructs that the collection and its
+ // properties, but not resources identified by
+ // its internal member URLs, are to be copied.
+ //
+ // ...meaning we will be creating an empty
+ // directory.
+ //
+ // RFC 4918 9.9.2: A client MUST NOT submit a
+ // Depth header on a MOVE on a collection with
+ // any value but "infinity".
+ //
+ // ...meaning we only need to consider this
+ // case for COPY, not for MOVE.
+ if fi, err := tmpfs.Stat(colltarget); err == nil && fi.IsDir() && r.Method == "COPY" && r.Header.Get("Depth") == "0" {
+ srcspec = "manifest_text/"
+ }
+
+ replace[strings.TrimSuffix(dsttarget, "/")] = srcspec
+ if r.Method == "MOVE" {
+ replace["/"+colltarget] = ""
+ }
+ case "MKCOL":
+ replace["/"+colltarget] = "manifest_text/"
+ case "DELETE":
+ if depth := r.Header.Get("Depth"); depth != "" && depth != "infinity" {
+ http.Error(w, "invalid depth header, see RFC 4918 9.6.1", http.StatusBadRequest)
+ return
+ }
+ replace["/"+colltarget] = ""
+ case "PUT":
+ // changes will be applied by updateOnSuccess
+ // update func below
+ case "LOCK", "UNLOCK", "PROPPATCH":
+ // no changes
+ default:
+ http.Error(w, "method missing", http.StatusInternalServerError)
+ return
+ }
+
+ // Save the collection only if/when all
+ // webdav->filesystem operations succeed using our
+ // temporary collection -- and send a 500 error if the
+ // updates can't be saved.
+ logger := ctxlog.FromContext(r.Context())
w = &updateOnSuccess{
ResponseWriter: w,
- logger: ctxlog.FromContext(r.Context()),
+ logger: logger,
update: func() error {
- err := writingDir.Sync()
+ var manifest string
+ var snap *arvados.Subtree
+ var err error
+ if r.Method == "PUT" {
+ snap, err = arvados.Snapshot(tmpfs, colltarget)
+ if err != nil {
+ return fmt.Errorf("snapshot tmpfs: %w", err)
+ }
+ tmpfs, err = (&arvados.Collection{}).FileSystem(client, session.keepclient)
+ if err != nil {
+ return fmt.Errorf("tmpfs: %w", err)
+ }
+ err = arvados.Splice(tmpfs, "file", snap)
+ if err != nil {
+ return fmt.Errorf("splice tmpfs: %w", err)
+ }
+ manifest, err = tmpfs.MarshalManifest(".")
+ if err != nil {
+ return fmt.Errorf("marshal tmpfs: %w", err)
+ }
+ replace["/"+colltarget] = "manifest_text/file"
+ } else if len(replace) == 0 {
+ return nil
+ }
+ var updated arvados.Collection
+ err = client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+collectionID, nil, map[string]interface{}{
+ "replace_files": replace,
+ "collection": map[string]interface{}{"manifest_text": manifest}})
var te arvados.TransactionError
if errors.As(err, &te) {
err = te
@@ -582,26 +806,29 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
if err != nil {
return err
}
- // Sync the changes to the persistent
- // sessionfs for this token.
- snap, err := writingDir.Snapshot()
- if err != nil {
- return err
+ if r.Method == "PUT" {
+ h.repack(r.Context(), session, logger, &updated)
}
- collectionDir.Splice(snap)
return nil
}}
+ } else {
+ // When writing, we need to block session renewal
+ // until we're finished, in order to guarantee the
+ // effect of the write is visible in future responses.
+ // But if we're not writing, we can release the lock
+ // early. This enables us to keep renewing sessions
+ // and processing more requests even if a slow client
+ // takes a long time to download a large file.
+ releaseSession()
+ targetFS = sessionFS
}
if r.Method == http.MethodGet {
applyContentDispositionHdr(w, r, basename, attachment)
}
- if webdavPrefix == "" {
- webdavPrefix = "/" + strings.Join(pathParts[:stripParts], "/")
- }
wh := &webdav.Handler{
Prefix: webdavPrefix,
FileSystem: &webdavfs.FS{
- FileSystem: sessionFS,
+ FileSystem: targetFS,
Prefix: fsprefix,
Writing: writeMethod[r.Method],
AlwaysReadEOF: r.Method == "PROPFIND",
@@ -616,11 +843,10 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
h.metrics.track(wh, w, r)
if r.Method == http.MethodGet && w.WroteStatus() == http.StatusOK {
wrote := int64(w.WroteBodyBytes())
- fnm := strings.Join(pathParts[stripParts:], "/")
- fi, err := wh.FileSystem.Stat(r.Context(), fnm)
+ fi, err := wh.FileSystem.Stat(r.Context(), colltarget)
if err == nil && fi.Size() != wrote {
var n int
- f, err := wh.FileSystem.OpenFile(r.Context(), fnm, os.O_RDONLY, 0)
+ f, err := wh.FileSystem.OpenFile(r.Context(), colltarget, os.O_RDONLY, 0)
if err == nil {
n, err = f.Read(make([]byte, 1024))
f.Close()
@@ -630,6 +856,37 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
}
}
+// repack repacks the given collection after a file upload, unless
+// another goroutine is already repacking the same collection.
+func (h *handler) repack(ctx context.Context, session *cachedSession, logger logrus.FieldLogger, updated *arvados.Collection) {
+ if _, busy := h.repacking.LoadOrStore(updated.UUID, true); busy {
+ // Another goroutine is already repacking the same
+ // collection.
+ return
+ }
+ defer h.repacking.Delete(updated.UUID)
+
+ // Repacking is best-effort, so we disable retries, and don't
+ // fail on errors.
+ client := *session.client
+ client.Timeout = 0
+ repackfs, err := updated.FileSystem(&client, session.keepclient)
+ if err != nil {
+ logger.Warnf("setting up repackfs: %s", err)
+ return
+ }
+ repacked, err := repackfs.Repack(ctx, arvados.RepackOptions{CachedOnly: true})
+ if err != nil {
+ logger.Warnf("repack: %s", err)
+ return
+ }
+ if repacked > 0 {
+ err := repackfs.Sync()
+ if err != nil {
+ logger.Infof("sync repack: %s", err)
+ }
+ }
+}
+
var dirListingTemplate = `
@@ -647,6 +904,9 @@ var dirListingTemplate = `
.footer p {
font-size: 82%;
}
+ hr {
+ border: 1px solid #808080;
+ }
ul {
padding: 0;
}
@@ -662,9 +922,9 @@ var dirListingTemplate = `
This collection of data files is being shared with you through
Arvados. You can download individual files listed below. To download
-the entire directory tree with wget, try:
+the entire directory tree with <code>wget</code>, try: