Merge branch 'wtsi/13809-root_url-protocol-port-configuration'
Author:    Peter Amstutz <pamstutz@veritasgenetics.com>  Wed, 5 Sep 2018 18:56:46 +0000 (14:56 -0400)
Committer: Peter Amstutz <pamstutz@veritasgenetics.com>  Wed, 5 Sep 2018 18:56:46 +0000 (14:56 -0400)
refs #13809

Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>

268 files changed:
README.md
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/models/session_db.js
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/views/application/_show_text_with_locators.html.erb
apps/workbench/test/controllers/container_requests_controller_test.rb
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/integration/work_units_test.rb
build/build.list
build/package-build-dockerfiles/Makefile
build/package-build-dockerfiles/centos7/Dockerfile
build/package-build-dockerfiles/debian8/Dockerfile
build/package-build-dockerfiles/debian9/Dockerfile
build/package-build-dockerfiles/ubuntu1404/Dockerfile
build/package-build-dockerfiles/ubuntu1604/Dockerfile
build/package-build-dockerfiles/ubuntu1804/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/centos7/Dockerfile
build/package-test-dockerfiles/debian8/Dockerfile
build/package-test-dockerfiles/ubuntu1404/Dockerfile
build/package-test-dockerfiles/ubuntu1604/Dockerfile
build/package-test-dockerfiles/ubuntu1804/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados [new file with mode: 0644]
build/package-testing/deb-common-test-packages.sh
build/package-testing/test-packages-ubuntu1804.sh [new symlink]
build/run-build-packages-sso.sh
build/run-build-packages.sh
build/run-library.sh
build/run-tests.sh
doc/_config.yml
doc/_includes/_install_compute_docker.liquid
doc/_includes/_skip_sso_server_install.liquid [deleted file]
doc/admin/health-checks.html.textile.liquid [new file with mode: 0644]
doc/admin/management-token.html.textile.liquid [new file with mode: 0644]
doc/admin/metrics.html.textile.liquid [new file with mode: 0644]
doc/admin/upgrading.html.textile.liquid
doc/api/execution.html.textile.liquid
doc/api/methods/groups.html.textile.liquid
doc/css/code.css
doc/css/images.css
doc/install/cheat_sheet.html.textile.liquid
doc/install/copy_pipeline_from_curoverse.html.textile.liquid
doc/install/create-standard-objects.html.textile.liquid [deleted file]
doc/install/crunch2-slurm/install-slurm.html.textile.liquid
doc/install/index.html.textile.liquid
doc/install/install-api-server.html.textile.liquid
doc/install/install-arv-git-httpd.html.textile.liquid
doc/install/install-components.html.textile.liquid [new file with mode: 0644]
doc/install/install-controller.html.textile.liquid [new file with mode: 0644]
doc/install/install-keep-balance.html.textile.liquid
doc/install/install-keepproxy.html.textile.liquid
doc/install/install-keepstore.html.textile.liquid
doc/install/install-manual-prerequisites.html.textile.liquid
doc/install/install-nodemanager.html.textile.liquid
doc/start/getting_started/firstpipeline.html.textile.liquid
doc/user/composer/c1.png [new file with mode: 0644]
doc/user/composer/c10.png [new file with mode: 0644]
doc/user/composer/c11.png [new file with mode: 0644]
doc/user/composer/c12.png [new file with mode: 0644]
doc/user/composer/c13.png [new file with mode: 0644]
doc/user/composer/c14.png [new file with mode: 0644]
doc/user/composer/c15.png [new file with mode: 0644]
doc/user/composer/c16.png [new file with mode: 0644]
doc/user/composer/c17.png [new file with mode: 0644]
doc/user/composer/c18.png [new file with mode: 0644]
doc/user/composer/c19.png [new file with mode: 0644]
doc/user/composer/c2.png [new file with mode: 0644]
doc/user/composer/c20.png [new file with mode: 0644]
doc/user/composer/c21.png [new file with mode: 0644]
doc/user/composer/c22.png [new file with mode: 0644]
doc/user/composer/c23.png [new file with mode: 0644]
doc/user/composer/c24.png [new file with mode: 0644]
doc/user/composer/c2b.png [new file with mode: 0644]
doc/user/composer/c2c.png [new file with mode: 0644]
doc/user/composer/c3.png [new file with mode: 0644]
doc/user/composer/c4.png [new file with mode: 0644]
doc/user/composer/c5.png [new file with mode: 0644]
doc/user/composer/c6.png [new file with mode: 0644]
doc/user/composer/c7.png [new file with mode: 0644]
doc/user/composer/c8.png [new file with mode: 0644]
doc/user/composer/c9.png [new file with mode: 0644]
doc/user/composer/composer.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/cwl-runner.html.textile.liquid
doc/user/cwl/cwl-style.html.textile.liquid
doc/user/topics/arv-copy.html.textile.liquid
doc/user/topics/arv-docker.html.textile.liquid
doc/user/tutorials/running-external-program.html.textile.liquid
doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid
doc/user/tutorials/writing-cwl-workflow.html.textile.liquid
lib/controller/federation.go [new file with mode: 0644]
lib/controller/federation_test.go [new file with mode: 0644]
lib/controller/handler.go
lib/controller/handler_test.go
lib/controller/proxy.go [new file with mode: 0644]
lib/controller/server_test.go [new file with mode: 0644]
lib/crunchstat/crunchstat.go
sdk/R/DESCRIPTION
sdk/R/R/Arvados.R
sdk/R/R/ArvadosFile.R
sdk/R/R/Collection.R
sdk/R/R/CollectionTree.R
sdk/R/R/HttpParser.R
sdk/R/R/HttpRequest.R
sdk/R/R/RESTService.R
sdk/R/R/Subcollection.R
sdk/R/R/autoGenAPI.R
sdk/R/R/zzz.R
sdk/R/README.Rmd
sdk/R/man/Arvados.Rd
sdk/R/man/ArvadosFile.Rd
sdk/R/man/Collection.Rd
sdk/R/man/Subcollection.Rd
sdk/R/man/users.merge.Rd [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeHttpParser.R
sdk/R/tests/testthat/fakes/FakeHttpRequest.R
sdk/R/tests/testthat/fakes/FakeRESTService.R
sdk/R/tests/testthat/test-ArvadosFile.R
sdk/R/tests/testthat/test-Collection.R
sdk/R/tests/testthat/test-CollectionTree.R
sdk/R/tests/testthat/test-HttpParser.R
sdk/R/tests/testthat/test-HttpRequest.R
sdk/R/tests/testthat/test-RESTService.R
sdk/R/tests/testthat/test-Subcollection.R
sdk/R/tests/testthat/test-util.R
sdk/cli/arvados-cli.gemspec
sdk/cli/bin/arv
sdk/cli/bin/arv-run-pipeline-instance
sdk/cli/bin/arv-tag
sdk/cli/bin/crunch-job
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/fsaccess.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/tests/13931-size-job.yml [new file with mode: 0644]
sdk/cwl/tests/13931-size.cwl [new file with mode: 0644]
sdk/cwl/tests/13976-keepref-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/arvados-tests.sh
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_job.py
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/wf/expect_packed.cwl
sdk/dev-jobs.dockerfile
sdk/go/arvados/api_client_authorization.go
sdk/go/arvados/config.go
sdk/go/arvados/postgresql.go [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadostest/fixtures.go
sdk/go/auth/auth.go
sdk/go/auth/salt.go [new file with mode: 0644]
sdk/go/dispatch/dispatch.go
sdk/go/httpserver/metrics.go [new file with mode: 0644]
sdk/python/arvados/api.py
sdk/python/arvados/commands/run.py
sdk/python/setup.py
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
sdk/ruby/arvados.gemspec
sdk/ruby/lib/arvados/google_api_client.rb
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/static_controller.rb
services/api/app/models/arvados_model.rb
services/api/app/models/collection.rb
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/config/application.rb
services/api/config/initializers/oj_mimic_json.rb [new file with mode: 0644]
services/api/config/initializers/time_format.rb
services/api/config/routes.rb
services/api/db/migrate/20180806133039_index_all_filenames.rb [new file with mode: 0644]
services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/crunch_dispatch.rb
services/api/lib/load_param.rb
services/api/lib/safe_json.rb
services/api/test/fixtures/api_client_authorizations.yml
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/containers.yml
services/api/test/fixtures/groups.yml
services/api/test/functional/arvados/v1/container_requests_controller_test.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/integration/cross_origin_test.rb
services/api/test/test_helper.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/container_test.rb
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-local/crunch-dispatch-local_test.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-dispatch-slurm/slurm.go
services/crunch-dispatch-slurm/squeue.go
services/crunch-dispatch-slurm/squeue_test.go
services/crunch-dispatch-slurm/usage.go
services/crunch-run/crunchrun.go
services/crunchstat/crunchstat.go
services/fuse/arvados_fuse/fusedir.py
services/keep-web/cache.go
services/keep-web/cache_test.go
services/keep-web/cadaver_test.go
services/keep-web/doc.go
services/keep-web/handler.go
services/keep-web/handler_test.go
services/keep-web/server.go
services/keep-web/server_test.go
services/keep-web/status_test.go
services/keepproxy/keepproxy_test.go
services/keepstore/config.go
services/keepstore/handlers.go
services/keepstore/s3_volume.go
services/login-sync/arvados-login-sync.gemspec
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
services/nodemanager/arvnodeman/computenode/driver/__init__.py
services/nodemanager/arvnodeman/computenode/driver/azure.py
services/nodemanager/arvnodeman/computenode/driver/dummy.py
services/nodemanager/arvnodeman/computenode/driver/ec2.py
services/nodemanager/arvnodeman/computenode/driver/gce.py
services/nodemanager/arvnodeman/config.py
services/nodemanager/arvnodeman/daemon.py
services/nodemanager/arvnodeman/jobqueue.py
services/nodemanager/arvnodeman/launcher.py
services/nodemanager/doc/azure.example.cfg
services/nodemanager/doc/ec2.example.cfg
services/nodemanager/doc/gce.example.cfg
services/nodemanager/tests/fake_azure.cfg.template
services/nodemanager/tests/integration_test.py
services/nodemanager/tests/test_computenode_dispatch.py
services/nodemanager/tests/test_daemon.py
services/ws/config.go
services/ws/event_source.go
services/ws/event_source_test.go
services/ws/server.go
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/api-setup.sh
tools/arvbox/lib/arvbox/docker/common.sh
tools/arvbox/lib/arvbox/docker/crunch-setup.sh
tools/arvbox/lib/arvbox/docker/keep-setup.sh
tools/arvbox/lib/arvbox/docker/service/api/run-service
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run-service
tools/arvbox/lib/arvbox/docker/service/composer/run-service
tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/controller/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/controller/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service
tools/arvbox/lib/arvbox/docker/service/doc/run [changed from symlink to file mode: 0755]
tools/arvbox/lib/arvbox/docker/service/doc/run-service
tools/arvbox/lib/arvbox/docker/service/gitolite/run-service
tools/arvbox/lib/arvbox/docker/service/keep-web/run-service
tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service
tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/nginx/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/nginx/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/nginx/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/ready/run [changed from symlink to file mode: 0755]
tools/arvbox/lib/arvbox/docker/service/ready/run-service
tools/arvbox/lib/arvbox/docker/service/sso/run-service
tools/arvbox/lib/arvbox/docker/service/vm/run
tools/arvbox/lib/arvbox/docker/service/vm/run-service
tools/arvbox/lib/arvbox/docker/service/websockets/run-service
tools/arvbox/lib/arvbox/docker/service/workbench/run-service
tools/arvbox/lib/arvbox/docker/yml_override.py [moved from tools/arvbox/lib/arvbox/docker/application_yml_override.py with 79% similarity]
vendor/vendor.json

index c480ffda4cca9c5141e954c75bbc41c35fd67b67..12fdd219fc698226033e0283baec5a7ad087e920 100644 (file)
--- a/README.md
+++ b/README.md
@@ -19,9 +19,7 @@ Arvados consists of:
 
 ## Quick start
 
-Curoverse maintains an Arvados public cloud demo at
-[https://cloud.curoverse.com](https://cloud.curoverse.com).  A Google account
-is required to log in.
+Veritas Genetics maintains a public installation of Arvados for evaluation and trial use, the [Arvados Playground](https://playground.arvados.org). A Google account is required to log in.
 
 To try out Arvados on your local workstation, you can use Arvbox, which
 provides Arvados components pre-installed in a Docker container (requires
index 9c4cd678b0cf9aa38a110a32e1223586dceb0afb..06460ad06c1487d1d0c2e08978f36c644de95624 100644 (file)
@@ -186,7 +186,7 @@ GEM
       mini_portile2 (~> 2.3.0)
     npm-rails (0.2.1)
       rails (>= 3.2)
-    oj (3.5.0)
+    oj (3.6.4)
     os (0.9.6)
     passenger (5.2.1)
       rack
@@ -358,4 +358,4 @@ DEPENDENCIES
   wiselinks
 
 BUNDLED WITH
-   1.16.1
+   1.16.2
index 7d1b3b15926816229acbc8d83b0ffa52443055b6..5d42fdf07f668c3f1f25ad26543a4ecc2b26f05c 100644 (file)
@@ -68,7 +68,7 @@ window.SessionDB = function() {
                 url = 'https://' + url;
             }
             url = new URL(url);
-            return db.discoveryDoc({baseURL: url.origin}).map(function() {
+            return m.request(url.origin + '/discovery/v1/apis/arvados/v1/rest').then(function() {
                 return url.origin + '/';
             }).catch(function(err) {
                 // If url is a Workbench site (and isn't too old),
@@ -231,9 +231,13 @@ window.SessionDB = function() {
         // discovery doc from a session's API server.
         discoveryDoc: function(session) {
             var cache = db.discoveryCache[session.baseURL];
-            if (!cache) {
+            if (!cache && session) {
                 db.discoveryCache[session.baseURL] = cache = m.stream();
-                m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest')
+                var baseURL = session.baseURL;
+                if (baseURL[baseURL.length - 1] !== '/') {
+                    baseURL += '/';
+                }
+                m.request(baseURL+'discovery/v1/apis/arvados/v1/rest')
                     .then(function (dd) {
                         // Just in case we're talking with an old API server.
                         dd.remoteHosts = dd.remoteHosts || {};
@@ -293,6 +297,7 @@ window.SessionDB = function() {
         autoLoadRemoteHosts: function() {
             var sessions = db.loadAll();
             var doc = db.discoveryDoc(db.loadLocal());
+            if (doc === undefined) { return; }
             doc.map(function(d) {
                 Object.keys(d.remoteHosts).map(function(uuidPrefix) {
                     if (!(sessions[uuidPrefix])) {
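For reference, the trailing-slash handling added to discoveryDoc above reduces to a small normalization step. The following sketch is illustrative only; the helper name withTrailingSlash is hypothetical and not part of the source:

<pre>
// Hypothetical helper equivalent to the inline normalization above:
// make sure the base URL ends in a slash before appending the discovery path.
function withTrailingSlash(baseURL) {
    return baseURL[baseURL.length - 1] === '/' ? baseURL : baseURL + '/';
}
// Both forms yield the same discovery document URL:
withTrailingSlash('https://zzzzz.arvadosapi.com') + 'discovery/v1/apis/arvados/v1/rest';
withTrailingSlash('https://zzzzz.arvadosapi.com/') + 'discovery/v1/apis/arvados/v1/rest';
</pre>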
index 106716a0f72f178e826afc6eaaf2908ecb8afe0a..2b48d74b20c09d407edb11d36bdb06d7152bdaa8 100644 (file)
@@ -16,7 +16,7 @@ module ApplicationHelper
   end
 
   def render_markup(markup)
-    raw RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile) if markup
+    sanitize(raw(RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile))) if markup
   end
 
   def human_readable_bytes_html(n)
@@ -43,13 +43,6 @@ module ApplicationHelper
     end
 
     return h(n)
-    #raw = n.to_s
-    #cooked = ''
-    #while raw.length > 3
-    #  cooked = ',' + raw[-3..-1] + cooked
-    #  raw = raw[0..-4]
-    #end
-    #cooked = raw + cooked
   end
 
   def resource_class_for_uuid(attrvalue, opts={})
@@ -680,9 +673,10 @@ module ApplicationHelper
   end
 
   # Keep locators are expected to be of the form \"...<pdh/file_path>\"
-  JSON_KEEP_LOCATOR_REGEXP = /(.*)(([0-9a-f]{32}\+\d+)(.*)\"(.*))/
+  JSON_KEEP_LOCATOR_REGEXP = /([0-9a-f]{32}\+\d+[^'"]*?)(?=['"]|\z|$)/
   def keep_locator_in_json str
-    JSON_KEEP_LOCATOR_REGEXP.match str
+    # Return a list of all matches
+    str.scan(JSON_KEEP_LOCATOR_REGEXP).flatten
   end
 
 private
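To make the change to keep_locator_in_json concrete, here is a minimal standalone sketch of the new scanning behavior (plain Ruby, outside Rails; the sample locator is the fixture collection used in the tests below):

<pre>
# Minimal sketch of the new multi-match behavior.
JSON_KEEP_LOCATOR_REGEXP = /([0-9a-f]{32}\+\d+[^'"]*?)(?=['"]|\z|$)/
line = '{"command": ["cat", "fa7aeb5140e2848d39b416daeef4ffc5+45/baz"]}'
p line.scan(JSON_KEEP_LOCATOR_REGEXP).flatten
# => ["fa7aeb5140e2848d39b416daeef4ffc5+45/baz"]
</pre>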
index 273ae1cebb779aa2a6e813f1488baa96086a603c..b34b4cac8fd4862146e9f0a89b268c5b16ff8f5d 100644 (file)
@@ -6,30 +6,39 @@ SPDX-License-Identifier: AGPL-3.0 %>
 
 <% data_height = data_height || 100 %>
   <div style="max-height:<%=data_height%>px; overflow:auto;">
-    <% text_data.each_line do |l| %>
-      <% text_part = l %>
-      <% match = keep_locator_in_json l %>
+    <% text_data.each_line do |line| %>
+      <% matches = keep_locator_in_json line %>
 
-      <%
-        if match
-          text_part = match[1]
-          rindex = match[2].rindex('"'); match2 = match[2][0..rindex-1]
-          quote_char = '"'
+      <% if matches.nil? or matches.empty? %>
+        <span style="white-space: pre-wrap; margin: none;"><%= line %></span>
+      <% else
+        subs = []
+        matches.uniq.each do |loc|
+          pdh, filename = loc.split('/', 2)
 
-          pdh_readable = object_readable(match2)
-          file_link = ''
-          if pdh_readable and match[4].size > 0
-            link_params = {controller: 'collections', action: 'show_file', uuid: match[3], file: match[4][1..-1]}
-            preview_allowed = preview_allowed_for(match[4])
-            if preview_allowed
-              file_link = link_to(raw(match[4]), link_params.merge(disposition: 'inline'))
-            else
-              file_link = link_to(raw(match[4]), link_params.merge(disposition: 'attachment'))
+          if object_readable(pdh)
+            # Add PDH link
+            replacement = link_to_arvados_object_if_readable(pdh, pdh, friendly_name: true)
+            if filename
+              link_params = {controller: 'collections', action: 'show_file', uuid: pdh, file: filename}
+              if preview_allowed_for(filename)
+                params = {disposition: 'inline'}
+              else
+                params = {disposition: 'attachment'}
+              end
+              file_link = link_to(raw("/"+filename), link_params.merge(params))
+              # Add file link
+              replacement << file_link
             end
+            # Add link(s) substitution
+            subs << [loc, replacement]
           end
         end
-      %>
-
-      <span style="white-space: pre-wrap; margin: none;"><%= text_part %><% if match %><% if pdh_readable then %><%= link_to_arvados_object_if_readable(match[3], match[3], friendly_name: true) %><%= file_link%><% else %><%= match2%><% end %><%=quote_char+match[5]%><br/><% end %></span>
+        # Replace all readable locators with links
+        subs.each do |loc, link|
+          line.gsub!(loc, link)
+        end %>
+        <span style="white-space: pre-wrap; margin: none;"><%= raw line %></span>
+      <% end %>
     <% end %>
   </div>
index 89e1506f4bf4163e5818f9eae0d209551e195d75..6e96839e25f617f6ffca29ca1a44e81065c0cce2 100644 (file)
@@ -134,6 +134,8 @@ class ContainerRequestsControllerTest < ActionController::TestCase
     assert_response :success
 
     assert_match /hello/, @response.body
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/baz\?" # locator on command
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar\?" # locator on command
     assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foo" # mount input1
     assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/bar" # mount input2
     assert_includes @response.body, "href=\"\/collections/1fd08fc162a5c6413070a8bd0bffc818+150" # mount workflow
index ada0e33e70ab5f41221389f39cce1e9e2fdf32b3..3522745fe4cc0bca3da001e12c805fe516640482 100644 (file)
@@ -335,10 +335,20 @@ class ProjectsControllerTest < ActionController::TestCase
     project = api_fixture('groups')['aproject']
     use_token :active
     found = Group.find(project['uuid'])
-    found.description = 'Textile description with link to home page <a href="/">take me home</a>.'
+    found.description = '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
     found.save!
     get(:show, {id: project['uuid']}, session_for(:active))
-    assert_includes @response.body, 'Textile description with link to home page <a href="/">take me home</a>.'
+    assert_includes @response.body, '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
+  end
+
+  test "find a project and edit description to unsafe html description" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+    found.description = 'Textile description with unsafe script tag <script language="javascript">alert("Hello there")</script>.'
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, 'Textile description with unsafe script tag alert("Hello there").'
   end
 
   test "find a project and edit description to textile description with link to object" do
index 5f6ef9988bab245ad054e78f13da5b6d65462850..e5cc6e4dc050d47bfa2d6220a4ec02528ef7aa05 100644 (file)
@@ -93,7 +93,7 @@ class WorkUnitsTest < ActionDispatch::IntegrationTest
 
       assert_text 'created_at'
       if cancelable
-        assert_text 'priority: 1' if type.include?('container')
+        assert_text 'priority: 501' if type.include?('container')
         if type.include?('pipeline')
           assert_selector 'a', text: 'Pause'
           first('a,link', text: 'Pause').click
index ef6407031c41f286d47ad08e573f3020a48bf9e9..4c3d740b0b82c21a71e274bfc0db1311bdcd3e43 100644 (file)
@@ -4,35 +4,35 @@
 
 #distribution(s)|name|version|iteration|type|architecture|extra fpm arguments
 debian8,debian9,centos7|python-gflags|2.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|google-api-python-client|1.6.2|2|python|all
 debian8,debian9,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
 debian8,debian9,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
 debian8,debian9,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|rsa|3.4.2|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|uritemplate|3.0.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
-debian8,debian9,centos7|ws4py|0.3.5|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|rsa|3.4.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|uritemplate|3.0.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|httplib2|0.9.2|3|python|all
+debian8,debian9,centos7,ubuntu1404,ubuntu1604|ws4py|0.4.2|2|python|all
 debian8,debian9,centos7|pykka|1.2.1|2|python|all
 debian8,debian9,ubuntu1404,centos7|six|1.10.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.6|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|ciso8601|1.0.6|3|python|amd64
 debian8,debian9,centos7|pycrypto|2.6.1|3|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|llfuse|1.2|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804|backports.ssl_match_hostname|3.5.0.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|llfuse|1.2|3|python|amd64
 debian8,debian9,ubuntu1404,centos7|pycurl|7.19.5.3|3|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|pyyaml|3.12|2|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|rdflib|4.2.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|pyyaml|3.12|2|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|rdflib|4.2.2|2|python|all
 debian8,debian9,ubuntu1404,centos7|shellescape|3.4.1|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|mistune|0.7.3|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|typing|3.5.3.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|avro|1.8.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|mistune|0.7.3|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|typing|3.6.4|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|avro|1.8.1|2|python|all
 debian8,debian9,ubuntu1404,centos7|ruamel.ordereddict|0.4.9|2|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|cachecontrol|0.11.7|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|pathlib2|2.3.2|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|scandir|1.7|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|docker-py|1.7.2|2|python3|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|cachecontrol|0.11.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|pathlib2|2.3.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|scandir|1.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|docker-py|1.7.2|2|python3|all
 debian8,debian9,centos7|six|1.10.0|2|python3|all
 debian8,debian9,ubuntu1404,centos7|requests|2.12.4|2|python3|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|websocket-client|0.37.0|2|python3|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|websocket-client|0.37.0|2|python3|all
 ubuntu1404|requests|2.4.3|2|python|all
 centos7|contextlib2|0.5.4|2|python|all
 centos7|isodate|0.5.4|2|python|all
@@ -40,8 +40,10 @@ centos7|python-daemon|2.1.2|1|python|all
 centos7|pbr|0.11.1|2|python|all
 centos7|pyparsing|2.1.10|2|python|all
 centos7|keepalive|0.5|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|subprocess32|3.5.1|2|python|all
+centos7|networkx|1.11|0|python|all
+centos7|psutil|5.0.1|0|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|lockfile|0.12.2|2|python|all|--epoch 1
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|subprocess32|3.5.1|2|python|all
 all|ruamel.yaml|0.14.12|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
 all|cwltest|1.0.20180518074130|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32 >= 3.5.0'
 all|junit-xml|1.8|3|python|all
@@ -50,3 +52,6 @@ all|futures|3.0.5|2|python|all
 all|future|0.16.0|2|python|all
 all|future|0.16.0|2|python3|all
 all|mypy-extensions|0.3.0|1|python|all
+all|prov|1.5.1|0|python|all
+all|bagit|1.6.4|0|python|all
+all|typing-extensions|3.6.5|0|python|all
index ab1ade14deababdcc76abd11d02a99968ac0dac1..9dc536bf1eb919a7d334d3d18061db3dfd8c90d7 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-all: centos7/generated debian8/generated debian9/generated ubuntu1204/generated ubuntu1404/generated ubuntu1604/generated
+all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
 
 centos7/generated: common-generated-all
        test -d centos7/generated || mkdir centos7/generated
@@ -16,10 +16,6 @@ debian9/generated: common-generated-all
        test -d debian9/generated || mkdir debian9/generated
        cp -rlt debian9/generated common-generated/*
 
-ubuntu1204/generated: common-generated-all
-       test -d ubuntu1204/generated || mkdir ubuntu1204/generated
-       cp -rlt ubuntu1204/generated common-generated/*
-
 ubuntu1404/generated: common-generated-all
        test -d ubuntu1404/generated || mkdir ubuntu1404/generated
        cp -rlt ubuntu1404/generated common-generated/*
@@ -28,6 +24,10 @@ ubuntu1604/generated: common-generated-all
        test -d ubuntu1604/generated || mkdir ubuntu1604/generated
        cp -rlt ubuntu1604/generated common-generated/*
 
+ubuntu1804/generated: common-generated-all
+       test -d ubuntu1804/generated || mkdir ubuntu1804/generated
+       cp -rlt ubuntu1804/generated common-generated/*
+
 GOTARBALL=go1.10.1.linux-amd64.tar.gz
 NODETARBALL=node-v6.11.2-linux-x64.tar.xz
 
index 3a8b03f190b420a69b673780a46d434c7dad8da1..216c5cbbad079bfc4ef7a16d5734579b8578995b 100644 (file)
@@ -9,12 +9,12 @@ MAINTAINER Ward Vandewege <ward@curoverse.com>
 RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git
 
 # Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler && \
-    /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
 
 # Install golang binary
 ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
index 54267d708e2cc2ce34c603bf5048cf816c31de86..fb1209027bdf75438790d8736b6ce710ef656b67 100644 (file)
@@ -11,12 +11,12 @@ ENV DEBIAN_FRONTEND noninteractive
 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip
 
 # Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler && \
-    /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
 
 # Install golang binary
 ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
index 9ade5fa27232f6613fd07199a6c8a3d9f54565ca..e2da5ad2119f09f91d558ca327baad7dda5e3cf7 100644 (file)
@@ -18,7 +18,7 @@ RUN gpg --import /tmp/D39DC0E3.asc && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler && \
-    /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
 
 # Install golang binary
 ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
index 4ff47ff315bee92d127814348f621a54f64e789a..b7d4081ffb98e8aa2be1d241d30938d7eaf21820 100644 (file)
@@ -11,12 +11,12 @@ ENV DEBIAN_FRONTEND noninteractive
 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip 
 
 # Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler && \
-    /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
 
 # Install golang binary
 ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
index 7e5701f871cb987dc581fe843b1b2f3c4a2d3b7c..6220652e56e15faa8c917c8838b024e12bbe740c 100644 (file)
@@ -11,12 +11,12 @@ ENV DEBIAN_FRONTEND noninteractive
 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata
 
 # Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler && \
-    /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
 
 # Install golang binary
 ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
diff --git a/build/package-build-dockerfiles/ubuntu1804/Dockerfile b/build/package-build-dockerfiles/ubuntu1804/Dockerfile
new file mode 100644 (file)
index 0000000..68df124
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata
+
+# Install RVM
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+# Old versions of setuptools cannot build a schema-salad package.
+RUN pip install --upgrade setuptools
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1804"]
index fd2f9e3d8cdbafced2990875f56ce72a2719672f..fa959a1eb8cc2e7aa39d89338ae203c5fe599933 100644 (file)
@@ -9,7 +9,7 @@ RUN yum -q -y install scl-utils centos-release-scl which tar
 
 # Install RVM
 RUN touch /var/lib/rpm/* && \
-    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
index dcf581a1e036876cb85676e305b78eb399381448..c40ed820790a8672e63b9f01e19ba4ea86c3313f 100644 (file)
@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND noninteractive
 # Install RVM
 RUN apt-get update && \
     apt-get -y install --no-install-recommends curl ca-certificates && \
-    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3
index a1bc48443ea4eb48e4939d04deb8952c67220882..6d929e84948e3eb3d91f974b5b292b18b22fea62 100644 (file)
@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND noninteractive
 # Install dependencies and RVM
 RUN apt-get update && \
     apt-get -y install --no-install-recommends curl ca-certificates python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip binutils build-essential ca-certificates  && \
-    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3
index 25d49dc5019112cc7d9cc6b251904780471faa79..54b1f401cf00cd2a3b6248676de51aee99915b29 100644 (file)
@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND noninteractive
 # Install RVM
 RUN apt-get update && \
     apt-get -y install --no-install-recommends curl ca-certificates && \
-    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.3
diff --git a/build/package-test-dockerfiles/ubuntu1804/Dockerfile b/build/package-test-dockerfiles/ubuntu1804/Dockerfile
new file mode 100644 (file)
index 0000000..506abac
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install RVM
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates gnupg2 && \
+    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu1804/ /" >>/etc/apt/sources.list
+
+# Add preferences file for the Arvados packages. This pins Arvados
+# packages at priority 501, so that older python dependency versions
+# are preferred in those cases where we need them.
+ADD etc-apt-preferences.d-arvados /etc/apt/preferences.d/arvados
diff --git a/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados b/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados
new file mode 100644 (file)
index 0000000..9e24695
--- /dev/null
@@ -0,0 +1,3 @@
+Package: *
+Pin: release o=Arvados
+Pin-Priority: 501
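A quick way to confirm this pin is in effect inside the test container (this assumes the local package repository advertises Origin: Arvados, which is what o=Arvados matches):

<pre>
# After updating the package lists, entries from the pinned repository
# should be listed at priority 501 by apt-cache.
apt-get update
apt-cache policy
</pre>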
index b4ea35c574b20a776960aeefad4d4e4d324a347e..b5325224ee170bbc6170babfa8c2b95665d2057a 100755 (executable)
@@ -27,7 +27,13 @@ cd /tmp/opts
 
 export ARV_PACKAGES_DIR="/arvados/packages/$target"
 
-dpkg-deb -x $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) .
+if [[ -f $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb 2>/dev/null | head -n1) ]] ; then
+    debpkg=$(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1)
+else
+    debpkg=$(ls -t "$ARV_PACKAGES_DIR/processed/$1"_*.deb | head -n1)
+fi
+
+dpkg-deb -x $debpkg .
 
 while read so && [ -n "$so" ]; do
     echo
diff --git a/build/package-testing/test-packages-ubuntu1804.sh b/build/package-testing/test-packages-ubuntu1804.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
index bd3975841bf40cbb674dce29b43960ac1a08a524..d6a21178f8cfe3d224a08f955ca35eeb996d0dff 100755 (executable)
@@ -71,22 +71,13 @@ if [[ "$DEBUG" != 0 ]]; then
 fi
 
 case "$TARGET" in
-    debian8)
+    debian*)
         FORMAT=deb
         ;;
-    debian9)
+    ubuntu*)
         FORMAT=deb
         ;;
-    ubuntu1204)
-        FORMAT=deb
-        ;;
-    ubuntu1404)
-        FORMAT=deb
-        ;;
-    ubuntu1604)
-        FORMAT=deb
-        ;;
-    centos7)
+    centos*)
         FORMAT=rpm
         ;;
     *)
index caebac013d4db721af25655a3551371c35275782..9ffa20fe6492443feb5490ec4ce06309374b4d06 100755 (executable)
@@ -104,19 +104,13 @@ PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/dist-packages
 ## End Debian Python defaults.
 
 case "$TARGET" in
-    debian8)
+    debian*)
         FORMAT=deb
         ;;
-    debian9)
+    ubuntu*)
         FORMAT=deb
         ;;
-    ubuntu1404)
-        FORMAT=deb
-        ;;
-    ubuntu1604)
-        FORMAT=deb
-        ;;
-    centos7)
+    centos*)
         FORMAT=rpm
         PYTHON2_PACKAGE=$(rpm -qf "$(which python$PYTHON2_VERSION)" --queryformat '%{NAME}\n')
         PYTHON2_PKG_PREFIX=$PYTHON2_PACKAGE
@@ -153,8 +147,13 @@ if [[ "$?" != 0 ]]; then
   exit 1
 fi
 
-EASY_INSTALL2=$(find_easy_install -$PYTHON2_VERSION "")
-EASY_INSTALL3=$(find_easy_install -$PYTHON3_VERSION 3)
+PYTHON2_FPM_INSTALLER=(--python-easyinstall "$(find_python_program easy_install-$PYTHON2_VERSION easy_install)")
+install3=$(find_python_program easy_install-$PYTHON3_VERSION easy_install3 pip-$PYTHON3_VERSION pip3)
+if [[ $install3 =~ easy_ ]]; then
+    PYTHON3_FPM_INSTALLER=(--python-easyinstall "$install3")
+else
+    PYTHON3_FPM_INSTALLER=(--python-pip "$install3")
+fi
 
 RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
 RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
index 4b18d037b6b30655714ed174fad84b665ebe7f9f..8ba14949d3c0847acaaa8c2fe3671a513c7668de 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -xe
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
@@ -20,17 +20,19 @@ debug_echo () {
     echo "$@" >"$STDOUT_IF_DEBUG"
 }
 
-find_easy_install() {
-    for version_suffix in "$@"; do
-        if "easy_install$version_suffix" --version >/dev/null 2>&1; then
-            echo "easy_install$version_suffix"
+find_python_program() {
+    wanted="$1"
+    shift
+    for prog in "$wanted" "$@"; do
+        if "$prog" --version >/dev/null 2>&1; then
+            echo "$prog"
             return 0
         fi
     done
     cat >&2 <<EOF
 $helpmessage
 
-Error: easy_install$1 (from Python setuptools module) not found
+Error: $wanted (from Python setuptools module) not found
 
 EOF
     exit 1
@@ -60,7 +62,7 @@ version_from_git() {
     declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
     ARVADOS_BUILDING_VERSION="$(git describe --abbrev=0).$(date -ud "@$git_ts" +%Y%m%d%H%M%S)"
     echo "$ARVADOS_BUILDING_VERSION"
-} 
+}
 
 nohash_version_from_git() {
     if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
@@ -264,7 +266,7 @@ test_package_presence() {
     # Get the list of packages from the repos
 
     if [[ "$FORMAT" == "deb" ]]; then
-      debian_distros="jessie precise stretch trusty wheezy xenial"
+      debian_distros="jessie precise stretch trusty wheezy xenial bionic"
 
       for D in ${debian_distros}; do
         if [ ${pkgname:0:3} = "lib" ]; then
@@ -273,12 +275,15 @@ test_package_presence() {
           repo_subdir=${pkgname:0:1}
         fi
 
-        repo_pkg_list=$(curl -o - http://apt.arvados.org/pool/${D}/main/${repo_subdir}/)
+        repo_pkg_list=$(curl -s -o - http://apt.arvados.org/pool/${D}/main/${repo_subdir}/)
         echo ${repo_pkg_list} |grep -q ${complete_pkgname}
-        if [ $? -eq 0 ]; then
+        if [ $? -eq 0 ] ; then
           echo "Package $complete_pkgname exists, not rebuilding!"
           curl -o ./${complete_pkgname} http://apt.arvados.org/pool/${D}/main/${repo_subdir}/${complete_pkgname}
           return 1
+        elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
+          echo "Package $complete_pkgname exists, not rebuilding!"
+          return 1
         else
           echo "Package $complete_pkgname not found, building"
           return 0
@@ -310,6 +315,7 @@ handle_rails_package() {
     cd "$srcdir"
     local license_path="$1"; shift
     local version="$(version_from_git)"
+    echo "$version" >package-build.version
     local scripts_dir="$(mktemp --tmpdir -d "$pkgname-XXXXXXXX.scripts")" && \
     (
         set -e
@@ -387,7 +393,7 @@ fpm_build () {
           # Make sure we build with that for consistency.
           python=python2.7
           set -- "$@" --python-bin python2.7 \
-              --python-easyinstall "$EASY_INSTALL2" \
+              "${PYTHON_FPM_INSTALLER[@]}" \
               --python-package-name-prefix "$PYTHON2_PKG_PREFIX" \
               --prefix "$PYTHON2_PREFIX" \
               --python-install-lib "$PYTHON2_INSTALL_LIB" \
@@ -403,7 +409,7 @@ fpm_build () {
           PACKAGE_TYPE=python
           python=python3
           set -- "$@" --python-bin python3 \
-              --python-easyinstall "$EASY_INSTALL3" \
+              "${PYTHON3_FPM_INSTALLER[@]}" \
               --python-package-name-prefix "$PYTHON3_PKG_PREFIX" \
               --prefix "$PYTHON3_PREFIX" \
               --python-install-lib "$PYTHON3_INSTALL_LIB" \
index 636c0306ca94a7948b1e4a63302ae467ca7aea37..4ddbf89c1d7ccb286fcfe887fb941734bffbb519 100755 (executable)
@@ -270,6 +270,7 @@ rotate_logfile() {
 
 declare -a failures
 declare -A skip
+declare -A only
 declare -A testargs
 skip[apps/workbench_profile]=1
 # nodemanager_integration tests are not reliable, see #12061.
@@ -288,7 +289,7 @@ do
             skip[$1]=1; shift
             ;;
         --only)
-            only="$1"; skip[$1]=""; shift
+            only[$1]=1; skip[$1]=""; shift
             ;;
         --short)
             short=1
@@ -331,14 +332,19 @@ done
 # required when testing it. Skip that step if it is not needed.
 NEED_SDK_R=true
 
-if [[ ! -z "${only}" && "${only}" != "sdk/R" ]]; then
+if [[ ${#only[@]} -ne 0 ]] &&
+   [[ -z "${only['sdk/R']}" && -z "${only['doc']}" ]]; then
   NEED_SDK_R=false
 fi
 
-if [[ ! -z "${skip}" && "${skip}" == "sdk/R" ]]; then
+if [[ ${skip["sdk/R"]} == 1 && ${skip["doc"]} == 1 ]]; then
   NEED_SDK_R=false
 fi
 
+if [[ $NEED_SDK_R == false ]]; then
+  echo "R SDK not needed, it will not be installed."
+fi
+
 start_services() {
     echo 'Starting API, keepproxy, keep-web, ws, arv-git-httpd, and nginx ssl proxy...'
     if [[ ! -d "$WORKSPACE/services/api/log" ]]; then
@@ -658,9 +664,9 @@ do_test() {
             ;;
     esac
     if [[ -z "${skip[$suite]}" && -z "${skip[$1]}" && \
-              (-z "${only}" || "${only}" == "${suite}" || \
-                   "${only}" == "${1}") ||
-                  "${only}" == "${2}" ]]; then
+              (${#only[@]} -eq 0 || ${only[$suite]} -eq 1 || \
+                   ${only[$1]} -eq 1) ||
+                  ${only[$2]} -eq 1 ]]; then
         retry do_test_once ${@}
     else
         title "Skipping ${1} tests"
@@ -732,7 +738,7 @@ do_test_once() {
 do_install() {
   skipit=false
 
-  if [[ -z "${only_install}" || "${only_install}" == "${1}" ]]; then
+  if [[ -z "${only_install}" || "${only_install}" == "${1}" || "${only_install}" == "${2}" ]]; then
       retry do_install_once ${@}
   else
       skipit=true
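With only now an associative array rather than a scalar, several suites can be selected in a single run; an illustrative invocation:

<pre>
# Run just the R SDK and documentation suites; all other suites are skipped.
./run-tests.sh --only sdk/R --only doc
</pre>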
index e5e4719a682140cf22219fb143d3e3e9da56d93c..c2c97a9ff54bf11552d40f14d8bc452a960c441e 100644 (file)
@@ -24,6 +24,7 @@ navbar:
     - Run a workflow using Workbench:
       - user/getting_started/workbench.html.textile.liquid
       - user/tutorials/tutorial-workflow-workbench.html.textile.liquid
+      - user/composer/composer.html.textile.liquid
     - Access an Arvados virtual machine:
       - user/getting_started/vm-login-with-webshell.html.textile.liquid
       - user/getting_started/ssh-access-unix.html.textile.liquid
@@ -147,15 +148,22 @@ navbar:
   admin:
     - Topics:
       - admin/index.html.textile.liquid
+    - Upgrading and migrations:
       - admin/upgrading.html.textile.liquid
+      - install/migrate-docker19.html.textile.liquid
+    - Users and Groups:
       - install/cheat_sheet.html.textile.liquid
-      - user/topics/arvados-sync-groups.html.textile.liquid
-      - admin/storage-classes.html.textile.liquid
       - admin/activation.html.textile.liquid
-      - admin/migrating-providers.html.textile.liquid
       - admin/merge-remote-account.html.textile.liquid
+      - admin/migrating-providers.html.textile.liquid
+      - user/topics/arvados-sync-groups.html.textile.liquid
+    - Monitoring:
+      - admin/health-checks.html.textile.liquid
+      - admin/metrics.html.textile.liquid
+      - admin/management-token.html.textile.liquid
+    - Cloud:
+      - admin/storage-classes.html.textile.liquid
       - admin/spot-instances.html.textile.liquid
-      - install/migrate-docker19.html.textile.liquid
   installguide:
     - Overview:
       - install/index.html.textile.liquid
@@ -165,15 +173,12 @@ navbar:
       - install/arvados-on-kubernetes.html.textile.liquid
     - Manual installation:
       - install/install-manual-prerequisites.html.textile.liquid
+      - install/install-components.html.textile.liquid
+    - Core:
       - install/install-postgresql.html.textile.liquid
-      - install/install-sso.html.textile.liquid
       - install/install-api-server.html.textile.liquid
-      - install/install-ws.html.textile.liquid
-      - install/install-arv-git-httpd.html.textile.liquid
-      - install/install-workbench-app.html.textile.liquid
-      - install/install-composer.html.textile.liquid
-      - install/install-shell-server.html.textile.liquid
-      - install/create-standard-objects.html.textile.liquid
+      - install/install-controller.html.textile.liquid
+    - Keep:
       - install/install-keepstore.html.textile.liquid
       - install/configure-fs-storage.html.textile.liquid
       - install/configure-s3-object-storage.html.textile.liquid
@@ -181,6 +186,14 @@ navbar:
       - install/install-keepproxy.html.textile.liquid
       - install/install-keep-web.html.textile.liquid
       - install/install-keep-balance.html.textile.liquid
+    - User interface:
+      - install/install-sso.html.textile.liquid
+      - install/install-workbench-app.html.textile.liquid
+      - install/install-composer.html.textile.liquid
+    - Additional services:
+      - install/install-ws.html.textile.liquid
+      - install/install-shell-server.html.textile.liquid
+      - install/install-arv-git-httpd.html.textile.liquid
     - Containers API support on SLURM:
       - install/crunch2-slurm/install-prerequisites.html.textile.liquid
       - install/crunch2-slurm/install-slurm.html.textile.liquid
index 18347785cd07d018b66247af7a90807a6630e2ec..eb808e41835bdb3a887758f5e0044cfe03d449c8 100644 (file)
@@ -49,3 +49,31 @@ On Red Hat-based systems, run:
 </notextile>
 
 Finally, reboot the system to make these changes effective.
+
+h2. Create a project for Docker images
+
+Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">all_users_group_uuid="$uuid_prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
+~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
+~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$project_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+h2. Download and tag the latest arvados/jobs docker image
+
+In order to start workflows from workbench, there needs to be a Docker image tagged @arvados/jobs:latest@. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as 'latest'.  In this example @$project_uuid@ should be the UUID of the "Arvados Standard Docker Images" project.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-keepdocker --project-uuid $project_uuid --pull arvados/jobs latest</span>
+</code></pre></notextile>
+
+If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
diff --git a/doc/_includes/_skip_sso_server_install.liquid b/doc/_includes/_skip_sso_server_install.liquid
deleted file mode 100644 (file)
index eafa4cc..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-<div class="alert alert-block alert-info">
-  <button type="button" class="close" data-dismiss="alert">&times;</button>
-  <h4>Note!</h4>
-  <p>The SSO server codebase currently uses OpenID 2.0 to talk to Google's authentication service. Google <a href="https://developers.google.com/accounts/docs/OpenID2">has deprecated that protocol</a>. This means that new clients will not be allowed to talk to Google's authentication services anymore over OpenID 2.0, and they will phase out the use of OpenID 2.0 completely in the coming monts. We are working on upgrading the SSO server codebase to a newer protocol. That work should be complete by the end of November 2014. In the mean time, anyone is free to use the existing Curoverse SSO server for any local Arvados installation. Instructions to do so are provided on the "API server":install-api-server.html page.</p>
-  <p><strong>Recommendation: skip this step</strong></p>
-</div>
diff --git a/doc/admin/health-checks.html.textile.liquid b/doc/admin/health-checks.html.textile.liquid
new file mode 100644 (file)
index 0000000..630c6a1
--- /dev/null
@@ -0,0 +1,70 @@
+---
+layout: default
+navsection: admin
+title: Health checks
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Health check endpoints are found at @/_health/ping@ on many Arvados services.  The purpose of the health check is to offer a simple method of determining if a service can be reached and allow the service to self-report any problems, suitable for integrating into operational alert systems.
+
+To access health check endpoints, services must be configured with a "management token":management-token.html .
+
+Health check endpoints return a JSON object with the field @health@.  This has a value of either @OK@ or @ERROR@.  On error, it may also include a field @error@ with additional information.  Examples:
+
+<pre>
+{
+  "health": "OK"
+}
+</pre>
+
+<pre>
+{
+  "health": "ERROR"
+  "error": "Inverted polarity in the warp core"
+}
+</pre>
+
+h2. Healthcheck aggregator
+
+The service @arvados-health@ performs health checks on all configured services and returns a single value of @OK@ or @ERROR@ for the entire cluster.  It exposes the endpoint @/_health/all@ .
+
+The healthcheck aggregator uses the @NodeProfile@ section of the cluster-wide @arvados.yml@ configuration file.  Here is an example.
+
+<pre>
+Cluster:
+  # The cluster uuid prefix
+  zzzzz:
+    NodeProfile:
+      # For each node, the profile name corresponds to a
+      # locally-resolvable hostname, and describes which Arvados
+      # services are available on that machine.
+      api:
+        arvados-controller:
+          Listen: 8000
+        arvados-api-server:
+          Listen: 8001
+      manage:
+        arvados-node-manager:
+          Listen: 8002
+      workbench:
+        arvados-workbench:
+          Listen: 8003
+        arvados-ws:
+          Listen: 8004
+      keep:
+        keep-web:
+          Listen: 8005
+        keepproxy:
+          Listen: 8006
+      keep0:
+        keepstore:
+          Listen: 25701
+      keep1:
+        keepstore:
+          Listen: 25701
+</pre>
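As a concrete illustration of the endpoints described above (the hostname is a placeholder; the port and token must match the service's configuration):

<pre>
# Ping one keepstore's health endpoint; a healthy service returns {"health":"OK"}.
curl -H "Authorization: Bearer your_management_token_goes_here" \
     https://keep0.zzzzz.example.com:25701/_health/ping
</pre>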
diff --git a/doc/admin/management-token.html.textile.liquid b/doc/admin/management-token.html.textile.liquid
new file mode 100644 (file)
index 0000000..5380f38
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: admin
+title: Management token
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+To enable and collect health checks and metrics, services must be configured with a "management token".
+
+The management token is used to authorize access to monitoring endpoints.  If @ManagementToken@ is not configured, monitoring endpoints will return the error @404 disabled@.
+
+To access a monitoring endpoint, the requester must provide the HTTP header @Authorization: Bearer (ManagementToken)@.
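+
+For example, to query node manager's monitoring endpoint on the port from the example below (a sketch; @xxx@ stands for your configured token):
+
+<pre>
+curl -H "Authorization: Bearer xxx" http://localhost:8989/status.json
+</pre>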
+
+h2. API server
+
+Set @ManagementToken@ in the appropriate section of @application.yml@.
+
+<pre>
+production:
+  # Token to be included in all healthcheck requests. Disabled by default.
+  # Server expects request header of the format "Authorization: Bearer xxx"
+  ManagementToken: xxx
+</pre>
+
+h2. Node Manager
+
+Set @port@ (the listen port) and @ManagementToken@ in the @Manage@ section of @node-manager.ini@.
+
+<pre>
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+ManagementToken = xxx
+</pre>
+
+h2. Other services
+
+The following services also support monitoring.  Set @ManagementToken@ in the respective YAML config file for each service, as sketched after the list:
+
+* keepstore
+* keep-web
+* keepproxy
+* arv-git-httpd
+* websockets
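+
+For example, a minimal monitoring stanza in keepstore's config (a sketch; the path @/etc/arvados/keepstore/keepstore.yml@ and surrounding settings depend on your installation):
+
+<pre>
+ManagementToken: xxx
+</pre>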
diff --git a/doc/admin/metrics.html.textile.liquid b/doc/admin/metrics.html.textile.liquid
new file mode 100644 (file)
index 0000000..45b9ece
--- /dev/null
@@ -0,0 +1,189 @@
+---
+layout: default
+navsection: admin
+title: Metrics
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Some Arvados services publish Prometheus/OpenMetrics-compatible metrics at @/metrics@, and some provide additional runtime status at @/status.json@.  Metrics can help you understand how components perform under load, find performance bottlenecks, and detect and diagnose problems.
+
+To access metrics endpoints, services must be configured with a "management token":management-token.html. When accessing a metrics endpoint, prefix the management token with @"Bearer "@ and supply it in the @Authorization@ request header.
+
+<pre>curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://0.0.0.0:25107/status.json"
+</pre>
+
+h2. Keep-web
+
+Keep-web exports metrics at @/metrics@ -- e.g., @https://collections.zzzzz.arvadosapi.com/metrics@.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|request_duration_seconds|summary|elapsed time between receiving a request and sending the last byte of the response body (segmented by HTTP request method and response status code)|
+|time_to_status_seconds|summary|elapsed time between receiving a request and sending the HTTP response status code (segmented by HTTP request method and response status code)|
+
+Metrics in the @arvados_keepweb_collectioncache@ namespace report keep-web's internal cache of Arvados collection metadata.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|arvados_keepweb_collectioncache_requests|counter|cache lookups|
+|arvados_keepweb_collectioncache_api_calls|counter|outgoing API calls|
+|arvados_keepweb_collectioncache_permission_hits|counter|collection-to-permission cache hits|
+|arvados_keepweb_collectioncache_pdh_hits|counter|UUID-to-PDH cache hits|
+|arvados_keepweb_collectioncache_hits|counter|PDH-to-manifest cache hits|
+|arvados_keepweb_collectioncache_cached_manifests|gauge|number of collections in the cache|
+|arvados_keepweb_collectioncache_cached_manifest_bytes|gauge|memory consumed by cached collection manifests|
+
+h2. Keepstore
+
+Keepstore exports metrics at @/status.json@ -- e.g., @http://keep0.zzzzz.arvadosapi.com:25107/status.json@.
+
+h3. Root
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Volumes|         array of "volumeStatusEnt":#volumeStatusEnt ||
+|BufferPool|      "PoolStatus":#PoolStatus ||
+|PullQueue|       "WorkQueueStatus":#WorkQueueStatus ||
+|TrashQueue|      "WorkQueueStatus":#WorkQueueStatus ||
+|RequestsCurrent| int ||
+|RequestsMax|     int ||
+|Version|         string ||
+
+h3(#volumeStatusEnt). volumeStatusEnt
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Label|         string||
+|Status|        "VolumeStatus":#VolumeStatus ||
+|VolumeStats|   "ioStats":#ioStats ||
+
+h3(#VolumeStatus). VolumeStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|MountPoint| string||
+|DeviceNum|  uint64||
+|BytesFree|  uint64||
+|BytesUsed|  uint64||
+
+h3(#ioStats). ioStats
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Errors|     uint64||
+|Ops|        uint64||
+|CompareOps| uint64||
+|GetOps|     uint64||
+|PutOps|     uint64||
+|TouchOps|   uint64||
+|InBytes|    uint64||
+|OutBytes|   uint64||
+
+h3(#PoolStatus). PoolStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|BytesAllocatedCumulative|      uint64||
+|BuffersMax|   int||
+|BuffersInUse| int||
+
+h3(#WorkQueueStatus). WorkQueueStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|InProgress| int||
+|Queued|     int||
+
+h3. Example response
+
+<pre>
+{
+  "Volumes": [
+    {
+      "Label": "[UnixVolume /var/lib/arvados/keep0]",
+      "Status": {
+        "MountPoint": "/var/lib/arvados/keep0",
+        "DeviceNum": 65029,
+        "BytesFree": 222532972544,
+        "BytesUsed": 435456679936
+      },
+      "InternalStats": {
+        "Errors": 0,
+        "InBytes": 1111,
+        "OutBytes": 0,
+        "OpenOps": 1,
+        "StatOps": 4,
+        "FlockOps": 0,
+        "UtimesOps": 0,
+        "CreateOps": 0,
+        "RenameOps": 0,
+        "UnlinkOps": 0,
+        "ReaddirOps": 0
+      }
+    }
+  ],
+  "BufferPool": {
+    "BytesAllocatedCumulative": 67108864,
+    "BuffersMax": 20,
+    "BuffersInUse": 0
+  },
+  "PullQueue": {
+    "InProgress": 0,
+    "Queued": 0
+  },
+  "TrashQueue": {
+    "InProgress": 0,
+    "Queued": 0
+  },
+  "RequestsCurrent": 1,
+  "RequestsMax": 40,
+  "Version": "dev"
+}
+</pre>
+
+h2. Node manager
+
+The node manager status endpoint provides a snapshot of internal status at the time of the most recent wishlist update.
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|nodes_booting|int|Number of nodes in booting state|
+|nodes_unpaired|int|Number of nodes in unpaired state|
+|nodes_busy|int|Number of nodes in busy state|
+|nodes_idle|int|Number of nodes in idle state|
+|nodes_fail|int|Number of nodes in fail state|
+|nodes_down|int|Number of nodes in down state|
+|nodes_shutdown|int|Number of nodes in shutdown state|
+|nodes_wish|int|Number of nodes in the current wishlist|
+|node_quota|int|Current node count ceiling due to cloud quota limits|
+|config_max_nodes|int|Configured max node count|
+
+h3. Example
+
+<pre>
+{
+  "actor_exceptions": 0,
+  "idle_times": {
+    "compute1": 0,
+    "compute3": 0,
+    "compute2": 0,
+    "compute4": 0
+  },
+  "create_node_errors": 0,
+  "destroy_node_errors": 0,
+  "nodes_idle": 0,
+  "config_max_nodes": 8,
+  "list_nodes_errors": 0,
+  "node_quota": 8,
+  "Version": "1.1.4.20180719160944",
+  "nodes_wish": 0,
+  "nodes_unpaired": 0,
+  "nodes_busy": 4,
+  "boot_failures": 0
+}
+</pre>
index 7a330a9638a094caee14e4b159bc1caa70598572..55f39f7d848356714b3190a6c3addc07168167dc 100644 (file)
@@ -30,6 +30,10 @@ Note to developers: Add new items at the top. Include the date, issue number, co
 TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
 {% endcomment %}
 
+h3. 2018-07-31: "#13497":https://dev.arvados.org/issues/13497 "db5107dca":https://dev.arvados.org/projects/arvados/repository/revisions/db5107dca adds a new system service, arvados-controller
+* "Install the controller":../install/install-controller.html after upgrading your system.
+* Verify your setup by confirming that API calls appear in the controller's logs (_e.g._, @journalctl -fu arvados-controller@) while loading a workbench page.
+
 h3. 2018-04-05: v1.1.4 regression in arvados-cwl-runner for workflows that rely on implicit discovery of secondaryFiles
 
 h4. Secondary files missing from toplevel workflow inputs
@@ -151,7 +155,7 @@ baseCommand: echo
 
 This bug will be fixed in an upcoming release of Arvados.
 
-h3. 2017-12-08: #11908 commit:8f987a9271 now requires minimum of Postgres 9.4 (previously 9.3)
+h3. 2017-12-08: "#11908":https://dev.arvados.org/issues/11908 "8f987a9271":https://dev.arvados.org/projects/arvados/repository/revisions/8f987a9271 now requires minimum of Postgres 9.4 (previously 9.3)
 * Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
 * Ubuntu 16.04 (pg 9.5) does not require an upgrade
 * Ubuntu 14.04 (pg 9.3) requires upgrade to Postgres 9.4: https://www.postgresql.org/download/linux/ubuntu/
@@ -160,7 +164,7 @@ h3. 2017-12-08: #11908 commit:8f987a9271 now requires minimum of Postgres 9.4 (p
 *# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
 *# Restore from the backup using @psql@
 
-h3. 2017-09-25: #12032 commit:68bdf4cbb now requires minimum of Postgres 9.3 (previously 9.1)
+h3. 2017-09-25: "#12032":https://dev.arvados.org/issues/12032 "68bdf4cbb":https://dev.arvados.org/projects/arvados/repository/revisions/68bdf4cbb now requires minimum of Postgres 9.3 (previously 9.1)
 * Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
 * Ubuntu 16.04 (pg 9.5) does not require an upgrade
 * Ubuntu 14.04 (pg 9.3) is compatible, however upgrading to Postgres 9.4 is recommended: https://www.postgresql.org/download/linux/ubuntu/
@@ -169,21 +173,21 @@ h3. 2017-09-25: #12032 commit:68bdf4cbb now requires minimum of Postgres 9.3 (pr
 *# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
 *# Restore from the backup using @psql@
 
-h3. 2017-06-30: #11807 commit:55aafbb converts old "jobs" database records from YAML to JSON, making the upgrade process slower than usual.
-* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 commit:660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.
+h3. 2017-06-30: "#11807":https://dev.arvados.org/issues/11807 "55aafbb":https://dev.arvados.org/projects/arvados/repository/revisions/55aafbb converts old "jobs" database records from YAML to JSON, making the upgrade process slower than usual.
+* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.
 * The conversion runs as a database migration, i.e., during the deb/rpm package upgrade process, while your API server is unavailable.
 * Expect it to take about 1 minute per 20K jobs that have ever been created/run.
 
-h3. 2017-06-05: #9005 commit:cb230b0 reduces service discovery overhead in keep-web requests.
+h3. 2017-06-05: "#9005":https://dev.arvados.org/issues/9005 "cb230b0":https://dev.arvados.org/projects/arvados/repository/revisions/cb230b0 reduces service discovery overhead in keep-web requests.
 * When upgrading keep-web _or keepproxy_ to/past this version, make sure to update API server as well. Otherwise, a bad token in a request can cause keep-web to fail future requests until either keep-web restarts or API server gets upgraded.
 
-h3. 2017-04-12: #11349 commit:2c094e2 adds a "management" http server to nodemanager.
+h3. 2017-04-12: "#11349":https://dev.arvados.org/issues/11349 "2c094e2":https://dev.arvados.org/projects/arvados/repository/revisions/2c094e2 adds a "management" http server to nodemanager.
 * To enable it, add to your configuration file: <pre>[Manage]
   address = 127.0.0.1
   port = 8989</pre> (see example configuration files in source:services/nodemanager/doc or https://doc.arvados.org/install/install-nodemanager.html for more info)
 * The server responds to @http://{address}:{port}/status.json@ with a summary of how many nodes are in each state (booting, busy, shutdown, etc.)
 
-h3. 2017-03-23: #10766 commit:e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.
+h3. 2017-03-23: "#10766":https://dev.arvados.org/issues/10766 "e8cc0d7":https://dev.arvados.org/projects/arvados/repository/revisions/e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.
 * See http://doc.arvados.org/install/install-ws.html for install/upgrade instructions.
 * Remove the old puma server after the upgrade is complete. Example, with runit: <pre>
 $ sudo sv down /etc/sv/puma
@@ -193,17 +197,17 @@ $ systemctl disable puma
 $ systemctl stop puma
 </pre>
 
-h3. 2017-03-06: #11168 commit:660a614 uses JSON instead of YAML to encode hashes and arrays in the database.
+h3. 2017-03-06: "#11168":https://dev.arvados.org/issues/11168 "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 uses JSON instead of YAML to encode hashes and arrays in the database.
 * Aside from a slight performance improvement, this should have no externally visible effect.
 * Downgrading past this version is not supported, and is likely to cause errors. If this happens, the solution is to upgrade past this version.
 * After upgrading, make sure to restart puma and crunch-dispatch-* processes.
 
-h3. 2017-02-03: #10969 commit:74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.
+h3. 2017-02-03: "#10969":https://dev.arvados.org/issues/10969 "74a9dec":https://dev.arvados.org/projects/arvados/repository/revisions/74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.
 * If your compute nodes run a version of *docker older than 1.10* you must override the default by adding to your API server configuration (@/etc/arvados/api/application.yml@): <pre><code class="yaml">docker_image_formats: ["v1"]</code></pre>
-* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue #10969 for more detail.
+* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue "#10969":https://dev.arvados.org/issues/10969 for more detail.
 * *NOTE:* This does *not* include any support for migrating existing Docker images from v1 to v2 format. This will come later: for now, sites running Docker 1.9 or earlier should still *avoid upgrading Docker further than 1.9.*
 
-h3. 2016-09-27: several Debian and RPM packages -- keep-balance (commit:d9eec0b), keep-web (commit:3399e63), keepproxy (commit:6de67b6), and arvados-git-httpd (commit:9e27ddf) -- now enable their respective components using systemd. These components prefer YAML configuration files over command line flags (commit:3bbe1cd).
+h3. 2016-09-27: several Debian and RPM packages -- keep-balance ("d9eec0b":https://dev.arvados.org/projects/arvados/repository/revisions/d9eec0b), keep-web ("3399e63":https://dev.arvados.org/projects/arvados/repository/revisions/3399e63), keepproxy ("6de67b6":https://dev.arvados.org/projects/arvados/repository/revisions/6de67b6), and arvados-git-httpd ("9e27ddf":https://dev.arvados.org/projects/arvados/repository/revisions/9e27ddf) -- now enable their respective components using systemd. These components prefer YAML configuration files over command line flags ("3bbe1cd":https://dev.arvados.org/projects/arvados/repository/revisions/3bbe1cd).
 * On Debian-based systems using systemd, services are enabled automatically when packages are installed.
 * On RedHat-based systems using systemd, unit files are installed but services must be enabled explicitly: e.g., <code>"sudo systemctl enable keep-web; sudo systemctl start keep-web"</code>.
 * The new systemd-supervised services will not start up successfully until configuration files are installed in /etc/arvados/: e.g., <code>"Sep 26 18:23:55 62751f5bb946 keep-web[74]: 2016/09/26 18:23:55 open /etc/arvados/keep-web/keep-web.yml: no such file or directory"</code>
@@ -218,33 +222,33 @@ h3. 2016-09-27: several Debian and RPM packages -- keep-balance (commit:d9eec0b)
 ** keepproxy - /etc/arvados/keepproxy/keepproxy.yml
 ** arvados-git-httpd - /etc/arvados/arv-git-httpd/arv-git-httpd.yml
 
-h3. 2016-05-31: commit:ae72b172c8 and commit:3aae316c25 install Python modules and scripts to different locations on the filesystem.
-* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection).  Now they get installed to a path under @/usr@.  This improves compatibility with other Python packages provided by the distribution.  See #9242 for more background.
+h3. 2016-05-31: "ae72b172c8":https://dev.arvados.org/projects/arvados/repository/revisions/ae72b172c8 and "3aae316c25":https://dev.arvados.org/projects/arvados/repository/revisions/3aae316c25 install Python modules and scripts to different locations on the filesystem.
+* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection).  Now they get installed to a path under @/usr@.  This improves compatibility with other Python packages provided by the distribution.  See "#9242":https://dev.arvados.org/issues/9242 for more background.
 * If you simply import Python modules from scripts, or call Python tools relying on $PATH, you don't need to make any changes.  If you have hardcoded full paths to some of these files (e.g., in symbolic links or configuration files), you will need to update those paths after this upgrade.
 
-h3. 2016-04-25: commit:eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.
+h3. 2016-04-25: "eebcb5e":https://dev.arvados.org/projects/arvados/repository/revisions/eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.
 * On each Debian-based compute node and shell node, run: @sudo apt-get install crunchrunner@
 * On each Red Hat-based compute node and shell node, run: @sudo yum install crunchrunner@
 
-h3. 2016-04-21: commit:3c88abd changes the Keep permission signature algorithm.
+h3. 2016-04-21: "3c88abd":https://dev.arvados.org/projects/arvados/repository/revisions/3c88abd changes the Keep permission signature algorithm.
 * All software components that generate signatures must be upgraded together. These are: keepstore, API server, keep-block-check, and keep-rsync. For example, if keepstore < 0.1.20160421183420 but API server >= 0.1.20160421183420, clients will not be able to read or write data in Keep.
 * Jobs and client operations that are in progress during the upgrade (including arv-put's "resume cache") will fail.
 
-h3. 2015-01-05: commit:e1276d6e disables Workbench's "Getting Started" popup by default.
+h3. 2015-01-05: "e1276d6e":https://dev.arvados.org/projects/arvados/repository/revisions/e1276d6e disables Workbench's "Getting Started" popup by default.
 * If you want new users to continue seeing this popup, set @enable_getting_started_popup: true@ in Workbench's @application.yml@ configuration.
 
-h3. 2015-12-03: commit:5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see #7751)
-* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge commit:346a558
+h3. 2015-12-03: "5590c9ac":https://dev.arvados.org/projects/arvados/repository/revisions/5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see "#7751":https://dev.arvados.org/issues/7751)
+* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge "346a558":https://dev.arvados.org/projects/arvados/repository/revisions/346a558
 * Jobs will fail if the API server (in particular crunch-job from the arvados-cli gem) is upgraded without upgrading arvados-fuse on compute nodes.
 
-h3. 2015-11-11: commit:1e2ace5 changes recommended config for keep-web (see #5824)
+h3. 2015-11-11: "1e2ace5":https://dev.arvados.org/projects/arvados/repository/revisions/1e2ace5 changes recommended config for keep-web (see "#5824":https://dev.arvados.org/issues/5824)
 * proxy/dns/ssl config should be updated to route "https://download.uuid_prefix.arvadosapi.com/" requests to keep-web (alongside the existing "collections" routing)
 * keep-web command line adds @-attachment-only-host download.uuid_prefix.arvadosapi.com@
 * Workbench config adds @keep_web_download_url@
 * More info on the (still beta/non-TOC-linked) "keep-web doc page":http://doc.arvados.org/install/install-keep-web.html
 
-h3. 2015-11-04: commit:1d1c6de removes stopped containers (see #7444)
+h3. 2015-11-04: "1d1c6de":https://dev.arvados.org/projects/arvados/repository/revisions/1d1c6de removes stopped containers (see "#7444":https://dev.arvados.org/issues/7444)
 * arvados-docker-cleaner removes _all_ docker containers as soon as they exit, effectively making @docker run@ default to @--rm@. If you run arvados-docker-cleaner on a host that does anything other than run crunch-jobs, and you still want to be able to use @docker start@, read the "new doc page":http://doc.arvados.org/install/install-compute-node.html to learn how to turn this off before upgrading.
 
-h3. 2015-11-04: commit:21006cf adds a keep-web service (see #5824)
+h3. 2015-11-04: "21006cf":https://dev.arvados.org/projects/arvados/repository/revisions/21006cf adds a keep-web service (see "#5824":https://dev.arvados.org/issues/5824)
 * Nothing relies on it yet, but early adopters can install it now by following http://doc.arvados.org/install/install-keep-web.html (it is not yet linked in the TOC).
index 3c7347dd60bd8c61a75f1a77929227da4750dea4..cada9ab1b88ac226231633a8a3b43f56cf735a5b 100644 (file)
@@ -22,6 +22,34 @@ h2. Container API
 
 !(full-width){{site.baseurl}}/images/Crunch_dispatch.svg!
 
+h2(#RAM). Understanding RAM requests for containers
+
+The @runtime_constraints@ section of a container specifies working RAM (@ram@) and Keep cache (@keep_cache_ram@).  If not specified, containers get a default Keep cache (@container_default_keep_cache_ram@, default 256 MiB).  The total RAM requested for a container is the sum of working RAM, Keep cache, and an additional RAM reservation configured by the admin (@ReserveExtraRAM@ in the dispatcher configuration, default zero).
+
+The total RAM request is used to schedule containers onto compute nodes.  RAM allocation limits are enforced using kernel controls such as cgroups.  A container that requests 1 GiB of RAM will only be permitted to allocate up to 1 GiB of RAM, even if scheduled on a 4 GiB node.  On HPC systems, a multi-core node may run multiple containers at a time.
+
+When running on the cloud, the memory request (along with CPU and disk) is used to select (and possibly boot) an instance type with adequate resources to run the container.  Instance type RAM is derated 5% from the published specification to accommodate virtual machine, kernel, and system service overhead.
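+
+For example, the @runtime_constraints@ of a container requesting 3 GiB of working RAM and the default Keep cache might look like this (a sketch; values are in bytes, and @vcpus@ is shown only for context):
+
+<pre>
+"runtime_constraints": {
+  "vcpus": 1,
+  "ram": 3221225472,
+  "keep_cache_ram": 268435456
+}
+</pre>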
+
+h3. Calculate minimum instance type RAM for a container
+
+    (RAM request + Keep cache + ReserveExtraRAM) * (100/95)
+
+For example, for a 3 GiB request, default Keep cache, and no extra RAM reserved:
+
+    (3072 + 256) * 1.0526 = 3503 MiB
+
+To run this container, the instance type must have a published RAM size of at least 3503 MiB.
+
+h3. Calculate the maximum requestable RAM for an instance type
+
+    (Instance type RAM * (95/100)) - Keep cache - ReserveExtraRAM
+
+For example, for a 3.75 GiB node, default Keep cache, and no extra RAM reserved:
+
+    (3840 * 0.95) - 256 = 3392 MiB
+
+To run on this instance type, the container can request at most 3392 MiB of working RAM.
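+
+You can sanity-check both formulas with shell arithmetic (an illustration only; all values are in MiB and integer division truncates):
+
+<pre>
+# minimum instance type RAM for a 3 GiB request with default Keep cache
+echo $(( (3072 + 256) * 100 / 95 ))    # prints 3503
+# maximum requestable RAM on a 3.75 GiB (3840 MiB) instance type
+echo $(( 3840 * 95 / 100 - 256 ))      # prints 3392
+</pre>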
+
 h2. Job API (deprecated)
 
 # To submit work, create a "job":{{site.baseurl}}/api/methods/jobs.html .  If the same job has been submitted in the past, it will return an existing job in @Completed@ state.
index e87bc51ad4a590b4102fd4f1047c9b878de466a2..c50366d4de6c4a14b5e1203283301a2938d54623 100644 (file)
@@ -125,3 +125,24 @@ table(table table-bordered table-condensed).
 |_. Argument |_. Type |_. Description |_. Location |_. Example |
 {background:#ccffcc}.|uuid|string|The UUID of the Group to untrash.|path||
 |ensure_unique_name|boolean (default false)|Rename project uniquely if untrashing it would fail with a unique name conflict.|query||
+
+h3. shared
+
+This endpoint returns the top-level set of groups to which access is granted through a chain of one or more permission links, rather than through direct ownership by the current user account.  It is useful for clients that wish to browse the list of projects the user has permission to read that are not part of the "home" project tree.
+
+When called with @include=owner_uuid@, this endpoint also returns (in the @included@ field of the response) the objects that own those projects (users or non-project groups).
+
+Specifically, the logic is:
+
+<pre>
+select groups that are readable by current user AND
+    (the owner_uuid is a user (but not the current user) OR
+     the owner_uuid is not readable by the current user OR
+     the owner_uuid is a group but group_class is not a project)
+</pre>
+
+In addition to the @include@ parameter, this endpoint also supports the same parameters as the "list method":{{site.baseurl}}/api/methods.html#index.
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query|?include=owner_uuid|
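+
+For example (a sketch; set @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ for your cluster):
+
+<pre>
+curl -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
+  "https://$ARVADOS_API_HOST/arvados/v1/groups/shared?include=owner_uuid"
+</pre>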
index 543a14de0baff833edd73f63a76677c2f50fa055..ff4a58e12c6ed6bd0e0a4af38dcd7ee884839178 100644 (file)
@@ -29,3 +29,12 @@ table.code tr td:nth-child(2) {
 .userinput {
     color: #d14;
 }
+
+table.CodeRay {
+    margin-left: 3em;
+    width: calc(100% - 6em);
+}
+
+td.line-numbers {
+    width: 2em;
+}
index 0bd2ec7f0c4a55ee8755643c4d4cc22a4a2935ec..73a1119f36253b5d010fc454f72cc34a22545783 100644 (file)
@@ -5,3 +5,11 @@ SPDX-License-Identifier: CC-BY-SA-3.0 */
 img.full-width {
     width: 100%
 }
+
+img.screenshot {
+    max-width: calc(100% - 2em);
+    border: 3px;
+    border-style: solid;
+    margin-left: 2em;
+    margin-bottom: 2em;
+}
index afff1f45424ca9e29272afe158782dcc09c597fb..562b76ddf0a01855d066f8c1c0724a78e33fcd71 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: admin
-title: User management
+title: User management at the CLI
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
index 4961a05b56f025fc18d58f3b8ce95ee977285d58..fa497c93de484e1a5ab094d2914cf802ffb6c6bd 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Copy pipeline from Curoverse cloud
+title: Copy pipeline from the Arvados Playground
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -9,27 +9,27 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This tutorial describes how to find and copy a publicly shared pipeline from Curoverse cloud. Please note that you can use similar steps to copy any template you can access from Curoverse cloud to your cluster.
+This tutorial describes how to find and copy a publicly shared pipeline from the Arvados Playground. Please note that you can use similar steps to copy any template you can access from the Arvados Playground to your cluster.
 
-h3. Access a public pipeline in Curoverse cloud using Workbench
+h3. Access a public pipeline in the Arvados Playground using Workbench
 
-Curoverse cloud provides access to some public data, which can be used to experience Arvados in action. Let's access a public pipeline and copy it to your cluster, so that you can run it in your environment.
+the Arvados Playground provides access to some public data, which can be used to experience Arvados in action. Let's access a public pipeline and copy it to your cluster, so that you can run it in your environment.
 
-Start by visiting the "*Curoverse public projects page*":https://cloud.curoverse.com/projects/public. This page lists all the publicly accessible projects in this arvados installation. Click on one of these projects to open it. We will use "*lobSTR v.3 (Public)*":https://cloud.curoverse.com/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq as the example in this tutorial.
+Start by visiting the "*Arvados Playground public projects page*":https://playground.arvados.org/projects/public. This page lists all the publicly accessible projects in this arvados installation. Click on one of these projects to open it. We will use "*lobSTR v.3 (Public)*":https://playground.arvados.org/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq as the example in this tutorial.
 
-Once in the "*lobSTR v.3 (Public)*":https://cloud.curoverse.com/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq project, click on the *Pipeline templates* tab. In the pipeline templates tab, you will see a template named *lobSTR v.3*. Click on the <span class="fa fa-lg fa-gears"></span> *Show* button to the left of this name. This will take to you to the "*lobSTR v.3*":https://cloud.curoverse.com/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu template page.
+Once in the "*lobSTR v.3 (Public)*":https://playground.arvados.org/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq project, click on the *Pipeline templates* tab. In the pipeline templates tab, you will see a template named *lobSTR v.3*. Click on the <span class="fa fa-lg fa-gears"></span> *Show* button to the left of this name. This will take to you to the "*lobSTR v.3*":https://playground.arvados.org/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu template page.
 
 Once in this page, you can take the *uuid* of this template from the address bar, which is *qr1hi-p5p6p-9pkaxt6qjnkxhhu*. Next, we will copy this template to your Arvados instance.
 
-h3. Copying a pipeline template from Curoverse cloud to your cluster
+h3. Copying a pipeline template from the Arvados Playground to your cluster
 
-As described above, navigate to the publicly shared pipeline template "*lobSTR v.3*":https://cloud.curoverse.com/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu using Curoverse Workbench.  We will now copy this template with uuid *qr1hi-p5p6p-9pkaxt6qjnkxhhu* to your cluster.
+As described above, navigate to the publicly shared pipeline template "*lobSTR v.3*":https://playground.arvados.org/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu on the Arvados Playground.  We will now copy this template with uuid *qr1hi-p5p6p-9pkaxt6qjnkxhhu* to your cluster.
 
 {% include 'tutorial_expectations' %}
 
 We will use the Arvados *arv-copy* command to copy this template to your cluster. In order to use arv-copy, first you need to setup the source and destination cluster configuration files. Here, *qr1hi* would be the source cluster and your Arvados instance would be the *dst_cluster*.
 
-During this setup, if you have an account in Curoverse cloud, you can use "your access token":#using-your-token to create the source configuration file. If you do not have an account in Curoverse cloud, you can use the "anonymous access token":#using-anonymous-token for the source cluster configuration.
+During this setup, if you have an account in the Arvados Playground, you can use "your access token":#using-your-token to create the source configuration file. If you do not have an account in the Arvados Playground, you can use the "anonymous access token":#using-anonymous-token for the source cluster configuration.
 
 h4(#using-anonymous-token). *Configuring source and destination setup files using anonymous access token*
 
@@ -53,7 +53,7 @@ You can now copy the pipeline template from *qr1hi* to *your cluster*. Replace *
 
 h4(#using-your-token). *Configuring source and destination setup files using personal access token*
 
-If you already have an account in Curoverse cloud, you can follow the instructions in the "*Using arv-copy*":http://doc.arvados.org/user/topics/arv-copy.html user guide to get your *Current token* for source and destination clusters, and use them to create the source *qr1hi.conf* and dst_cluster.conf configuration files.
+If you already have an account in the Arvados Playground, you can follow the instructions in the "*Using arv-copy*":http://doc.arvados.org/user/topics/arv-copy.html user guide to get your *Current token* for source and destination clusters, and use them to create the source *qr1hi.conf* and dst_cluster.conf configuration files.
 
 You can now copy the pipeline template from *qr1hi* to *your cluster* with or without recursion. Replace *dst_cluster* with the *uuid_prefix* of your cluster.
 
diff --git a/doc/install/create-standard-objects.html.textile.liquid b/doc/install/create-standard-objects.html.textile.liquid
deleted file mode 100644 (file)
index 8ac3fb0..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Create standard objects
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-In these steps we use the Arvados CLI tools on the <strong>shell server</strong> to create a few Arvados objects. The CLI tools require an ARVADOS_API_TOKEN environment variable with a valid admin token. If you haven't already done so, set that up as shown in the "API token guide":../user/reference/api-tokens.html.
-
-h3. Arvados repository
-
-Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
-
-<notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
-~$ <span class="userinput">all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"</span>
-~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
-~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
-</code></pre></notextile>
-
-Create a link object to make the repository object readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
-
-<notextile>
-<pre><code>~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
-<span class="userinput">{
- "tail_uuid":"$all_users_group_uuid",
- "head_uuid":"$repo_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF</span>
-</code></pre></notextile>
-
-In a couple of minutes, your arvados-git-sync cron job will create an empty repository on your git server. Seed it with the real arvados repository. If your git credential helpers were configured correctly when you "set up your shell server":install-shell-server.html, the "git push" command will use your API token instead of prompting you for a username and password.
-
-<notextile>
-<pre><code>~$ <span class="userinput">cd /tmp</span>
-/tmp$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git</span>
-/tmp <span class="userinput">git --git-dir arvados.git push https://git.<b>uuid_prefix.your.domain</b>/arvados.git '*:*'</span>
-</code></pre>
-</notextile>
-
-If you did not set up a HTTPS service, you can push to <code>git@git.uuid_prefix.your.domain:arvados.git</code> using your SSH key, or by logging in to your git server and using sudo.
-
-<notextile>
-<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
-git@gitserver:~$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git /tmp/arvados.git</span>
-git@gitserver:~$ <span class="userinput">cd /tmp/arvados.git</span>
-git@gitserver:/tmp/arvados.git$ <span class="userinput">gitolite push /var/lib/arvados/git/repositories/<b>your_arvados_repo_uuid</b>.git '*:*'</span>
-</code></pre>
-</notextile>
-
-h3. Default project for docker images
-
-Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
-
-<notextile>
-<pre><code>~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
-~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
-~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
-<span class="userinput">{
- "tail_uuid":"$all_users_group_uuid",
- "head_uuid":"$project_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF</span>
-</code></pre></notextile>
-
-h3. Download and tag the latest arvados/jobs docker image
-
-The @arvados-cwl-runner@ needs access to an arvados/jobs image that is tagged as 'latest'. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as 'latest'.
-
-<notextile>
-<pre><code>~$ <span class="userinput">arv-keepdocker --pull arvados/jobs latest</span>
-</code></pre></notextile>
-
-If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
index c69d18b8e4bd2b0b8e3a19802982fdc284eb0e42..e1593a430a9f89b369e1c67e73f41a6705aa6ce4 100644 (file)
@@ -9,8 +9,6 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-h2(#slurm). Set up SLURM
-
 On the API server, install SLURM and munge, and generate a munge key.
 
 On Debian-based systems:
index a47d30ac153c1924af59f916dbb3fc2afbf2d7b0..c31b2ed43c89b92b5ef8c5c15c8abdd4dd185cbe 100644 (file)
@@ -9,19 +9,19 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados components run on GNU/Linux systems, and do not depend on any particular cloud operating stack.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
+Arvados components run on GNU/Linux systems and support multiple cloud operating stacks.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
 
 Arvados components can be installed and configured in a number of different ways.
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-||||\6=. _Appropriate for_|
-||_Ease of installation_|_Multiuser/Networked_|_Workflow Development_|_Workflow Testing_|_Large Scale Production_|_Developing Arvados_|_Arvados Software Development Testing_|
-|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|no|no|no|yes|yes|
-|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|no ^2^|no ^2^|no ^2^|no|yes|
-|"Manual installation":install-manual-prerequisites.html|Complex|yes|yes|yes|yes|no|no|
-|"Cloud demo":https://cloud.curoverse.com by Veritas Genetics|N/A ^3^|yes|no|no|no|no|no|
-|"Cluster Operation Subscription":https://curoverse.com/products by Veritas Genetics|N/A ^3^|yes|yes|yes|yes|yes|yes|
+|||\5=. Appropriate for|
+||_. Ease of setup|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados System Testing|
+|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
+|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
+|"Manual installation":install-manual-prerequisites.html|Complicated|yes|yes|yes|no|no|
+|"Arvados Playground":https://playground.arvados.org hosted by Veritas Genetics|N/A ^3^|yes|yes|no|no|no|
+|"Cluster Operation Subscription":https://curoverse.com/products supported by Veritas Genetics|N/A ^3^|yes|yes|yes|yes|yes|
 </div>
 
 * ^1^ Assumes a Kubernetes cluster is available
index 7201460ddeeefc03e22f63479afb485bb76dfd35..a25942fe24824e3f5dfe36b22abe13014931ca92 100644 (file)
@@ -158,16 +158,53 @@ Example @application.yml@:
 </code></pre>
 </notextile>
 
+h3(#disable_api_methods). disable_api_methods
+
+Set the @disable_api_methods@ configuration option to disable the deprecated @jobs@ API.  This will prevent users from accidentally submitting jobs that won't run.  "All new installations should use the containers API.":crunch2-slurm/install-prerequisites.html
+
+<notextile>
+<pre><code>
+  disable_api_methods:
+    - jobs.create
+    - pipeline_instances.create
+    - pipeline_templates.create
+    - jobs.get
+    - pipeline_instances.get
+    - pipeline_templates.get
+    - jobs.list
+    - pipeline_instances.list
+    - pipeline_templates.list
+    - jobs.index
+    - pipeline_instances.index
+    - pipeline_templates.index
+    - jobs.update
+    - pipeline_instances.update
+    - pipeline_templates.update
+    - jobs.queue
+    - jobs.queue_size
+    - job_tasks.create
+    - job_tasks.get
+    - job_tasks.list
+    - job_tasks.index
+    - job_tasks.update
+    - jobs.show
+    - pipeline_instances.show
+    - pipeline_templates.show
+    - job_tasks.show
+</code></pre>
+</notextile>
+
 h2(#set_up). Set up Nginx and Passenger
 
 The Nginx server will serve API requests using Passenger. It will also be used to proxy SSL requests to other services which are covered later in this guide.
 
 First, "Install Nginx and Phusion Passenger":https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/install_passenger_main.html.
 
-Edit the http section of your Nginx configuration to run the Passenger server, and serve SSL requests. Add a block like the following, adding SSL and logging parameters to taste:
+Edit the http section of your Nginx configuration to run the Passenger server. Add a block like the following, adding SSL and logging parameters to taste:
 
 <notextile>
-<pre><code>server {
+<pre><code>server {
   listen 127.0.0.1:8000;
   server_name localhost-api;
 
@@ -202,33 +239,6 @@ geo $external_client {
   default        1;
   <span class="userinput">10.20.30.0/24</span>  0;
 }
-
-server {
-  listen       <span class="userinput">[your public IP address]</span>:443 ssl;
-  server_name  <span class="userinput">uuid_prefix.your.domain</span>;
-
-  ssl on;
-  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
-  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
-
-  index  index.html index.htm index.php;
-
-  # Refer to the comment about this setting in the server section above.
-  client_max_body_size 128m;
-
-  location / {
-    proxy_pass            http://api;
-    proxy_redirect        off;
-    proxy_connect_timeout 90s;
-    proxy_read_timeout    300s;
-
-    proxy_set_header      X-Forwarded-Proto https;
-    proxy_set_header      Host $http_host;
-    proxy_set_header      X-External-Client $external_client;
-    proxy_set_header      X-Real-IP $remote_addr;
-    proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
-  }
-}
 </code></pre>
 </notextile>
 
index 2a4d103c7bfd84ea9ecead8515715edd664fcd4d..7fc332177dcf812a1f44f3f5554d9d8583844952 100644 (file)
@@ -19,6 +19,7 @@ The git hosting setup involves three components.
 It is not strictly necessary to deploy _both_ SSH and HTTPS access, but we recommend deploying both:
 * SSH is a more appropriate way to authenticate from a user's workstation because it does not require managing tokens on the client side;
 * HTTPS is a more appropriate way to authenticate from a shell VM because it does not depend on SSH agent forwarding (SSH clients' agent forwarding features tend to behave as if the remote machine is fully trusted).
+* HTTPS is also used by Arvados Composer to access git repositories from the browser.
 
 The HTTPS instructions given below will not work if you skip the SSH setup steps.
 
@@ -338,3 +339,47 @@ Restart Nginx to make the Nginx and API server configuration changes take effect
 <pre><code>gitserver:~$ <span class="userinput">sudo nginx -s reload</span>
 </code></pre>
 </notextile>
+
+h2. Clone Arvados repository
+
+Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
+~$ <span class="userinput">all_users_group_uuid="$uuid_prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
+~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
+</code></pre></notextile>
+
+Create a link object to make the repository object readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
+
+<notextile>
+<pre><code>~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+In a couple of minutes, your arvados-git-sync cron job will create an empty repository on your git server. Seed it with the real arvados repository. If your git credential helpers were configured correctly when you "set up your shell server":install-shell-server.html, the "git push" command will use your API token instead of prompting you for a username and password.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd /tmp</span>
+/tmp$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git</span>
+/tmp$ <span class="userinput">git --git-dir arvados.git push https://git.<b>uuid_prefix.your.domain</b>/arvados.git '*:*'</span>
+</code></pre>
+</notextile>
+
+If you did not set up an HTTPS service, you can push to <code>git@git.uuid_prefix.your.domain:arvados.git</code> using your SSH key, or by logging in to your git server and using sudo.
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
+git@gitserver:~$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git /tmp/arvados.git</span>
+git@gitserver:~$ <span class="userinput">cd /tmp/arvados.git</span>
+git@gitserver:/tmp/arvados.git$ <span class="userinput">gitolite push /var/lib/arvados/git/repositories/<b>your_arvados_repo_uuid</b>.git '*:*'</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/install-components.html.textile.liquid b/doc/install/install-components.html.textile.liquid
new file mode 100644 (file)
index 0000000..b21c4bd
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: installguide
+title: Choosing which components to install
+...
+
+Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality).  It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture to understand how these components interact.
+
+table(table table-bordered table-condensed).
+|\3=. *Core*|
+|"Postgres database":install-postgresql.html |Stores data for the API server.|Required.|
+|"API server":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
+|\3=. *Keep (storage)*|
+|"Keepstore":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|
+|"Keepproxy":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|
+|"Keep-web":install-keep-web.html |Gateway service providing read/write HTTP and WebDAV support on top of Keep.|Required to be able to download files from Keep over plain HTTP in Workbench.|
+|"Keep-balance":install-keep-balance.html |Storage cluster maintenance daemon responsible for moving blocks to their optimal server location, adjusting block replication levels, and trashing unreferenced blocks.|Required to free deleted data from underlying storage, and to ensure proper replication and block distribution (including support for storage classes).|
+|\3=. *User interface*|
+|"Single Sign On server":install-sso.html |Login server.|Required for web based login to Workbench.|
+|"Workbench":install-workbench-app.html |Primary graphical user interface for working with file collections and running containers.|Optional.  Depends on API server, SSO server, keep-web, websockets server.|
+|"Workflow Composer":install-composer.html |Graphical user interface for editing Common Workflow Language workflows.|Optional.  Depends on git server (arv-git-httpd).|
+|\3=. *Additional services*|
+|"Websockets server":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|
+|"Shell server":install-shell-server.html |Synchronize (create/delete/configure) Unix shell accounts with Arvados users.|Optional.|
+|"Git server":install-arv-git-httpd.html |Arvados-hosted git repositories, with Arvados-token based authentication.|Optional, but required by Workflow Composer.|
+|\3=. *Crunch (running containers)*|
+|"crunch-dispatch-slurm":crunch2-slurm/install-prerequisites.html |Run analysis workflows using Docker containers distributed across a SLURM cluster.|Optional if you wish to use Arvados for data management only.|
+|"Node Manager":install-nodemanager.html |Allocate and free cloud VM instances on demand based on workload.|Optional, not needed for a static SLURM cluster (such as on-premise HPC).|
diff --git a/doc/install/install-controller.html.textile.liquid b/doc/install/install-controller.html.textile.liquid
new file mode 100644 (file)
index 0000000..ccb8d98
--- /dev/null
@@ -0,0 +1,180 @@
+---
+layout: default
+navsection: installguide
+title: Install the controller
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The arvados-controller service must be installed on your API server node.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-controller</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-controller</span>
+</code></pre>
+</notextile>
+
+Verify the @arvados-controller@ program is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-controller -h</span>
+Usage:
+  -config file
+[...]
+</code></pre>
+</notextile>
+
+h3. Configure Nginx to route requests to the controller
+
+Add @upstream@ and @server@ definitions inside the @http@ section of your Nginx configuration using the following template.
+
+{% include 'notebox_begin' %}
+
+If you are adding arvados-controller to an existing system as part of the upgrade procedure, do not add a new "server" part here. Instead, add only the "upstream" part as shown here, and update your existing "server" section by changing its @proxy_pass@ directive from @http://api@ to @http://controller@.
+
+{% include 'notebox_end' %}
+
+<notextile>
+<pre><code>upstream controller {
+  server     127.0.0.1:9004  fail_timeout=10s;
+}
+
+server {
+  listen       <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name  <span class="userinput">uuid_prefix.your.domain</span>;
+
+  ssl on;
+  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  # Refer to the comment about this setting in the passenger (arvados
+  # api server) section of your Nginx configuration.
+  client_max_body_size 128m;
+
+  location / {
+    proxy_pass            http://controller;
+    proxy_redirect        off;
+    proxy_connect_timeout 90s;
+    proxy_read_timeout    300s;
+
+    proxy_set_header      X-Forwarded-Proto https;
+    proxy_set_header      Host $http_host;
+    proxy_set_header      X-External-Client $external_client;
+    proxy_set_header      X-Real-IP $remote_addr;
+    proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
+</code></pre>
+</notextile>
+
+Restart Nginx to apply the new configuration.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>
+
+h3. Configure arvados-controller
+
+Create the cluster configuration file @/etc/arvados/config.yml@ using the following template.
+
+<notextile>
+<pre><code>Clusters:
+  <span class="userinput">uuid_prefix</span>:
+    NodeProfiles:
+      apiserver:
+        arvados-controller:
+          Listen: ":<span class="userinput">9004</span>" # must match the "upstream controller" section of your Nginx config
+        arvados-api-server:
+          Listen: ":<span class="userinput">8000</span>" # must match the "upstream api" section of your Nginx config
+    PostgreSQL:
+      ConnectionPool: 128
+      Connection:
+        host: localhost
+        dbname: arvados_production
+        user: arvados
+        password: <span class="userinput">xxxxxxxx</span>
+        sslmode: require
+</code></pre>
+</notextile>
+
+Create the host configuration file @/etc/arvados/environment@.
+
+<notextile>
+<pre><code>ARVADOS_NODE_PROFILE=apiserver
+</code></pre>
+</notextile>
+
+h3. Start the service (option 1: systemd)
+
+If your system does not use systemd, skip this section and follow the "runit instructions":#runit instead.
+
+If your system uses systemd, the arvados-controller service should already be set up. Restart it to load the new configuration file, and check its status:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart arvados-controller</span>
+~$ <span class="userinput">sudo systemctl status arvados-controller</span>
+&#x25cf; arvados-controller.service - Arvados controller
+   Loaded: loaded (/lib/systemd/system/arvados-controller.service; enabled; vendor preset: enabled)
+   Active: active (running) since Tue 2018-07-31 13:17:44 UTC; 3s ago
+     Docs: https://doc.arvados.org/
+ Main PID: 25066 (arvados-control)
+   CGroup: /system.slice/arvados-controller.service
+           └─25066 /usr/bin/arvados-controller
+
+Jul 31 13:17:44 zzzzz systemd[1]: Starting Arvados controller...
+Jul 31 13:17:44 zzzzz arvados-controller[25191]: {"Listen":"[::]:9004","Service":"arvados-controller","level":"info","msg":"listening","time":"2018-07-31T13:17:44.521694195Z"}
+Jul 31 13:17:44 zzzzz systemd[1]: Started Arvados controller.
+</code></pre>
+</notextile>
+
+Skip ahead to "confirm the service is working":#confirm.
+
+h3(#runit). Start the service (option 2: runit)
+
+Install runit to supervise the arvados-controller daemon.  {% include 'install_runit' %}
+
+Create a supervised service.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir /etc/service/arvados-controller</span>
+~$ <span class="userinput">cd /etc/service/arvados-controller</span>
+~$ <span class="userinput">sudo mkdir log log/main</span>
+~$ <span class="userinput">printf '#!/bin/sh\nset -a\n. /etc/arvados/environment\nexec arvados-controller 2>&1\n' | sudo tee run</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec svlogd main\n' | sudo tee log/run</span>
+~$ <span class="userinput">sudo chmod +x run log/run</span>
+~$ <span class="userinput">sudo sv exit .</span>
+~$ <span class="userinput">cd -</span>
+</code></pre>
+</notextile>
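+
+The resulting @run@ script contains the following; @set -a@ exports the variables read from @/etc/arvados/environment@ so they are visible to the controller process:
+
+<notextile>
+<pre><code>#!/bin/sh
+set -a
+. /etc/arvados/environment
+exec arvados-controller 2>&1
+</code></pre>
+</notextile>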
+
+Use @sv stat@ and check the log file to verify the service is running.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sv stat /etc/service/arvados-controller</span>
+run: /etc/service/arvados-controller: (pid 12520) 2s; run: log: (pid 12519) 2s
+~$ <span class="userinput">tail /etc/service/arvados-controller/log/main/current</span>
+{"Listen":"[::]:9004","Service":"arvados-controller","level":"info","msg":"listening","time":"2018-07-31T13:17:44.521694195Z"}
+</code></pre>
+</notextile>
+
+h3(#confirm). Confirm the service is working
+
+Confirm the service is listening on its assigned port and responding to requests.  (The "Forbidden" error response below is expected: it shows the controller is up and answering, even though this particular request is not authorized.)
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -X OPTIONS http://0.0.0.0:<b>9004</b>/login</span>
+{"errors":["Forbidden"],"error_token":"1533044555+684b532c"}
+</code></pre>
+</notextile>
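+
+If you have set @ManagementToken@ in your cluster configuration, the built-in health check endpoint provides another quick test (the token and response shown here are illustrative):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -H "Authorization: Bearer xxxxxxxx" http://0.0.0.0:<b>9004</b>/_health/ping</span>
+{"health":"OK"}
+</code></pre>
+</notextile>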
index 4c735a1eec1ec286b2652f6ee5282920c48cc797..3a8dce078dd092bfe687639f912415b2553bf14c 100644 (file)
@@ -57,12 +57,7 @@ h3. Create a keep-balance token
 
 Create an Arvados superuser token for use by keep-balance. *On the API server*, run:
 
-<notextile>
-<pre><code>apiserver:~$ <span class="userinput">cd /var/www/arvados-api/current</span>
-apiserver:/var/www/arvados-api/current$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production bundle exec script/create_superuser_token.rb</span>
-zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
-</code></pre>
-</notextile>
+{% include 'create_superuser_token' %}
 
 h3. Update keepstore configuration files
 
index fe690a5eda8880b67f21fca6c2242e8bf62afead..9f580c0f8b2af0f0244c1ae1570c4346d33cd6ac 100644 (file)
@@ -103,7 +103,18 @@ Note: if the Web uploader is failing to upload data and there are no logs from k
 
 h3. Tell the API server about the Keepproxy server
 
-The API server needs to be informed about the presence of your Keepproxy server. Please execute the following commands on your <strong>shell server</strong>.
+The API server needs to be informed about the presence of your Keepproxy server.
+
+First, if you don't already have an admin token, create a superuser token:
+
+{% include 'create_superuser_token' %}
+
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
 
 <notextile>
 <pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
@@ -117,3 +128,13 @@ The API server needs to be informed about the presence of your Keepproxy server.
 }
 EOF</span>
 </code></pre></notextile>
+
+h3. Testing keepproxy
+
+Log into a host on an external network, outside your private Arvados network.  This host should be able to contact your keepproxy server (e.g. keep.$uuid_prefix.arvadosapi.com), but not your keepstore servers (e.g. keep[0-9].$uuid_prefix.arvadosapi.com).
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on this host.
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  For an example, see "Testing keep":install-keepstore.html#testing on the keepstore install page.
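+
+You can also ask the API server which Keep services it advertises to this client; from an external host the list should contain your keepproxy rather than the individual keepstores (this assumes the @arv@ CLI is installed and configured on this host):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep_service accessible</span>
+</code></pre>
+</notextile>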
index 750b7a47ef184ecaa91536dd098e3b3b0891562b..943c9bae36b1c7e2358a83f9636bb3eb3ddf3cd3 100644 (file)
@@ -198,19 +198,52 @@ Repeat the above sections to prepare volumes and bring up supervised services on
 
 h3. Tell the API server about the Keepstore servers
 
-The API server needs to be informed about the presence of your Keepstore servers. For each of the Keepstore servers you have created, please execute the following commands on your <strong>shell server</strong>.
+The API server needs to be informed about the presence of your Keepstore servers.
 
-Make sure to update the @service_host@ value to match each of your Keepstore servers.
+First, if you don't already have an admin token, create a superuser token:
+
+{% include 'create_superuser_token' %}
+
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
+
+Use this command to register each keepstore server you have installed.  Make sure to update the @service_host@ value.
 
 <notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
 ~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
 <span class="userinput">{
- "service_host":"<strong>keep0.$prefix.your.domain</strong>",
+ "service_host":"<strong>keep0.$uuid_prefix.your.domain</strong>",
  "service_port":25107,
  "service_ssl_flag":false,
  "service_type":"disk"
 }
 EOF</span>
 </code></pre></notextile>
+
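+After registering each server, you can verify the entries by listing the Keep services known to the API server:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep_service list</span>
+</code></pre>
+</notextile>
+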
+h3(#testing). Testing keep
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on the host where you will run the tests.
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections:
+
+<pre>
+$ echo "hello world!" > hello.txt
+
+$ arv-put --portable-data-hash hello.txt
+2018-07-12 13:35:25 arvados.arv_put[28702] INFO: Creating new cache file at /home/example/.cache/arvados/arv-put/1571ec0adb397c6a18d5c74cc95b3a2a
+0M / 0M 100.0% 2018-07-12 13:35:27 arvados.arv_put[28702] INFO:
+
+2018-07-12 13:35:27 arvados.arv_put[28702] INFO: Collection saved as 'Saved at 2018-07-12 17:35:25 UTC by example@example'
+59389a8f9ee9d399be35462a0f92541c+53
+
+$ arv-get 59389a8f9ee9d399be35462a0f92541c+53/hello.txt
+hello world!
+</pre>
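+
+As an optional integrity check, compare checksums of the original file and the copy fetched back from Keep; the two digests should match:
+
+<pre>
+$ md5sum hello.txt
+$ arv-get 59389a8f9ee9d399be35462a0f92541c+53/hello.txt | md5sum
+</pre>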
index 7b1b24e1445d59ac15d1205988d06814eab950eb..e0cc4b8581e65a1a38292f1953418db394f92bee 100644 (file)
@@ -19,7 +19,7 @@ This guide assumes you have seven systems available in the same network subnet:
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-|_Function_|_Number of nodes_|
+|_. Function|_. Number of nodes|
 |Arvados API, Crunch dispatcher, Git, Websockets and Workbench|1|
 |Arvados Compute node|1|
 |Arvados Keepproxy and Keep-web server|1|
@@ -33,7 +33,7 @@ The number of Keepstore, shell and compute nodes listed above is a minimum. In a
 h2. Supported GNU/Linux distributions
 
 table(table table-bordered table-condensed).
-|_Distribution_|_State_|_Last supported version_|
+|_. Distribution|_. State|_. Last supported version|
 |CentOS 7|Supported|Latest|
 |Debian 8 ("jessie")|Supported|Latest|
 |Debian 9 ("stretch")|Supported|Latest|
@@ -73,7 +73,7 @@ First, register the Curoverse signing key in apt's database:
 Configure apt to retrieve packages from the Arvados package repository. This command depends on your OS vendor and version:
 
 table(table table-bordered table-condensed).
-|OS version|Command|
+|_. OS version|_. Command|
 |Debian 8 ("jessie")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ jessie main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Ubuntu 14.04 ("trusty")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
@@ -128,7 +128,7 @@ By convention, we use the following hostname pattern:
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-|_Function_|_Hostname_|
+|_. Function|_. Hostname|
 |Arvados API|@uuid_prefix@.your.domain|
 |Arvados Git server|git.@uuid_prefix@.your.domain|
 |Arvados Keepproxy server|keep.@uuid_prefix@.your.domain|
index 9ee6722a07fa8b71be4b120e431b49f95635d881..defec2589e82a3f32266f39e500c54401ee57683 100644 (file)
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados Node Manager provides elastic computing for Arvados and SLURM by creating and destroying virtual machines on demand.  Node Manager currently supports Amazon Web Services (AWS), Google Cloud Platform (GCP) and Microsoft Azure.
 
-Note: node manager is only required for elastic computing cloud environments.  Fixed size clusters do not require node manager.
+Note: Node Manager is only required for elastic cloud computing environments.  Fixed-size clusters (such as on-premises HPC) do not require Node Manager.
 
 h2. Install
 
@@ -113,6 +113,15 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node could be reported as idle when it has only
+# been idle for 1 second).  With a 60 second poll period, three
+# consecutive "idle" status updates suggest the node has been idle at
+# least 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
@@ -282,6 +291,15 @@ poll_stale_after = 600
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node could be reported as idle when it has only
+# been idle for 1 second).  With a 60 second poll period, three
+# consecutive "idle" status updates suggest the node has been idle at
+# least 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
@@ -470,6 +488,15 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node could be reported as idle when it has only
+# been idle for 1 second).  With a 60 second poll period, three
+# consecutive "idle" status updates suggest the node has been idle at
+# least 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
index 53adcd5c1c727ac56ccf771755ef5feea9d6bcef..43369a3bbfc230f3bf95a55923211dc24566c606 100644 (file)
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2. LobSTR v3 
 
-In this quickstart guide, we'll run an existing pipeline with pre-existing data. Step-by-step instructions are shown below. You can follow along using your own local install or by using Curoverse's <a href="http://lp.curoverse.com/beta-signup/">hosted version of Arvados</a> (in public beta, any Google account can be used to login).
+In this quickstart guide, we'll run an existing pipeline with pre-existing data. Step-by-step instructions are shown below. You can follow along using your own local install or by using the <a href="https://playground.arvados.org/">Arvados Playground</a> (any Google account can be used to log in).
 
 (For more information about this pipeline, see our <a href="https://dev.arvados.org/projects/arvados/wiki/LobSTR_tutorial">detailed lobSTR guide</a>).
 
diff --git a/doc/user/composer/c1.png b/doc/user/composer/c1.png
new file mode 100644 (file)
index 0000000..6e89aa0
Binary files /dev/null and b/doc/user/composer/c1.png differ
diff --git a/doc/user/composer/c10.png b/doc/user/composer/c10.png
new file mode 100644 (file)
index 0000000..1bca579
Binary files /dev/null and b/doc/user/composer/c10.png differ
diff --git a/doc/user/composer/c11.png b/doc/user/composer/c11.png
new file mode 100644 (file)
index 0000000..4d64476
Binary files /dev/null and b/doc/user/composer/c11.png differ
diff --git a/doc/user/composer/c12.png b/doc/user/composer/c12.png
new file mode 100644 (file)
index 0000000..f192ab7
Binary files /dev/null and b/doc/user/composer/c12.png differ
diff --git a/doc/user/composer/c13.png b/doc/user/composer/c13.png
new file mode 100644 (file)
index 0000000..7ba72dc
Binary files /dev/null and b/doc/user/composer/c13.png differ
diff --git a/doc/user/composer/c14.png b/doc/user/composer/c14.png
new file mode 100644 (file)
index 0000000..f7d446b
Binary files /dev/null and b/doc/user/composer/c14.png differ
diff --git a/doc/user/composer/c15.png b/doc/user/composer/c15.png
new file mode 100644 (file)
index 0000000..54fa54d
Binary files /dev/null and b/doc/user/composer/c15.png differ
diff --git a/doc/user/composer/c16.png b/doc/user/composer/c16.png
new file mode 100644 (file)
index 0000000..bbdd65a
Binary files /dev/null and b/doc/user/composer/c16.png differ
diff --git a/doc/user/composer/c17.png b/doc/user/composer/c17.png
new file mode 100644 (file)
index 0000000..5706e61
Binary files /dev/null and b/doc/user/composer/c17.png differ
diff --git a/doc/user/composer/c18.png b/doc/user/composer/c18.png
new file mode 100644 (file)
index 0000000..fc2b736
Binary files /dev/null and b/doc/user/composer/c18.png differ
diff --git a/doc/user/composer/c19.png b/doc/user/composer/c19.png
new file mode 100644 (file)
index 0000000..97202cd
Binary files /dev/null and b/doc/user/composer/c19.png differ
diff --git a/doc/user/composer/c2.png b/doc/user/composer/c2.png
new file mode 100644 (file)
index 0000000..89fdf33
Binary files /dev/null and b/doc/user/composer/c2.png differ
diff --git a/doc/user/composer/c20.png b/doc/user/composer/c20.png
new file mode 100644 (file)
index 0000000..df31c9c
Binary files /dev/null and b/doc/user/composer/c20.png differ
diff --git a/doc/user/composer/c21.png b/doc/user/composer/c21.png
new file mode 100644 (file)
index 0000000..cc3f928
Binary files /dev/null and b/doc/user/composer/c21.png differ
diff --git a/doc/user/composer/c22.png b/doc/user/composer/c22.png
new file mode 100644 (file)
index 0000000..9c7781f
Binary files /dev/null and b/doc/user/composer/c22.png differ
diff --git a/doc/user/composer/c23.png b/doc/user/composer/c23.png
new file mode 100644 (file)
index 0000000..f5be591
Binary files /dev/null and b/doc/user/composer/c23.png differ
diff --git a/doc/user/composer/c24.png b/doc/user/composer/c24.png
new file mode 100644 (file)
index 0000000..b544356
Binary files /dev/null and b/doc/user/composer/c24.png differ
diff --git a/doc/user/composer/c2b.png b/doc/user/composer/c2b.png
new file mode 100644 (file)
index 0000000..39acd60
Binary files /dev/null and b/doc/user/composer/c2b.png differ
diff --git a/doc/user/composer/c2c.png b/doc/user/composer/c2c.png
new file mode 100644 (file)
index 0000000..931181c
Binary files /dev/null and b/doc/user/composer/c2c.png differ
diff --git a/doc/user/composer/c3.png b/doc/user/composer/c3.png
new file mode 100644 (file)
index 0000000..3e650c2
Binary files /dev/null and b/doc/user/composer/c3.png differ
diff --git a/doc/user/composer/c4.png b/doc/user/composer/c4.png
new file mode 100644 (file)
index 0000000..0f706a0
Binary files /dev/null and b/doc/user/composer/c4.png differ
diff --git a/doc/user/composer/c5.png b/doc/user/composer/c5.png
new file mode 100644 (file)
index 0000000..aaff6f5
Binary files /dev/null and b/doc/user/composer/c5.png differ
diff --git a/doc/user/composer/c6.png b/doc/user/composer/c6.png
new file mode 100644 (file)
index 0000000..9275d86
Binary files /dev/null and b/doc/user/composer/c6.png differ
diff --git a/doc/user/composer/c7.png b/doc/user/composer/c7.png
new file mode 100644 (file)
index 0000000..2d77fe2
Binary files /dev/null and b/doc/user/composer/c7.png differ
diff --git a/doc/user/composer/c8.png b/doc/user/composer/c8.png
new file mode 100644 (file)
index 0000000..1620887
Binary files /dev/null and b/doc/user/composer/c8.png differ
diff --git a/doc/user/composer/c9.png b/doc/user/composer/c9.png
new file mode 100644 (file)
index 0000000..43b1210
Binary files /dev/null and b/doc/user/composer/c9.png differ
diff --git a/doc/user/composer/composer.html.textile.liquid b/doc/user/composer/composer.html.textile.liquid
new file mode 100644 (file)
index 0000000..e8ef0b6
--- /dev/null
@@ -0,0 +1,119 @@
+---
+layout: default
+navsection: userguide
+title: Create a Workflow with Composer
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados Workflow Composer is a graphical interface for building Common Workflow Language (CWL) workflows to run on Arvados.
+
+This tutorial will demonstrate:
+
+# Creating a new git repository through Arvados to store the workflow
+# Creating CommandLineTools for "sort" and "uniq"
+# Creating a Workflow which uses "sort" and "uniq" to remove duplicate lines from a text file
+# Submitting the Workflow to run on Arvados
+
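+For reference, the finished workflow is equivalent to this shell pipeline (file names are illustrative; @uniq@ only removes adjacent duplicate lines, which is why the input is sorted first):
+
+<pre>
+$ sort input.txt | uniq > output.txt
+</pre>
+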
+h3. 1. Access from workbench
+
+!(screenshot)c1.png!
+
+h3. 2. Composer starting page
+
+!(screenshot)c2.png!
+
+h3. 3. Manage git repositories (opens Workbench in a new tab)
+
+!(screenshot)c2b.png!
+
+h3. 4. Add a new repository
+
+!(screenshot)c4.png!
+
+!(screenshot)c3.png!
+
+h3. 5. Return to Composer.  Use the refresh button to discover the new repository (it may take a few moments to show up).
+
+!(screenshot)c2c.png!
+
+h3. 6. Create a new Command Line Tool
+
+!(screenshot)c5.png!
+
+!(screenshot)c20.png!
+
+h3. 7. Set Docker image, base command, and input port for "sort" tool
+
+The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:8@)  You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
+
+!(screenshot)c6.png!
+
+h3. 8. Redirect stdout to a file
+
+!(screenshot)c7.png!
+
+h3. 9. Capture output file
+
+!(screenshot)c8.png!
+
+h3. 10. Save Command Line Tool
+
+!(screenshot)c22.png!
+
+h3. 11. Repeat steps 6-10 for "uniq" tool
+
+Create a new tool with a "base command" of "uniq".
+
+h3. 12. Switch back to "Home" tab and create workflow
+
+!(screenshot)c24.png!
+
+!(screenshot)c9.png!
+
+!(screenshot)c10.png!
+
+h3. 13. Drag and drop tools into Workflow
+
+!(screenshot)c11.png!
+
+h3. 14. Drag from input port of "sort" to empty space to create workflow input
+
+!(screenshot)c21.png!
+
+h3. 15. Drag from output port of "sort" to input port of "uniq"
+
+!(screenshot)c13.png!
+
+h3. 16. Drag from output port of "uniq" to empty space to create workflow output
+
+!(screenshot)c14.png!
+
+h3. 17. Save Workflow
+
+!(screenshot)c23.png!
+
+h3. 18. Click on "Test" tab then click "Run"
+
+!(screenshot)c15.png!
+
+h3. 19. Choose input file
+
+You may need to "upload an input file":{{site.baseurl}}/user/tutorials/tutorial-keep.html
+
+!(screenshot)c16.png!
+
+h3. 20. Run the workflow
+
+!(screenshot)c17.png!
+
+h3. 21. Monitor progress (may take several minutes)
+
+!(screenshot)c18.png!
+
+h3. 22. Get workflow output
+
+!(screenshot)c19.png!
index 2319b3cb81f8a85046be6d103fc92efe8ac0b1d8..ad5d3bd83643e6d9134dbfddddfdf2209be66140 100644 (file)
@@ -33,7 +33,7 @@ The tutorial files are located in the "documentation section of the Arvados sour
 </code></pre>
 </notextile>
 
-The tutorial data is hosted on "https://cloud.curoverse.com":https://cloud.curoverse.com (also referred to by the identifier *qr1hi*).  If you are using a different Arvados instance, you may need to copy the data to your own instance.  The easiest way to do this is with "arv-copy":{{site.baseurl}}/user/topics/arv-copy.html (this requires signing up for a free cloud.curoverse.com account).
+The tutorial data is hosted on "https://playground.arvados.org":https://playground.arvados.org (also referred to by the identifier *qr1hi*).  If you are using a different Arvados instance, you may need to copy the data to your own instance.  The easiest way to do this is with "arv-copy":{{site.baseurl}}/user/topics/arv-copy.html (this requires signing up for a free playground.arvados.org account).
 
 <notextile>
 <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst settings 2463fa9efeb75e099685528b3b9071e0+438</span>
@@ -42,13 +42,13 @@ The tutorial data is hosted on "https://cloud.curoverse.com":https://cloud.curov
 </code></pre>
 </notextile>
 
-If you do not wish to create an account on "https://cloud.curoverse.com":https://cloud.curoverse.com, you may download the files anonymously and upload them to your local Arvados instance:
+If you do not wish to create an account on "https://playground.arvados.org":https://playground.arvados.org, you may download the files anonymously and upload them to your local Arvados instance:
 
-"https://cloud.curoverse.com/collections/2463fa9efeb75e099685528b3b9071e0+438":https://cloud.curoverse.com/collections/2463fa9efeb75e099685528b3b9071e0+438
+"https://playground.arvados.org/collections/2463fa9efeb75e099685528b3b9071e0+438":https://playground.arvados.org/collections/2463fa9efeb75e099685528b3b9071e0+438
 
-"https://cloud.curoverse.com/collections/ae480c5099b81e17267b7445e35b4bc7+180":https://cloud.curoverse.com/collections/ae480c5099b81e17267b7445e35b4bc7+180
+"https://playground.arvados.org/collections/ae480c5099b81e17267b7445e35b4bc7+180":https://playground.arvados.org/collections/ae480c5099b81e17267b7445e35b4bc7+180
 
-"https://cloud.curoverse.com/collections/655c6cd07550151b210961ed1d3852cf+57":https://cloud.curoverse.com/collections/655c6cd07550151b210961ed1d3852cf+57
+"https://playground.arvados.org/collections/655c6cd07550151b210961ed1d3852cf+57":https://playground.arvados.org/collections/655c6cd07550151b210961ed1d3852cf+57
 
 h2. Submitting a workflow to an Arvados cluster
 
index db03adf1c07135e57f84b8643c6b922b1091c140..07cb4aa9095fad72e9854997427b5f171a941307 100644 (file)
@@ -113,6 +113,8 @@ steps:
         tmpdirMin: 90000
 </pre>
 
+* Available compute node types vary over time and across different cloud providers, so try to limit the RAM requirement to what the program actually needs.  However, if you need to target a specific compute node type, see this discussion on "calculating RAM request and choosing instance type for containers.":{{site.baseurl}}/api/execution.html#RAM
+
 * Instead of scattering separate steps, prefer to scatter over a subworkflow.
 
 With the following pattern, @step1@ has to wait for all samples to complete before @step2@ can start computing on any samples.  This means a single long-running sample can prevent the rest of the workflow from moving on:
@@ -174,7 +176,9 @@ steps:
           run: tool3.cwl
 </pre>
 
-* When migrating from crunch v1 API (--api=jobs) to the crunch v2 API (--api=containers) there are a few differences in behavior:
+h2(#migrate). Migrating CWL workflows from the jobs API to the containers API
+
+* When migrating from the jobs API (--api=jobs, sometimes referred to as "crunch v1") to the containers API (--api=containers, "crunch v2"), there are a few differences in behavior:
 ** The tool is limited to accessing only collections which are explicitly listed in the input, and further limited to only the subdirectories of collections listed in input.  For example, given an explicit file input @/dir/subdir/file1.txt@, a tool will not be able to implicitly access the file @/dir/file2.txt@.  Use @secondaryFiles@ or a @Directory@ input to describe trees of files.
 ** Files listed in @InitialWorkDirRequirement@ appear in the output directory as normal files (not symlinks) but cannot be moved, renamed or deleted.  These files will be added to the output collection but without any additional copies of the underlying data.
 ** Tools are disallowed network access by default.  Tools which require network access must include @arv:APIRequirement: {}@ in their @requirements@ section.
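+
+For example, a tool that needs network access would include something like the following in its CWL file (a minimal sketch; the @arv@ namespace declaration is needed to use the extension):
+
+<pre>
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+
+requirements:
+  arv:APIRequirement: {}
+</pre>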
index 74868bcab4a0b76d44cbd43eee0c2c89d73b0311..f1adfe28545fe235ddd64e0eed882c7a84966e88 100644 (file)
@@ -21,7 +21,7 @@ h2. arv-copy
 
 @arv-copy@ allows users to copy collections and pipeline templates from one cluster to another. By default, @arv-copy@ will recursively go through a template and copy all dependencies associated with the object.
 
-For example, let's copy from the <a href="https://cloud.curoverse.com/">cloud instance *qr1hi*</a> to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangable with any cluster name. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
+For example, let's copy from the <a href="https://playground.arvados.org/">Arvados Playground</a>, also known as *qr1hi*, to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangeable with any cluster name. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
 
 In order to communicate with both clusters, you must create a custom configuration file for each one. In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Current token*. From your *Current token* page in each of *qr1hi* and *dst_cluster*, copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@. Then create two configuration files, one for each cluster; the file names must have the format *uuid_prefix.conf*, so in our example one file for *qr1hi* and one for *dst_cluster*.
 
@@ -44,7 +44,7 @@ h3. How to copy a collection
 
 First, select the uuid of the collection you want to copy from the source cluster. The uuid can be found in the collection display page in the collection summary area (top left box), or from the URL bar (the part after @collections/...@)
 
-Now copy the collection from *qr1hi* to *dst_cluster*. We will use the uuid @qr1hi-4zz18-tci4vn4fa95w0zx@ as an example. You can find this collection in the <a href="https://cloud.curoverse.com/collections/qr1hi-4zz18-tci4vn4fa95w0zx">lobSTR v.3 project on cloud.curoverse.com</a>.
+Now copy the collection from *qr1hi* to *dst_cluster*. We will use the uuid @qr1hi-4zz18-tci4vn4fa95w0zx@ as an example. You can find this collection in the <a href="https://playground.arvados.org/collections/qr1hi-4zz18-tci4vn4fa95w0zx">lobSTR v.3 project on playground.arvados.org</a>.
 <notextile>
 <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster qr1hi-4zz18-tci4vn4fa95w0zx</span>
 qr1hi-4zz18-tci4vn4fa95w0zx: 6.1M / 6.1M 100.0%
index 1a78025b2570fef3db597ef88cb900072ae519b2..c21fbd9ad2204c0eb056f473879c057ebbc814a7 100644 (file)
@@ -184,14 +184,14 @@ arvados/jobs          latest              12b9f859d48c        4 days ago
 
 h2. Upload your image
 
-Finally, we are ready to upload the new Docker image to Arvados.  Use @arv keep docker@ with the image repository name to upload the image.  Without arguments, @arv keep docker@ will print out the list of Docker images in Arvados that are available to you.
+Finally, we are ready to upload the new Docker image to Arvados.  Use @arv-keepdocker@ with the image repository name to upload the image.  Without arguments, @arv-keepdocker@ will print out the list of Docker images in Arvados that are available to you.
 
 <notextile>
-<pre><code>$ <span class="userinput">arv keep docker arvados/jobs-with-r</span>
+<pre><code>$ <span class="userinput">arv-keepdocker arvados/jobs-with-r</span>
 703M / 703M 100.0%
 Collection saved as 'Docker image arvados/jobs-with-r:latest 2818853ff9f9'
 qr1hi-4zz18-abcdefghijklmno
-$ <span class="userinput">arv keep docker</span>
+$ <span class="userinput">arv-keepdocker</span>
 REPOSITORY                      TAG         IMAGE ID      COLLECTION                     CREATED
 arvados/jobs-with-r             latest      2818853ff9f9  qr1hi-4zz18-abcdefghijklmno    Tue Jan 17 20:35:53 2017
 </code></pre>
@@ -207,9 +207,9 @@ hints:
 
 h2. Share Docker images
 
-Docker images are subject to normal Arvados permissions.  If wish to share your Docker image with others (or wish to share a pipeline template that uses your Docker image) you will need to use @arv keep docker@ with the @--project-uuid@ option to upload the image to a shared project.
+Docker images are subject to normal Arvados permissions.  If you wish to share your Docker image with others (or wish to share a pipeline template that uses your Docker image) you will need to use @arv-keepdocker@ with the @--project-uuid@ option to upload the image to a shared project.
 
 <notextile>
-<pre><code>$ <span class="userinput">arv keep docker --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx arvados/jobs-with-r</span>
+<pre><code>$ <span class="userinput">arv-keepdocker --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx arvados/jobs-with-r</span>
 </code></pre>
 </notextile>
index bcbf148e5d66014ff0e519b810d9433981f97d92..a4e58b84be12b29f8f2950a1098d7490910696f6 100644 (file)
@@ -56,7 +56,7 @@ See the "run-command reference":{{site.baseurl}}/user/topics/run-command.html fo
 <pre><code>~$ <span class="userinput">git rev-parse HEAD</span></code></pre>
 </notextile>
 
-* @"docker_image"@ : The docker image hash used is found on the "Collection page":https://cloud.curoverse.com/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address*.
+* @"docker_image"@ : The docker image hash used is found on the "Collection page":https://playground.arvados.org/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address*.
 
 h2. Running your pipeline
 
@@ -82,4 +82,4 @@ Note: Job reuse can only happen if all input collections do not change.
 <pre><code>~$ <span class="userinput">git rev-parse HEAD</span></code></pre>
 </notextile>
 
-* @"docker_image"@ : This specifies the "Docker":https://www.docker.com/ runtime environment where jobs run their scripts. Docker version control is similar to git, and you can commit and push changes to your images. You must re-use the docker image hash from the previous run to use the same image. It can be found on the "Collection page":https://cloud.curoverse.com/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address* or the *docker_image_locator* in a job's metadata.
+* @"docker_image"@ : This specifies the "Docker":https://www.docker.com/ runtime environment where jobs run their scripts. Docker version control is similar to git, and you can commit and push changes to your images. You must re-use the docker image hash from the previous run to use the same image. It can be found on the "Collection page":https://playground.arvados.org/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address* or the *docker_image_locator* in a job's metadata.
index 6785ed68d937f488720d1197538d1064539dd5f7..8dcb8e674e55021313dad6bd4cea1902e3187b9f 100644 (file)
@@ -19,6 +19,8 @@ A "workflow" (sometimes called a "pipeline" in other systems) is a sequence of s
 
 h3. Steps
 
+notextile. <div class="spaced-out">
+
 # Start from the *Workbench Dashboard*.  You can access the Dashboard by clicking on *<i class="fa fa-lg fa-fw fa-dashboard"></i> Dashboard* in the upper left corner of any Workbench page.
 # Click on the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a process...</span> button.  This will open a dialog box titled *Choose a pipeline or workflow to run*.
 # In the search box, type in *Tutorial bwa mem cwl*.
@@ -30,3 +32,5 @@ h3. Steps
 # After the process starts running, you can track the progress by watching log messages from the component(s).  This page refreshes automatically.  You will see a <span class="label label-success">complete</span> label when the process completes successfully.
 # Click on the *Output* link to see the results of the process.  This will load a new page listing the output files from this process.  You'll see the output SAM file from the alignment tool under the *Files* tab.
 # Click on the <span class="btn btn-sm btn-info"><i class="fa fa-download"></i></span> download button to the right of the SAM file to download your results.
+
+notextile. </div>
index 6cd7ee1518d46dac2f95431f69b0bd8086b06ace..2f1f80ca0b428c0576bf454aa97a5341e8a8fa3a 100644 (file)
@@ -15,10 +15,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2. Developing workflows
 
-For an introduction and and detailed documentation about writing CWL, see the "CWL User Guide":http://commonwl.org/v1.0/UserGuide.html and the "CWL Specification":http://commonwl.org/v1.0 .
+For an introduction and detailed documentation about writing CWL, see the "CWL User Guide":https://www.commonwl.org/user_guide and the "CWL Specification":http://commonwl.org/v1.0 .
 
 See "Best Practices for writing CWL":{{site.baseurl}}/user/cwl/cwl-style.html and "Arvados CWL Extensions":{{site.baseurl}}/user/cwl/cwl-extensions.html for additional information about using CWL on Arvados.
 
+h2. Using Composer
+
+You can create new workflows in the browser using "Arvados Composer":{{site.baseurl}}/user/composer/composer.html
+
 h2. Registering a workflow to use in Workbench
 
 Use @--create-workflow@ to register a CWL workflow with Arvados.  This enables you to share workflows with other Arvados users, and run them by clicking the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a process...</span> button on the Workbench Dashboard and on the command line by UUID.
diff --git a/lib/controller/federation.go b/lib/controller/federation.go
new file mode 100644 (file)
index 0000000..24b9250
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "database/sql"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "regexp"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
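+// wfRe matches workflow request paths like
+// "/arvados/v1/workflows/zzzzz-xxxxx-xxxxxxxxxxxxxxx", capturing the
+// 5-character cluster ID prefix of the UUID.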
+var wfRe = regexp.MustCompile(`^/arvados/v1/workflows/([0-9a-z]{5})-[^/]+$`)
+
+func (h *Handler) proxyRemoteCluster(w http.ResponseWriter, req *http.Request, next http.Handler) {
+       m := wfRe.FindStringSubmatch(req.URL.Path)
+       if len(m) < 2 || m[1] == h.Cluster.ClusterID {
+               next.ServeHTTP(w, req)
+               return
+       }
+       remoteID := m[1]
+       remote, ok := h.Cluster.RemoteClusters[remoteID]
+       if !ok {
+               httpserver.Error(w, "no proxy available for cluster "+remoteID, http.StatusNotFound)
+               return
+       }
+       scheme := remote.Scheme
+       if scheme == "" {
+               scheme = "https"
+       }
+       err := h.saltAuthToken(req, remoteID)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusBadRequest)
+               return
+       }
+       urlOut := &url.URL{
+               Scheme:   scheme,
+               Host:     remote.Host,
+               Path:     req.URL.Path,
+               RawPath:  req.URL.RawPath,
+               RawQuery: req.URL.RawQuery,
+       }
+       client := h.secureClient
+       if remote.Insecure {
+               client = h.insecureClient
+       }
+       h.proxy.Do(w, req, urlOut, client)
+}
+
+// Extract the auth token supplied in req, and replace it with a
+// salted token for the remote cluster.
+func (h *Handler) saltAuthToken(req *http.Request, remote string) error {
+       creds := auth.NewCredentials()
+       creds.LoadTokensFromHTTPRequest(req)
+       if len(creds.Tokens) == 0 && req.Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+               // Override ParseForm's 10MiB limit by ensuring
+               // req.Body is a *http.maxBytesReader.
+               req.Body = http.MaxBytesReader(nil, req.Body, 1<<28) // 256MiB. TODO: use MaxRequestSize from discovery doc or config.
+               if err := creds.LoadTokensFromHTTPRequestBody(req); err != nil {
+                       return err
+               }
+               // Replace req.Body with a buffer that re-encodes the
+               // form without api_token, in case we end up
+               // forwarding the request.
+               if req.PostForm != nil {
+                       req.PostForm.Del("api_token")
+               }
+               req.Body = ioutil.NopCloser(bytes.NewBufferString(req.PostForm.Encode()))
+       }
+       if len(creds.Tokens) == 0 {
+               return nil
+       }
+       token, err := auth.SaltToken(creds.Tokens[0], remote)
+       if err == auth.ErrObsoleteToken {
+               // If the token exists in our own database, salt it
+               // for the remote. Otherwise, assume it was issued by
+               // the remote, and pass it through unmodified.
+               db, err := h.db(req)
+               if err != nil {
+                       return err
+               }
+               aca := arvados.APIClientAuthorization{APIToken: creds.Tokens[0]}
+               err = db.QueryRowContext(req.Context(), `SELECT uuid FROM api_client_authorizations WHERE api_token=$1 AND (expires_at IS NULL OR expires_at > current_timestamp) LIMIT 1`, aca.APIToken).Scan(&aca.UUID)
+               if err == sql.ErrNoRows {
+                       // Not ours; pass through unmodified.
+                       token = aca.APIToken
+               } else if err != nil {
+                       return err
+               } else {
+                       // Found; make V2 version and salt it.
+                       token, err = auth.SaltToken(aca.TokenV2(), remote)
+                       if err != nil {
+                               return err
+                       }
+               }
+       } else if err != nil {
+               return err
+       }
+       req.Header.Set("Authorization", "Bearer "+token)
+
+       // Remove api_token=... from the query string, in case we
+       // end up forwarding the request.
+       if values, err := url.ParseQuery(req.URL.RawQuery); err != nil {
+               return err
+       } else if _, ok := values["api_token"]; ok {
+               delete(values, "api_token")
+               req.URL.RawQuery = values.Encode()
+       }
+       return nil
+}
diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
new file mode 100644 (file)
index 0000000..2682092
--- /dev/null
@@ -0,0 +1,301 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "encoding/json"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/Sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = check.Suite(&FederationSuite{})
+
+type FederationSuite struct {
+       log *logrus.Logger
+       // testServer and testHandler are the controller being tested,
+       // "zhome".
+       testServer  *httpserver.Server
+       testHandler *Handler
+       // remoteServer ("zzzzz") forwards requests to the Rails API
+       // provided by the integration test environment.
+       remoteServer *httpserver.Server
+       // remoteMock ("zmock") appends each incoming request to
+       // remoteMockRequests, and returns an empty 200 response.
+       remoteMock         *httpserver.Server
+       remoteMockRequests []http.Request
+}
+
+func (s *FederationSuite) SetUpTest(c *check.C) {
+       s.log = logrus.New()
+       s.log.Formatter = &logrus.JSONFormatter{}
+       s.log.Out = &logWriter{c.Log}
+
+       s.remoteServer = newServerFromIntegrationTestEnv(c)
+       c.Assert(s.remoteServer.Start(), check.IsNil)
+
+       s.remoteMock = newServerFromIntegrationTestEnv(c)
+       s.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)
+       c.Assert(s.remoteMock.Start(), check.IsNil)
+
+       nodeProfile := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI:   arvados.SystemServiceInstance{Listen: ":1"}, // local reqs will error "connection refused"
+       }
+       s.testHandler = &Handler{Cluster: &arvados.Cluster{
+               ClusterID:  "zhome",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": nodeProfile,
+               },
+       }, NodeProfile: &nodeProfile}
+       s.testServer = newServerFromIntegrationTestEnv(c)
+       s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.log, s.testHandler))
+
+       s.testHandler.Cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+               "zzzzz": {
+                       Host:   s.remoteServer.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+               "zmock": {
+                       Host:   s.remoteMock.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+       }
+
+       c.Assert(s.testServer.Start(), check.IsNil)
+
+       s.remoteMockRequests = nil
+}
+
+func (s *FederationSuite) remoteMockHandler(w http.ResponseWriter, req *http.Request) {
+       s.remoteMockRequests = append(s.remoteMockRequests, *req)
+}
+
+func (s *FederationSuite) TearDownTest(c *check.C) {
+       if s.remoteServer != nil {
+               s.remoteServer.Close()
+       }
+       if s.testServer != nil {
+               s.testServer.Close()
+       }
+}
+
+func (s *FederationSuite) testRequest(req *http.Request) *http.Response {
+       resp := httptest.NewRecorder()
+       s.testServer.Server.Handler.ServeHTTP(resp, req)
+       return resp.Result()
+}
+
+func (s *FederationSuite) TestLocalRequest(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zhome-", 1), nil)
+       resp := s.testRequest(req)
+       s.checkHandledLocally(c, resp)
+}
+
+func (s *FederationSuite) checkHandledLocally(c *check.C, resp *http.Response) {
+       // Our "home" controller can't handle local requests because
+       // it doesn't have its own stub/test Rails API, so we rely on
+       // "connection refused" to indicate the controller tried to
+       // proxy the request to its local Rails API.
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
+       s.checkJSONErrorMatches(c, resp, `.*connection refused`)
+}
+
+func (s *FederationSuite) TestNoAuth(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+       s.checkJSONErrorMatches(c, resp, `Not logged in`)
+}
+
+func (s *FederationSuite) TestBadAuth(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+       s.checkJSONErrorMatches(c, resp, `Not logged in`)
+}
+
+func (s *FederationSuite) TestNoAccess(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.SpectatorToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       s.checkJSONErrorMatches(c, resp, `.*not found`)
+}
+
+func (s *FederationSuite) TestGetUnknownRemote(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zz404-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       s.checkJSONErrorMatches(c, resp, `.*no proxy available for cluster zz404`)
+}
+
+func (s *FederationSuite) TestRemoteError(c *check.C) {
+       rc := s.testHandler.Cluster.RemoteClusters["zzzzz"]
+       rc.Scheme = "https"
+       s.testHandler.Cluster.RemoteClusters["zzzzz"] = rc
+
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
+       s.checkJSONErrorMatches(c, resp, `.*HTTP response to HTTPS client`)
+}
+
+func (s *FederationSuite) TestGetRemoteWorkflow(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var wf arvados.Workflow
+       c.Check(json.NewDecoder(resp.Body).Decode(&wf), check.IsNil)
+       c.Check(wf.UUID, check.Equals, arvadostest.WorkflowWithDefinitionYAMLUUID)
+       c.Check(wf.OwnerUUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *FederationSuite) TestOptionsMethod(c *check.C) {
+       req := httptest.NewRequest("OPTIONS", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Origin", "https://example.com")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       body, err := ioutil.ReadAll(resp.Body)
+       c.Check(err, check.IsNil)
+       c.Check(string(body), check.Equals, "")
+       c.Check(resp.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
+       for _, hdr := range []string{"Authorization", "Content-Type"} {
+               c.Check(resp.Header.Get("Access-Control-Allow-Headers"), check.Matches, ".*"+hdr+".*")
+       }
+       for _, method := range []string{"GET", "HEAD", "PUT", "POST", "DELETE"} {
+               c.Check(resp.Header.Get("Access-Control-Allow-Methods"), check.Matches, ".*"+method+".*")
+       }
+}
+
+func (s *FederationSuite) TestRemoteWithTokenInQuery(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1)+"?api_token="+arvadostest.ActiveToken, nil)
+       s.testRequest(req)
+       c.Assert(len(s.remoteMockRequests), check.Equals, 1)
+       pr := s.remoteMockRequests[0]
+       // Token is salted and moved from query to Authorization header.
+       c.Check(pr.URL.String(), check.Not(check.Matches), `.*api_token=.*`)
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+}
+
+func (s *FederationSuite) TestLocalTokenSalted(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       s.testRequest(req)
+       c.Assert(len(s.remoteMockRequests), check.Equals, 1)
+       pr := s.remoteMockRequests[0]
+       // The salted token here has a "zzzzz-" UUID instead of a
+       // "ztest-" UUID because ztest's local database has the
+       // "zzzzz-" test fixtures. The "secret" part is HMAC(sha1,
+       // arvadostest.ActiveToken, "zmock") = "7fd3...".
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+}
+
+func (s *FederationSuite) TestRemoteTokenNotSalted(c *check.C) {
+       // remoteToken can be any v1 token that doesn't appear in
+       // ztest's local db.
+       remoteToken := "abcdef00000000000000000000000000000000000000000000"
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+remoteToken)
+       s.testRequest(req)
+       c.Assert(len(s.remoteMockRequests), check.Equals, 1)
+       pr := s.remoteMockRequests[0]
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer "+remoteToken)
+}
+
+func (s *FederationSuite) TestWorkflowCRUD(c *check.C) {
+       wf := arvados.Workflow{
+               Description: "TestCRUD",
+       }
+       {
+               body := &strings.Builder{}
+               json.NewEncoder(body).Encode(&wf)
+               req := httptest.NewRequest("POST", "/arvados/v1/workflows", strings.NewReader(url.Values{
+                       "workflow": {body.String()},
+               }.Encode()))
+               req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               rec := httptest.NewRecorder()
+               s.remoteServer.Server.Handler.ServeHTTP(rec, req) // direct to remote -- can't proxy a create req because no uuid
+               resp := rec.Result()
+               s.checkResponseOK(c, resp)
+               json.NewDecoder(resp.Body).Decode(&wf)
+
+               defer func() {
+                       req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
+                       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+                       s.remoteServer.Server.Handler.ServeHTTP(httptest.NewRecorder(), req)
+               }()
+               c.Check(wf.UUID, check.Not(check.Equals), "")
+
+               c.Assert(wf.ModifiedAt, check.NotNil)
+               c.Logf("wf.ModifiedAt: %v", wf.ModifiedAt)
+               c.Check(time.Since(*wf.ModifiedAt) < time.Minute, check.Equals, true)
+       }
+       for _, method := range []string{"PATCH", "PUT", "POST"} {
+               form := url.Values{
+                       "workflow": {`{"description": "Updated with ` + method + `"}`},
+               }
+               if method == "POST" {
+                       form["_method"] = []string{"PATCH"}
+               }
+               req := httptest.NewRequest(method, "/arvados/v1/workflows/"+wf.UUID, strings.NewReader(form.Encode()))
+               req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               s.checkResponseOK(c, resp)
+               err := json.NewDecoder(resp.Body).Decode(&wf)
+               c.Check(err, check.IsNil)
+
+               c.Check(wf.Description, check.Equals, "Updated with "+method)
+       }
+       {
+               req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               s.checkResponseOK(c, resp)
+               err := json.NewDecoder(resp.Body).Decode(&wf)
+               c.Check(err, check.IsNil)
+       }
+       {
+               req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+wf.UUID, nil)
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       }
+}
+
+func (s *FederationSuite) checkResponseOK(c *check.C, resp *http.Response) {
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       if resp.StatusCode != http.StatusOK {
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Logf("... response body = %q, %v\n", body, err)
+       }
+}
+
+func (s *FederationSuite) checkJSONErrorMatches(c *check.C, resp *http.Response, re string) {
+       var jresp httpserver.ErrorResponse
+       err := json.NewDecoder(resp.Body).Decode(&jresp)
+       c.Check(err, check.IsNil)
+       c.Assert(len(jresp.Errors), check.Equals, 1)
+       c.Check(jresp.Errors[0], check.Matches, re)
+}
index 59c2f2a61d9d534f19cfb45ee2a34a38a4381808..25799aae9ea30d3116469e205c77f927c313172a 100644 (file)
@@ -5,8 +5,8 @@
 package controller
 
 import (
-       "context"
-       "io"
+       "database/sql"
+       "errors"
        "net"
        "net/http"
        "net/url"
@@ -17,113 +17,133 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/health"
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       _ "github.com/lib/pq"
 )
 
 type Handler struct {
        Cluster     *arvados.Cluster
        NodeProfile *arvados.NodeProfile
 
-       setupOnce    sync.Once
-       handlerStack http.Handler
-       proxyClient  *arvados.Client
+       setupOnce      sync.Once
+       handlerStack   http.Handler
+       proxy          *proxy
+       secureClient   *http.Client
+       insecureClient *http.Client
+       pgdb           *sql.DB
+       pgdbMtx        sync.Mutex
 }
 
 func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
        h.setupOnce.Do(h.setup)
+       if req.Method != "GET" && req.Method != "HEAD" {
+               // http.ServeMux returns 301 with a cleaned path if
+               // the incoming request has a double slash. Some
+               // clients (including the Go standard library) change
+               // the request method to GET when following a 301
+               // redirect if the original method was not HEAD
+               // (RFC7231 6.4.2 specifically allows this in the case
+               // of POST). Thus "POST //foo" gets misdirected to
+               // "GET /foo". To avoid this, eliminate double slashes
+               // before passing the request to ServeMux.
+               for strings.Contains(req.URL.Path, "//") {
+                       req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
+               }
+       }
        h.handlerStack.ServeHTTP(w, req)
 }
 
 func (h *Handler) CheckHealth() error {
        h.setupOnce.Do(h.setup)
-       _, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       _, _, err := findRailsAPI(h.Cluster, h.NodeProfile)
        return err
 }
 
+func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }
+
 func (h *Handler) setup() {
        mux := http.NewServeMux()
        mux.Handle("/_health/", &health.Handler{
                Token:  h.Cluster.ManagementToken,
                Prefix: "/_health/",
        })
-       mux.Handle("/", http.HandlerFunc(h.proxyRailsAPI))
+       hs := http.NotFoundHandler()
+       hs = prepend(hs, h.proxyRailsAPI)
+       hs = prepend(hs, h.proxyRemoteCluster)
+       mux.Handle("/", hs)
        h.handlerStack = mux
-}
 
-// headers that shouldn't be forwarded when proxying. See
-// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
-var dropHeaders = map[string]bool{
-       "Connection":          true,
-       "Keep-Alive":          true,
-       "Proxy-Authenticate":  true,
-       "Proxy-Authorization": true,
-       "TE":                true,
-       "Trailer":           true,
-       "Transfer-Encoding": true,
-       "Upgrade":           true,
-}
+       sc := *arvados.DefaultSecureClient
+       sc.Timeout = time.Duration(h.Cluster.HTTPRequestTimeout)
+       sc.CheckRedirect = neverRedirect
+       h.secureClient = &sc
 
-func (h *Handler) proxyRailsAPI(w http.ResponseWriter, reqIn *http.Request) {
-       urlOut, err := findRailsAPI(h.Cluster, h.NodeProfile)
-       if err != nil {
-               httpserver.Error(w, err.Error(), http.StatusInternalServerError)
-               return
+       ic := *arvados.InsecureHTTPClient
+       ic.Timeout = time.Duration(h.Cluster.HTTPRequestTimeout)
+       ic.CheckRedirect = neverRedirect
+       h.insecureClient = &ic
+
+       h.proxy = &proxy{
+               Name:           "arvados-controller",
+               RequestTimeout: time.Duration(h.Cluster.HTTPRequestTimeout),
        }
-       urlOut = &url.URL{
-               Scheme:   urlOut.Scheme,
-               Host:     urlOut.Host,
-               Path:     reqIn.URL.Path,
-               RawPath:  reqIn.URL.RawPath,
-               RawQuery: reqIn.URL.RawQuery,
+}
+
+var errDBConnection = errors.New("database connection error")
+
+func (h *Handler) db(req *http.Request) (*sql.DB, error) {
+       h.pgdbMtx.Lock()
+       defer h.pgdbMtx.Unlock()
+       if h.pgdb != nil {
+               return h.pgdb, nil
        }
 
-       // Copy headers from incoming request, then add/replace proxy
-       // headers like Via and X-Forwarded-For.
-       hdrOut := http.Header{}
-       for k, v := range reqIn.Header {
-               if !dropHeaders[k] {
-                       hdrOut[k] = v
-               }
+       db, err := sql.Open("postgres", h.Cluster.PostgreSQL.Connection.String())
+       if err != nil {
+               httpserver.Logger(req).WithError(err).Error("postgresql connect failed")
+               return nil, errDBConnection
        }
-       xff := reqIn.RemoteAddr
-       if xffIn := reqIn.Header.Get("X-Forwarded-For"); xffIn != "" {
-               xff = xffIn + "," + xff
+       if p := h.Cluster.PostgreSQL.ConnectionPool; p > 0 {
+               db.SetMaxOpenConns(p)
        }
-       hdrOut.Set("X-Forwarded-For", xff)
-       hdrOut.Add("Via", reqIn.Proto+" arvados-controller")
-
-       ctx := reqIn.Context()
-       if timeout := h.Cluster.HTTPRequestTimeout; timeout > 0 {
-               var cancel context.CancelFunc
-               ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Duration(timeout)))
-               defer cancel()
+       if err := db.Ping(); err != nil {
+               httpserver.Logger(req).WithError(err).Error("postgresql connect succeeded but ping failed")
+               return nil, errDBConnection
        }
+       h.pgdb = db
+       return db, nil
+}
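
h.db opens the PostgreSQL pool lazily, on first use: sql.Open only validates the DSN, so the explicit Ping is what proves the connection actually works, and the mutex ensures concurrent requests end up sharing a single *sql.DB. The same open-once shape in miniature (a sketch; the DSN and pool size here are placeholders, not Arvados configuration):

    package main

    import (
        "database/sql"
        "sync"

        _ "github.com/lib/pq"
    )

    type handler struct {
        pgdb    *sql.DB
        pgdbMtx sync.Mutex
    }

    func (h *handler) db() (*sql.DB, error) {
        h.pgdbMtx.Lock()
        defer h.pgdbMtx.Unlock()
        if h.pgdb != nil {
            return h.pgdb, nil
        }
        db, err := sql.Open("postgres", "host=localhost dbname=example sslmode=disable") // placeholder DSN
        if err != nil {
            return nil, err
        }
        db.SetMaxOpenConns(10) // cap the pool, as ConnectionPool does above
        if err := db.Ping(); err != nil {
            return nil, err // sql.Open is lazy; Ping is the first real connection
        }
        h.pgdb = db
        return db, nil
    }

    func main() {}
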
 
-       reqOut := (&http.Request{
-               Method: reqIn.Method,
-               URL:    urlOut,
-               Header: hdrOut,
-               Body:   reqIn.Body,
-       }).WithContext(ctx)
-       resp, err := arvados.InsecureHTTPClient.Do(reqOut)
+type middlewareFunc func(http.ResponseWriter, *http.Request, http.Handler)
+
+func prepend(next http.Handler, middleware middlewareFunc) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               middleware(w, req, next)
+       })
+}
+
+func (h *Handler) proxyRailsAPI(w http.ResponseWriter, req *http.Request, next http.Handler) {
+       urlOut, insecure, err := findRailsAPI(h.Cluster, h.NodeProfile)
        if err != nil {
                httpserver.Error(w, err.Error(), http.StatusInternalServerError)
                return
        }
-       for k, v := range resp.Header {
-               for _, v := range v {
-                       w.Header().Add(k, v)
-               }
+       urlOut = &url.URL{
+               Scheme:   urlOut.Scheme,
+               Host:     urlOut.Host,
+               Path:     req.URL.Path,
+               RawPath:  req.URL.RawPath,
+               RawQuery: req.URL.RawQuery,
        }
-       w.WriteHeader(resp.StatusCode)
-       n, err := io.Copy(w, resp.Body)
-       if err != nil {
-               httpserver.Logger(reqIn).WithError(err).WithField("bytesCopied", n).Error("error copying response body")
+       client := h.secureClient
+       if insecure {
+               client = h.insecureClient
        }
+       h.proxy.Do(w, req, urlOut, client)
 }
 
 // For now, findRailsAPI always uses the rails API running on this
 // node.
-func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, error) {
+func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, bool, error) {
        hostport := np.RailsAPI.Listen
        if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
                // ":12345" => connect to indicated port on localhost
@@ -131,11 +151,12 @@ func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL,
        } else if _, _, err := net.SplitHostPort(hostport); err == nil {
                // "[::1]:12345" => connect to indicated address & port
        } else {
-               return nil, err
+               return nil, false, err
        }
        proto := "http"
        if np.RailsAPI.TLS {
                proto = "https"
        }
-       return url.Parse(proto + "://" + hostport)
+       url, err := url.Parse(proto + "://" + hostport)
+       return url, np.RailsAPI.Insecure, err
 }
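
The prepend helper introduced above is what assembles the controller's handler stack: each middlewareFunc receives the request plus a next handler to delegate to, so proxyRemoteCluster can pass anything it doesn't recognize down to proxyRailsAPI, which in turn falls through to NotFoundHandler. A minimal standalone sketch of the same chaining pattern (the demo middleware and path are illustrative, not from the Arvados tree):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    type middlewareFunc func(http.ResponseWriter, *http.Request, http.Handler)

    func prepend(next http.Handler, middleware middlewareFunc) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            middleware(w, req, next)
        })
    }

    func main() {
        hs := http.NotFoundHandler()
        // Each prepend wraps the stack built so far; the innermost handler runs last.
        hs = prepend(hs, func(w http.ResponseWriter, req *http.Request, next http.Handler) {
            if req.URL.Path == "/hello" {
                fmt.Fprintln(w, "hello from middleware")
                return
            }
            next.ServeHTTP(w, req) // not ours: fall through to 404
        })
        req := httptest.NewRequest("GET", "/hello", nil)
        resp := httptest.NewRecorder()
        hs.ServeHTTP(resp, req)
        fmt.Println(resp.Code) // 200
    }
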
index 981ad7ab91919c65327e972e6004b0eb15594352..963fd1159415e16d93e676401021e974c287ad69 100644 (file)
@@ -34,11 +34,12 @@ type HandlerSuite struct {
 
 func (s *HandlerSuite) SetUpTest(c *check.C) {
        s.cluster = &arvados.Cluster{
-               ClusterID: "zzzzz",
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
                NodeProfiles: map[string]arvados.NodeProfile{
                        "*": {
                                Controller: arvados.SystemServiceInstance{Listen: ":"},
-                               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true},
+                               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
                        },
                },
        }
@@ -65,12 +66,12 @@ func (s *HandlerSuite) TestRequestTimeout(c *check.C) {
        req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
        resp := httptest.NewRecorder()
        s.handler.ServeHTTP(resp, req)
-       c.Check(resp.Code, check.Equals, http.StatusInternalServerError)
+       c.Check(resp.Code, check.Equals, http.StatusBadGateway)
        var jresp httpserver.ErrorResponse
        err := json.Unmarshal(resp.Body.Bytes(), &jresp)
        c.Check(err, check.IsNil)
        c.Assert(len(jresp.Errors), check.Equals, 1)
-       c.Check(jresp.Errors[0], check.Matches, `.*context deadline exceeded`)
+       c.Check(jresp.Errors[0], check.Matches, `.*context deadline exceeded.*`)
 }
 
 func (s *HandlerSuite) TestProxyWithoutToken(c *check.C) {
@@ -101,6 +102,7 @@ func (s *HandlerSuite) TestProxyWithTokenInRequestBody(c *check.C) {
                "_method":   {"GET"},
                "api_token": {arvadostest.ActiveToken},
        }.Encode()))
+       req.Header.Set("Content-type", "application/x-www-form-urlencoded")
        resp := httptest.NewRecorder()
        s.handler.ServeHTTP(resp, req)
        c.Check(resp.Code, check.Equals, http.StatusOK)
@@ -120,3 +122,11 @@ func (s *HandlerSuite) TestProxyNotFound(c *check.C) {
        c.Check(err, check.IsNil)
        c.Check(jresp["errors"], check.FitsTypeOf, []interface{}{})
 }
+
+func (s *HandlerSuite) TestProxyRedirect(c *check.C) {
+       req := httptest.NewRequest("GET", "https://0.0.0.0:1/login?return_to=foo", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusFound)
+       c.Check(resp.Header().Get("Location"), check.Matches, `https://0.0.0.0:1/auth/joshid\?return_to=foo&?`)
+}
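
The Content-type header added in TestProxyWithTokenInRequestBody matters because form bodies are only parsed as parameters when the request declares application/x-www-form-urlencoded; without it, the _method and api_token fields in the body are ignored. The same rule is easy to see with Go's own form parsing (standalone sketch, not Arvados code):

    package main

    import (
        "fmt"
        "net/http/httptest"
        "strings"
    )

    func main() {
        body := "_method=GET&api_token=xyzzy"

        // Without a declared form content type, ParseForm leaves the body alone.
        req := httptest.NewRequest("POST", "/arvados/v1/users/current", strings.NewReader(body))
        _ = req.ParseForm()
        fmt.Printf("no header:   %q\n", req.PostForm.Get("api_token")) // ""

        // Declaring application/x-www-form-urlencoded makes the fields visible.
        req = httptest.NewRequest("POST", "/arvados/v1/users/current", strings.NewReader(body))
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        _ = req.ParseForm()
        fmt.Printf("with header: %q\n", req.PostForm.Get("api_token")) // "xyzzy"
    }
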
diff --git a/lib/controller/proxy.go b/lib/controller/proxy.go
new file mode 100644 (file)
index 0000000..712071b
--- /dev/null
@@ -0,0 +1,83 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+       "io"
+       "net/http"
+       "net/url"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+type proxy struct {
+       Name           string // to use in Via header
+       RequestTimeout time.Duration
+}
+
+// headers that shouldn't be forwarded when proxying. See
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
+var dropHeaders = map[string]bool{
+       "Connection":          true,
+       "Keep-Alive":          true,
+       "Proxy-Authenticate":  true,
+       "Proxy-Authorization": true,
+       "TE":                true,
+       "Trailer":           true,
+       "Transfer-Encoding": true,
+       "Upgrade":           true,
+}
+
+func (p *proxy) Do(w http.ResponseWriter, reqIn *http.Request, urlOut *url.URL, client *http.Client) {
+       // Copy headers from incoming request, then add/replace proxy
+       // headers like Via and X-Forwarded-For.
+       hdrOut := http.Header{}
+       for k, v := range reqIn.Header {
+               if !dropHeaders[k] {
+                       hdrOut[k] = v
+               }
+       }
+       xff := reqIn.RemoteAddr
+       if xffIn := reqIn.Header.Get("X-Forwarded-For"); xffIn != "" {
+               xff = xffIn + "," + xff
+       }
+       hdrOut.Set("X-Forwarded-For", xff)
+       if hdrOut.Get("X-Forwarded-Proto") == "" {
+               hdrOut.Set("X-Forwarded-Proto", reqIn.URL.Scheme)
+       }
+       hdrOut.Add("Via", reqIn.Proto+" arvados-controller")
+
+       ctx := reqIn.Context()
+       if p.RequestTimeout > 0 {
+               var cancel context.CancelFunc
+               ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Duration(p.RequestTimeout)))
+               defer cancel()
+       }
+
+       reqOut := (&http.Request{
+               Method: reqIn.Method,
+               URL:    urlOut,
+               Host:   reqIn.Host,
+               Header: hdrOut,
+               Body:   reqIn.Body,
+       }).WithContext(ctx)
+       resp, err := client.Do(reqOut)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusBadGateway)
+               return
+       }
+       for k, v := range resp.Header {
+               for _, v := range v {
+                       w.Header().Add(k, v)
+               }
+       }
+       w.WriteHeader(resp.StatusCode)
+       n, err := io.Copy(w, resp.Body)
+       if err != nil {
+               httpserver.Logger(reqIn).WithError(err).WithField("bytesCopied", n).Error("error copying response body")
+       }
+}
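
proxy.Do appends each hop to X-Forwarded-For and advertises itself in Via, while dropping the hop-by-hop headers that only make sense on a single connection. A condensed sketch of just that header rewriting, extracted for illustration (httptest fills in RemoteAddr as 192.0.2.1:1234):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    var dropHeaders = map[string]bool{
        "Connection": true, "Keep-Alive": true, "Proxy-Authenticate": true,
        "Proxy-Authorization": true, "TE": true, "Trailer": true,
        "Transfer-Encoding": true, "Upgrade": true,
    }

    func forwardHeaders(reqIn *http.Request) http.Header {
        hdrOut := http.Header{}
        for k, v := range reqIn.Header {
            if !dropHeaders[k] {
                hdrOut[k] = v
            }
        }
        xff := reqIn.RemoteAddr
        if xffIn := reqIn.Header.Get("X-Forwarded-For"); xffIn != "" {
            xff = xffIn + "," + xff // earlier hops stay first in the chain
        }
        hdrOut.Set("X-Forwarded-For", xff)
        hdrOut.Add("Via", reqIn.Proto+" arvados-controller")
        return hdrOut
    }

    func main() {
        req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
        req.Header.Set("X-Forwarded-For", "10.0.0.1")
        req.Header.Set("Connection", "close") // hop-by-hop: will be dropped
        fmt.Println(forwardHeaders(req).Get("X-Forwarded-For")) // 10.0.0.1,192.0.2.1:1234
        fmt.Println(forwardHeaders(req).Get("Connection"))      // ""
    }
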
diff --git a/lib/controller/server_test.go b/lib/controller/server_test.go
new file mode 100644 (file)
index 0000000..7742cf4
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "net/http"
+       "os"
+       "path/filepath"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/Sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// logWriter is an io.Writer that writes by calling a "write log"
+// function, typically (*check.C).Log().
+type logWriter struct {
+       logfunc func(...interface{})
+}
+
+func (tl *logWriter) Write(buf []byte) (int, error) {
+       tl.logfunc(string(bytes.TrimRight(buf, "\n")))
+       return len(buf), nil
+}
+
+func integrationTestCluster() *arvados.Cluster {
+       cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+       if err != nil {
+               panic(err)
+       }
+       cc, err := cfg.GetCluster("zzzzz")
+       if err != nil {
+               panic(err)
+       }
+       return cc
+}
+
+// Return a new unstarted controller server, using the Rails API
+// provided by the integration-testing environment.
+func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
+       log := logrus.New()
+       log.Formatter = &logrus.JSONFormatter{}
+       log.Out = &logWriter{c.Log}
+
+       nodeProfile := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
+       }
+       handler := &Handler{Cluster: &arvados.Cluster{
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": nodeProfile,
+               },
+       }, NodeProfile: &nodeProfile}
+
+       srv := &httpserver.Server{
+               Server: http.Server{
+                       Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+               },
+               Addr: nodeProfile.Controller.Listen,
+       }
+       return srv
+}
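
The logWriter shim exists because logrus wants an io.Writer while gocheck exposes a variadic log function; adapting one to the other means request logs surface in test output only when a test fails. The same trick works with any line-oriented log sink (sketch; the plain-fmt sink stands in for what would be (*check.C).Log in a test):

    package main

    import (
        "bytes"
        "fmt"
        "log"
    )

    // logWriter adapts a "write one log line" function to io.Writer.
    type logWriter struct {
        logfunc func(...interface{})
    }

    func (tl *logWriter) Write(buf []byte) (int, error) {
        tl.logfunc(string(bytes.TrimRight(buf, "\n")))
        return len(buf), nil
    }

    func main() {
        logger := log.New(&logWriter{logfunc: func(args ...interface{}) {
            fmt.Println(args...) // in a test this would be the test framework's Log
        }}, "", 0)
        logger.Print("hello from the adapted logger")
    }
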
index 056ef0d185e61c7bbc52b692abd21ea61d9afdd4..8afe828196d9ea029e2f66a411b9e9f40225efee 100644 (file)
@@ -17,6 +17,7 @@ import (
        "os"
        "strconv"
        "strings"
+       "syscall"
        "time"
 )
 
@@ -52,13 +53,17 @@ type Reporter struct {
        // Interval between samples. Must be positive.
        PollPeriod time.Duration
 
+       // Temporary directory; will be monitored for available, used & total space.
+       TempDir string
+
        // Where to write statistics. Must not be nil.
        Logger *log.Logger
 
-       reportedStatFile map[string]string
-       lastNetSample    map[string]ioSample
-       lastDiskSample   map[string]ioSample
-       lastCPUSample    cpuSample
+       reportedStatFile    map[string]string
+       lastNetSample       map[string]ioSample
+       lastDiskIOSample    map[string]ioSample
+       lastCPUSample       cpuSample
+       lastDiskSpaceSample diskSpaceSample
 
        done    chan struct{} // closed when we should stop reporting
        flushed chan struct{} // closed when we have made our last report
@@ -216,14 +221,14 @@ func (r *Reporter) doBlkIOStats() {
                        continue
                }
                delta := ""
-               if prev, ok := r.lastDiskSample[dev]; ok {
+               if prev, ok := r.lastDiskIOSample[dev]; ok {
                        delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
                                sample.sampleTime.Sub(prev.sampleTime).Seconds(),
                                sample.txBytes-prev.txBytes,
                                sample.rxBytes-prev.rxBytes)
                }
                r.Logger.Printf("blkio:%s %d write %d read%s\n", dev, sample.txBytes, sample.rxBytes, delta)
-               r.lastDiskSample[dev] = sample
+               r.lastDiskIOSample[dev] = sample
        }
 }
 
@@ -302,6 +307,42 @@ func (r *Reporter) doNetworkStats() {
        }
 }
 
+type diskSpaceSample struct {
+       hasData    bool
+       sampleTime time.Time
+       total      uint64
+       used       uint64
+       available  uint64
+}
+
+func (r *Reporter) doDiskSpaceStats() {
+       s := syscall.Statfs_t{}
+       err := syscall.Statfs(r.TempDir, &s)
+       if err != nil {
+               return
+       }
+       bs := uint64(s.Bsize)
+       nextSample := diskSpaceSample{
+               hasData:    true,
+               sampleTime: time.Now(),
+               total:      s.Blocks * bs,
+               used:       (s.Blocks - s.Bfree) * bs,
+               available:  s.Bavail * bs,
+       }
+
+       var delta string
+       if r.lastDiskSpaceSample.hasData {
+               prev := r.lastDiskSpaceSample
+               interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
+               delta = fmt.Sprintf(" -- interval %.4f seconds %d used",
+                       interval,
+                       int64(nextSample.used-prev.used))
+       }
+       r.Logger.Printf("statfs %d available %d used %d total%s\n",
+               nextSample.available, nextSample.used, nextSample.total, delta)
+       r.lastDiskSpaceSample = nextSample
+}
+
 type cpuSample struct {
        hasData    bool // to distinguish the zero value from real data
        sampleTime time.Time
@@ -382,7 +423,15 @@ func (r *Reporter) run() {
        }
 
        r.lastNetSample = make(map[string]ioSample)
-       r.lastDiskSample = make(map[string]ioSample)
+       r.lastDiskIOSample = make(map[string]ioSample)
+
+       if len(r.TempDir) == 0 {
+               // Temporary dir not provided; try to get it from the environment.
+               r.TempDir = os.Getenv("TMPDIR")
+       }
+       if len(r.TempDir) > 0 {
+               r.Logger.Printf("notice: monitoring temp dir %s\n", r.TempDir)
+       }
 
        ticker := time.NewTicker(r.PollPeriod)
        for {
@@ -390,6 +439,7 @@ func (r *Reporter) run() {
                r.doCPUStats()
                r.doBlkIOStats()
                r.doNetworkStats()
+               r.doDiskSpaceStats()
                select {
                case <-r.done:
                        return
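
doDiskSpaceStats derives its numbers from statfs block counts: total = Blocks*Bsize, used = (Blocks-Bfree)*Bsize, and available = Bavail*Bsize, where Bavail counts only space available to unprivileged users, which is why used+available can come out less than total. A standalone sketch of the same calculation (Linux; monitors /tmp rather than a configured TempDir):

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        var s syscall.Statfs_t
        if err := syscall.Statfs("/tmp", &s); err != nil {
            panic(err)
        }
        bs := uint64(s.Bsize)
        total := s.Blocks * bs
        used := (s.Blocks - s.Bfree) * bs // includes root-reserved blocks
        available := s.Bavail * bs        // what ordinary processes can still write
        fmt.Printf("statfs %d available %d used %d total\n", available, used, total)
    }
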
index 19cf8fbabb537eb346102891438067c779cec46b..878a70901452b47e2710a52be85504179767ea38 100644 (file)
@@ -1,7 +1,7 @@
 Package: ArvadosR
 Type: Package
 Title: Arvados R SDK
-Version: 0.0.2
+Version: 0.0.5
 Authors@R: person("Fuad", "Muhic", role = c("aut", "cre"), email = "fmuhic@capeannenterprises.com")
 Maintainer: Ward Vandewege <wvandewege@veritasgenetics.com>
 Description: This is the Arvados R SDK
index 0ec2d115295749067ceb4ee105245aad73df149f..744cb3c296163906be8be5858e0713e8d43aa44e 100644 (file)
@@ -1,7 +1,3 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
 #' users.get
 #' 
 #' users.get is a method defined in Arvados class.
@@ -108,6 +104,19 @@ NULL
 #' @name users.update_uuid
 NULL
 
+#' users.merge
+#' 
+#' users.merge is a method defined in Arvados class.
+#' 
+#' @usage arv$users.merge(new_owner_uuid,
+#'     new_user_token, redirect_to_new_user = NULL)
+#' @param new_owner_uuid 
+#' @param new_user_token 
+#' @param redirect_to_new_user 
+#' @return User object.
+#' @name users.merge
+NULL
+
 #' users.list
 #' 
 #' users.list is a method defined in Arvados class.
@@ -390,55 +399,55 @@ NULL
 #' @name api_clients.list
 NULL
 
-#' authorized_keys.get
+#' container_requests.get
 #' 
-#' authorized_keys.get is a method defined in Arvados class.
+#' container_requests.get is a method defined in Arvados class.
 #' 
-#' @usage arv$authorized_keys.get(uuid)
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.get
+#' @usage arv$container_requests.get(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.get
 NULL
 
-#' authorized_keys.create
+#' container_requests.create
 #' 
-#' authorized_keys.create is a method defined in Arvados class.
+#' container_requests.create is a method defined in Arvados class.
 #' 
-#' @usage arv$authorized_keys.create(authorizedkey,
+#' @usage arv$container_requests.create(containerrequest,
 #'     ensure_unique_name = "false")
-#' @param authorizedKey AuthorizedKey object.
+#' @param containerRequest ContainerRequest object.
 #' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.create
+#' @return ContainerRequest object.
+#' @name container_requests.create
 NULL
 
-#' authorized_keys.update
+#' container_requests.update
 #' 
-#' authorized_keys.update is a method defined in Arvados class.
+#' container_requests.update is a method defined in Arvados class.
 #' 
-#' @usage arv$authorized_keys.update(authorizedkey,
+#' @usage arv$container_requests.update(containerrequest,
 #'     uuid)
-#' @param authorizedKey AuthorizedKey object.
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.update
+#' @param containerRequest ContainerRequest object.
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.update
 NULL
 
-#' authorized_keys.delete
+#' container_requests.delete
 #' 
-#' authorized_keys.delete is a method defined in Arvados class.
+#' container_requests.delete is a method defined in Arvados class.
 #' 
-#' @usage arv$authorized_keys.delete(uuid)
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.delete
+#' @usage arv$container_requests.delete(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.delete
 NULL
 
-#' authorized_keys.list
+#' container_requests.list
 #' 
-#' authorized_keys.list is a method defined in Arvados class.
+#' container_requests.list is a method defined in Arvados class.
 #' 
-#' @usage arv$authorized_keys.list(filters = NULL,
+#' @usage arv$container_requests.list(filters = NULL,
 #'     where = NULL, order = NULL, select = NULL,
 #'     distinct = NULL, limit = "100", offset = "0",
 #'     count = "exact")
@@ -450,59 +459,59 @@ NULL
 #' @param limit 
 #' @param offset 
 #' @param count 
-#' @return AuthorizedKeyList object.
-#' @name authorized_keys.list
+#' @return ContainerRequestList object.
+#' @name container_requests.list
 NULL
 
-#' container_requests.get
+#' authorized_keys.get
 #' 
-#' container_requests.get is a method defined in Arvados class.
+#' authorized_keys.get is a method defined in Arvados class.
 #' 
-#' @usage arv$container_requests.get(uuid)
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.get
+#' @usage arv$authorized_keys.get(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.get
 NULL
 
-#' container_requests.create
+#' authorized_keys.create
 #' 
-#' container_requests.create is a method defined in Arvados class.
+#' authorized_keys.create is a method defined in Arvados class.
 #' 
-#' @usage arv$container_requests.create(containerrequest,
+#' @usage arv$authorized_keys.create(authorizedkey,
 #'     ensure_unique_name = "false")
-#' @param containerRequest ContainerRequest object.
+#' @param authorizedKey AuthorizedKey object.
 #' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @return ContainerRequest object.
-#' @name container_requests.create
+#' @return AuthorizedKey object.
+#' @name authorized_keys.create
 NULL
 
-#' container_requests.update
+#' authorized_keys.update
 #' 
-#' container_requests.update is a method defined in Arvados class.
+#' authorized_keys.update is a method defined in Arvados class.
 #' 
-#' @usage arv$container_requests.update(containerrequest,
+#' @usage arv$authorized_keys.update(authorizedkey,
 #'     uuid)
-#' @param containerRequest ContainerRequest object.
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.update
+#' @param authorizedKey AuthorizedKey object.
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.update
 NULL
 
-#' container_requests.delete
+#' authorized_keys.delete
 #' 
-#' container_requests.delete is a method defined in Arvados class.
+#' authorized_keys.delete is a method defined in Arvados class.
 #' 
-#' @usage arv$container_requests.delete(uuid)
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.delete
+#' @usage arv$authorized_keys.delete(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.delete
 NULL
 
-#' container_requests.list
+#' authorized_keys.list
 #' 
-#' container_requests.list is a method defined in Arvados class.
+#' authorized_keys.list is a method defined in Arvados class.
 #' 
-#' @usage arv$container_requests.list(filters = NULL,
+#' @usage arv$authorized_keys.list(filters = NULL,
 #'     where = NULL, order = NULL, select = NULL,
 #'     distinct = NULL, limit = "100", offset = "0",
 #'     count = "exact")
@@ -514,8 +523,8 @@ NULL
 #' @param limit 
 #' @param offset 
 #' @param count 
-#' @return ContainerRequestList object.
-#' @name container_requests.list
+#' @return AuthorizedKeyList object.
+#' @name authorized_keys.list
 NULL
 
 #' collections.get
@@ -747,78 +756,6 @@ NULL
 #' @name job_tasks.list
 NULL
 
-#' links.get
-#' 
-#' links.get is a method defined in Arvados class.
-#' 
-#' @usage arv$links.get(uuid)
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.get
-NULL
-
-#' links.create
-#' 
-#' links.create is a method defined in Arvados class.
-#' 
-#' @usage arv$links.create(link, ensure_unique_name = "false")
-#' @param link Link object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @return Link object.
-#' @name links.create
-NULL
-
-#' links.update
-#' 
-#' links.update is a method defined in Arvados class.
-#' 
-#' @usage arv$links.update(link, uuid)
-#' @param link Link object.
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.update
-NULL
-
-#' links.delete
-#' 
-#' links.delete is a method defined in Arvados class.
-#' 
-#' @usage arv$links.delete(uuid)
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.delete
-NULL
-
-#' links.list
-#' 
-#' links.list is a method defined in Arvados class.
-#' 
-#' @usage arv$links.list(filters = NULL,
-#'     where = NULL, order = NULL, select = NULL,
-#'     distinct = NULL, limit = "100", offset = "0",
-#'     count = "exact")
-#' @param filters 
-#' @param where 
-#' @param order 
-#' @param select 
-#' @param distinct 
-#' @param limit 
-#' @param offset 
-#' @param count 
-#' @return LinkList object.
-#' @name links.list
-NULL
-
-#' links.get_permissions
-#' 
-#' links.get_permissions is a method defined in Arvados class.
-#' 
-#' @usage arv$links.get_permissions(uuid)
-#' @param uuid 
-#' @return Link object.
-#' @name links.get_permissions
-NULL
-
 #' jobs.get
 #' 
 #' jobs.get is a method defined in Arvados class.
@@ -1017,53 +954,201 @@ NULL
 #' @name keep_disks.list
 NULL
 
-#' keep_services.get
+#' nodes.get
 #' 
-#' keep_services.get is a method defined in Arvados class.
+#' nodes.get is a method defined in Arvados class.
 #' 
-#' @usage arv$keep_services.get(uuid)
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.get
+#' @usage arv$nodes.get(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.get
 NULL
 
-#' keep_services.create
+#' nodes.create
 #' 
-#' keep_services.create is a method defined in Arvados class.
+#' nodes.create is a method defined in Arvados class.
 #' 
-#' @usage arv$keep_services.create(keepservice,
-#'     ensure_unique_name = "false")
-#' @param keepService KeepService object.
+#' @usage arv$nodes.create(node, ensure_unique_name = "false",
+#'     assign_slot = NULL)
+#' @param node Node object.
 #' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @return KeepService object.
-#' @name keep_services.create
+#' @param assign_slot assign slot and hostname
+#' @return Node object.
+#' @name nodes.create
 NULL
 
-#' keep_services.update
+#' nodes.update
 #' 
-#' keep_services.update is a method defined in Arvados class.
+#' nodes.update is a method defined in Arvados class.
 #' 
-#' @usage arv$keep_services.update(keepservice,
-#'     uuid)
-#' @param keepService KeepService object.
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.update
+#' @usage arv$nodes.update(node, uuid, assign_slot = NULL)
+#' @param node Node object.
+#' @param uuid The UUID of the Node in question.
+#' @param assign_slot assign slot and hostname
+#' @return Node object.
+#' @name nodes.update
 NULL
 
-#' keep_services.delete
+#' nodes.delete
 #' 
-#' keep_services.delete is a method defined in Arvados class.
+#' nodes.delete is a method defined in Arvados class.
 #' 
-#' @usage arv$keep_services.delete(uuid)
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.delete
+#' @usage arv$nodes.delete(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.delete
 NULL
 
-#' keep_services.accessible
+#' nodes.ping
 #' 
-#' keep_services.accessible is a method defined in Arvados class.
+#' nodes.ping is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.ping(uuid, ping_secret)
+#' @param uuid 
+#' @param ping_secret 
+#' @return Node object.
+#' @name nodes.ping
+NULL
+
+#' nodes.list
+#' 
+#' nodes.list is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return NodeList object.
+#' @name nodes.list
+NULL
+
+#' links.get
+#' 
+#' links.get is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.get
+NULL
+
+#' links.create
+#' 
+#' links.create is a method defined in Arvados class.
+#' 
+#' @usage arv$links.create(link, ensure_unique_name = "false")
+#' @param link Link object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Link object.
+#' @name links.create
+NULL
+
+#' links.update
+#' 
+#' links.update is a method defined in Arvados class.
+#' 
+#' @usage arv$links.update(link, uuid)
+#' @param link Link object.
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.update
+NULL
+
+#' links.delete
+#' 
+#' links.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$links.delete(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.delete
+NULL
+
+#' links.list
+#' 
+#' links.list is a method defined in Arvados class.
+#' 
+#' @usage arv$links.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return LinkList object.
+#' @name links.list
+NULL
+
+#' links.get_permissions
+#' 
+#' links.get_permissions is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get_permissions(uuid)
+#' @param uuid 
+#' @return Link object.
+#' @name links.get_permissions
+NULL
+
+#' keep_services.get
+#' 
+#' keep_services.get is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.get(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.get
+NULL
+
+#' keep_services.create
+#' 
+#' keep_services.create is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.create(keepservice,
+#'     ensure_unique_name = "false")
+#' @param keepService KeepService object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return KeepService object.
+#' @name keep_services.create
+NULL
+
+#' keep_services.update
+#' 
+#' keep_services.update is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.update(keepservice,
+#'     uuid)
+#' @param keepService KeepService object.
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.update
+NULL
+
+#' keep_services.delete
+#' 
+#' keep_services.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.delete(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.delete
+NULL
+
+#' keep_services.accessible
+#' 
+#' keep_services.accessible is a method defined in Arvados class.
 #' 
 #' @usage arv$keep_services.accessible(NULL)
 #' @return KeepService object.
@@ -1228,82 +1313,6 @@ NULL
 #' @name pipeline_instances.list
 NULL
 
-#' nodes.get
-#' 
-#' nodes.get is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.get(uuid)
-#' @param uuid The UUID of the Node in question.
-#' @return Node object.
-#' @name nodes.get
-NULL
-
-#' nodes.create
-#' 
-#' nodes.create is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.create(node, ensure_unique_name = "false",
-#'     assign_slot = NULL)
-#' @param node Node object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param assign_slot assign slot and hostname
-#' @return Node object.
-#' @name nodes.create
-NULL
-
-#' nodes.update
-#' 
-#' nodes.update is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.update(node, uuid, assign_slot = NULL)
-#' @param node Node object.
-#' @param uuid The UUID of the Node in question.
-#' @param assign_slot assign slot and hostname
-#' @return Node object.
-#' @name nodes.update
-NULL
-
-#' nodes.delete
-#' 
-#' nodes.delete is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.delete(uuid)
-#' @param uuid The UUID of the Node in question.
-#' @return Node object.
-#' @name nodes.delete
-NULL
-
-#' nodes.ping
-#' 
-#' nodes.ping is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.ping(uuid, ping_secret)
-#' @param uuid 
-#' @param ping_secret 
-#' @return Node object.
-#' @name nodes.ping
-NULL
-
-#' nodes.list
-#' 
-#' nodes.list is a method defined in Arvados class.
-#' 
-#' @usage arv$nodes.list(filters = NULL,
-#'     where = NULL, order = NULL, select = NULL,
-#'     distinct = NULL, limit = "100", offset = "0",
-#'     count = "exact")
-#' @param filters 
-#' @param where 
-#' @param order 
-#' @param select 
-#' @param distinct 
-#' @param limit 
-#' @param offset 
-#' @param count 
-#' @return NodeList object.
-#' @name nodes.list
-NULL
-
 #' repositories.get
 #' 
 #' repositories.get is a method defined in Arvados class.
@@ -2130,6 +2139,7 @@ NULL
 #'     \item{}{\code{\link{users.delete}}}
 #'     \item{}{\code{\link{users.get}}}
 #'     \item{}{\code{\link{users.list}}}
+#'     \item{}{\code{\link{users.merge}}}
 #'     \item{}{\code{\link{users.setup}}}
 #'     \item{}{\code{\link{users.system}}}
 #'     \item{}{\code{\link{users.unsetup}}}
@@ -2444,6 +2454,28 @@ Arvados <- R6::R6Class(
                        resource
                },
 
+               users.merge = function(new_owner_uuid, new_user_token,
+                       redirect_to_new_user = NULL)
+               {
+                       endPoint <- stringr::str_interp("users/merge")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(new_owner_uuid = new_owner_uuid,
+                                                         new_user_token = new_user_token, redirect_to_new_user = redirect_to_new_user)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
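
The generated users.merge wrapper above boils down to a POST to the users/merge endpoint with the three parameters passed as query arguments and the usual OAuth2 token header. For reference, the same call expressed directly over HTTP (a sketch in Go; the host, URL prefix, UUIDs, and tokens are placeholders, not values from this commit):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "net/url"
    )

    func main() {
        params := url.Values{
            "new_owner_uuid":       {"zzzzz-tpzed-xxxxxxxxxxxxxxx"}, // placeholder UUID
            "new_user_token":       {"old-user-api-token"},          // placeholder token
            "redirect_to_new_user": {"true"},
        }
        req, err := http.NewRequest("POST",
            "https://arvados.example.com/arvados/v1/users/merge?"+params.Encode(), nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "OAuth2 new-user-api-token") // placeholder
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(resp.Status, string(body))
    }
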
                users.list = function(filters = NULL, where = NULL,
                        order = NULL, select = NULL, distinct = NULL,
                        limit = "100", offset = "0", count = "exact")
@@ -2949,9 +2981,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               authorized_keys.get = function(uuid)
+               container_requests.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -2969,17 +3001,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               authorized_keys.create = function(authorizedkey,
+               container_requests.create = function(containerrequest,
                        ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("authorized_keys")
+                       endPoint <- stringr::str_interp("container_requests")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(authorizedkey) > 0)
-                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -2994,16 +3026,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               authorized_keys.update = function(authorizedkey, uuid)
+               container_requests.update = function(containerrequest, uuid)
                {
-                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       if(length(authorizedkey) > 0)
-                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3018,9 +3050,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               authorized_keys.delete = function(uuid)
+               container_requests.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3038,12 +3070,12 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               authorized_keys.list = function(filters = NULL,
+               container_requests.list = function(filters = NULL,
                        where = NULL, order = NULL, select = NULL,
                        distinct = NULL, limit = "100", offset = "0",
                        count = "exact")
                {
-                       endPoint <- stringr::str_interp("authorized_keys")
+                       endPoint <- stringr::str_interp("container_requests")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3063,9 +3095,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               container_requests.get = function(uuid)
+               authorized_keys.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3083,17 +3115,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               container_requests.create = function(containerrequest,
+               authorized_keys.create = function(authorizedkey,
                        ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("container_requests")
+                       endPoint <- stringr::str_interp("authorized_keys")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(containerrequest) > 0)
-                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3108,16 +3140,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               container_requests.update = function(containerrequest, uuid)
+               authorized_keys.update = function(authorizedkey, uuid)
                {
-                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       if(length(containerrequest) > 0)
-                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3132,9 +3164,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               container_requests.delete = function(uuid)
+               authorized_keys.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3152,12 +3184,12 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               container_requests.list = function(filters = NULL,
+               authorized_keys.list = function(filters = NULL,
                        where = NULL, order = NULL, select = NULL,
                        distinct = NULL, limit = "100", offset = "0",
                        count = "exact")
                {
-                       endPoint <- stringr::str_interp("container_requests")
+                       endPoint <- stringr::str_interp("authorized_keys")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3596,9 +3628,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.get = function(uuid)
+               jobs.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("links/${uuid}")
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3616,16 +3648,21 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.create = function(link, ensure_unique_name = "false")
+               jobs.create = function(job, ensure_unique_name = "false",
+                       find_or_create = "false", filters = NULL,
+                       minimum_script_version = NULL, exclude_script_versions = NULL)
                {
-                       endPoint <- stringr::str_interp("links")
+                       endPoint <- stringr::str_interp("jobs")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         find_or_create = find_or_create, filters = filters,
+                                                         minimum_script_version = minimum_script_version,
+                                                         exclude_script_versions = exclude_script_versions)
                        
-                       if(length(link) > 0)
-                               body <- jsonlite::toJSON(list(link = link), 
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3640,16 +3677,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.update = function(link, uuid)
+               jobs.update = function(job, uuid)
                {
-                       endPoint <- stringr::str_interp("links/${uuid}")
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       if(length(link) > 0)
-                               body <- jsonlite::toJSON(list(link = link), 
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3664,9 +3701,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.delete = function(uuid)
+               jobs.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("links/${uuid}")
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3684,11 +3721,11 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.list = function(filters = NULL, where = NULL,
+               jobs.queue = function(filters = NULL, where = NULL,
                        order = NULL, select = NULL, distinct = NULL,
                        limit = "100", offset = "0", count = "exact")
                {
-                       endPoint <- stringr::str_interp("links")
+                       endPoint <- stringr::str_interp("jobs/queue")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3708,9 +3745,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               links.get_permissions = function(uuid)
+               jobs.queue_size = function()
                {
-                       endPoint <- stringr::str_interp("permissions/${uuid}")
+                       endPoint <- stringr::str_interp("jobs/queue_size")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3728,9 +3765,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.get = function(uuid)
+               jobs.cancel = function(uuid)
                {
-                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       endPoint <- stringr::str_interp("jobs/${uuid}/cancel")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3738,7 +3775,7 @@ Arvados <- R6::R6Class(
                        
                        body <- NULL
                        
-                       response <- private$REST$http$exec("GET", url, headers, body,
+                       response <- private$REST$http$exec("POST", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -3748,24 +3785,15 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.create = function(job, ensure_unique_name = "false",
-                       find_or_create = "false", filters = NULL,
-                       minimum_script_version = NULL, exclude_script_versions = NULL)
+               jobs.lock = function(uuid)
                {
-                       endPoint <- stringr::str_interp("jobs")
+                       endPoint <- stringr::str_interp("jobs/${uuid}/lock")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
-                                                         find_or_create = find_or_create, filters = filters,
-                                                         minimum_script_version = minimum_script_version,
-                                                         exclude_script_versions = exclude_script_versions)
+                       queryArgs <- NULL
                        
-                       if(length(job) > 0)
-                               body <- jsonlite::toJSON(list(job = job), 
-                                                        auto_unbox = TRUE)
-                       else
-                               body <- NULL
+                       body <- NULL
                        
                        response <- private$REST$http$exec("POST", url, headers, body,
                                                           queryArgs, private$numRetries)
@@ -3777,21 +3805,21 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.update = function(job, uuid)
+               jobs.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
                {
-                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       endPoint <- stringr::str_interp("jobs")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
                        
-                       if(length(job) > 0)
-                               body <- jsonlite::toJSON(list(job = job), 
-                                                        auto_unbox = TRUE)
-                       else
-                               body <- NULL
+                       body <- NULL
                        
-                       response <- private$REST$http$exec("PUT", url, headers, body,
+                       response <- private$REST$http$exec("GET", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -3801,9 +3829,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.delete = function(uuid)
+               keep_disks.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3811,7 +3839,7 @@ Arvados <- R6::R6Class(
                        
                        body <- NULL
                        
-                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                       response <- private$REST$http$exec("GET", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -3821,21 +3849,21 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.queue = function(filters = NULL, where = NULL,
-                       order = NULL, select = NULL, distinct = NULL,
-                       limit = "100", offset = "0", count = "exact")
+               keep_disks.create = function(keepdisk, ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("jobs/queue")
+                       endPoint <- stringr::str_interp("keep_disks")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(filters = filters, where = where,
-                                                         order = order, select = select, distinct = distinct,
-                                                         limit = limit, offset = offset, count = count)
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       body <- NULL
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
                        
-                       response <- private$REST$http$exec("GET", url, headers, body,
+                       response <- private$REST$http$exec("POST", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
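All of the create methods share the body-building pattern above: the resource is wrapped under its kind name and serialized with jsonlite::toJSON(auto_unbox = TRUE). A sketch with an illustrative field name (registering keep disks normally requires admin privileges):

    newDisk <- arv$keep_disks.create(keepdisk = list(node_uuid = "zzzzz-7ekkf-0123456789abcde"))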
@@ -3845,17 +3873,21 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.queue_size = function()
+               keep_disks.update = function(keepdisk, uuid)
                {
-                       endPoint <- stringr::str_interp("jobs/queue_size")
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       body <- NULL
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
                        
-                       response <- private$REST$http$exec("GET", url, headers, body,
+                       response <- private$REST$http$exec("PUT", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -3865,9 +3897,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.cancel = function(uuid)
+               keep_disks.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("jobs/${uuid}/cancel")
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3875,7 +3907,7 @@ Arvados <- R6::R6Class(
                        
                        body <- NULL
                        
-                       response <- private$REST$http$exec("POST", url, headers, body,
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -3885,13 +3917,18 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.lock = function(uuid)
+               keep_disks.ping = function(uuid = NULL, ping_secret,
+                       node_uuid = NULL, filesystem_uuid = NULL,
+                       service_host = NULL, service_port, service_ssl_flag)
                {
-                       endPoint <- stringr::str_interp("jobs/${uuid}/lock")
+                       endPoint <- stringr::str_interp("keep_disks/ping")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(uuid = uuid, ping_secret = ping_secret,
+                                                         node_uuid = node_uuid, filesystem_uuid = filesystem_uuid,
+                                                         service_host = service_host, service_port = service_port,
+                                                         service_ssl_flag = service_ssl_flag)
                        
                        body <- NULL
                        
@@ -3905,11 +3942,12 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               jobs.list = function(filters = NULL, where = NULL,
-                       order = NULL, select = NULL, distinct = NULL,
-                       limit = "100", offset = "0", count = "exact")
+               keep_disks.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
                {
-                       endPoint <- stringr::str_interp("jobs")
+                       endPoint <- stringr::str_interp("keep_disks")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3929,9 +3967,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.get = function(uuid)
+               nodes.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -3949,16 +3987,18 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.create = function(keepdisk, ensure_unique_name = "false")
+               nodes.create = function(node, ensure_unique_name = "false",
+                       assign_slot = NULL)
                {
-                       endPoint <- stringr::str_interp("keep_disks")
+                       endPoint <- stringr::str_interp("nodes")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         assign_slot = assign_slot)
                        
-                       if(length(keepdisk) > 0)
-                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
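nodes.create now forwards assign_slot alongside ensure_unique_name as query arguments; a sketch in which the hostname field and the flag values are assumptions:

    newNode <- arv$nodes.create(node = list(hostname = "compute0"),
                                ensure_unique_name = "true", assign_slot = "true")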
@@ -3973,16 +4013,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.update = function(keepdisk, uuid)
+               nodes.update = function(node, uuid, assign_slot = NULL)
                {
-                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(assign_slot = assign_slot)
                        
-                       if(length(keepdisk) > 0)
-                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -3997,9 +4037,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.delete = function(uuid)
+               nodes.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4017,18 +4057,13 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.ping = function(uuid = NULL, ping_secret,
-                       node_uuid = NULL, filesystem_uuid = NULL,
-                       service_host = NULL, service_port, service_ssl_flag)
+               nodes.ping = function(uuid, ping_secret)
                {
-                       endPoint <- stringr::str_interp("keep_disks/ping")
+                       endPoint <- stringr::str_interp("nodes/${uuid}/ping")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(uuid = uuid, ping_secret = ping_secret,
-                                                         node_uuid = node_uuid, filesystem_uuid = filesystem_uuid,
-                                                         service_host = service_host, service_port = service_port,
-                                                         service_ssl_flag = service_ssl_flag)
+                       queryArgs <- list(ping_secret = ping_secret)
                        
                        body <- NULL
                        
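nodes.ping keeps the node UUID in the URL path and moves only ping_secret into the query string; a sketch with placeholder values:

    arv$nodes.ping("zzzzz-7ekkf-0123456789abcde", ping_secret = "example-secret")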
@@ -4042,12 +4077,11 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_disks.list = function(filters = NULL,
-                       where = NULL, order = NULL, select = NULL,
-                       distinct = NULL, limit = "100", offset = "0",
-                       count = "exact")
+               nodes.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
                {
-                       endPoint <- stringr::str_interp("keep_disks")
+                       endPoint <- stringr::str_interp("nodes")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4067,9 +4101,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.get = function(uuid)
+               links.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       endPoint <- stringr::str_interp("links/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4087,17 +4121,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.create = function(keepservice,
-                       ensure_unique_name = "false")
+               links.create = function(link, ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("keep_services")
+                       endPoint <- stringr::str_interp("links")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(keepservice) > 0)
-                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4112,16 +4145,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.update = function(keepservice, uuid)
+               links.update = function(link, uuid)
                {
-                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       endPoint <- stringr::str_interp("links/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       if(length(keepservice) > 0)
-                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4136,9 +4169,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.delete = function(uuid)
+               links.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       endPoint <- stringr::str_interp("links/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4156,13 +4189,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.accessible = function()
+               links.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
                {
-                       endPoint <- stringr::str_interp("keep_services/accessible")
+                       endPoint <- stringr::str_interp("links")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
                        
                        body <- NULL
                        
@@ -4176,18 +4213,13 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               keep_services.list = function(filters = NULL,
-                       where = NULL, order = NULL, select = NULL,
-                       distinct = NULL, limit = "100", offset = "0",
-                       count = "exact")
+               links.get_permissions = function(uuid)
                {
-                       endPoint <- stringr::str_interp("keep_services")
+                       endPoint <- stringr::str_interp("permissions/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(filters = filters, where = where,
-                                                         order = order, select = select, distinct = distinct,
-                                                         limit = limit, offset = offset, count = count)
+                       queryArgs <- NULL
                        
                        body <- NULL
                        
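links.get_permissions is unusual in that it queries the permissions/${uuid} endpoint rather than links/${uuid}; a sketch with a placeholder UUID:

    perms <- arv$links.get_permissions("zzzzz-j7d0g-0123456789abcde")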
@@ -4201,9 +4233,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_templates.get = function(uuid)
+               keep_services.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4221,17 +4253,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_templates.create = function(pipelinetemplate,
+               keep_services.create = function(keepservice,
                        ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("pipeline_templates")
+                       endPoint <- stringr::str_interp("keep_services")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(pipelinetemplate) > 0)
-                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4246,16 +4278,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_templates.update = function(pipelinetemplate, uuid)
+               keep_services.update = function(keepservice, uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       if(length(pipelinetemplate) > 0)
-                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4270,9 +4302,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_templates.delete = function(uuid)
+               keep_services.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4290,18 +4322,13 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_templates.list = function(filters = NULL,
-                       where = NULL, order = NULL, select = NULL,
-                       distinct = NULL, limit = "100", offset = "0",
-                       count = "exact")
+               keep_services.accessible = function()
                {
-                       endPoint <- stringr::str_interp("pipeline_templates")
+                       endPoint <- stringr::str_interp("keep_services/accessible")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(filters = filters, where = where,
-                                                         order = order, select = select, distinct = distinct,
-                                                         limit = limit, offset = offset, count = count)
+                       queryArgs <- NULL
                        
                        body <- NULL
                        
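keep_services.accessible takes no arguments and returns the Keep services reachable by the calling client:

    reachable <- arv$keep_services.accessible()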
@@ -4315,13 +4342,18 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.get = function(uuid)
+               keep_services.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
                {
-                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       endPoint <- stringr::str_interp("keep_services")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
                        
                        body <- NULL
                        
@@ -4335,22 +4367,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.create = function(pipelineinstance,
-                       ensure_unique_name = "false")
+               pipeline_templates.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_instances")
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       queryArgs <- NULL
                        
-                       if(length(pipelineinstance) > 0)
-                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
-                                                        auto_unbox = TRUE)
-                       else
-                               body <- NULL
+                       body <- NULL
                        
-                       response <- private$REST$http$exec("POST", url, headers, body,
+                       response <- private$REST$http$exec("GET", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -4360,21 +4387,22 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.update = function(pipelineinstance, uuid)
+               pipeline_templates.create = function(pipelinetemplate,
+                       ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       endPoint <- stringr::str_interp("pipeline_templates")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- NULL
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(pipelineinstance) > 0)
-                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
                        
-                       response <- private$REST$http$exec("PUT", url, headers, body,
+                       response <- private$REST$http$exec("POST", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -4384,17 +4412,21 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.delete = function(uuid)
+               pipeline_templates.update = function(pipelinetemplate, uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
                        queryArgs <- NULL
                        
-                       body <- NULL
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
                        
-                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                       response <- private$REST$http$exec("PUT", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -4404,9 +4436,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.cancel = function(uuid)
+               pipeline_templates.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}/cancel")
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4414,7 +4446,7 @@ Arvados <- R6::R6Class(
                        
                        body <- NULL
                        
-                       response <- private$REST$http$exec("POST", url, headers, body,
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
                                                           queryArgs, private$numRetries)
                        resource <- private$REST$httpParser$parseJSONResponse(response)
                        
@@ -4424,12 +4456,12 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               pipeline_instances.list = function(filters = NULL,
+               pipeline_templates.list = function(filters = NULL,
                        where = NULL, order = NULL, select = NULL,
                        distinct = NULL, limit = "100", offset = "0",
                        count = "exact")
                {
-                       endPoint <- stringr::str_interp("pipeline_instances")
+                       endPoint <- stringr::str_interp("pipeline_templates")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4449,9 +4481,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.get = function(uuid)
+               pipeline_instances.get = function(uuid)
                {
-                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4469,18 +4501,17 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.create = function(node, ensure_unique_name = "false",
-                       assign_slot = NULL)
+               pipeline_instances.create = function(pipelineinstance,
+                       ensure_unique_name = "false")
                {
-                       endPoint <- stringr::str_interp("nodes")
+                       endPoint <- stringr::str_interp("pipeline_instances")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
-                                                         assign_slot = assign_slot)
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
                        
-                       if(length(node) > 0)
-                               body <- jsonlite::toJSON(list(node = node), 
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4495,16 +4526,16 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.update = function(node, uuid, assign_slot = NULL)
+               pipeline_instances.update = function(pipelineinstance, uuid)
                {
-                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(assign_slot = assign_slot)
+                       queryArgs <- NULL
                        
-                       if(length(node) > 0)
-                               body <- jsonlite::toJSON(list(node = node), 
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
                                                         auto_unbox = TRUE)
                        else
                                body <- NULL
@@ -4519,9 +4550,9 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.delete = function(uuid)
+               pipeline_instances.delete = function(uuid)
                {
-                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
@@ -4539,13 +4570,13 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.ping = function(uuid, ping_secret)
+               pipeline_instances.cancel = function(uuid)
                {
-                       endPoint <- stringr::str_interp("nodes/${uuid}/ping")
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}/cancel")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
-                       queryArgs <- list(ping_secret = ping_secret)
+                       queryArgs <- NULL
                        
                        body <- NULL
                        
@@ -4559,11 +4590,12 @@ Arvados <- R6::R6Class(
                        resource
                },
 
-               nodes.list = function(filters = NULL, where = NULL,
-                       order = NULL, select = NULL, distinct = NULL,
-                       limit = "100", offset = "0", count = "exact")
+               pipeline_instances.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
                {
-                       endPoint <- stringr::str_interp("nodes")
+                       endPoint <- stringr::str_interp("pipeline_instances")
                        url <- paste0(private$host, endPoint)
                        headers <- list(Authorization = paste("OAuth2", private$token), 
                                        "Content-Type" = "application/json")
index 8f737831c4634cc09a3121a86e04dcbf0361946b..70bb4450eccca6efd453002dc7a0962c904fb0d0 100644 (file)
@@ -5,9 +5,9 @@
 source("./R/util.R")
 
 #' ArvadosFile
-#' 
+#'
 #' ArvadosFile class represents a file inside Arvados collection.
-#' 
+#'
 #' @section Usage:
 #' \preformatted{file = ArvadosFile$new(name)}
 #'
@@ -15,7 +15,7 @@ source("./R/util.R")
 #' \describe{
 #'   \item{name}{Name of the file.}
 #' }
-#' 
+#'
 #' @section Methods:
 #' \describe{
 #'   \item{getName()}{Returns name of the file.}
@@ -26,7 +26,8 @@ source("./R/util.R")
 #'   \item{flush()}{Write connections content to a file (override current content of the file).}
 #'   \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
 #'   \item{getSizeInBytes()}{Returns file size in bytes.}
-#'   \item{move(newLocation)}{Moves file to a new location inside collection.}
+#'   \item{move(destination)}{Moves the file to a new location inside the collection.}
+#'   \item{copy(destination)}{Copies the file to a new location inside the collection.}
 #' }
 #'
 #' @name ArvadosFile
@@ -37,7 +38,7 @@ source("./R/util.R")
 #' myFile$write("This is new file content")
 #' fileContent <- myFile$read()
 #' fileContent <- myFile$read("text")
-#' fileContent <- myFile$read("raw", offset = 8, length = 4) 
+#' fileContent <- myFile$read("raw", offset = 8, length = 4)
 #'
 #' #Write a table:
 #' arvConnection <- myFile$connection("w")
@@ -49,6 +50,7 @@ source("./R/util.R")
 #' mytable <- read.table(arvConnection)
 #'
 #' myFile$move("newFolder/myFile")
+#' myFile$copy("newFolder/myFile")
 #' }
 NULL
 
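As the implementation further down shows, move() now returns the file itself while copy() returns the newly created ArvadosFile, so both results are immediately usable; a sketch:

    movedFile  <- myFile$move("newFolder/myFile")   # returns the ArvadosFile itself
    copiedFile <- myFile$copy("backup/myFile")      # returns the new ArvadosFile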
@@ -83,7 +85,6 @@ ArvadosFile <- R6::R6Class(
 
             fileSize <- REST$getResourceSize(self$getRelativePath(),
                                              private$collection$uuid)
-
             fileSize
         },
 
@@ -99,7 +100,7 @@ ArvadosFile <- R6::R6Class(
 
         getCollection = function() private$collection,
 
-        setCollection = function(collection)
+        setCollection = function(collection, setRecursively = TRUE)
         {
             private$collection <- collection
         },
@@ -141,14 +142,14 @@ ArvadosFile <- R6::R6Class(
 
         connection = function(rw)
         {
-            if (rw == "r" || rw == "rb") 
+            if (rw == "r" || rw == "rb")
             {
                 REST <- private$collection$getRESTService()
-                return(REST$getConnection(private$collection$uuid,
-                                          self$getRelativePath(),
+                return(REST$getConnection(self$getRelativePath(),
+                                          private$collection$uuid,
                                           rw))
             }
-            else if (rw == "w") 
+            else if (rw == "w")
             {
                 private$buffer <- textConnection(NULL, "w")
 
@@ -156,7 +157,7 @@ ArvadosFile <- R6::R6Class(
             }
         },
 
-        flush = function() 
+        flush = function()
         {
             v <- textConnectionValue(private$buffer)
             close(private$buffer)
@@ -176,20 +177,18 @@ ArvadosFile <- R6::R6Class(
             writeResult
         },
 
-        move = function(newLocation)
+        move = function(destination)
         {
             if(is.null(private$collection))
-                stop("ArvadosFile doesn't belong to any collection")
+                stop("ArvadosFile doesn't belong to any collection.")
 
-            newLocation <- trimFromEnd(newLocation, "/")
-            nameAndPath <- splitToPathAndName(newLocation)
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
 
             newParent <- private$collection$get(nameAndPath$path)
 
             if(is.null(newParent))
-            {
-                stop("Unable to get destination subcollection")
-            }
+                stop("Unable to get destination subcollection.")
 
             childWithSameName <- newParent$get(nameAndPath$name)
 
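move() leans on two helpers from R/util.R that this hunk only references; their behavior, as assumed here for orientation:

    # Assumed helper behavior (defined in R/util.R, not shown in this diff):
    # trimFromEnd("a/b/", "/")         -> "a/b"
    # splitToPathAndName("a/b/c.txt")  -> list(path = "a/b", name = "c.txt")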
@@ -202,11 +201,50 @@ ArvadosFile <- R6::R6Class(
                       private$collection$uuid)
 
             private$dettachFromCurrentParent()
-            private$attachToNewParent(newParent)
+            private$attachToNewParent(self, newParent)
 
+            private$parent <- newParent
             private$name <- nameAndPath$name
 
-            "Content moved successfully."
+            self
+        },
+
+        copy = function(destination)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$copy(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            newFile <- self$duplicate(nameAndPath$name)
+            newFile$setCollection(self$getCollection())
+            private$attachToNewParent(newFile, newParent)
+            newFile$setParent(newParent)
+
+            newFile
+        },
+
+        duplicate = function(newName = NULL)
+        {
+            name <- if(!is.null(newName)) newName else private$name
+            newFile <- ArvadosFile$new(name)
+            newFile
         }
     ),
 
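Because copy() attaches the duplicate to its new parent before returning it, the result can be used right away; a sketch:

    copied <- myFile$copy("backup/myFile")
    copied$getRelativePath()   # e.g. "backup/myFile"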
@@ -218,30 +256,29 @@ ArvadosFile <- R6::R6Class(
         collection = NULL,
         buffer     = NULL,
 
-        attachToNewParent = function(newParent)
+        attachToNewParent = function(content, newParent)
         {
-            #Note: We temporary set parents collection to NULL. This will ensure that
-            #      add method doesn't post file on REST.
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the add method doesn't post this file to REST.
+            # We also need to set the content's collection to NULL, because the
+            # add method throws an exception if we try to add content that
+            # already belongs to a collection.
             parentsCollection <- newParent$getCollection()
+            content$setCollection(NULL, setRecursively = FALSE)
             newParent$setCollection(NULL, setRecursively = FALSE)
-
-            newParent$add(self)
-
+            newParent$add(content)
+            content$setCollection(parentsCollection, setRecursively = FALSE)
             newParent$setCollection(parentsCollection, setRecursively = FALSE)
-
-            private$parent <- newParent
         },
 
         dettachFromCurrentParent = function()
         {
-            #Note: We temporary set parents collection to NULL. This will ensure that
-            #      remove method doesn't remove this subcollection from REST.
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the remove method doesn't remove this file from REST.
             parent <- private$parent
             parentsCollection <- parent$getCollection()
             parent$setCollection(NULL, setRecursively = FALSE)
-
             parent$remove(private$name)
-
             parent$setCollection(parentsCollection, setRecursively = FALSE)
         }
     ),
@@ -255,7 +292,7 @@ ArvadosFile <- R6::R6Class(
 #'
 #' @param x Instance of ArvadosFile class
 #' @param ... Optional arguments.
-#' @export 
+#' @export
 print.ArvadosFile = function(x, ...)
 {
     collection   <- NULL
@@ -267,8 +304,8 @@ print.ArvadosFile = function(x, ...)
         relativePath <- paste0("/", relativePath)
     }
 
-    cat(paste0("Type:          ", "\"", "ArvadosFile",         "\""), sep = "\n")
-    cat(paste0("Name:          ", "\"", x$getName(),           "\""), sep = "\n")
-    cat(paste0("Relative path: ", "\"", relativePath,          "\""), sep = "\n")
-    cat(paste0("Collection:    ", "\"", collection,            "\""), sep = "\n")
+    cat(paste0("Type:          ", "\"", "ArvadosFile", "\""), sep = "\n")
+    cat(paste0("Name:          ", "\"", x$getName(),   "\""), sep = "\n")
+    cat(paste0("Relative path: ", "\"", relativePath,  "\""), sep = "\n")
+    cat(paste0("Collection:    ", "\"", collection,    "\""), sep = "\n")
 }
index e23da138329786cba49e3a8001479461dd30be77..8869d7be67846b449200fe2c675936dd1c4133db 100644 (file)
@@ -8,9 +8,9 @@ source("./R/RESTService.R")
 source("./R/util.R")
 
 #' Collection
-#' 
+#'
 #' Collection class provides interface for working with Arvados collections.
-#' 
+#'
 #' @section Usage:
 #' \preformatted{collection = Collection$new(arv, uuid)}
 #'
@@ -19,13 +19,14 @@ source("./R/util.R")
 #'   \item{arv}{Arvados object.}
 #'   \item{uuid}{UUID of a collection.}
 #' }
-#' 
+#'
 #' @section Methods:
 #' \describe{
 #'   \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
-#'   \item{create(fileNames, relativePath = "")}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
+#'   \item{create(files)}{Creates one or more ArvadosFiles from the given relative paths and adds them to the collection.}
 #'   \item{remove(fileNames)}{Remove one or more files from the collection.}
-#'   \item{move(content, newLocation)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+#'   \item{move(content, destination)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+#'   \item{copy(content, destination)}{Copies ArvadosFile or Subcollection to another location in the collection.}
 #'   \item{getFileListing()}{Returns collections file content as character vector.}
 #'   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
 #' }
@@ -36,9 +37,6 @@ source("./R/util.R")
 #' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
 #' collection <- Collection$new(arv, "uuid")
 #'
-#' newFile <- ArvadosFile$new("myFile")
-#' collection$add(newFile, "myFolder")
-#'
 #' createdFiles <- collection$create(c("cpp/src/main.cpp", "cpp/src/lib.dll"))
 #'
 #' collection$remove("location/to/my/file.cpp")
@@ -58,23 +56,17 @@ Collection <- R6::R6Class(
     public = list(
 
                uuid = NULL,
-        # api  = NULL,
 
-               initialize = function(api, uuid) 
+               initialize = function(api, uuid)
         {
-            # self$api <- api
             private$REST <- api$getRESTService()
-
             self$uuid <- uuid
-
-            private$fileContent <- private$REST$getCollectionContent(uuid)
-            private$tree <- CollectionTree$new(private$fileContent, self)
         },
 
         add = function(content, relativePath = "")
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
             if(relativePath == ""  ||
                relativePath == "." ||
@@ -94,6 +86,9 @@ Collection <- R6::R6Class(
             if("ArvadosFile"   %in% class(content) ||
                "Subcollection" %in% class(content))
             {
+                if(!is.null(content$getCollection()))
+                    stop("Content already belongs to a collection.")
+
                 if(content$getName() == "")
                     stop("Content has invalid name.")
 
@@ -108,50 +103,32 @@ Collection <- R6::R6Class(
             }
         },
 
-        create = function(fileNames, relativePath = "")
+        create = function(files)
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
-            if(relativePath == ""  ||
-               relativePath == "." ||
-               relativePath == "./")
+            if(is.character(files))
             {
-                subcollection <- private$tree$getTree()
-            }
-            else
-            {
-                relativePath  <- trimFromEnd(relativePath, "/") 
-                subcollection <- self$get(relativePath)
-            }
-
-            if(is.null(subcollection))
-                stop(paste("Subcollection", relativePath, "doesn't exist."))
-
-            if(is.character(fileNames))
-            {
-                arvadosFiles <- NULL
-                sapply(fileNames, function(fileName)
+                sapply(files, function(file)
                 {
-                    childWithSameName <- subcollection$get(fileName)
+                    childWithSameName <- self$get(file)
                     if(!is.null(childWithSameName))
                         stop("Destination already contains file with same name.")
 
-                    newFile <- ArvadosFile$new(fileName)
-                    subcollection$add(newFile)
+                    newTreeBranch <- private$tree$createBranch(file)
+                    private$tree$addBranch(private$tree$getTree(), newTreeBranch)
 
-                    arvadosFiles <<- c(arvadosFiles, newFile)
+                    private$REST$create(file, self$uuid)
+                    newTreeBranch$setCollection(self)
                 })
 
-                if(length(arvadosFiles) == 1)
-                    return(arvadosFiles[[1]])
-                else
-                    return(arvadosFiles)
+                "Created"
             }
-            else 
+            else
             {
                 stop(paste0("Expected character vector, got ",
-                            paste0("(", paste0(class(fileNames), collapse = ", "), ")"),
+                            paste0("(", paste0(class(files), collapse = ", "), ")"),
                             "."))
             }
         },
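A sketch of the reworked create(): it takes a character vector of relative paths (directories included in the path itself), registers each file via REST, and returns the string "Created"; any other input type stops with an error:

    collection$create(c("cpp/src/main.cpp", "cpp/src/lib.dll"))   # -> "Created"
    # collection$create(42)   # error: Expected character vector, got (numeric).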
@@ -159,7 +136,7 @@ Collection <- R6::R6Class(
         remove = function(paths)
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
             if(is.character(paths))
             {
@@ -181,7 +158,7 @@ Collection <- R6::R6Class(
 
                 "Content removed"
             }
-            else 
+            else
             {
                 stop(paste0("Expected character vector, got ",
                             paste0("(", paste0(class(paths), collapse = ", "), ")"),
@@ -189,10 +166,10 @@ Collection <- R6::R6Class(
             }
         },
 
-        move = function(content, newLocation)
+        move = function(content, destination)
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
             content <- trimFromEnd(content, "/")
 
@@ -201,13 +178,37 @@ Collection <- R6::R6Class(
             if(is.null(elementToMove))
                 stop("Content you want to move doesn't exist in the collection.")
 
-            elementToMove$move(newLocation)
+            elementToMove$move(destination)
+        },
+
+        copy = function(content, destination)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            content <- trimFromEnd(content, "/")
+
+            elementToCopy <- self$get(content)
+
+            if(is.null(elementToCopy))
+                stop("Content you want to copy doesn't exist in the collection.")
+
+            elementToCopy$copy(destination)
+        },
+
+        refresh = function()
+        {
+            if(!is.null(private$tree))
+            {
+                private$tree$getTree()$setCollection(NULL, setRecursively = TRUE)
+                private$tree <- NULL
+            }
         },
 
         getFileListing = function()
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
             content <- private$REST$getCollectionContent(self$uuid)
             content[order(tolower(content))]
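Collection-level move() and copy() resolve the named element and delegate to its own method, while refresh() discards the cached tree; every public method rebuilds it lazily on next use. A sketch:

    collection$move("folder/file.txt", "otherFolder/file.txt")
    collection$copy("folder/file.txt", "backup/file.txt")
    collection$refresh()   # cached tree is dropped and regenerated on the next call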
@@ -216,32 +217,11 @@ Collection <- R6::R6Class(
         get = function(relativePath)
         {
             if(is.null(private$tree))
-                private$genereateCollectionTreeStructure()
+                private$generateCollectionTreeStructure()
 
             private$tree$getElement(relativePath)
         },
 
-               toJSON = function() 
-        {
-                       fields <- sapply(private$classFields, function(field)
-                       {
-                               self[[field]]
-                       }, USE.NAMES = TRUE)
-                       
-                       jsonlite::toJSON(list("collection" = 
-                     Filter(Negate(is.null), fields)), auto_unbox = TRUE)
-               },
-
-               isEmpty = function() {
-                       fields <- sapply(private$classFields,
-                                        function(field) self[[field]])
-
-                       if(any(sapply(fields, function(field) !is.null(field) && field != "")))
-                               FALSE
-                       else
-                               TRUE
-               },
-
         getRESTService = function() private$REST,
         setRESTService = function(newRESTService) private$REST <- newRESTService
     ),
@@ -251,9 +231,8 @@ Collection <- R6::R6Class(
         REST        = NULL,
         tree        = NULL,
         fileContent = NULL,
-        classFields = NULL,
 
-        genereateCollectionTreeStructure = function()
+        generateCollectionTreeStructure = function()
         {
             if(is.null(self$uuid))
                 stop("Collection uuid is not defined.")
@@ -275,7 +254,7 @@ Collection <- R6::R6Class(
 #'
 #' @param x Instance of Collection class
 #' @param ... Optional arguments.
-#' @export 
+#' @export
 print.Collection = function(x, ...)
 {
     cat(paste0("Type: ", "\"", "Arvados Collection", "\""), sep = "\n")
index 8686f88c1a8a3c55b695351b9993df55939d0f1a..5f7a29455ae4a58aaae6792f6dd1eb26ae30ae4e 100644 (file)
@@ -15,55 +15,16 @@ CollectionTree <- R6::R6Class(
         initialize = function(fileContent, collection)
         {
             self$pathsList <- fileContent
-
-            treeBranches <- sapply(fileContent, function(filePath)
-            {
-                splitPath <- unlist(strsplit(filePath, "/", fixed = TRUE))
-                branch <- private$createBranch(splitPath)      
-            })
-
+            treeBranches <- sapply(fileContent, function(filePath) self$createBranch(filePath))
             root <- Subcollection$new("")
-
-            sapply(treeBranches, function(branch)
-            {
-                private$addBranch(root, branch)
-            })
-
+            sapply(treeBranches, function(branch) self$addBranch(root, branch))
             root$setCollection(collection)
             private$tree <- root
         },
 
-        getElement = function(relativePath)
-        {
-            relativePath <- trimFromStart(relativePath, "./")
-            relativePath <- trimFromEnd(relativePath, "/")
-
-            if(endsWith(relativePath, "/"))
-                relativePath <- substr(relativePath, 0, nchar(relativePath) - 1)
-
-            splitPath <- unlist(strsplit(relativePath, "/", fixed = TRUE))
-            returnElement <- private$tree
-
-            for(pathFragment in splitPath)
-            {
-                returnElement <- returnElement$get(pathFragment)
-
-                if(is.null(returnElement))
-                    return(NULL)
-            }
-
-            returnElement
-        },
-
-        getTree = function() private$tree
-    ),
-
-    private = list(
-
-        tree = NULL,
-
-        createBranch = function(splitPath)
+        createBranch = function(filePath)
         {
+            splitPath <- unlist(strsplit(filePath, "/", fixed = TRUE))
             branch <- NULL
             lastElementIndex <- length(splitPath)
 
@@ -80,7 +41,7 @@ CollectionTree <- R6::R6Class(
                     branch <- newFolder
                 }
             }
-            
+
             branch
         },
 
@@ -90,24 +51,55 @@ CollectionTree <- R6::R6Class(
 
             if(is.null(child))
             {
+                # Make sure we don't make any REST calls while adding the child.
+                collection <- container$getCollection()
+                container$setCollection(NULL, setRecursively = FALSE)
                 container$add(node)
+                container$setCollection(collection, setRecursively = FALSE)
             }
             else
             {
-                # Note: REST always returns folder name alone before other folder 
+                # Note: REST always returns folder name alone before other folder
                 # content, so in the first iteration we don't know if it's a file
-                # or folder since its just a name, so we assume it's a file. 
-                # If we encounter that same name again we know 
+                # or folder since it's just a name, so we assume it's a file.
+                # If we encounter that same name again we know
                 # it's a folder so we need to replace ArvadosFile with Subcollection.
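+                # E.g. for the listing c("animal", "animal/dog"), "animal"
+                # arrives first as an ArvadosFile and is upgraded here.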
                 if("ArvadosFile" %in% class(child))
-                {
                     child = private$replaceFileWithSubcollection(child)
-                }
 
-                private$addBranch(child, node$getFirst())
+                self$addBranch(child, node$getFirst())
             }
         },
 
+        getElement = function(relativePath)
+        {
+            relativePath <- trimFromStart(relativePath, "./")
+            relativePath <- trimFromEnd(relativePath, "/")
+
+            if(endsWith(relativePath, "/"))
+                relativePath <- substr(relativePath, 0, nchar(relativePath) - 1)
+
+            splitPath <- unlist(strsplit(relativePath, "/", fixed = TRUE))
+            returnElement <- private$tree
+
+            for(pathFragment in splitPath)
+            {
+                returnElement <- returnElement$get(pathFragment)
+
+                if(is.null(returnElement))
+                    return(NULL)
+            }
+
+            returnElement
+        },
+
+        getTree = function() private$tree
+    ),
+
+    private = list(
+
+        tree = NULL,
+
         replaceFileWithSubcollection = function(arvadosFile)
         {
             subcollection <- Subcollection$new(arvadosFile$getName())
index 8ce68f3837f158486534c6adc55e4ff23e9386e1..cd492166a139bf56dccebf732f2533c443440cf7 100644 (file)
@@ -10,12 +10,12 @@ HttpParser <- R6::R6Class(
 
         validContentTypes = NULL,
 
-        initialize = function() 
+        initialize = function()
         {
             self$validContentTypes <- c("text", "raw")
         },
 
-        parseJSONResponse = function(serverResponse) 
+        parseJSONResponse = function(serverResponse)
         {
             parsed_response <- httr::content(serverResponse,
                                              as = "parsed",
@@ -41,7 +41,7 @@ HttpParser <- R6::R6Class(
             result[-1]
         },
 
-        getFileSizesFromResponse = function(response, uri)    
+        getFileSizesFromResponse = function(response, uri)
         {
             text <- rawToChar(response$content)
             doc <- XML::xmlParse(text, asText=TRUE)
index 95dd375debe5ce076638c55de49a57db1f2d8f0d..07defca90f4c99e8be9f8a73f7412f398ab1a701 100644 (file)
@@ -13,10 +13,10 @@ HttpRequest <- R6::R6Class(
         validContentTypes = NULL,
         validVerbs = NULL,
 
-        initialize = function() 
+        initialize = function()
         {
             self$validContentTypes <- c("text", "raw")
-            self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE")
+            self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE", "COPY")
         },
 
         exec = function(verb, url, headers = NULL, body = NULL, queryParams = NULL,
@@ -30,7 +30,7 @@ HttpRequest <- R6::R6Class(
 
             config <- httr::add_headers(unlist(headers))
             if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
-               config$options = list(ssl_verifypeer = FALSE)
+               config$options = list(ssl_verifypeer = 0L)
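+               # (libcurl takes ssl_verifypeer as an integer; 0L disables TLS peer verification)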
 
             # times = 1 regular call + numberOfRetries
             response <- httr::RETRY(verb, url = url, body = body,
@@ -58,6 +58,17 @@ HttpRequest <- R6::R6Class(
             }
 
             return("")
+        },
+
+        getConnection = function(url, headers, openMode)
+        {
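+            # Create a curl handle, attach the request headers, and open a
+            # streaming connection with curl::curl().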
+            h <- curl::new_handle()
+            curl::handle_setheaders(h, .list = headers)
+
+            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
+               curl::handle_setopt(h, ssl_verifypeer = 0L)
+
+            conn <- curl::curl(url = url, open = openMode, handle = h)
         }
     ),
 
index ac65d0df3f37b6baa6031bc8cbab71b163e27a76..78b2c35e32fa117190f033075e1ea5ee2a3805e3 100644 (file)
@@ -66,7 +66,7 @@ RESTService <- R6::R6Class(
         {
             fileURL <- paste0(self$getWebDavHostName(), "c=",
                               uuid, "/", relativePath);
-            headers <- list(Authorization = paste("OAuth2", self$token)) 
+            headers <- list(Authorization = paste("OAuth2", self$token))
 
             serverResponse <- self$http$exec("DELETE", fileURL, headers,
                                              retryTimes = self$numRetries)
@@ -81,10 +81,10 @@ RESTService <- R6::R6Class(
         {
             collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
             fromURL <- paste0(collectionURL, from)
-            toURL <- paste0(collectionURL, to)
+            toURL <- paste0(collectionURL, trimFromStart(to, "/"))
 
             headers <- list("Authorization" = paste("OAuth2", self$token),
-                           "Destination" = toURL)
+                            "Destination" = toURL)
 
             serverResponse <- self$http$exec("MOVE", fromURL, headers,
                                              retryTimes = self$numRetries)
@@ -95,6 +95,24 @@ RESTService <- R6::R6Class(
             serverResponse
         },
 
+        copy = function(from, to, uuid)
+        {
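+            # WebDAV COPY: the source is addressed by the request URL and the
+            # target by the "Destination" header.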
+            collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
+            fromURL <- paste0(collectionURL, from)
+            toURL <- paste0(collectionURL, trimFromStart(to, "/"))
+
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Destination" = toURL)
+
+            serverResponse <- self$http$exec("COPY", fromURL, headers,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
         getCollectionContent = function(uuid)
         {
             collectionURL <- URLencode(paste0(self$getWebDavHostName(),
@@ -186,18 +204,13 @@ RESTService <- R6::R6Class(
             self$httpParser$parseResponse(serverResponse, "text")
         },
 
-        getConnection = function(uuid, relativePath, openMode)
+        getConnection = function(relativePath, uuid, openMode)
         {
-            fileURL <- paste0(self$getWebDavHostName(), 
+            fileURL <- paste0(self$getWebDavHostName(),
                               "c=", uuid, "/", relativePath);
             headers <- list(Authorization = paste("OAuth2", self$token))
 
-            h <- curl::new_handle()
-            curl::handle_setheaders(h, .list = headers)
-
-            conn <- curl::curl(url = fileURL, open = openMode, handle = h)
-
-            conn
+            conn <- self$http$getConnection(fileURL, headers, openMode)
         }
     ),
 
@@ -210,7 +223,7 @@ RESTService <- R6::R6Class(
         {
             fileURL <- paste0(self$getWebDavHostName(), "c=",
                               uuid, "/", relativePath)
-            headers <- list(Authorization = paste("OAuth2", self$token), 
+            headers <- list(Authorization = paste("OAuth2", self$token),
                             "Content-Type" = contentType)
             body <- NULL
 
index 60714a4ad835b9bc201fb780bb38b5fb8a81461c..17a9ef3ee3ba6180546763da637a8824905d66dc 100644 (file)
@@ -5,10 +5,10 @@
 source("./R/util.R")
 
 #' Subcollection
-#' 
+#'
 #' Subcollection class represents a folder inside Arvados collection.
 #' It is essentially a composite of arvadosFiles and other subcollections.
-#' 
+#'
 #' @section Usage:
 #' \preformatted{subcollection = Subcollection$new(name)}
 #'
@@ -16,7 +16,7 @@ source("./R/util.R")
 #' \describe{
 #'   \item{name}{Name of the subcollection.}
 #' }
-#' 
+#'
 #' @section Methods:
 #' \describe{
 #'   \item{getName()}{Returns name of the subcollection.}
@@ -26,7 +26,8 @@ source("./R/util.R")
 #'   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
 #'   \item{getFileListing()}{Returns subcollections file content as character vector.}
 #'   \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
-#'   \item{move(newLocation)}{Moves subcollection to a new location inside collection.}
+#'   \item{move(destination)}{Moves subcollection to a new location inside collection.}
+#'   \item{copy(destination)}{Copies subcollection to a new location inside collection.}
 #' }
 #'
 #' @name Subcollection
@@ -40,6 +41,7 @@ source("./R/util.R")
 #' myFolder$remove("myFile")
 #'
 #' myFolder$move("newLocation/myFolder")
+#' myFolder$copy("newLocation/myFolder")
 #' }
 NULL
 
@@ -56,7 +58,7 @@ Subcollection <- R6::R6Class(
         },
 
         getName = function() private$name,
-        
+
         getRelativePath = function()
         {
             relativePath <- c(private$name)
@@ -77,6 +79,9 @@ Subcollection <- R6::R6Class(
             if("ArvadosFile"   %in% class(content) ||
                "Subcollection" %in% class(content))
             {
+                if(!is.null(content$getCollection()))
+                    stop("Content already belongs to a collection.")
+
                 if(content$getName() == "")
                     stop("Content has invalid name.")
 
@@ -87,7 +92,7 @@ Subcollection <- R6::R6Class(
                                "or Subcollection with same name."))
 
                 if(!is.null(private$collection))
-                {       
+                {
                     if(self$getRelativePath() != "")
                         contentPath <- paste0(self$getRelativePath(),
                                               "/", content$getFileListing())
@@ -161,20 +166,18 @@ Subcollection <- R6::R6Class(
             return(sum(fileSizes))
         },
 
-        move = function(newLocation)
+        move = function(destination)
         {
             if(is.null(private$collection))
-                stop("Subcollection doesn't belong to any collection")
+                stop("Subcollection doesn't belong to any collection.")
 
-            newLocation <- trimFromEnd(newLocation, "/")
-            nameAndPath <- splitToPathAndName(newLocation)
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
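+            # e.g. splitToPathAndName("a/b/c") gives path "a/b" and name "c"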
 
             newParent <- private$collection$get(nameAndPath$path)
 
             if(is.null(newParent))
-            {
-                stop("Unable to get destination subcollection")
-            }
+                stop("Unable to get destination subcollection.")
 
             childWithSameName <- newParent$get(nameAndPath$name)
 
@@ -187,11 +190,53 @@ Subcollection <- R6::R6Class(
                       private$collection$uuid)
 
             private$dettachFromCurrentParent()
-            private$attachToNewParent(newParent)
+            private$attachToNewParent(self, newParent)
 
+            private$parent <- newParent
             private$name <- nameAndPath$name
 
-            "Content moved successfully."
+            self
+        },
+
+        copy = function(destination)
+        {
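+            # Copy on the server first, then mirror the result locally by
+            # attaching a deep duplicate of this subcollection to the new parent.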
+            if(is.null(private$collection))
+                stop("Subcollection doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent) || !("Subcollection" %in% class(newParent)))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$copy(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            newContent <- self$duplicate(nameAndPath$name)
+            newContent$setCollection(self$getCollection(), setRecursively = TRUE)
+            newContent$setParent(newParent)
+            private$attachToNewParent(newContent, newParent)
+
+            newContent
+        },
+
+        duplicate = function(newName = NULL)
+        {
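+            # Deep-copy this subcollection and all of its children; the clone
+            # is detached (no parent, no collection) until explicitly attached.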
+            name <- if(!is.null(newName)) newName else private$name
+            root <- Subcollection$new(name)
+            for(child in private$children)
+                root$add(child$duplicate())
+
+            root
         },
 
         get = function(name)
@@ -254,30 +299,29 @@ Subcollection <- R6::R6Class(
             }
         },
 
-        attachToNewParent = function(newParent)
+        attachToNewParent = function(content, newParent)
         {
-            #Note: We temporary set parents collection to NULL. This will ensure that
-            #      add method doesn't post file on REST.
+            # We temporarily set the parent's collection to NULL so that the
+            # add method doesn't post this subcollection to REST.
+            # We also need to set the content's collection to NULL because
+            # the add method throws an exception if we try to add content
+            # that already belongs to a collection.
             parentsCollection <- newParent$getCollection()
+            content$setCollection(NULL, setRecursively = FALSE)
             newParent$setCollection(NULL, setRecursively = FALSE)
-
-            newParent$add(self)
-
+            newParent$add(content)
+            content$setCollection(parentsCollection, setRecursively = FALSE)
             newParent$setCollection(parentsCollection, setRecursively = FALSE)
-
-            private$parent <- newParent
         },
 
         dettachFromCurrentParent = function()
         {
-            #Note: We temporary set parents collection to NULL. This will ensure that
-            #      remove method doesn't remove this subcollection from REST.
+            # We temporarily set the parent's collection to NULL so that the
+            # remove method doesn't remove this subcollection from REST.
             parent <- private$parent
             parentsCollection <- parent$getCollection()
             parent$setCollection(NULL, setRecursively = FALSE)
-
             parent$remove(private$name)
-
             parent$setCollection(parentsCollection, setRecursively = FALSE)
         },
 
@@ -302,7 +346,7 @@ Subcollection <- R6::R6Class(
             content
         }
     ),
-    
+
     cloneable = FALSE
 )
 
@@ -312,7 +356,7 @@ Subcollection <- R6::R6Class(
 #'
 #' @param x Instance of Subcollection class
 #' @param ... Optional arguments.
-#' @export 
+#' @export
 print.Subcollection = function(x, ...)
 {
     collection   <- NULL
index 3e8c2fa0cf2b1494c33a7246a9f97a8669a3b514..1aef20b6cb90fe11d7440219bbe24d464af988c2 100644 (file)
@@ -343,7 +343,7 @@ genMethodsDoc <- function(methodResources, resourceNames)
     }, methodResources, resourceNames)))
 
     projectDoc <- genProjectMethodsDoc()
-    
+
     c(methodsDoc, projectDoc)
 }
 
@@ -401,10 +401,10 @@ getAPIClassMethodList <- function(methodResources, resourceNames)
                methodNames[!(methodNames %in% c("index", "show", "destroy"))])
 
     }, methodResources, resourceNames)))
-    
+
     hardcodedMethods <- c("projects.create", "projects.get",
                           "projects.list", "projects.update", "projects.delete")
-    paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}") 
+    paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}")
 }
 
 getMethodDoc <- function(methodName, methodMetaData)
@@ -447,7 +447,7 @@ getMethodDescription <- function(methodMetaData)
                                  className <- sapply(prop, function(ref) ref)
                                  objectName <- paste0(tolower(substr(className, 1, 1)),
                                                       substr(className, 2, nchar(className)))
-                                 paste("#' @param", objectName, className, "object.") 
+                                 paste("#' @param", objectName, className, "object.")
                              })))
     }
 
@@ -457,7 +457,7 @@ getMethodDescription <- function(methodMetaData)
     {
         arg <- methodMetaData$parameters[[argName]]
         argDescription <- arg$description
-        paste("#' @param", argName, argDescription) 
+        paste("#' @param", argName, argDescription)
     })))
 
     c(requestDoc, argsDoc)
@@ -541,7 +541,7 @@ formatArgs <- function(prependAtStart, prependToEachSplit,
 {
     if(length(args) > 1)
     {
-        args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",") 
+        args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",")
     }
 
     args[1] <- paste0(prependAtStart, args[1])
@@ -564,12 +564,12 @@ formatArgs <- function(prependAtStart, prependToEachSplit,
 
         argLines <- c(argLines, line)
     }
-    
+
     argLines <- unlist(argLines)
     argLinesLen <- length(argLines)
 
     if(argLinesLen > 1)
-        argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen]) 
+        argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen])
 
     argLines
 }
index c98f803e5ea8e123b4e058b8256850668b5ae068..fa0cda45bfc65dfefb2c681eabee5c3668e822d9 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 .onLoad <- function(libName, pkgName)
 {
     minAllowedRVersion <- "3.3.0"
index be34b2fdb1dabd3531ca74e696a8240528418520..c1d6c7cf4f01eebaae55764630e8e1c68d6f1def 100644 (file)
@@ -133,7 +133,7 @@ files <- collection$getFileListing()
 arvadosFile <- collection$get("location/to/my/file.cpp")
 ```
 
-    or
+or
 
 ```{r}
 arvadosSubcollection <- collection$get("location/to/my/directory/")
@@ -177,7 +177,7 @@ fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
 size <- arvadosFile$getSizeInBytes()
 ```
 
-    or
+or
 
 ```{r}
 size <- arvadosSubcollection$getSizeInBytes()
@@ -186,31 +186,16 @@ size <- arvadosSubcollection$getSizeInBytes()
 * Create new file in a collection:
 
 ```{r}
-collection$create(fileNames, optionalRelativePath)
+collection$create(files)
 ```
 
-    Example:
+Example:
 
 ```{r}
-mainFile <- collection$create("main.cpp", "cpp/src/")
-fileList <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
+mainFile <- collection$create("cpp/src/main.cpp")
+fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
 ```
 
-* Add existing ArvadosFile or Subcollection to a collection:
-
-```{r}
-folder <- Subcollection$new("src")
-file   <- ArvadosFile$new("main.cpp")
-folder$add(file)
-```
-
-```{r}
-collection$add(folder, "cpp")
-```
-
-This examples will add file "main.cpp" in "./cpp/src/" folder if folder exists.
-If subcollection contains more files or folders they will be added recursively.
-
 * Delete file from a collection:
 
 ```{r}
@@ -234,9 +219,9 @@ subcollection$remove("fileInsideSubcollection.exe")
 subcollection$remove("folderInsideSubcollection/")
 ```
 
-* Move file or folder inside collection:
+* Move or rename a file or folder within a collection (moving between collections is currently not supported):
 
-Directley from collection
+Directly from collection
 
 ```{r}
 collection$move("folder/file.cpp", "file.cpp")
@@ -259,6 +244,28 @@ subcollection$move("newDestination/folder")
 Make sure to include the new file name in the destination.
 In the second example, file$move("newDestination/") will not work.
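+
+For example (assuming "file.cpp" exists at the collection root):
+
+```{r}
+file <- collection$get("file.cpp")
+file$move("newDestination/file.cpp") # correct: destination includes the new name
+# file$move("newDestination/")       # will not work: no file name given
+```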
 
+* Copy file or folder within a collection (copying between collections is currently not supported):
+
+Directly from collection
+
+```{r}
+collection$copy("folder/file.cpp", "file.cpp")
+```
+
+Or from file
+
+```{r}
+file <- collection$get("location/to/my/file.cpp")
+file$copy("destination/file.cpp")
+```
+
+Or from subcollection
+
+```{r}
+subcollection <- collection$get("location/to/folder")
+subcollection$copy("destination/folder")
+```
+
 #### Working with Arvados projects
 
 * Get a project:
index 95a2e5561fa9ce21b1d5bd236489595b8c2c034d..51f98d81dcd4de28b9e5faca303ee87a5da7a58d 100644 (file)
@@ -166,6 +166,7 @@ Arvados class gives users ability to access Arvados REST API.
        \item{}{\code{\link{users.delete}}}
        \item{}{\code{\link{users.get}}}
        \item{}{\code{\link{users.list}}}
+       \item{}{\code{\link{users.merge}}}
        \item{}{\code{\link{users.setup}}}
        \item{}{\code{\link{users.system}}}
        \item{}{\code{\link{users.unsetup}}}
index b7840dc16ff4cbbd15dd7383b9fcde0c59416606..514e9e846df958548bacefa670b35e69814d0767 100644 (file)
@@ -29,7 +29,8 @@ ArvadosFile class represents a file inside Arvados collection.
   \item{flush()}{Write connections content to a file (override current content of the file).}
   \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
   \item{getSizeInBytes()}{Returns file size in bytes.}
-  \item{move(newLocation)}{Moves file to a new location inside collection.}
+  \item{move(destination)}{Moves file to a new location inside collection.}
+  \item{copy(destination)}{Copies file to a new location inside collection.}
 }
 }
 
@@ -40,7 +41,7 @@ myFile <- ArvadosFile$new("myFile")
 myFile$write("This is new file content")
 fileContent <- myFile$read()
 fileContent <- myFile$read("text")
-fileContent <- myFile$read("raw", offset = 8, length = 4) 
+fileContent <- myFile$read("raw", offset = 8, length = 4)
 
 #Write a table:
 arvConnection <- myFile$connection("w")
@@ -52,5 +53,6 @@ arvConnection <- myFile$connection("r")
 mytable <- read.table(arvConnection)
 
 myFile$move("newFolder/myFile")
+myFile$copy("newFolder/myFile")
 }
 }
index 8cf29a2ea56d19cc32307cdab3c0a488537ac295..fbe6038664447df230ba77295f17d5df97274d29 100644 (file)
@@ -23,9 +23,10 @@ Collection class provides interface for working with Arvados collections.
 
 \describe{
   \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
-  \item{create(fileNames, relativePath = "")}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
+  \item{create(files)}{Creates one or more ArvadosFiles, at the relative paths given in files, and adds them to the collection.}
   \item{remove(fileNames)}{Remove one or more files from the collection.}
-  \item{move(content, newLocation)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+  \item{move(content, destination)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+  \item{copy(content, destination)}{Copies ArvadosFile or Subcollection to another location in the collection.}
   \item{getFileListing()}{Returns collections file content as character vector.}
   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
 }
@@ -36,9 +37,6 @@ Collection class provides interface for working with Arvados collections.
 arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
 collection <- Collection$new(arv, "uuid")
 
-newFile <- ArvadosFile$new("myFile")
-collection$add(newFile, "myFolder")
-
 createdFiles <- collection$create(c("cpp/src/main.cpp", "cpp/src/lib.dll"))
 
 collection$remove("location/to/my/file.cpp")
index df0970b30fd4ef843b595f8db52efbafd9b140a4..0b27a8bc4389d134b5f360bf012cd17d7934d77f 100644 (file)
@@ -5,7 +5,7 @@
 \title{Subcollection}
 \description{
 Subcollection class represents a folder inside Arvados collection.
-It is essentially a composite of ArvadosFiles and other Subcollections.
+It is essentially a composite of arvadosFiles and other subcollections.
 }
 \section{Usage}{
 
@@ -29,7 +29,8 @@ It is essentially a composite of ArvadosFiles and other Subcollections.
   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
   \item{getFileListing()}{Returns subcollections file content as character vector.}
   \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
-  \item{move(newLocation)}{Moves subcollection to a new location inside collection.}
+  \item{move(destination)}{Moves subcollection to a new location inside collection.}
+  \item{copy(destination)}{Copies subcollection to a new location inside collection.}
 }
 }
 
@@ -43,5 +44,6 @@ myFolder$get("myFile")
 myFolder$remove("myFile")
 
 myFolder$move("newLocation/myFolder")
+myFolder$copy("newLocation/myFolder")
 }
 }
diff --git a/sdk/R/man/users.merge.Rd b/sdk/R/man/users.merge.Rd
new file mode 100644 (file)
index 0000000..a539591
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.merge}
+\alias{users.merge}
+\title{users.merge}
+\usage{
+arv$users.merge(new_owner_uuid,
+       new_user_token, redirect_to_new_user = NULL)
+}
+\arguments{
+\item{new_owner_uuid}{}
+
+\item{new_user_token}{}
+
+\item{redirect_to_new_user}{}
+}
+\value{
+User object.
+}
+\description{
+users.merge is a method defined in Arvados class.
+}
index c97572c193f1eadbd315928fb09d56aff5e2d7a2..c23283989a9c982146168a4ec883a670bd2e7510 100644 (file)
@@ -11,13 +11,13 @@ FakeHttpParser <- R6::R6Class(
         validContentTypes = NULL,
         parserCallCount = NULL,
 
-        initialize = function() 
+        initialize = function()
         {
             self$parserCallCount <- 0
             self$validContentTypes <- c("text", "raw")
         },
 
-        parseJSONResponse = function(serverResponse) 
+        parseJSONResponse = function(serverResponse)
         {
             self$parserCallCount <- self$parserCallCount + 1
 
@@ -47,7 +47,7 @@ FakeHttpParser <- R6::R6Class(
             serverResponse
         },
 
-        getFileSizesFromResponse = function(serverResponse, uri)    
+        getFileSizesFromResponse = function(serverResponse, uri)
         {
             self$parserCallCount <- self$parserCallCount + 1
 
index 2633abdf2c745bf0e4c9afcee1b73b7c5751fbeb..7734e0d9284d6bde09b478ab30171e687ea75a2c 100644 (file)
@@ -22,11 +22,13 @@ FakeHttpRequest <- R6::R6Class(
         JSONEncodedBodyIsProvided               = NULL,
         requestBodyIsProvided                   = NULL,
 
-        numberOfGETRequests    = NULL,
-        numberOfDELETERequests = NULL,
-        numberOfPUTRequests    = NULL,
-        numberOfPOSTRequests   = NULL,
-        numberOfMOVERequests   = NULL,
+        numberOfGETRequests        = NULL,
+        numberOfDELETERequests     = NULL,
+        numberOfPUTRequests        = NULL,
+        numberOfPOSTRequests       = NULL,
+        numberOfMOVERequests       = NULL,
+        numberOfCOPYRequests       = NULL,
+        numberOfgetConnectionCalls = NULL,
 
         initialize = function(expectedURL      = NULL,
                               serverResponse   = NULL,
@@ -56,6 +58,9 @@ FakeHttpRequest <- R6::R6Class(
             self$numberOfPUTRequests    <- 0
             self$numberOfPOSTRequests   <- 0
             self$numberOfMOVERequests   <- 0
+            self$numberOfCOPYRequests   <- 0
+
+            self$numberOfgetConnectionCalls <- 0
 
             self$serverMaxElementsPerRequest <- 5
         },
@@ -78,6 +83,8 @@ FakeHttpRequest <- R6::R6Class(
                 self$numberOfDELETERequests <- self$numberOfDELETERequests + 1
             else if(verb == "MOVE")
                 self$numberOfMOVERequests <- self$numberOfMOVERequests + 1
+            else if(verb == "COPY")
+                self$numberOfCOPYRequests <- self$numberOfCOPYRequests + 1
             else if(verb == "PROPFIND")
             {
                 return(self$content)
@@ -87,18 +94,24 @@ FakeHttpRequest <- R6::R6Class(
                 return(private$getElements(offset, limit))
             else
                 return(self$content)
+        },
+
+        getConnection = function(url, headers, openMode)
+        {
+            self$numberOfgetConnectionCalls <- self$numberOfgetConnectionCalls + 1
+            c(url, headers, openMode)
         }
     ),
 
     private = list(
 
-        validateURL = function(url) 
+        validateURL = function(url)
         {
             if(!is.null(self$expectedURL) && url == self$expectedURL)
                 self$URLIsProperlyConfigured <- TRUE
         },
 
-        validateHeaders = function(headers) 
+        validateHeaders = function(headers)
         {
             if(!is.null(headers$Authorization))
                 self$requestHeaderContainsAuthorizationField <- TRUE
@@ -115,11 +128,11 @@ FakeHttpRequest <- R6::R6Class(
 
         validateBody = function(body)
         {
-            if(!is.null(body))           
+            if(!is.null(body))
             {
                 self$requestBodyIsProvided <- TRUE
 
-                if(class(body) == "json")           
+                if(class(body) == "json")
                     self$JSONEncodedBodyIsProvided <- TRUE
             }
         },
@@ -143,7 +156,7 @@ FakeHttpRequest <- R6::R6Class(
             {
                 if(offset > self$content$items_available)
                     stop("Invalid offset")
-                
+
                 start <- offset + 1
             }
 
index 08e8717de5e4b97b5776c2c6cc8893c523f4c133..a91da04fd1295edf5332b54b61da47ab2ea4687d 100644 (file)
@@ -18,6 +18,7 @@ FakeRESTService <- R6::R6Class(
         createCallCount               = NULL,
         deleteCallCount               = NULL,
         moveCallCount                 = NULL,
+        copyCallCount                 = NULL,
         getCollectionContentCallCount = NULL,
         getResourceSizeCallCount      = NULL,
         readCallCount                 = NULL,
@@ -31,7 +32,7 @@ FakeRESTService <- R6::R6Class(
         collectionContent = NULL,
         returnContent     = NULL,
 
-        initialize = function(collectionContent = NULL, returnContent = NULL, 
+        initialize = function(collectionContent = NULL, returnContent = NULL,
                               expectedFilterContent = NULL)
         {
             self$getResourceCallCount    <- 0
@@ -44,6 +45,7 @@ FakeRESTService <- R6::R6Class(
             self$createCallCount               <- 0
             self$deleteCallCount               <- 0
             self$moveCallCount                 <- 0
+            self$copyCallCount                 <- 0
             self$getCollectionContentCallCount <- 0
             self$getResourceSizeCallCount      <- 0
             self$readCallCount                 <- 0
@@ -135,6 +137,12 @@ FakeRESTService <- R6::R6Class(
             self$returnContent
         },
 
+        copy = function(from, to, uuid)
+        {
+            self$copyCallCount <- self$copyCallCount + 1
+            self$returnContent
+        },
+
         getCollectionContent = function(uuid)
         {
             self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1
@@ -146,21 +154,21 @@ FakeRESTService <- R6::R6Class(
             self$getResourceSizeCallCount <- self$getResourceSizeCallCount + 1
             self$returnContent
         },
-        
+
         read = function(relativePath, uuid, contentType = "text", offset = 0, length = 0)
         {
             self$readCallCount <- self$readCallCount + 1
             self$returnContent
         },
 
-        write = function(uuid, relativePath, content, contentType)
+        write = function(relativePath, uuid, content, contentType)
         {
             self$writeBuffer <- content
             self$writeCallCount <- self$writeCallCount + 1
             self$returnContent
         },
 
-        getConnection = function(relativePath, uuid, openMode)
+        getConnection = function(uuid, relativePath, openMode)
         {
             self$getConnectionCallCount <- self$getConnectionCallCount + 1
             self$returnContent
index fb14888aab91b982d88dbdddca0be9589f757fb8..e3457c993f7c88cee4a963ca7006a90c6078f478 100644 (file)
@@ -9,30 +9,30 @@ context("ArvadosFile")
 test_that("constructor raises error if  file name is empty string", {
 
     expect_that(ArvadosFile$new(""), throws_error("Invalid name."))
-}) 
+})
 
 test_that("getFileListing always returns file name", {
 
     dog <- ArvadosFile$new("dog")
 
     expect_that(dog$getFileListing(), equals("dog"))
-}) 
+})
 
 test_that("get always returns NULL", {
 
     dog <- ArvadosFile$new("dog")
-    
+
     responseIsNull <- is.null(dog$get("something"))
     expect_that(responseIsNull, is_true())
-}) 
+})
 
 test_that("getFirst always returns NULL", {
 
     dog <- ArvadosFile$new("dog")
-    
+
     responseIsNull <- is.null(dog$getFirst())
     expect_that(responseIsNull, is_true())
-}) 
+})
 
 test_that(paste("getSizeInBytes returns zero if arvadosFile",
                 "is not part of a collection"), {
@@ -40,7 +40,7 @@ test_that(paste("getSizeInBytes returns zero if arvadosFile",
     dog <- ArvadosFile$new("dog")
 
     expect_that(dog$getSizeInBytes(), equals(0))
-}) 
+})
 
 test_that(paste("getSizeInBytes delegates size calculation",
                 "to REST service class"), {
@@ -57,7 +57,7 @@ test_that(paste("getSizeInBytes delegates size calculation",
     resourceSize <- fish$getSizeInBytes()
 
     expect_that(resourceSize, equals(100))
-}) 
+})
 
 test_that("getRelativePath returns path relative to the tree root", {
 
@@ -69,7 +69,7 @@ test_that("getRelativePath returns path relative to the tree root", {
     fish$add(shark)
 
     expect_that(shark$getRelativePath(), equals("animal/fish/shark"))
-}) 
+})
 
 test_that("read raises exception if file doesn't belong to a collection", {
 
@@ -77,11 +77,10 @@ test_that("read raises exception if file doesn't belong to a collection", {
 
     expect_that(dog$read(),
                 throws_error("ArvadosFile doesn't belong to any collection."))
-}) 
+})
 
 test_that("read raises exception offset or length is negative number", {
 
-
     collectionContent <- c("animal", "animal/fish")
     fakeREST <- FakeRESTService$new(collectionContent)
 
@@ -96,7 +95,7 @@ test_that("read raises exception offset or length is negative number", {
                 throws_error("Offset and length must be positive values."))
     expect_that(fish$read(contentType = "text", offset = -1, length = -1),
                 throws_error("Offset and length must be positive values."))
-}) 
+})
 
 test_that("read delegates reading operation to REST service class", {
 
@@ -108,15 +107,15 @@ test_that("read delegates reading operation to REST service class", {
     api$setRESTService(fakeREST)
     collection <- Collection$new(api, "myUUID")
     fish <- collection$get("animal/fish")
-    
+
     fileContent <- fish$read("text")
 
     expect_that(fileContent, equals("my file"))
     expect_that(fakeREST$readCallCount, equals(1))
-}) 
+})
 
 test_that(paste("connection delegates connection creation ro RESTService class",
-                "which returns curl connection opened in read mode when", 
+                "which returns curl connection opened in read mode when",
                 "'r' of 'rb' is passed as argument"), {
 
     collectionContent <- c("animal", "animal/fish")
@@ -130,7 +129,7 @@ test_that(paste("connection delegates connection creation ro RESTService class",
     connection <- fish$connection("r")
 
     expect_that(fakeREST$getConnectionCallCount, equals(1))
-}) 
+})
 
 test_that(paste("connection returns textConnection opened",
                 "in write mode when 'w' is passed as argument"), {
@@ -152,11 +151,10 @@ test_that(paste("connection returns textConnection opened",
 
     expect_that(writeResult[1], equals("file"))
     expect_that(writeResult[2], equals("content"))
-}) 
+})
 
 test_that("flush sends data stored in a connection to a REST server", {
 
-
     collectionContent <- c("animal", "animal/fish")
     fakeREST <- FakeRESTService$new(collectionContent)
 
@@ -172,7 +170,7 @@ test_that("flush sends data stored in a connection to a REST server", {
     fish$flush()
 
     expect_that(fakeREST$writeBuffer, equals("file content"))
-}) 
+})
 
 test_that("write raises exception if file doesn't belong to a collection", {
 
@@ -180,7 +178,7 @@ test_that("write raises exception if file doesn't belong to a collection", {
 
     expect_that(dog$write(),
                 throws_error("ArvadosFile doesn't belong to any collection."))
-}) 
+})
 
 test_that("write delegates writing operation to REST service class", {
 
@@ -192,11 +190,11 @@ test_that("write delegates writing operation to REST service class", {
     api$setRESTService(fakeREST)
     collection <- Collection$new(api, "myUUID")
     fish <- collection$get("animal/fish")
-    
+
     fileContent <- fish$write("new file content")
 
     expect_that(fakeREST$writeBuffer, equals("new file content"))
-}) 
+})
 
 test_that(paste("move raises exception if arvados file",
                 "doesn't belong to any collection"), {
@@ -204,13 +202,12 @@ test_that(paste("move raises exception if arvados file",
     animal <- ArvadosFile$new("animal")
 
     expect_that(animal$move("new/location"),
-                throws_error("ArvadosFile doesn't belong to any collection"))
-}) 
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
 
 test_that(paste("move raises exception if newLocationInCollection",
                 "parameter is invalid"), {
 
-
     collectionContent <- c("animal",
                            "animal/fish",
                            "animal/dog",
@@ -226,8 +223,8 @@ test_that(paste("move raises exception if newLocationInCollection",
     dog <- collection$get("animal/dog")
 
     expect_that(dog$move("objects/dog"),
-                throws_error("Unable to get destination subcollection"))
-}) 
+                throws_error("Unable to get destination subcollection."))
+})
 
 test_that("move raises exception if new location contains content with the same name", {
 
@@ -248,11 +245,10 @@ test_that("move raises exception if new location contains content with the same
     expect_that(dog$move("dog"),
                 throws_error("Destination already contains content with same name."))
 
-}) 
+})
 
 test_that("move moves arvados file inside collection tree", {
 
-
     collectionContent <- c("animal",
                            "animal/fish",
                            "animal/dog",
@@ -273,3 +269,85 @@ test_that("move moves arvados file inside collection tree", {
     expect_that(dogIsNullOnOldLocation, is_true())
     expect_that(dogExistsOnNewLocation, is_true())
 })
+
+test_that(paste("copy raises exception if arvados file",
+                "doesn't belong to any collection"), {
+
+    animal <- ArvadosFile$new("animal")
+
+    expect_that(animal$copy("new/location"),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
+
+test_that(paste("copy raises exception if location parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$copy("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("copy raises exception if new location contains content with the same name", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "dog")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$copy("dog"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that("copy copies arvados file inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    dog$copy("dog")
+    dogExistsOnOldLocation <- !is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogExistsOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+})
+
+test_that("duplicate performs deep cloning of Arvados file", {
+    arvFile <- ArvadosFile$new("foo")
+    newFile1 <- arvFile$duplicate()
+    newFile2 <- arvFile$duplicate("bar")
+
+    expect_that(newFile1$getFileListing(), equals(arvFile$getFileListing()))
+    expect_that(newFile2$getFileListing(), equals(c("bar")))
+})
index c3c70910e4c63acea6d86f5df71cc9bab9f3e72f..636359ae21a7b196d406ec2b16ee8839e0921f9e 100644 (file)
@@ -100,41 +100,19 @@ test_that("create raises exception if passed argument is not character vector", {
     collection <- Collection$new(api, "myUUID")
 
     expect_that(collection$create(10),
-                throws_error("Expected character vector, got (numeric).", 
+                throws_error("Expected character vector, got (numeric).",
                              fixed = TRUE))
 })
 
-test_that("create raises exception if relative path is not valid", {
-
-    collectionContent <- c("animal",
-                           "animal/fish",
-                           "ball")
-
-    fakeREST <- FakeRESTService$new(collectionContent)
-
-    api <- Arvados$new("myToken", "myHostName")
-    api$setRESTService(fakeREST)
-    collection <- Collection$new(api, "myUUID")
-
-    newPen <- ArvadosFile$new("pen")
-
-    expect_that(collection$create(newPen, "objects"),
-                throws_error("Subcollection objects doesn't exist.",
-                              fixed = TRUE))
-})
-
 test_that(paste("create adds files specified by fileNames",
                 "to local tree structure and remote REST service"), {
 
-    collectionContent <- c("animal", "animal/fish", "ball")
-    fakeREST <- FakeRESTService$new(collectionContent)
-
+    fakeREST <- FakeRESTService$new()
     api <- Arvados$new("myToken", "myHostName")
     api$setRESTService(fakeREST)
     collection <- Collection$new(api, "myUUID")
 
-    files <- c("dog", "cat")
-    collection$create(files, "animal")
+    collection$create(c("animal/dog", "animal/cat"))
 
     dog <- collection$get("animal/dog")
     cat <- collection$get("animal/cat")
@@ -156,7 +134,7 @@ test_that("remove raises exception if passed argumet is not character vector", {
     collection <- Collection$new(api, "myUUID")
 
     expect_that(collection$remove(10),
-                throws_error("Expected character vector, got (numeric).", 
+                throws_error("Expected character vector, got (numeric).",
                              fixed = TRUE))
 })
 
@@ -238,7 +216,7 @@ test_that("getFileListing returns sorted collection content received from REST s
     api$setRESTService(fakeREST)
     collection <- Collection$new(api, "myUUID")
 
-    contentMatchExpected <- all(collection$getFileListing() == 
+    contentMatchExpected <- all(collection$getFileListing() ==
                                 c("animal", "animal/fish", "ball"))
 
     expect_that(contentMatchExpected, is_true())
@@ -262,3 +240,58 @@ test_that("get returns arvados file or subcollection from internal tree structur
     expect_that(fishIsNotNull, is_true())
     expect_that(fish$getName(), equals("fish"))
 })
+
+test_that(paste("copy copies content to a new location inside file tree",
+                "and on REST service"), {
+
+    collectionContent <- c("animal", "animal/dog", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$copy("animal/dog", "dog")
+
+    dogExistsOnOldLocation <- !is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogExistsOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+    expect_that(fakeREST$copyCallCount, equals(1))
+})
+
+test_that("copy raises exception if new location is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$copy("fish", "object"),
+                throws_error("Content you want to copy doesn't exist in the collection.",
+                             fixed = TRUE))
+})
+
+test_that("refresh invalidates current tree structure", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "aaaaa-j7d0g-ccccccccccccccc")
+
+    # Before refresh
+    fish <- collection$get("animal/fish")
+    expect_that(fish$getName(), equals("fish"))
+    expect_that(fish$getCollection()$uuid, equals("aaaaa-j7d0g-ccccccccccccccc"))
+
+    collection$refresh()
+
+    # After refresh
+    expect_that(fish$getName(), equals("fish"))
+    expect_true(is.null(fish$getCollection()))
+})
index 5c8a40526988bb562c45b5702fd921a743f0a77c..1a3aefecd012325658ad408ee2a699682907dbaf 100644 (file)
@@ -7,7 +7,7 @@ context("CollectionTree")
 test_that("constructor creates file tree from character array properly", {
 
     collection <- "myCollection"
-    characterArray <- c("animal", 
+    characterArray <- c("animal",
                         "animal/dog",
                         "boat")
 
@@ -44,12 +44,12 @@ test_that("constructor creates file tree from character array properly", {
     expect_that(boatIsOfTypeArvadosFile, is_true())
     expect_that(boatsParentIsRoot, is_true())
     expect_that(allElementsBelongToSameCollection, is_true())
-}) 
+})
 
 test_that("getElement returns element from tree if element exists on specified path", {
 
     collection <- "myCollection"
-    characterArray <- c("animal", 
+    characterArray <- c("animal",
                         "animal/dog",
                         "boat")
 
@@ -58,12 +58,12 @@ test_that("getElement returns element from tree if element exists on specified p
     dog <- collectionTree$getElement("animal/dog")
 
     expect_that(dog$getName(), equals("dog"))
-}) 
+})
 
 test_that("getElement returns NULL from tree if element doesn't exists on specified path", {
 
     collection <- "myCollection"
-    characterArray <- c("animal", 
+    characterArray <- c("animal",
                         "animal/dog",
                         "boat")
 
@@ -73,12 +73,12 @@ test_that("getElement returns NULL from tree if element doesn't exist on specif
     fishIsNULL <- is.null(fish)
 
     expect_that(fishIsNULL, is_true())
-}) 
+})
 
 test_that("getElement trims ./ from start of relativePath", {
 
     collection <- "myCollection"
-    characterArray <- c("animal", 
+    characterArray <- c("animal",
                         "animal/dog",
                         "boat")
 
@@ -88,12 +88,12 @@ test_that("getElement trims ./ from start of relativePath", {
     dogWithDotSlash <- collectionTree$getElement("./animal/dog")
 
     expect_that(dogWithDotSlash$getName(), equals(dog$getName()))
-}) 
+})
 
 test_that("getElement trims / from end of relativePath", {
 
     collection <- "myCollection"
-    characterArray <- c("animal", 
+    characterArray <- c("animal",
                         "animal/dog",
                         "boat")
 
@@ -103,4 +103,4 @@ test_that("getElement trims / from end of relativePath", {
     animalWithSlash <- collectionTree$getElement("animal/")
 
     expect_that(animalWithSlash$getName(), equals(animal$getName()))
-}) 
+})
index a119d88bf82fa226e26d5127f3ae001d1b515a2e..82c0fb0dd2fed88598e8fd14a8dd88a11d065b71 100644 (file)
@@ -20,7 +20,7 @@ test_that("parseJSONResponse generates and returns JSON object from server respo
 
     expect_that(barExists, is_true())
     expect_that(unlist(result$bar$foo), equals(10))
-}) 
+})
 
 test_that(paste("parseResponse generates and returns character vector",
                 "from server response if outputType is text"), {
@@ -35,10 +35,10 @@ test_that(paste("parseResponse generates and returns character vector",
     parsedResponse <- parser$parseResponse(serverResponse, "text")
 
     expect_that(parsedResponse, equals("random text"))
-}) 
+})
 
 
-webDAVResponseSample = 
+webDAVResponseSample =
     paste0("<?xml version=\"1.0\" encoding=\"UTF-8\"?><D:multistatus xmlns:",
            "D=\"DAV:\"><D:response><D:href>/c=aaaaa-bbbbb-ccccccccccccccc</D",
            ":href><D:propstat><D:prop><D:resourcetype><D:collection xmlns:D=",
@@ -76,7 +76,7 @@ test_that(paste("getFileNamesFromResponse returns file names belonging to specif
     resultMatchExpected <- all.equal(result, expectedResult)
 
     expect_that(resultMatchExpected, is_true())
-}) 
+})
 
 test_that(paste("getFileSizesFromResponse returns file sizes",
                 "parsed from webDAV server response"), {
@@ -93,4 +93,4 @@ test_that(paste("getFileSizesFromResponse returns file sizes",
     resultMatchExpected <- result == expectedResult
 
     expect_that(resultMatchExpected, is_true())
-}) 
+})
index 5ad8aa03115207035ee7f369ded5fbcd597e0ba7..f12463c805dda10e67325adb2a892d5223600932 100644 (file)
@@ -5,12 +5,12 @@
 context("Http Request")
 
 
-test_that("execyte raises exception if http verb is not valid", {
+test_that("execute raises exception if http verb is not valid", {
 
     http <- HttpRequest$new()
     expect_that(http$exec("FAKE VERB", "url"),
                throws_error("Http verb is not valid."))
-}) 
+})
 
 test_that("createQuery generates and encodes query portion of http", {
 
@@ -22,10 +22,87 @@ test_that("createQuery generates and encodes query portion of http", {
     expect_that(http$createQuery(queryParams),
                 equals(paste0("/?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red",
                               "%22%5D%5D&limit=20&offset=50")))
-}) 
+})
 
 test_that("createQuery generates and empty string when queryParams is an empty list", {
 
     http <- HttpRequest$new()
     expect_that(http$createQuery(list()), equals(""))
-}) 
+})
+
+test_that("exec calls httr functions correctly", {
+    httrNamespace <- getNamespace("httr")
+
+    # Monkeypatch httr functions and assert that they are called later
+    add_headersCalled <- FALSE
+    unlockBinding("add_headers", httrNamespace)
+    newAddHeaders <- function(h)
+    {
+        add_headersCalled <<- TRUE
+        list()
+    }
+    httrNamespace$add_headers <- newAddHeaders
+    lockBinding("add_headers", httrNamespace)
+
+    expectedConfig <- list()
+    retryCalled <- FALSE
+    unlockBinding("RETRY", httrNamespace)
+    newRETRY <- function(verb, url, body, config, times)
+    {
+        retryCalled <<- TRUE
+        expectedConfig <<- config
+    }
+    httrNamespace$RETRY <- newRETRY
+    lockBinding("RETRY", httrNamespace)
+
+    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
+    http <- HttpRequest$new()
+    http$exec("GET", "url")
+
+    expect_that(add_headersCalled, is_true())
+    expect_that(retryCalled, is_true())
+    expect_that(expectedConfig$options, equals(list(ssl_verifypeer = 0L)))
+})
+
+test_that("getConnection calls curl functions correctly", {
+    curlNamespace <- getNamespace("curl")
+
+    # Monkeypatch curl functions and assert that they are called later
+    curlCalled <- FALSE
+    unlockBinding("curl", curlNamespace)
+    newCurl <- function(url, open, handle) curlCalled <<- TRUE
+    curlNamespace$curl <- newCurl
+    lockBinding("curl", curlNamespace)
+
+    new_handleCalled <- FALSE
+    unlockBinding("new_handle", curlNamespace)
+    newHandleFun <- function()
+    {
+        new_handleCalled <<- TRUE
+        list()
+    }
+    curlNamespace$new_handle <- newHandleFun
+    lockBinding("new_handle", curlNamespace)
+
+    handle_setheadersCalled <- FALSE
+    unlockBinding("handle_setheaders", curlNamespace)
+    newHandleSetHeaders <- function(h, .list) handle_setheadersCalled <<- TRUE
+    curlNamespace$handle_setheaders <- newHandleSetHeaders
+    lockBinding("handle_setheaders", curlNamespace)
+
+    handle_setoptCalled <- FALSE
+    unlockBinding("handle_setopt", curlNamespace)
+    newHandleSetOpt <- function(h, ssl_verifypeer) handle_setoptCalled <<- TRUE
+    curlNamespace$handle_setopt <- newHandleSetOpt
+    lockBinding("handle_setopt", curlNamespace)
+
+
+    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
+    http <- HttpRequest$new()
+    http$getConnection("location", list(), "r")
+
+    expect_that(new_handleCalled, is_true())
+    expect_that(handle_setheadersCalled, is_true())
+    expect_that(handle_setoptCalled, is_true())
+    expect_that(curlCalled, is_true())
+})
index 859b6180f3380c2d834b99e126aa0c7761155368..64988e33db2c3c4614112d2eb993687d6e169199 100644 (file)
@@ -22,7 +22,7 @@ test_that("getWebDavHostName calls REST service properly", {
     expect_that(httpRequest$URLIsProperlyConfigured, is_true())
     expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
     expect_that(httpRequest$numberOfGETRequests, equals(1))
-}) 
+})
 
 test_that("getWebDavHostName returns webDAV host name properly", {
 
@@ -32,8 +32,8 @@ test_that("getWebDavHostName returns webDAV host name properly", {
     REST <- RESTService$new("token", "host",
                             httpRequest, FakeHttpParser$new())
 
-    expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName())) 
-}) 
+    expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName()))
+})
 
 test_that("create calls REST service properly", {
 
@@ -51,7 +51,7 @@ test_that("create calls REST service properly", {
     expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(fakeHttp$numberOfPUTRequests, equals(1))
-}) 
+})
 
 test_that("create raises exception if server response code is not between 200 and 300", {
 
@@ -60,13 +60,13 @@ test_that("create raises exception if server response code is not between 200 an
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$create("file", uuid),
                 throws_error("Server code: 404"))
-}) 
+})
 
 test_that("delete calls REST service properly", {
 
@@ -75,7 +75,7 @@ test_that("delete calls REST service properly", {
     fakeHttp <- FakeHttpRequest$new(expectedURL)
     fakeHttpParser <- FakeHttpParser$new()
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, fakeHttpParser,
                             0, "https://webDavHost/")
 
@@ -84,7 +84,7 @@ test_that("delete calls REST service properly", {
     expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(fakeHttp$numberOfDELETERequests, equals(1))
-}) 
+})
 
 test_that("delete raises exception if server response code is not between 200 and 300", {
 
@@ -99,7 +99,7 @@ test_that("delete raises exception if server response code is not between 200 an
 
     expect_that(REST$delete("file", uuid),
                 throws_error("Server code: 404"))
-}) 
+})
 
 test_that("move calls REST service properly", {
 
@@ -108,7 +108,7 @@ test_that("move calls REST service properly", {
     fakeHttp <- FakeHttpRequest$new(expectedURL)
     fakeHttpParser <- FakeHttpParser$new()
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, fakeHttpParser,
                             0, "https://webDavHost/")
 
@@ -118,7 +118,7 @@ test_that("move calls REST service properly", {
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(fakeHttp$requestHeaderContainsDestinationField, is_true())
     expect_that(fakeHttp$numberOfMOVERequests, equals(1))
-}) 
+})
 
 test_that("move raises exception if server response code is not between 200 and 300", {
 
@@ -127,13 +127,47 @@ test_that("move raises exception if server response code is not between 200 and
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$move("file", "newDestination/file", uuid),
                 throws_error("Server code: 404"))
-}) 
+})
+
+test_that("copy calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$copy("file", "newDestination/file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsDestinationField, is_true())
+    expect_that(fakeHttp$numberOfCOPYRequests, equals(1))
+})
+
+test_that("copy raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$copy("file", "newDestination/file", uuid),
+                throws_error("Server code: 404"))
+})
 
 test_that("getCollectionContent retreives correct content from WebDAV server", {
 
@@ -145,7 +179,7 @@ test_that("getCollectionContent retreives correct content from WebDAV server", {
 
     fakeHttp <- FakeHttpRequest$new(expectedURL, returnContent)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
@@ -155,7 +189,7 @@ test_that("getCollectionContent retreives correct content from WebDAV server", {
 
     expect_that(returnedContentMatchExpected, is_true())
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
-}) 
+})
 
 test_that("getCollectionContent raises exception if server returns empty response", {
 
@@ -163,26 +197,26 @@ test_that("getCollectionContent raises exception if server returns empty respons
     response <- ""
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$getCollectionContent(uuid),
                 throws_error("Response is empty, request may be misconfigured"))
-}) 
+})
 
 test_that("getCollectionContent parses server response", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
     fakeHttpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             FakeHttpRequest$new(), fakeHttpParser,
                             0, "https://webDavHost/")
 
     REST$getCollectionContent(uuid)
 
     expect_that(fakeHttpParser$parserCallCount, equals(1))
-}) 
+})
 
 test_that("getCollectionContent raises exception if server returns empty response", {
 
@@ -190,13 +224,13 @@ test_that("getCollectionContent raises exception if server returns empty respons
     response <- ""
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$getCollectionContent(uuid),
                 throws_error("Response is empty, request may be misconfigured"))
-}) 
+})
 
 test_that(paste("getCollectionContent raises exception if server",
                 "response code is not between 200 and 300"), {
@@ -206,13 +240,13 @@ test_that(paste("getCollectionContent raises exception if server",
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$getCollectionContent(uuid),
                 throws_error("Server code: 404"))
-}) 
+})
 
 
 test_that("getResourceSize calls REST service properly", {
@@ -235,7 +269,7 @@ test_that("getResourceSize calls REST service properly", {
     expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(returnedContentMatchExpected, is_true())
-}) 
+})
 
 test_that("getResourceSize raises exception if server returns empty response", {
 
@@ -243,13 +277,13 @@ test_that("getResourceSize raises exception if server returns empty response", {
     response <- ""
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$getResourceSize("file", uuid),
                 throws_error("Response is empty, request may be misconfigured"))
-}) 
+})
 
 test_that(paste("getResourceSize raises exception if server",
                 "response code is not between 200 and 300"), {
@@ -259,26 +293,26 @@ test_that(paste("getResourceSize raises exception if server",
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$getResourceSize("file", uuid),
                 throws_error("Server code: 404"))
-}) 
+})
 
 test_that("getResourceSize parses server response", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
     fakeHttpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             FakeHttpRequest$new(), fakeHttpParser,
                             0, "https://webDavHost/")
 
     REST$getResourceSize("file", uuid)
 
     expect_that(fakeHttpParser$parserCallCount, equals(1))
-}) 
+})
 
 test_that("read calls REST service properly", {
 
@@ -290,7 +324,7 @@ test_that("read calls REST service properly", {
 
     fakeHttp <- FakeHttpRequest$new(expectedURL, serverResponse)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
@@ -300,7 +334,7 @@ test_that("read calls REST service properly", {
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(fakeHttp$requestHeaderContainsRangeField, is_true())
     expect_that(returnResult, equals("file content"))
-}) 
+})
 
 test_that("read raises exception if server response code is not between 200 and 300", {
 
@@ -309,48 +343,48 @@ test_that("read raises exception if server response code is not between 200 and
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$read("file", uuid),
                 throws_error("Server code: 404"))
-}) 
+})
 
 test_that("read raises exception if contentType is not valid", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
     fakeHttp <- FakeHttpRequest$new()
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, HttpParser$new(),
                             0, "https://webDavHost/")
 
     expect_that(REST$read("file", uuid, "some invalid content type"),
                 throws_error("Invalid contentType. Please use text or raw."))
-}) 
+})
 
 test_that("read parses server response", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
     fakeHttpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             FakeHttpRequest$new(), fakeHttpParser,
                             0, "https://webDavHost/")
 
     REST$read("file", uuid, "text", 1024, 512)
 
     expect_that(fakeHttpParser$parserCallCount, equals(1))
-}) 
+})
 
 test_that("write calls REST service properly", {
 
-    fileContent <- "new file content" 
+    fileContent <- "new file content"
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
     expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
     fakeHttp <- FakeHttpRequest$new(expectedURL)
 
-    REST <- RESTService$new("token", "https://host/", 
+    REST <- RESTService$new("token", "https://host/",
                             fakeHttp, FakeHttpParser$new(),
                             0, "https://webDavHost/")
 
@@ -360,12 +394,12 @@ test_that("write calls REST service properly", {
     expect_that(fakeHttp$requestBodyIsProvided, is_true())
     expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
     expect_that(fakeHttp$requestHeaderContainsContentTypeField, is_true())
-}) 
+})
 
 test_that("write raises exception if server response code is not between 200 and 300", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
-    fileContent <- "new file content" 
+    fileContent <- "new file content"
     response <- list()
     response$status_code <- 404
     fakeHttp <- FakeHttpRequest$new(serverResponse = response)
@@ -376,4 +410,17 @@ test_that("write raises exception if server response code is not between 200 and
 
     expect_that(REST$write("file", uuid, fileContent, "text/html"),
                 throws_error("Server code: 404"))
-}) 
+})
+
+test_that("getConnection calls REST service properly", {
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttp <- FakeHttpRequest$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    REST$getConnection("file", uuid, "r")
+
+    expect_that(fakeHttp$numberOfgetConnectionCalls, equals(1))
+})
index e025586c58a968f6c0d61a47512087a69d601635..a6e420962bce9777d56d69c4ded58c015986b288 100644 (file)
@@ -15,7 +15,7 @@ test_that("getRelativePath returns path relative to the tree root", {
 
     expect_that(animal$getRelativePath(), equals("animal"))
     expect_that(fish$getRelativePath(), equals("animal/fish"))
-}) 
+})
 
 test_that(paste("getFileListing by default returns sorted path of all files",
                 "relative to the current subcollection"), {
@@ -38,7 +38,7 @@ test_that(paste("getFileListing by default returns sorted path of all files",
                     all(expectedResult == result)
 
     expect_that(resultsMatch, is_true())
-}) 
+})
 
 test_that(paste("getFileListing returns sorted names of all direct children",
                 "if fullPath is set to FALSE"), {
@@ -59,7 +59,7 @@ test_that(paste("getFileListing returns sorted names of all direct children",
                     all(expectedResult == result)
 
     expect_that(resultsMatch, is_true())
-}) 
+})
 
 test_that("add adds content to inside collection tree", {
 
@@ -75,7 +75,7 @@ test_that("add adds content to inside collection tree", {
 
     expect_that(animalContainsFish, is_true())
     expect_that(animalContainsDog, is_true())
-}) 
+})
 
 test_that("add raises exception if content name is empty string", {
 
@@ -86,7 +86,7 @@ test_that("add raises exception if content name is empty string", {
                 throws_error("Content has invalid name.", fixed = TRUE))
 })
 
-test_that(paste("add raises exception if ArvadosFile/Subcollection", 
+test_that(paste("add raises exception if ArvadosFile/Subcollection",
                 "with same name already exists in the subcollection"), {
 
     animal     <- Subcollection$new("animal")
@@ -102,9 +102,9 @@ test_that(paste("add raises exception if ArvadosFile/Subcollection",
     expect_that(animal$add(thirdFish),
                 throws_error(paste("Subcollection already contains ArvadosFile or",
                                    "Subcollection with same name."), fixed = TRUE))
-}) 
+})
 
-test_that(paste("add raises exception if passed argument is", 
+test_that(paste("add raises exception if passed argument is",
                 "not ArvadosFile or Subcollection"), {
 
     animal <- Subcollection$new("animal")
@@ -113,11 +113,11 @@ test_that(paste("add raises exception if passed argument is",
     expect_that(animal$add(number),
                 throws_error(paste("Expected AravodsFile or Subcollection object,",
                                    "got (numeric)."), fixed = TRUE))
-}) 
+})
 
-test_that(paste("add post content to a REST service", 
+test_that(paste("add post content to a REST service",
                 "if subcollection belongs to a collection"), {
-    
+
     collectionContent <- c("animal", "animal/fish")
     fakeREST <- FakeRESTService$new(collectionContent)
 
@@ -131,7 +131,7 @@ test_that(paste("add post content to a REST service",
     animal$add(dog)
 
     expect_that(fakeREST$createCallCount, equals(1))
-}) 
+})
 
 test_that("remove removes content from subcollection", {
 
@@ -144,9 +144,9 @@ test_that("remove removes content from subcollection", {
     returnValueAfterRemovalIsNull <- is.null(animal$get("fish"))
 
     expect_that(returnValueAfterRemovalIsNull, is_true())
-}) 
+})
 
-test_that(paste("remove raises exception", 
+test_that(paste("remove raises exception",
                 "if content to remove doesn't exist in the subcollection"), {
 
     animal <- Subcollection$new("animal")
@@ -154,7 +154,7 @@ test_that(paste("remove raises exception",
     expect_that(animal$remove("fish"),
                 throws_error(paste("Subcollection doesn't contains ArvadosFile",
                                    "or Subcollection with specified name.")))
-}) 
+})
 
 test_that("remove raises exception if passed argument is not character vector", {
 
@@ -164,11 +164,11 @@ test_that("remove raises exception if passed argument is not character vector",
     expect_that(animal$remove(number),
                 throws_error(paste("Expected character,",
                                    "got (numeric)."), fixed = TRUE))
-}) 
+})
 
-test_that(paste("remove removes content from REST service", 
+test_that(paste("remove removes content from REST service",
                 "if subcollection belongs to a collection"), {
-    
+
     collectionContent <- c("animal", "animal/fish", "animal/dog")
     fakeREST <- FakeRESTService$new(collectionContent)
 
@@ -180,9 +180,9 @@ test_that(paste("remove removes content from REST service",
     animal$remove("fish")
 
     expect_that(fakeREST$deleteCallCount, equals(1))
-}) 
+})
 
-test_that(paste("get returns ArvadosFile or Subcollection", 
+test_that(paste("get returns ArvadosFile or Subcollection",
                 "if file or folder with given name exists"), {
 
     animal <- Subcollection$new("animal")
@@ -203,9 +203,9 @@ test_that(paste("get returns ArvadosFile or Subcollection",
 
     expect_that(returnedDogIsArvadosFile, is_true())
     expect_that(returnedDog$getName(), equals("dog"))
-}) 
+})
 
-test_that(paste("get returns NULL if file or folder", 
+test_that(paste("get returns NULL if file or folder",
                 "with given name doesn't exists"), {
 
     animal <- Subcollection$new("animal")
@@ -216,7 +216,7 @@ test_that(paste("get returns NULL if file or folder",
     returnedDogIsNull <- is.null(animal$get("dog"))
 
     expect_that(returnedDogIsNull, is_true())
-}) 
+})
 
 test_that("getFirst returns first child in the subcollection", {
 
@@ -226,7 +226,7 @@ test_that("getFirst returns first child in the subcollection", {
     animal$add(fish)
 
     expect_that(animal$getFirst()$getName(), equals("fish"))
-}) 
+})
 
 test_that("getFirst returns NULL if subcollection contains no children", {
 
@@ -235,7 +235,7 @@ test_that("getFirst returns NULL if subcollection contains no children", {
     returnedElementIsNull <- is.null(animal$getFirst())
 
     expect_that(returnedElementIsNull, is_true())
-}) 
+})
 
 test_that(paste("setCollection by default sets collection",
                 "filed of subcollection and all its children"), {
@@ -248,7 +248,7 @@ test_that(paste("setCollection by default sets collection",
 
     expect_that(animal$getCollection(), equals("myCollection"))
     expect_that(fish$getCollection(), equals("myCollection"))
-}) 
+})
 
 test_that(paste("setCollection sets collection filed of subcollection only",
                 "if parameter setRecursively is set to FALSE"), {
@@ -262,7 +262,7 @@ test_that(paste("setCollection sets collection filed of subcollection only",
 
     expect_that(animal$getCollection(), equals("myCollection"))
     expect_that(fishCollectionIsNull, is_true())
-}) 
+})
 
 test_that(paste("move raises exception if subcollection",
                 "doesn't belong to any collection"), {
@@ -271,7 +271,7 @@ test_that(paste("move raises exception if subcollection",
 
     expect_that(animal$move("new/location"),
                 throws_error("Subcollection doesn't belong to any collection"))
-}) 
+})
 
 test_that("move raises exception if new location contains content with the same name", {
 
@@ -290,7 +290,7 @@ test_that("move raises exception if new location contains content with the same
     expect_that(fish$move("fish"),
                 throws_error("Destination already contains content with same name."))
 
-}) 
+})
 
 test_that(paste("move raises exception if newLocationInCollection",
                 "parameter is invalid"), {
@@ -309,8 +309,8 @@ test_that(paste("move raises exception if newLocationInCollection",
     fish <- collection$get("animal/fish")
 
     expect_that(fish$move("objects/dog"),
-                throws_error("Unable to get destination subcollection"))
-}) 
+                throws_error("Unable to get destination subcollection."))
+})
 
 test_that("move moves subcollection inside collection tree", {
 
@@ -332,7 +332,7 @@ test_that("move moves subcollection inside collection tree", {
 
     expect_that(fishIsNullOnOldLocation, is_true())
     expect_that(fishExistsOnNewLocation, is_true())
-}) 
+})
 
 test_that(paste("getSizeInBytes returns zero if subcollection",
                 "is not part of a collection"), {
@@ -340,7 +340,7 @@ test_that(paste("getSizeInBytes returns zero if subcollection",
     animal <- Subcollection$new("animal")
 
     expect_that(animal$getSizeInBytes(), equals(0))
-}) 
+})
 
 test_that(paste("getSizeInBytes delegates size calculation",
                 "to REST service class"), {
@@ -358,3 +358,87 @@ test_that(paste("getSizeInBytes delegates size calculation",
 
     expect_that(resourceSize, equals(100))
 })
+
+test_that(paste("copy raises exception if subcollection",
+                "doesn't belong to any collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$copy("new/location"),
+                throws_error("Subcollection doesn't belong to any collection."))
+})
+
+test_that("copy raises exception if new location contains content with the same name", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$copy("fish"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that(paste("copy raises exception if location parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$copy("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("copy copies subcollection inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fish$copy("fish")
+    fishExistsOnOldLocation <- !is.null(collection$get("animal/fish"))
+    fishExistsOnNewLocation <- !is.null(collection$get("fish"))
+
+    expect_that(fishExistsOnOldLocation, is_true())
+    expect_that(fishExistsOnNewLocation, is_true())
+})
+
+test_that("duplicate performs deep cloning of Subcollection", {
+    foo <- ArvadosFile$new("foo")
+    bar <- ArvadosFile$new("bar")
+    sub <- Subcollection$new("qux")
+    sub$add(foo)
+    sub$add(bar)
+
+    newSub1 <- sub$duplicate()
+    newSub2 <- sub$duplicate("quux")
+
+    expect_that(newSub1$getFileListing(), equals(sub$getFileListing()))
+    expect_that(sort(newSub2$getFileListing()), equals(c("quux/bar", "quux/foo")))
+})
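The `duplicate` tests assert deep-clone semantics: the returned Subcollection carries independent copies of its children and can optionally take a new name. A hedged Python analogue of that contract (class and field names here are illustrative, not the SDK's):

```python
import copy

class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = list(children or [])

    def duplicate(self, new_name=None):
        # deep copy: children are cloned too, so the clone shares no state
        clone = copy.deepcopy(self)
        if new_name is not None:
            clone.name = new_name
        return clone

sub = Node("qux", [Node("foo"), Node("bar")])
clone = sub.duplicate("quux")
assert clone.name == "quux"
assert clone.children[0] is not sub.children[0]  # independent children
```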
index 9f5e07c1767af6c089274a308dc3dc270fb25c2f..419e8785fdf53d0b7c0ef538e6dc9eeb8f7a8ee0 100644 (file)
@@ -26,7 +26,7 @@ test_that("listAll always returns all resource items from server", {
     result <- listAll(testFunction)
 
     expect_that(length(result), equals(8))
-}) 
+})
 
 test_that("trimFromStart trims string correctly if string starts with trimCharacters", {
 
@@ -36,7 +36,7 @@ test_that("trimFromStart trims string correctly if string starts with trimCharac
     result <- trimFromStart(sample, trimCharacters)
 
     expect_that(result, equals("random"))
-}) 
+})
 
 test_that("trimFromStart returns original string if string doesn't starts with trimCharacters", {
 
@@ -46,7 +46,7 @@ test_that("trimFromStart returns original string if string doesn't starts with t
     result <- trimFromStart(sample, trimCharacters)
 
     expect_that(result, equals("./something/random"))
-}) 
+})
 
 test_that("trimFromEnd trims string correctly if string ends with trimCharacters", {
 
@@ -56,7 +56,7 @@ test_that("trimFromEnd trims string correctly if string ends with trimCharacters
     result <- trimFromEnd(sample, trimCharacters)
 
     expect_that(result, equals("./something"))
-}) 
+})
 
 test_that("trimFromEnd returns original string if string doesn't end with trimCharacters", {
 
@@ -66,11 +66,11 @@ test_that("trimFromEnd returns original string if string doesn't end with trimCh
     result <- trimFromStart(sample, trimCharacters)
 
     expect_that(result, equals("./something/random"))
-}) 
+})
 
 test_that("RListToPythonList converts nested R list to char representation of Python list", {
 
-    sample <- list("insert", list("random", list("text")), list("here")) 
+    sample <- list("insert", list("random", list("text")), list("here"))
 
     result              <- RListToPythonList(sample)
     resultWithSeparator <- RListToPythonList(sample, separator = ",+")
@@ -78,7 +78,7 @@ test_that("RListToPythonList converts nested R list to char representation of Py
     expect_that(result, equals("[\"insert\", [\"random\", \"text\"], \"here\"]"))
     expect_that(resultWithSeparator,
                 equals("[\"insert\",+[\"random\",+\"text\"],+\"here\"]"))
-}) 
+})
 
 test_that("appendToStartIfNotExist appends characters to beginning of a string", {
 
@@ -88,7 +88,7 @@ test_that("appendToStartIfNotExist appends characters to beginning of a string",
     result <- appendToStartIfNotExist(sample, charactersToAppend)
 
     expect_that(result, equals("Happy New Year"))
-}) 
+})
 
 test_that(paste("appendToStartIfNotExist returns original string if string",
                 "doesn't start with specified characters"), {
@@ -99,7 +99,7 @@ test_that(paste("appendToStartIfNotExist returns original string if string",
     result <- appendToStartIfNotExist(sample, charactersToAppend)
 
     expect_that(result, equals("Happy New Year"))
-}) 
+})
 
 test_that(paste("splitToPathAndName splits relative path to file/folder",
                 "name and rest of the path"), {
@@ -110,4 +110,4 @@ test_that(paste("splitToPathAndName splits relative path to file/folder",
 
     expect_that(result$name, equals("file.exe"))
     expect_that(result$path, equals("path/to/my"))
-}) 
+})
index fd48b4852df4f1223eb7ce6fc125fc2234e78f6d..723b5166865ab6b272dbb885b92f73c009125141 100644 (file)
@@ -30,15 +30,15 @@ Gem::Specification.new do |s|
   s.executables << "arv-crunch-job"
   s.executables << "arv-tag"
   s.required_ruby_version = '>= 2.1.0'
-  s.add_runtime_dependency 'arvados', '~> 0.1', '>= 0.1.20150128223554'
+  s.add_runtime_dependency 'arvados', '~> 1.2.0', '>= 1.2.0'
   # Our google-api-client dependency used to be < 0.9, but that could be
   # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
-  s.add_runtime_dependency 'google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
+  s.add_runtime_dependency 'cure-google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
   s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 5'
   s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
-  s.add_runtime_dependency 'trollop', '~> 2.0'
+  s.add_runtime_dependency 'optimist', '~> 3.0'
   s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
-  s.add_runtime_dependency 'oj', '~> 2.0', '>= 2.0.3'
+  s.add_runtime_dependency 'oj', '~> 3.0'
   s.add_runtime_dependency 'curb', '~> 0.8'
   s.homepage    =
     'https://arvados.org'
index 9783af202fffd029e5921d99002990eabbb466bb..7110b4b991ab41da1f77fc256ed67b7eadd3daef 100755 (executable)
@@ -35,7 +35,7 @@ begin
   require 'andand'
   require 'curb'
   require 'oj'
-  require 'trollop'
+  require 'optimist'
 rescue LoadError => error
   abort <<-EOS
 
@@ -43,7 +43,7 @@ Error loading gems: #{error}
 
 Please install all required gems:
 
-  gem install arvados activesupport andand curb json oj trollop
+  gem install arvados activesupport andand curb json oj optimist
 
   EOS
 end
@@ -375,7 +375,7 @@ end
 
 def arv_create client, arvados, global_opts, remaining_opts
   types = resource_types(arvados.discovery_document)
-  create_opts = Trollop::options do
+  create_opts = Optimist::options do
     opt :project_uuid, "Project uuid in which to create the object", :type => :string
     stop_on resource_types(arvados.discovery_document)
   end
@@ -392,7 +392,7 @@ def arv_create client, arvados, global_opts, remaining_opts
   rsc = rsc.first
 
   discovered_params = arvados.discovery_document["resources"][rsc]["methods"]["create"]["parameters"]
-  method_opts = Trollop::options do
+  method_opts = Optimist::options do
     banner head_banner
     banner "Usage: arv create [--project-uuid] #{object_type} [create parameters]"
     banner ""
@@ -402,7 +402,7 @@ def arv_create client, arvados, global_opts, remaining_opts
       opts = Hash.new()
       opts[:type] = v["type"].to_sym if v.include?("type")
       if [:datetime, :text, :object, :array].index opts[:type]
-        opts[:type] = :string                       # else trollop bork
+        opts[:type] = :string                       # else optimist bork
       end
       opts[:default] = v["default"] if v.include?("default")
       opts[:default] = v["default"].to_i if opts[:type] == :integer
@@ -483,7 +483,7 @@ end
 def parse_arguments(discovery_document, subcommands)
   resources_and_subcommands = resource_types(discovery_document) + subcommands
 
-  option_parser = Trollop::Parser.new do
+  option_parser = Optimist::Parser.new do
     version __FILE__
     banner head_banner
     banner "Usage: arv [--flags] subcommand|resource [method] [--parameters]"
@@ -513,7 +513,7 @@ def parse_arguments(discovery_document, subcommands)
     stop_on resources_and_subcommands
   end
 
-  global_opts = Trollop::with_standard_exception_handling option_parser do
+  global_opts = Optimist::with_standard_exception_handling option_parser do
     o = option_parser.parse ARGV
   end
 
@@ -544,7 +544,7 @@ def parse_arguments(discovery_document, subcommands)
     discovered_params = discovery_document\
     ["resources"][resource.pluralize]\
     ["methods"][method]["parameters"]
-    method_opts = Trollop::options do
+    method_opts = Optimist::options do
       banner head_banner
       banner "Usage: arv #{resource} #{method} [--parameters]"
       banner ""
@@ -554,7 +554,7 @@ def parse_arguments(discovery_document, subcommands)
         opts = Hash.new()
         opts[:type] = v["type"].to_sym if v.include?("type")
         if [:datetime, :text, :object, :array].index opts[:type]
-          opts[:type] = :string                       # else trollop bork
+          opts[:type] = :string                       # else optimist bork
         end
         opts[:default] = v["default"] if v.include?("default")
         opts[:default] = v["default"].to_i if opts[:type] == :integer
@@ -672,7 +672,7 @@ if resource_body
   elsif resource_body_is_readable_file
     resource_body = resource_body_file.read()
     begin
-      # we don't actually need the results of the parsing, 
+      # we don't actually need the results of the parsing,
       # just checking for the JSON::ParserError exception
       JSON.parse resource_body
     rescue JSON::ParserError => e
index b66e9c0526e3a9b7926b381d9fee7ec8cbb6b901..336b1a2c74500b7125fec392717dd1c557e5f2bf 100755 (executable)
@@ -17,14 +17,14 @@ begin
   require 'rubygems'
   require 'json'
   require 'pp'
-  require 'trollop'
+  require 'optimist'
   require 'google/api_client'
 rescue LoadError => l
   $stderr.puts $:
   abort <<-EOS
 #{$0}: fatal: #{l.message}
 Some runtime dependencies may be missing.
-Try: gem install arvados pp google-api-client json trollop
+Try: gem install arvados pp google-api-client json optimist
   EOS
 end
 
@@ -35,7 +35,7 @@ end
 # Parse command line options (the kind that control the behavior of
 # this program, that is, not the pipeline component parameters).
 
-p = Trollop::Parser.new do
+p = Optimist::Parser.new do
   version __FILE__
   banner(<<EOF)
 
@@ -122,7 +122,7 @@ EOF
       type: :string)
   stop_on [:'--']
 end
-$options = Trollop::with_standard_exception_handling p do
+$options = Optimist::with_standard_exception_handling p do
   p.parse ARGV
 end
 $debuglevel = $options[:debug_level] || ($options[:debug] && 1) || 0
index b1783bccf3f277cd705b5012b382182cb3e6b3ad..f709020fc75e91a3449adceabfebd31e02dca13d 100755 (executable)
@@ -113,11 +113,11 @@ begin
   require 'json'
   require 'pp'
   require 'oj'
-  require 'trollop'
+  require 'optimist'
 rescue LoadError
   abort <<-EOS
 #{$0}: fatal: some runtime dependencies are missing.
-Try: gem install pp google-api-client json trollop
+Try: gem install pp google-api-client json optimist
   EOS
 end
 
@@ -156,7 +156,7 @@ class Google::APIClient
   end
 end
 
-global_opts = Trollop::options do
+global_opts = Optimist::options do
   banner usage_string
   banner ""
   opt :dry_run, "Don't actually do anything", :short => "-n"
@@ -169,7 +169,7 @@ global_opts = Trollop::options do
   stop_on ['add', 'remove']
 end
 
-p = Trollop::Parser.new do
+p = Optimist::Parser.new do
   opt(:all,
       "Remove this tag from all objects under your ownership. Only valid with `tag remove'.",
       :short => :none)
@@ -180,7 +180,7 @@ p = Trollop::Parser.new do
       :short => :o)
 end
 
-$options = Trollop::with_standard_exception_handling p do
+$options = Optimist::with_standard_exception_handling p do
   p.parse ARGV
 end
 
index 9343fcfbfd2f97bc182daa788f5c45f74b8ae078..b8afe638ac3c6a517058fd3e85a49b90607f150c 100755 (executable)
@@ -132,6 +132,7 @@ my $resume_stash;
 my $cgroup_root = "/sys/fs/cgroup";
 my $docker_bin = "docker.io";
 my $docker_run_args = "";
+my $srun_sync_timeout = 15*60;
 GetOptions('force-unlock' => \$force_unlock,
            'git-dir=s' => \$git_dir,
            'job=s' => \$jobspec,
@@ -141,6 +142,7 @@ GetOptions('force-unlock' => \$force_unlock,
            'cgroup-root=s' => \$cgroup_root,
            'docker-bin=s' => \$docker_bin,
            'docker-run-args=s' => \$docker_run_args,
+           'srun-sync-timeout=i' => \$srun_sync_timeout,
     );
 
 if (defined $job_api_token) {
@@ -2007,6 +2009,8 @@ sub srun_sync
   my ($stdout_r, $stdout_w);
   pipe $stdout_r, $stdout_w or croak("pipe() failed: $!");
 
+  my $started_srun = scalar time;
+
   my $srunpid = fork();
   if ($srunpid == 0)
   {
@@ -2050,6 +2054,12 @@ sub srun_sync
     if (!$busy) {
       select(undef, undef, undef, 0.1);
     }
+    if (($started_srun + $srun_sync_timeout) < scalar time) {
+      # Exceeded general timeout for "srun_sync" operations, likely
+      # means something got stuck on the remote node.
+      Log(undef, "srun_sync exceeded timeout, will fail.");
+      $main::please_freeze = 1;
+    }
     killem(keys %proc) if $main::please_freeze;
   }
   my $exited = $?;
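The new `--srun-sync-timeout` option (default 15 minutes) adds a watchdog to the `srun_sync` polling loop: the start time is recorded before forking, and once elapsed wall-clock time exceeds the budget the job sets `$main::please_freeze`, which kills the outstanding child processes. A hedged sketch of the same watchdog-in-poll-loop pattern in Python (command handling is illustrative):

```python
import subprocess
import time

SRUN_SYNC_TIMEOUT = 15 * 60  # seconds, mirroring the new default above

def run_with_watchdog(cmd, timeout=SRUN_SYNC_TIMEOUT):
    started = time.time()              # like $started_srun
    proc = subprocess.Popen(cmd)
    while proc.poll() is None:
        if time.time() - started > timeout:
            # analogous to setting $main::please_freeze and killem()
            proc.kill()
            raise TimeoutError("srun_sync exceeded timeout, will fail.")
        time.sleep(0.1)
    return proc.returncode
```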
index 131795ee2c0173703a7385c8676d11536ff17398..da24dc48465426ced81d1d37311b271bf45132a4 100644 (file)
@@ -135,6 +135,21 @@ class ArvCwlRunner(object):
             else:
                 raise Exception("Unsupported API '%s', expected one of %s" % (work_api, expected_api))
 
+        if self.work_api == "jobs":
+            logger.warn("""
+*******************************
+Using the deprecated 'jobs' API.
+
+To get rid of this warning:
+
+Users: read about migrating at
+http://doc.arvados.org/user/cwl/cwl-style.html#migrate
+and use the option --api=containers
+
+Admins: configure the cluster to disable the 'jobs' API as described at:
+http://doc.arvados.org/install/install-api-server.html#disable_api_methods
+*******************************""")
+
         self.loadingContext = ArvLoadingContext(vars(arvargs))
         self.loadingContext.fetcher_constructor = self.fetcher_constructor
         self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
@@ -528,7 +543,7 @@ class ArvCwlRunner(object):
             runnerjob.run(submitargs)
             return (runnerjob.uuid, "success")
 
-        self.poll_api = arvados.api('v1')
+        self.poll_api = arvados.api('v1', timeout=kwargs["http_timeout"])
         self.polling_thread = threading.Thread(target=self.poll_states)
         self.polling_thread.start()
 
@@ -757,6 +772,9 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     parser.add_argument("--thread-count", type=int,
                         default=4, help="Number of threads to use for job submit and output collection.")
 
+    parser.add_argument("--http-timeout", type=int,
+                        default=5*60, dest="http_timeout", help="HTTP timeout. Default is 5 minutes.")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--trash-intermediate", action="store_true",
                         default=False, dest="trash_intermediate",
@@ -829,7 +847,9 @@ def main(args, stdout, stderr, api_client=None, keep_client=None,
 
     try:
         if api_client is None:
-            api_client = arvados.safeapi.ThreadSafeApiCache(api_params={"model": OrderedJsonModel()}, keep_params={"num_retries": 4})
+            api_client = arvados.safeapi.ThreadSafeApiCache(
+                api_params={"model": OrderedJsonModel(), "timeout": arvargs.http_timeout},
+                keep_params={"num_retries": 4})
             keep_client = api_client.keep
             # Make an API object now so errors are reported early.
             api_client.users().current().execute()
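These hunks thread a single HTTP timeout from the new `--http-timeout` flag (default 5 minutes) into both the polling API client and the `ThreadSafeApiCache` used for submission. A hedged sketch of that plumbing, with a stub standing in for the Arvados client constructors:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--http-timeout", type=int, default=5*60,
                    dest="http_timeout",
                    help="HTTP timeout. Default is 5 minutes.")

def make_client(timeout):
    # stand-in for arvados.api('v1', timeout=timeout) and for
    # ThreadSafeApiCache(api_params={..., "timeout": timeout}, ...)
    return {"timeout": timeout}

args = parser.parse_args(["--http-timeout", "120"])
assert make_client(args.http_timeout)["timeout"] == 120
```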
index 948a9a46feab30bf3f8759fee94d81d14205e42d..49c40b1daefd6262f10e7019f55d24baa36b06d2 100644 (file)
@@ -488,7 +488,7 @@ class RunnerContainer(Runner):
         self.uuid = response["uuid"]
         self.arvrunner.process_submitted(self)
 
-        logger.info("%s submitted container %s", self.arvrunner.label(self), response["uuid"])
+        logger.info("%s submitted container_request %s", self.arvrunner.label(self), response["uuid"])
 
     def done(self, record):
         try:
index 15689a9010934cf2b8847ec08825cf30bd3e13eb..316a652529b384205661827e2c46d056025d5506 100644 (file)
@@ -83,7 +83,7 @@ class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
         p = sp[0]
         if p.startswith("keep:") and arvados.util.keep_locator_pattern.match(p[5:]):
             pdh = p[5:]
-            return (self.collection_cache.get(pdh), sp[1] if len(sp) == 2 else None)
+            return (self.collection_cache.get(pdh), urlparse.unquote(sp[1]) if len(sp) == 2 else None)
         else:
             return (None, path)
 
@@ -139,6 +139,17 @@ class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
         else:
             return super(CollectionFsAccess, self).exists(fn)
 
+    def size(self, fn):  # type: (unicode) -> int
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            if rest:
+                arvfile = collection.find(rest)
+                if isinstance(arvfile, arvados.arvfile.ArvadosFile):
+                    return arvfile.size()
+            raise IOError(errno.EINVAL, "Not a path to a file %s" % (fn))
+        else:
+            return super(CollectionFsAccess, self).size(fn)
+
     def isfile(self, fn):  # type: (unicode) -> bool
         collection, rest = self.get_collection(fn)
         if collection is not None:
index 3ad1aa6a704632a945b2ed059c10f40a87cdb578..29c0535d93c8a0ff164af01949ae152f056591d7 100644 (file)
@@ -129,6 +129,8 @@ def upload_dependencies(arvrunner, name, document_loader,
 
     sc = []
     def only_real(obj):
+        # Only interested in local files that need to be uploaded,
+        # don't include file literals, keep references, etc.
         if obj.get("location", "").startswith("file:"):
             sc.append(obj)
 
@@ -168,8 +170,13 @@ def upload_dependencies(arvrunner, name, document_loader,
 
     visit_class(workflowobj, ("CommandLineTool", "Workflow"), discover_default_secondary_files)
 
-    for d in discovered:
-        sc.extend(discovered[d])
+    for d in list(discovered.keys()):
+        # Only interested in discovered secondaryFiles which are local
+        # files that need to be uploaded.
+        if d.startswith("file:"):
+            sc.extend(discovered[d])
+        else:
+            del discovered[d]
 
     mapper = ArvPathMapper(arvrunner, sc, "",
                            "keep:%s",
index 0cab074d9a8a9755f941c6a59e226d4bd9d1e5f3..2b7b31b9f3f4b4070cbd14d986ffe87259989200 100644 (file)
@@ -33,10 +33,12 @@ setup(name='arvados-cwl-runner',
       # Note that arvados/build/run-build-packages.sh looks at this
       # file to determine what version of cwltool and schema-salad to build.
       install_requires=[
-          'cwltool==1.0.20180615183820',
-          'schema-salad==2.7.20180501211602',
-          'typing >= 3.5.3',
-          'ruamel.yaml >=0.13.11, <0.15',
+          'cwltool==1.0.20180806194258',
+          'schema-salad==2.7.20180719125426',
+          'typing >= 3.6.4',
+          # Need to limit ruamel.yaml version to 0.15.26 because of bug
+          # https://bitbucket.org/ruamel/yaml/issues/227/regression-parsing-flow-mapping
+          'ruamel.yaml >=0.13.11, <= 0.15.26',
           'arvados-python-client>=1.1.4.20180607143841',
           'setuptools',
           'ciso8601 >=1.0.6, <2.0.0',
diff --git a/sdk/cwl/tests/13931-size-job.yml b/sdk/cwl/tests/13931-size-job.yml
new file mode 100644 (file)
index 0000000..97b46dd
--- /dev/null
@@ -0,0 +1,3 @@
+fastq1:
+  class: File
+  location: keep:20850f01122e860fb878758ac1320877+71/sample1_S01_R1_001.fastq.gz
\ No newline at end of file
diff --git a/sdk/cwl/tests/13931-size.cwl b/sdk/cwl/tests/13931-size.cwl
new file mode 100644 (file)
index 0000000..aed1bd6
--- /dev/null
@@ -0,0 +1,10 @@
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  fastq1: File
+outputs:
+  out: stdout
+baseCommand: echo
+arguments:
+  - $(inputs.fastq1.size)
+stdout: size.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/13976-keepref-wf.cwl b/sdk/cwl/tests/13976-keepref-wf.cwl
new file mode 100644 (file)
index 0000000..7aa7b0a
--- /dev/null
@@ -0,0 +1,17 @@
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  - class: InlineJavascriptRequirement
+arguments:
+  - ls
+  - -l
+  - $(inputs.hello)
+inputs:
+  hello:
+    type: File
+    default:
+      class: File
+      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
+    secondaryFiles:
+      - .idx
+outputs: []
\ No newline at end of file
index 4869e3e524153af30feb6a654e65e2cac6c57f3f..8635aae65507fadb6be76d27156167855440ac68 100755 (executable)
@@ -12,4 +12,8 @@ fi
 if ! arv-get 4d8a70b1e63b2aad6984e40e338e2373+69 > /dev/null ; then
     arv-put --portable-data-hash secondaryFiles/hello.txt*
 fi
+if ! arv-get 20850f01122e860fb878758ac1320877+71 > /dev/null ; then
+    arv-put --portable-data-hash samples/sample1_S01_R1_001.fastq.gz
+fi
+
 exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
index 8eac71886cbf643ca97db1e033b9ba2808b40137..e51c7a2531dbea456112ae577ec4698c88883e09 100644 (file)
   tool: 12418-glob-empty-collection.cwl
   doc: "Test glob output on empty collection"
 
+- job: null
+  output:
+    out: null
+  tool: 13976-keepref-wf.cwl
+  doc: "Test issue 13976"
+
 - job: null
   output:
     out: out
     out: null
   tool: wf-defaults/wf7.cwl
   doc: workflow level default in RunInSingleContainer
+
+- job: 13931-size-job.yml
+  output:
+    "out": {
+        "checksum": "sha1$5bf6e5357bd42a6b1d2a3a040e16a91490064d26",
+        "location": "size.txt",
+        "class": "File",
+        "size": 3
+    }
+  tool: 13931-size.cwl
+  doc: Test that size is set for files in Keep
index ae234414a3df90888cfbe9028c06aa5efbba9f55..69f3ae046e31ca3e02aa52be7967e43da2394d00 100644 (file)
@@ -5,6 +5,7 @@
 import arvados_cwl
 import arvados_cwl.context
 from arvados_cwl.arvdocker import arv_docker_clear_cache
+import arvados.config
 import logging
 import mock
 import unittest
@@ -21,6 +22,32 @@ if not os.getenv('ARVADOS_DEBUG'):
     logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
     logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
 
+class CollectionMock(object):
+    def __init__(self, vwdmock, *args, **kwargs):
+        self.vwdmock = vwdmock
+        self.count = 0
+
+    def open(self, *args, **kwargs):
+        self.count += 1
+        return self.vwdmock.open(*args, **kwargs)
+
+    def copy(self, *args, **kwargs):
+        self.count += 1
+        self.vwdmock.copy(*args, **kwargs)
+
+    def save_new(self, *args, **kwargs):
+        pass
+
+    def __len__(self):
+        return self.count
+
+    def portable_data_hash(self):
+        if self.count == 0:
+            return arvados.config.EMPTY_BLOCK_LOCATOR
+        else:
+            return "99999999999999999999999999999996+99"
+
+
 class TestContainer(unittest.TestCase):
 
     def helper(self, runner, enable_reuse=True):
@@ -231,8 +258,7 @@ class TestContainer(unittest.TestCase):
         runner.fs_access.get_collection.side_effect = get_collection_mock
 
         vwdmock = mock.MagicMock()
-        collection_mock.return_value = vwdmock
-        vwdmock.portable_data_hash.return_value = "99999999999999999999999999999996+99"
+        collection_mock.side_effect = lambda *args, **kwargs: CollectionMock(vwdmock, *args, **kwargs)
 
         tool = cmap({
             "inputs": [],
@@ -481,7 +507,8 @@ class TestContainer(unittest.TestCase):
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
-            "portable_data_hash": "99999999999999999999999999999993+99"}
+            "portable_data_hash": "99999999999999999999999999999994+99",
+            "manifest_text": ". 99999999999999999999999999999994+99 0:0:file1 0:0:file2"}
 
         document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
index c110bc5d53cd4634656d93fab2937954be973d07..20efe1513981585b3c699f73d0dbba6994f7c682 100644 (file)
@@ -19,6 +19,7 @@ from schema_salad.ref_resolver import Loader
 from schema_salad.sourceline import cmap
 from .mock_discovery import get_rootDesc
 from .matcher import JsonDiffMatcher, StripYAMLComments
+from .test_container import CollectionMock
 
 if not os.getenv('ARVADOS_DEBUG'):
     logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
@@ -388,7 +389,9 @@ class TestWorkflow(unittest.TestCase):
         tool, metadata = loadingContext.loader.resolve_ref("tests/wf/scatter2.cwl")
         metadata["cwlVersion"] = tool["cwlVersion"]
 
-        mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
+        mockc = mock.MagicMock()
+        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mockc, *args, **kwargs)
+        mockcollectionreader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "token.txt")
 
         arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
         arvtool.formatgraph = None
@@ -411,8 +414,8 @@ class TestWorkflow(unittest.TestCase):
                         'HOME': '$(task.outdir)',
                         'TMPDIR': '$(task.tmpdir)'},
                                'task.vwd': {
-                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999999+118/workflow.cwl',
-                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999999+118/cwl.input.yml'
+                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
                                },
                     'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
                     'task.stdout': 'cwl.output.json'}]},
@@ -429,13 +432,14 @@ class TestWorkflow(unittest.TestCase):
                      ['docker_image_locator', 'in docker', 'arvados/jobs']],
             find_or_create=True)
 
-        mockcollection().open().__enter__().write.assert_has_calls([mock.call(subwf)])
-        mockcollection().open().__enter__().write.assert_has_calls([mock.call(
+        mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])
+        mockc.open().__enter__().write.assert_has_calls([mock.call(
 '''{
   "fileblub": {
     "basename": "token.txt",
     "class": "File",
-    "location": "/keep/99999999999999999999999999999999+118/token.txt"
+    "location": "/keep/99999999999999999999999999999999+118/token.txt",
+    "size": 0
   },
   "sleeptime": 5
 }''')])
@@ -467,7 +471,7 @@ class TestWorkflow(unittest.TestCase):
         tool, metadata = loadingContext.loader.resolve_ref("tests/wf/echo-wf.cwl")
         metadata["cwlVersion"] = tool["cwlVersion"]
 
-        mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
+        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mock.MagicMock(), *args, **kwargs)
 
         arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
         arvtool.formatgraph = None
@@ -489,8 +493,8 @@ class TestWorkflow(unittest.TestCase):
                         'HOME': '$(task.outdir)',
                         'TMPDIR': '$(task.tmpdir)'},
                                'task.vwd': {
-                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999999+118/workflow.cwl',
-                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999999+118/cwl.input.yml'
+                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
                                },
                     'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
                     'task.stdout': 'cwl.output.json'}]},
index cd46251300dfb95862cb7957f510e108dd78b281..8875b7d954d916e332175b5dbdd67103833719b1 100644 (file)
@@ -132,7 +132,8 @@ def stubs(func):
                     "listing": [{
                         "basename": "renamed.txt",
                         "class": "File",
-                        "location": "keep:99999999999999999999999999999998+99/file1.txt"
+                        "location": "keep:99999999999999999999999999999998+99/file1.txt",
+                        "size": 0
                     }],
                     'class': 'Directory'
                 },
@@ -164,7 +165,8 @@ def stubs(func):
                                   {
                                       'basename': 'renamed.txt',
                                       'class': 'File', 'location':
-                                      'keep:99999999999999999999999999999998+99/file1.txt'
+                                      'keep:99999999999999999999999999999998+99/file1.txt',
+                                      'size': 0
                                   }
                               ]}},
                         'cwl:tool': '3fffdeaa75e018172e1b583425f4ebff+60/workflow.cwl#main',
@@ -225,7 +227,8 @@ def stubs(func):
                         'z': {'basename': 'anonymous', 'class': 'Directory', 'listing': [
                             {'basename': 'renamed.txt',
                              'class': 'File',
-                             'location': 'keep:99999999999999999999999999999998+99/file1.txt'
+                             'location': 'keep:99999999999999999999999999999998+99/file1.txt',
+                             'size': 0
                             }
                         ]}
                     },
@@ -286,14 +289,14 @@ class TestSubmit(unittest.TestCase):
                 'manifest_text':
                 '. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
                 'replication_desired': None,
-                'name': 'submit_tool.cwl dependencies',
-            }), ensure_unique_name=True),
+                'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+            }), ensure_unique_name=False),
             mock.call(body=JsonDiffMatcher({
                 'manifest_text':
                 '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
                 'replication_desired': None,
-                'name': 'submit_wf.cwl input',
-            }), ensure_unique_name=True),
+                'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+            }), ensure_unique_name=False),
             mock.call(body=JsonDiffMatcher({
                 'manifest_text':
                 '. 61df2ed9ee3eb7dd9b799e5ca35305fa+1217 0:1217:workflow.cwl\n',
@@ -482,14 +485,14 @@ class TestSubmit(unittest.TestCase):
                 'manifest_text':
                 '. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
                 'replication_desired': None,
-                'name': 'submit_tool.cwl dependencies',
-            }), ensure_unique_name=True),
+                'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+            }), ensure_unique_name=False),
             mock.call(body=JsonDiffMatcher({
                 'manifest_text':
                 '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
                 'replication_desired': None,
-                'name': 'submit_wf.cwl input',
-            }), ensure_unique_name=True)])
+                'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+            }), ensure_unique_name=False)])
 
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         stubs.api.container_requests().create.assert_called_with(
@@ -781,6 +784,7 @@ class TestSubmit(unittest.TestCase):
     @stubs
     def test_submit_file_keepref(self, stubs, tm, collectionReader):
         capture_stdout = cStringIO.StringIO()
+        collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "blorp.txt")
         exited = arvados_cwl.main(
             ["--submit", "--no-wait", "--api=containers", "--debug",
              "tests/wf/submit_keepref_wf.cwl"],
index 7b3b4503efc239661f5b03b2afb0cfac3ca8cc4d..c84252c7b8c135b0eb6105881dab64f70424006b 100644 (file)
@@ -64,7 +64,8 @@
                                 "class": "File",
                                 "location": "keep:99999999999999999999999999999998+99/file1.txt",
                                 "nameext": ".txt",
-                                "nameroot": "renamed"
+                                "nameroot": "renamed",
+                                "size": 0
                             }
                         ]
                     },
index f9f1e967b94f7e589a60888261eae4a7916a88c1..aa1f18052f8afcbe289da18d597b6e66d62d3db6 100644 (file)
@@ -20,7 +20,7 @@ ENV DEBIAN_FRONTEND noninteractive
 
 RUN apt-get update -q && apt-get install -qy git python-pip python-virtualenv python-dev libcurl4-gnutls-dev libgnutls28-dev nodejs python-pyasn1-modules
 
-RUN pip install -U setuptools
+RUN pip install -U setuptools six
 
 ARG sdk
 ARG runner
index 3343bdb9aa0e4fcec1d478489dbbf387cc0ff1dd..ec0239eb37bf0a45bb715b35eab757c6c94850d5 100644 (file)
@@ -14,3 +14,7 @@ type APIClientAuthorization struct {
 type APIClientAuthorizationList struct {
        Items []APIClientAuthorization `json:"items"`
 }
+
+func (aca APIClientAuthorization) TokenV2() string {
+       return "v2/" + aca.UUID + "/" + aca.APIToken
+}
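
A quick sketch of what the new TokenV2 helper produces, assuming the sdk/go/arvados package and using a hypothetical UUID and secret (not values from this change):

    package main

    import (
            "fmt"

            "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
            auth := arvados.APIClientAuthorization{
                    UUID:     "zzzzz-gj3su-077z32aux8dg2s1",
                    APIToken: "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi",
            }
            // Prints the federation-capable "v2/" token form:
            // v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
            fmt.Println(auth.TokenV2())
    }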
index 353901855683f296811a42e64b008568071dbdad..6edd18418bb8015087f8b486acf6ee21d2d26db4 100644 (file)
@@ -56,6 +56,27 @@ type Cluster struct {
        NodeProfiles       map[string]NodeProfile
        InstanceTypes      InstanceTypeMap
        HTTPRequestTimeout Duration
+       RemoteClusters     map[string]RemoteCluster
+       PostgreSQL         PostgreSQL
+}
+
+type PostgreSQL struct {
+       Connection     PostgreSQLConnection
+       ConnectionPool int
+}
+
+type PostgreSQLConnection map[string]string
+
+type RemoteCluster struct {
+       // API endpoint host or host:port; default is {id}.arvadosapi.com
+       Host string
+       // Perform a proxy request when a local client requests an
+       // object belonging to this remote.
+       Proxy bool
+       // Scheme, default "https". Can be set to "http" for testing.
+       Scheme string
+       // Disable TLS verify. Can be set to true for testing.
+       Insecure bool
 }
 
 type InstanceType struct {
@@ -172,6 +193,7 @@ func (np *NodeProfile) ServicePorts() map[ServiceName]string {
 }
 
 type SystemServiceInstance struct {
-       Listen string
-       TLS    bool
+       Listen   string
+       TLS      bool
+       Insecure bool
 }
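
For context, a minimal sketch of how the new RemoteClusters and PostgreSQL config fields above compose; all values are hypothetical, not taken from this change:

    import "git.curoverse.com/arvados.git/sdk/go/arvados"

    var cluster = arvados.Cluster{
            RemoteClusters: map[string]arvados.RemoteCluster{
                    "zbbbb": {
                            Host:   "zbbbb.example.com:443", // empty defaults to zbbbb.arvadosapi.com
                            Proxy:  true,                    // proxy local requests for zbbbb-owned objects
                            Scheme: "https",                 // "http" only for testing
                    },
            },
            PostgreSQL: arvados.PostgreSQL{
                    ConnectionPool: 32,
                    Connection: arvados.PostgreSQLConnection{
                            "host":   "localhost",
                            "dbname": "arvados_production",
                    },
            },
    }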
diff --git a/sdk/go/arvados/postgresql.go b/sdk/go/arvados/postgresql.go
new file mode 100644 (file)
index 0000000..47953ce
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "strings"
+
+func (c PostgreSQLConnection) String() string {
+       s := ""
+       for k, v := range c {
+               s += strings.ToLower(k)
+               s += "='"
+               s += strings.Replace(
+                       strings.Replace(v, `\`, `\\`, -1),
+                       `'`, `\'`, -1)
+               s += "' "
+       }
+       return s
+}
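
The String method renders the map as space-separated key='value' pairs, lowercasing keys and escaping backslashes and single quotes. A small sketch with a hypothetical map:

    package main

    import (
            "fmt"

            "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
            c := arvados.PostgreSQLConnection{
                    "Host":     "localhost",
                    "Password": `it's\secret`,
            }
            // Map iteration order (and hence pair order) is not guaranteed.
            // One possible output:
            // host='localhost' password='it\'s\\secret'
            fmt.Println(c.String())
    }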
index 91da5a3fd62ce6eb099e4ce0c0e206a1220268ae..e3a9f4ae8d892ea03f379e7dad3ded4a6dc951fc 100644 (file)
@@ -173,8 +173,11 @@ func New(c *arvados.Client) (*ArvadosClient, error) {
                ApiServer:   c.APIHost,
                ApiToken:    c.AuthToken,
                ApiInsecure: c.Insecure,
-               Client: &http.Client{Transport: &http.Transport{
-                       TLSClientConfig: MakeTLSConfig(c.Insecure)}},
+               Client: &http.Client{
+                       Timeout: 5 * time.Minute,
+                       Transport: &http.Transport{
+                               TLSClientConfig: MakeTLSConfig(c.Insecure)},
+               },
                External:          false,
                Retries:           2,
                KeepServiceURIs:   c.KeepServiceURIs,
index a434690775089c38a092499ae79f7fa0fcdec0e0..6a4b6232aceb82754dbee606504a8608ba96d054 100644 (file)
@@ -46,6 +46,8 @@ const (
 
        FooCollectionSharingTokenUUID = "zzzzz-gj3su-gf02tdm4g1z3e3u"
        FooCollectionSharingToken     = "iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss"
+
+       WorkflowWithDefinitionYAMLUUID = "zzzzz-7fd4e-validworkfloyml"
 )
 
 // PathologicalManifest : A valid manifest designed to test
index ea492430e41297ddb8465c73b62c477e20af2357..ad1d398c763d7eaacefefcde8993e39044582f2a 100644 (file)
@@ -34,7 +34,7 @@ var EncodeTokenCookie func([]byte) string = base64.URLEncoding.EncodeToString
 // token.
 var DecodeTokenCookie func(string) ([]byte, error) = base64.URLEncoding.DecodeString
 
-// LoadTokensFromHttpRequest loads all tokens it can find in the
+// LoadTokensFromHTTPRequest loads all tokens it can find in the
 // headers and query string of an http query.
 func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
        // Load plain token from "Authorization: OAuth2 ..." header
@@ -83,7 +83,21 @@ func (a *Credentials) loadTokenFromCookie(r *http.Request) {
        a.Tokens = append(a.Tokens, string(token))
 }
 
-// TODO: LoadTokensFromHttpRequestBody(). We can't assume in
-// LoadTokensFromHttpRequest() that [or how] we should read and parse
-// the request body. This has to be requested explicitly by the
-// application.
+// LoadTokensFromHTTPRequestBody() loads credentials from the request
+// body.
+//
+// This is separate from LoadTokensFromHTTPRequest() because it's not
+// always desirable to read the request body. This has to be requested
+// explicitly by the application.
+func (a *Credentials) LoadTokensFromHTTPRequestBody(r *http.Request) error {
+       if r.Header.Get("Content-Type") != "application/x-www-form-urlencoded" {
+               return nil
+       }
+       if err := r.ParseForm(); err != nil {
+               return err
+       }
+       if t := r.PostFormValue("api_token"); t != "" {
+               a.Tokens = append(a.Tokens, t)
+       }
+       return nil
+}
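
A minimal sketch of a handler that opts in to body credentials; the handler itself is illustrative, only the two Load methods come from this change:

    import (
            "net/http"

            "git.curoverse.com/arvados.git/sdk/go/auth"
    )

    func tokenHandler(w http.ResponseWriter, r *http.Request) {
            creds := &auth.Credentials{}
            creds.LoadTokensFromHTTPRequest(r)
            // Reading the body is opt-in; only form-encoded bodies are parsed.
            if err := creds.LoadTokensFromHTTPRequestBody(r); err != nil {
                    http.Error(w, err.Error(), http.StatusBadRequest)
                    return
            }
            // creds.Tokens now includes any api_token posted as
            // application/x-www-form-urlencoded form data.
    }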
diff --git a/sdk/go/auth/salt.go b/sdk/go/auth/salt.go
new file mode 100644 (file)
index 0000000..667a30f
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "errors"
+       "fmt"
+       "io"
+       "regexp"
+       "strings"
+)
+
+var (
+       reObsoleteToken  = regexp.MustCompile(`^[0-9a-z]{41,}$`)
+       ErrObsoleteToken = errors.New("obsolete token format")
+       ErrTokenFormat   = errors.New("badly formatted token")
+       ErrSalted        = errors.New("token already salted")
+)
+
+func SaltToken(token, remote string) (string, error) {
+       parts := strings.Split(token, "/")
+       if len(parts) < 3 || parts[0] != "v2" {
+               if reObsoleteToken.MatchString(token) {
+                       return "", ErrObsoleteToken
+               } else {
+                       return "", ErrTokenFormat
+               }
+       }
+       uuid := parts[1]
+       secret := parts[2]
+       if len(secret) != 40 {
+               // not already salted
+               hmac := hmac.New(sha1.New, []byte(secret))
+               io.WriteString(hmac, remote)
+               secret = fmt.Sprintf("%x", hmac.Sum(nil))
+               return "v2/" + uuid + "/" + secret, nil
+       } else if strings.HasPrefix(uuid, remote) {
+               // already salted for the desired remote
+               return token, nil
+       } else {
+               // salted for a different remote, can't be used
+               return "", ErrSalted
+       }
+}
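
SaltToken replaces a v2 token's secret with an HMAC-SHA1 of the remote cluster ID, keyed by the original secret, so the remote never sees the real secret. A sketch with a hypothetical token:

    import (
            "log"

            "git.curoverse.com/arvados.git/sdk/go/auth"
    )

    func saltedToken() string {
            tok := "v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
            salted, err := auth.SaltToken(tok, "zbbbb")
            if err != nil {
                    log.Fatal(err)
            }
            // A 40-hex-digit secret is treated as already salted: it is
            // passed through only when the token's UUID prefix matches the
            // remote; otherwise SaltToken returns ErrSalted. A bare 41+
            // character token yields ErrObsoleteToken.
            return salted
    }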
index 3289c67b013f37a67ae8ddeaa52d3fd74abe34e5..c8fb5aeb37e97eb9a43846301e2a07ea63e28ae7 100644 (file)
@@ -9,12 +9,12 @@ package dispatch
 import (
        "context"
        "fmt"
-       "log"
        "sync"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "github.com/Sirupsen/logrus"
 )
 
 const (
@@ -25,10 +25,21 @@ const (
        Cancelled = arvados.ContainerStateCancelled
 )
 
+type Logger interface {
+       Printf(string, ...interface{})
+       Warnf(string, ...interface{})
+       Debugf(string, ...interface{})
+}
+
 // Dispatcher struct
 type Dispatcher struct {
        Arv *arvadosclient.ArvadosClient
 
+       Logger Logger
+
+       // Batch size for container queries
+       BatchSize int64
+
        // Queue polling frequency
        PollPeriod time.Duration
 
@@ -62,6 +73,10 @@ type DispatchFunc func(*Dispatcher, arvados.Container, <-chan arvados.Container)
 // dispatcher's token. When a new one appears, Run calls RunContainer
 // in a new goroutine.
 func (d *Dispatcher) Run(ctx context.Context) error {
+       if d.Logger == nil {
+               d.Logger = logrus.StandardLogger()
+       }
+
        err := d.Arv.Call("GET", "api_client_authorizations", "", "current", nil, &d.auth)
        if err != nil {
                return fmt.Errorf("error getting my token UUID: %v", err)
@@ -72,6 +87,10 @@ func (d *Dispatcher) Run(ctx context.Context) error {
        poll := time.NewTicker(d.PollPeriod)
        defer poll.Stop()
 
+       if d.BatchSize == 0 {
+               d.BatchSize = 100
+       }
+
        for {
                select {
                case <-poll.C:
@@ -135,7 +154,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
                // Containers that I know about that didn't show up in any
                // query should be let go.
                for uuid, tracker := range todo {
-                       log.Printf("Container %q not returned by any query, stopping tracking.", uuid)
+                       d.Logger.Printf("Container %q not returned by any query, stopping tracking.", uuid)
                        tracker.close()
                }
 
@@ -145,7 +164,10 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 // Start a runner in a new goroutine, and send the initial container
 // record to its updates channel.
 func (d *Dispatcher) start(c arvados.Container) *runTracker {
-       tracker := &runTracker{updates: make(chan arvados.Container, 1)}
+       tracker := &runTracker{
+               updates: make(chan arvados.Container, 1),
+               logger:  d.Logger,
+       }
        tracker.updates <- c
        go func() {
                d.RunContainer(d, c, tracker.updates)
@@ -159,22 +181,44 @@ func (d *Dispatcher) start(c arvados.Container) *runTracker {
 }
 
 func (d *Dispatcher) checkForUpdates(filters [][]interface{}, todo map[string]*runTracker) bool {
+       var countList arvados.ContainerList
        params := arvadosclient.Dict{
                "filters": filters,
+               "count":   "exact",
+               "limit":   0,
                "order":   []string{"priority desc"}}
-
-       var list arvados.ContainerList
-       for offset, more := 0, true; more; offset += len(list.Items) {
+       err := d.Arv.List("containers", params, &countList)
+       if err != nil {
+               d.Logger.Warnf("error getting count of containers: %q", err)
+               return false
+       }
+       itemsAvailable := countList.ItemsAvailable
+       params = arvadosclient.Dict{
+               "filters": filters,
+               "count":   "none",
+               "limit":   d.BatchSize,
+               "order":   []string{"priority desc"}}
+       offset := 0
+       for {
                params["offset"] = offset
+
+               // This list variable must be a new one declared
+               // inside the loop: otherwise, items in the API
+               // response would get deep-merged into the items
+               // loaded in previous iterations.
+               var list arvados.ContainerList
+
                err := d.Arv.List("containers", params, &list)
                if err != nil {
-                       log.Printf("Error getting list of containers: %q", err)
+                       d.Logger.Warnf("error getting list of containers: %q", err)
                        return false
                }
-               more = len(list.Items) > 0 && list.ItemsAvailable > len(list.Items)+offset
                d.checkListForUpdates(list.Items, todo)
+               offset += len(list.Items)
+               if len(list.Items) == 0 || itemsAvailable <= offset {
+                       return true
+               }
        }
-       return true
 }
 
 func (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo map[string]*runTracker) {
@@ -189,7 +233,7 @@ func (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo ma
                delete(todo, c.UUID)
 
                if c.LockedByUUID != "" && c.LockedByUUID != d.auth.UUID {
-                       log.Printf("debug: ignoring %s locked by %s", c.UUID, c.LockedByUUID)
+                       d.Logger.Debugf("ignoring %s locked by %s", c.UUID, c.LockedByUUID)
                } else if alreadyTracking {
                        switch c.State {
                        case Queued:
@@ -207,7 +251,7 @@ func (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo ma
                                }
                                err := d.lock(c.UUID)
                                if err != nil {
-                                       log.Printf("debug: error locking container %s: %s", c.UUID, err)
+                                       d.Logger.Warnf("error locking container %s: %s", c.UUID, err)
                                        break
                                }
                                c.State = Locked
@@ -231,7 +275,7 @@ func (d *Dispatcher) UpdateState(uuid string, state arvados.ContainerState) erro
                        "container": arvadosclient.Dict{"state": state},
                }, nil)
        if err != nil {
-               log.Printf("Error updating container %s to state %q: %s", uuid, state, err)
+               d.Logger.Warnf("error updating container %s to state %q: %s", uuid, state, err)
        }
        return err
 }
@@ -286,6 +330,7 @@ func (d *Dispatcher) TrackContainer(uuid string) error {
 type runTracker struct {
        closing bool
        updates chan arvados.Container
+       logger  Logger
 }
 
 func (tracker *runTracker) close() {
@@ -301,7 +346,7 @@ func (tracker *runTracker) update(c arvados.Container) {
        }
        select {
        case <-tracker.updates:
-               log.Printf("debug: runner is handling updates slowly, discarded previous update for %s", c.UUID)
+               tracker.logger.Debugf("runner is handling updates slowly, discarded previous update for %s", c.UUID)
        default:
        }
        tracker.updates <- c
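
A wiring sketch for the new Logger and BatchSize knobs. This assumes the Dispatcher's RunContainer field holds the DispatchFunc (as the Run doc comment above suggests) and that arv is an already-configured client:

    import (
            "context"
            "time"

            "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
            "git.curoverse.com/arvados.git/sdk/go/dispatch"
            "github.com/Sirupsen/logrus"
    )

    func runDispatcher(arv *arvadosclient.ArvadosClient, run dispatch.DispatchFunc) error {
            d := &dispatch.Dispatcher{
                    Arv:          arv,
                    Logger:       logrus.StandardLogger(), // also the default when nil
                    BatchSize:    100,                     // 0 likewise defaults to 100
                    PollPeriod:   10 * time.Second,
                    RunContainer: run,
            }
            return d.Run(context.Background())
    }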
diff --git a/sdk/go/httpserver/metrics.go b/sdk/go/httpserver/metrics.go
new file mode 100644 (file)
index 0000000..b52068e
--- /dev/null
@@ -0,0 +1,135 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net/http"
+       "strconv"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+       "github.com/Sirupsen/logrus"
+       "github.com/gogo/protobuf/jsonpb"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+type Handler interface {
+       http.Handler
+
+       // Returns an http.Handler that serves the Handler's metrics
+       // data at /metrics and /metrics.json, and passes other
+       // requests through to next.
+       ServeAPI(next http.Handler) http.Handler
+}
+
+type metrics struct {
+       next         http.Handler
+       logger       *logrus.Logger
+       registry     *prometheus.Registry
+       reqDuration  *prometheus.SummaryVec
+       timeToStatus *prometheus.SummaryVec
+       exportProm   http.Handler
+}
+
+func (*metrics) Levels() []logrus.Level {
+       return logrus.AllLevels
+}
+
+// Fire implements logrus.Hook in order to collect data points from
+// request logs.
+func (m *metrics) Fire(ent *logrus.Entry) error {
+       if tts, ok := ent.Data["timeToStatus"].(stats.Duration); !ok {
+       } else if method, ok := ent.Data["reqMethod"].(string); !ok {
+       } else if code, ok := ent.Data["respStatusCode"].(int); !ok {
+       } else {
+               m.timeToStatus.WithLabelValues(strconv.Itoa(code), strings.ToLower(method)).Observe(time.Duration(tts).Seconds())
+       }
+       return nil
+}
+
+func (m *metrics) exportJSON(w http.ResponseWriter, req *http.Request) {
+       jm := jsonpb.Marshaler{Indent: "  "}
+       mfs, _ := m.registry.Gather()
+       w.Write([]byte{'['})
+       for i, mf := range mfs {
+               if i > 0 {
+                       w.Write([]byte{','})
+               }
+               jm.Marshal(w, mf)
+       }
+       w.Write([]byte{']'})
+}
+
+// ServeHTTP implements http.Handler.
+func (m *metrics) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       m.next.ServeHTTP(w, req)
+}
+
+// ServeAPI returns a new http.Handler that serves current data at
+// metrics API endpoints (currently "GET /metrics(.json)?") and passes
+// other requests through to next.
+//
+// Typical example:
+//
+//     m := Instrument(...)
+//     srv := http.Server{Handler: m.ServeAPI(m)}
+func (m *metrics) ServeAPI(next http.Handler) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               switch {
+               case req.Method != "GET" && req.Method != "HEAD":
+                       next.ServeHTTP(w, req)
+               case req.URL.Path == "/metrics.json":
+                       m.exportJSON(w, req)
+               case req.URL.Path == "/metrics":
+                       m.exportProm.ServeHTTP(w, req)
+               default:
+                       next.ServeHTTP(w, req)
+               }
+       })
+}
+
+// Instrument returns a new Handler that passes requests through to
+// the next handler in the stack, and tracks metrics of those
+// requests.
+//
+// For the metrics to be accurate, the caller must ensure every
+// request passed to the Handler also passes through
+// LogRequests(logger, ...), and vice versa.
+//
+// If registry is nil, a new registry is created.
+//
+// If logger is nil, logrus.StandardLogger() is used.
+func Instrument(registry *prometheus.Registry, logger *logrus.Logger, next http.Handler) Handler {
+       if logger == nil {
+               logger = logrus.StandardLogger()
+       }
+       if registry == nil {
+               registry = prometheus.NewRegistry()
+       }
+       reqDuration := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+               Name: "request_duration_seconds",
+               Help: "Summary of request duration.",
+       }, []string{"code", "method"})
+       timeToStatus := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+               Name: "time_to_status_seconds",
+               Help: "Summary of request TTFB.",
+       }, []string{"code", "method"})
+       registry.MustRegister(timeToStatus)
+       registry.MustRegister(reqDuration)
+       m := &metrics{
+               next:         promhttp.InstrumentHandlerDuration(reqDuration, next),
+               logger:       logger,
+               registry:     registry,
+               reqDuration:  reqDuration,
+               timeToStatus: timeToStatus,
+               exportProm: promhttp.HandlerFor(registry, promhttp.HandlerOpts{
+                       ErrorLog: logger,
+               }),
+       }
+       m.logger.AddHook(m)
+       return m
+}
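
A wiring sketch for Instrument, following its own doc comment; the LogRequests(logger, handler) signature is assumed from that comment rather than shown in this diff:

    import (
            "log"
            "net/http"

            "git.curoverse.com/arvados.git/sdk/go/httpserver"
            "github.com/Sirupsen/logrus"
    )

    func serve(apiHandler http.Handler) {
            logger := logrus.StandardLogger()
            m := httpserver.Instrument(nil, logger, apiHandler) // nil registry: a new one is created
            srv := &http.Server{
                    Addr: ":8080",
                    // The same logger must run LogRequests so the Fire hook
                    // receives timeToStatus, reqMethod, and respStatusCode.
                    Handler: m.ServeAPI(httpserver.LogRequests(logger, m)),
            }
            log.Fatal(srv.ListenAndServe())
    }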
index b652db77d18a73214740672da6588f0fbaab3de3..b18ce25fd218201ab75f4b3c14f9c7b66f84f373 100644 (file)
@@ -153,7 +153,7 @@ def http_cache(data_type):
     return cache.SafeHTTPCache(path, max_age=60*60*24*2)
 
 def api(version=None, cache=True, host=None, token=None, insecure=False,
-        request_id=None, **kwargs):
+        request_id=None, timeout=5*60, **kwargs):
     """Return an apiclient Resources object for an Arvados instance.
 
     :version:
@@ -173,6 +173,9 @@ def api(version=None, cache=True, host=None, token=None, insecure=False,
     :insecure:
       If True, ignore SSL certificate validation errors.
 
+    :timeout:
+      A timeout value for http requests.
+
     :request_id:
       Default X-Request-Id header value for outgoing requests that
       don't already provide one. If None or omitted, generate a random
@@ -225,6 +228,9 @@ def api(version=None, cache=True, host=None, token=None, insecure=False,
             http_kwargs['disable_ssl_certificate_validation'] = True
         kwargs['http'] = httplib2.Http(**http_kwargs)
 
+    if kwargs['http'].timeout is None:
+        kwargs['http'].timeout = timeout
+
     kwargs['http'] = _patch_http_request(kwargs['http'], token)
 
     svc = apiclient_discovery.build('arvados', version, cache_discovery=False, **kwargs)
index c4748fa995759ef0cc934b699a14523f8a3181f8..96f5bdd44a12ae42c25fbe64f68b342cb0356fcf 100644 (file)
@@ -1,6 +1,19 @@
 # Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) 2018 Genome Research Ltd.
 #
 # SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 from __future__ import print_function
 from __future__ import absolute_import
@@ -22,6 +35,7 @@ import sys
 import errno
 import arvados.commands._util as arv_cmd
 import arvados.collection
+import arvados.config as config
 
 from arvados._version import __version__
 
@@ -207,22 +221,48 @@ def uploadfiles(files, api, dry_run=False, num_retries=0,
                     for src in iterfiles:
                         write_file(collection, pathprefix, os.path.join(root, src), not packed)
 
-        filters=[["portable_data_hash", "=", collection.portable_data_hash()]]
-        if name:
-            filters.append(["name", "like", name+"%"])
-        if project:
-            filters.append(["owner_uuid", "=", project])
-
-        exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)
-
-        if exists["items"]:
-            item = exists["items"][0]
-            pdh = item["portable_data_hash"]
-            logger.info("Using collection %s (%s)", pdh, item["uuid"])
-        elif len(collection) > 0:
-            collection.save_new(name=name, owner_uuid=project, ensure_unique_name=True)
+        pdh = None
+        if len(collection) > 0:
+            # non-empty collection
+            filters = [["portable_data_hash", "=", collection.portable_data_hash()]]
+            name_pdh = "%s (%s)" % (name, collection.portable_data_hash())
+            if name:
+                filters.append(["name", "=", name_pdh])
+            if project:
+                filters.append(["owner_uuid", "=", project])
+
+            # Do the list/create in a loop with up to 2 tries: since we are
+            # using `ensure_unique_name=False`, there is a potential race with
+            # other workflows that may create the collection between our list and our create attempt.
+            tries = 2
+            while pdh is None and tries > 0:
+                exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)
+
+                if exists["items"]:
+                    item = exists["items"][0]
+                    pdh = item["portable_data_hash"]
+                    logger.info("Using collection %s (%s)", pdh, item["uuid"])
+                else:
+                    try:
+                        collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=False)
+                        pdh = collection.portable_data_hash()
+                        logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+                    except arvados.errors.ApiError as ae:
+                        tries -= 1
+            if pdh is None:
+                # Something weird is going on here, probably a collection
+                # with a conflicting name but the wrong PDH.  We won't be
+                # able to reuse it, but we still need to save our
+                # collection, so save it with a unique name.
+                logger.info("Name conflict on '%s', existing collection has an unexpected portable data hash", name_pdh)
+                collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=True)
+                pdh = collection.portable_data_hash()
+                logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+        else:
+            # empty collection
             pdh = collection.portable_data_hash()
-            logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+            assert (pdh == config.EMPTY_BLOCK_LOCATOR), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
+            logger.info("Using empty collection %s", pdh)
 
     for c in files:
         c.keepref = "%s/%s" % (pdh, c.fn)
index 5e066f014598560ed211a215ef8866150a77bac3..8f576196bc4cd623076ed59c4166e4f40a48f369 100644 (file)
@@ -51,10 +51,10 @@ setup(name='arvados-python-client',
           'google-api-python-client >=1.6.2, <1.7',
           'httplib2 >=0.9.2',
           'pycurl >=7.19.5.1',
-          'ruamel.yaml >=0.13.11, <0.15',
+          'ruamel.yaml >=0.13.11, <= 0.15.26',
           'setuptools',
-          'ws4py <0.4',
-          'subprocess32>=3.5.1',
+          'ws4py >=0.4.2',
+          'subprocess32 >=3.5.1',
       ],
       test_suite='tests',
       tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
index ce1929fdf710b5960df392cfe348016b4309becb..c21ef95f2af3a18ea8f48352a9e2b780ea1b0e1f 100644 (file)
@@ -7,18 +7,25 @@ error_log "{{ERRORLOG}}" info;          # Yes, must be specified here _and_ cmdl
 events {
 }
 http {
-  access_log "{{ACCESSLOG}}" combined;
+  log_format customlog
+    '[$time_local] $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
+    '"$http_referer" "$http_user_agent"';
+  access_log "{{ACCESSLOG}}" customlog;
   client_body_temp_path "{{TMPDIR}}";
   upstream arv-git-http {
     server localhost:{{GITPORT}};
   }
   server {
     listen *:{{GITSSLPORT}} ssl default_server;
-    server_name _;
+    server_name arv-git-http;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://arv-git-http;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   upstream keepproxy {
@@ -26,11 +33,15 @@ http {
   }
   server {
     listen *:{{KEEPPROXYSSLPORT}} ssl default_server;
-    server_name _;
+    server_name keepproxy;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keepproxy;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   upstream keep-web {
@@ -38,25 +49,44 @@ http {
   }
   server {
     listen *:{{KEEPWEBSSLPORT}} ssl default_server;
-    server_name ~^(?<request_host>.*)$;
+    server_name keep-web;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keep-web;
-      proxy_set_header Host $request_host:{{KEEPWEBPORT}};
+      proxy_set_header Host $http_host;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   server {
     listen *:{{KEEPWEBDLSSLPORT}} ssl default_server;
-    server_name ~.*;
+    server_name keep-web-dl ~.*;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keep-web;
-      proxy_set_header Host download:{{KEEPWEBPORT}};
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-      proxy_redirect //download:{{KEEPWEBPORT}}/ https://$host:{{KEEPWEBDLSSLPORT}}/;
+      proxy_set_header X-Forwarded-Proto https;
+
+      # Unlike other proxy sections, here we need to override the
+      # requested Host header and use proxy_redirect because of the
+      # way the test suite orchestrates services. Keep-web's "download
+      # only" behavior relies on the Host header matching a configured
+      # value, but when run_test_servers.py writes keep-web's command
+      # line, the keep-web-dl TLS port (which clients will connect to
+      # and include in their Host header) has not yet been assigned.
+      #
+      # In production, "proxy_set_header Host $http_host;
+      # proxy_redirect off;" works: keep-web's redirect URLs will
+      # match the request URL received by Nginx.
+      #
+      # Here, keep-web will issue redirects to https://download/ and
+      # Nginx will rewrite them.
+      #
+      proxy_set_header Host  download;
+      proxy_redirect https://download/ https://$host:{{KEEPWEBDLSSLPORT}}/;
     }
   }
   upstream ws {
@@ -64,15 +94,17 @@ http {
   }
   server {
     listen *:{{WSSPORT}} ssl default_server;
-    server_name ~^(?<request_host>.*)$;
+    server_name websocket;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://ws;
       proxy_set_header Upgrade $http_upgrade;
       proxy_set_header Connection "upgrade";
-      proxy_set_header Host $request_host:{{WSPORT}};
+      proxy_set_header Host $http_host;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   upstream controller {
@@ -80,12 +112,15 @@ http {
   }
   server {
     listen *:{{CONTROLLERSSLPORT}} ssl default_server;
-    server_name _;
+    server_name controller;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://controller;
+      proxy_set_header Host $http_host;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
 }
index f7ca6daf6f65c190ccd22a1a04ad3cb1996f3e75..7b1f6059aeef07b8ff2a2d03a6d4980f9d5a835f 100644 (file)
@@ -174,7 +174,7 @@ def find_available_port():
     sock.close()
     return port
 
-def _wait_until_port_listens(port, timeout=10):
+def _wait_until_port_listens(port, timeout=10, warn=True):
     """Wait for a process to start listening on the given port.
 
     If nothing listens on the port within the specified timeout (given
@@ -196,11 +196,13 @@ def _wait_until_port_listens(port, timeout=10):
         except subprocess.CalledProcessError:
             time.sleep(0.1)
             continue
-        return
-    print(
-        "WARNING: Nothing is listening on port {} (waited {} seconds).".
-        format(port, timeout),
-        file=sys.stderr)
+        return True
+    if warn:
+        print(
+            "WARNING: Nothing is listening on port {} (waited {} seconds).".
+            format(port, timeout),
+            file=sys.stderr)
+    return False
 
 def _logfilename(label):
     """Set up a labelled log file, and return a path to write logs to.
@@ -375,8 +377,11 @@ def reset():
         'POST',
         headers={'Authorization': 'OAuth2 {}'.format(token)})
     os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
-    os.environ['ARVADOS_API_HOST'] = existing_api_host
     os.environ['ARVADOS_API_TOKEN'] = token
+    if _wait_until_port_listens(_getport('controller-ssl'), timeout=0.5, warn=False):
+        os.environ['ARVADOS_API_HOST'] = '0.0.0.0:'+str(_getport('controller-ssl'))
+    else:
+        os.environ['ARVADOS_API_HOST'] = existing_api_host
 
 def stop(force=False):
     """Stop the API server, if one is running.
@@ -408,6 +413,14 @@ def run_controller():
         f.write("""
 Clusters:
   zzzzz:
+    HTTPRequestTimeout: 30s
+    PostgreSQL:
+      ConnectionPool: 32
+      Connection:
+        host: {}
+        dbname: {}
+        user: {}
+        password: {}
     NodeProfiles:
       "*":
         "arvados-controller":
@@ -415,7 +428,15 @@ Clusters:
         "arvados-api-server":
           Listen: ":{}"
           TLS: true
-        """.format(port, rails_api_port))
+          Insecure: true
+        """.format(
+            _dbconfig('host'),
+            _dbconfig('database'),
+            _dbconfig('username'),
+            _dbconfig('password'),
+            port,
+            rails_api_port,
+        ))
     logf = open(_logfilename('controller'), 'a')
     controller = subprocess.Popen(
         ["arvados-server", "controller", "-config", conf],
@@ -634,7 +655,7 @@ def run_keep_web():
     keepweb = subprocess.Popen(
         ['keep-web',
          '-allow-anonymous',
-         '-attachment-only-host=download:'+str(keepwebport),
+         '-attachment-only-host=download',
          '-listen=:'+str(keepwebport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('keep-web'), 'w') as f:
index e39c4263177b8ed4f290c17cb744c309725c56bc..609af6e23dda07b2467f6cc78dfe3f69ae00bb65 100644 (file)
@@ -29,7 +29,7 @@ Gem::Specification.new do |s|
   s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
   # Our google-api-client dependency used to be < 0.9, but that could be
   # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
-  s.add_dependency('google-api-client', '>= 0.7', '< 0.8.9')
+  s.add_dependency('cure-google-api-client', '>= 0.7', '< 0.8.9')
   # work around undeclared dependency on i18n in some activesupport 3.x.x:
   s.add_dependency('i18n', '~> 0')
   s.add_dependency('json', '>= 1.7.7', '<3')
index 93d27e649fb22a47ba32ce7f75f8890967c9ad2e..69383d12f63f22ded7957c25fd012d7530763ae4 100644 (file)
@@ -3,6 +3,11 @@
 # SPDX-License-Identifier: Apache-2.0
 
 require 'google/api_client'
+# Monkeypatch google-api-client gem to avoid sending newline characters
+# on headers to make ruby-2.3.7+ happy.
+# See: https://dev.arvados.org/issues/13920
+Google::APIClient::ENV::OS_VERSION.strip!
+
 require 'json'
 require 'tempfile'
 
index 7d4d4bba176591a97e2d166c6e291e4588dbe99e..0dc38f1e726f2e1dfbf0f1645c389f964920ee14 100644 (file)
@@ -21,6 +21,9 @@ group :test, :development do
   gem 'mocha', require: false
 end
 
+# We need this dependency because of crunchv1
+gem 'arvados-cli'
+
 # We'll need to update related code prior to Rails 5.
 # See: https://github.com/rails/activerecord-deprecated_finders
 gem 'activerecord-deprecated_finders', require: 'active_record/deprecated_finders'
@@ -32,7 +35,6 @@ gem 'pg', '~> 0.18'
 
 gem 'multi_json'
 gem 'oj'
-gem 'oj_mimic_json'
 
 # for building assets
 gem 'sass-rails',   '~> 4.0'
@@ -62,7 +64,6 @@ gem 'faye-websocket'
 gem 'themes_for_rails', git: 'https://github.com/curoverse/themes_for_rails'
 
 gem 'arvados', '>= 0.1.20150615153458'
-gem 'arvados-cli', '>= 0.1.20161017193526'
 gem 'httpclient'
 
 gem 'sshkey'
index a3ff8e78997855640e93aa479d92dd93cb36b012..f935f2c0723a64a99bce17cef26679862931672e 100644 (file)
@@ -58,14 +58,14 @@ GEM
       i18n (~> 0)
       json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
-    arvados-cli (1.1.4.20180412190507)
+    arvados-cli (1.1.4.20180723133344)
       activesupport (>= 3.2.13, < 5)
       andand (~> 1.3, >= 1.3.3)
       arvados (~> 0.1, >= 0.1.20150128223554)
       curb (~> 0.8)
       google-api-client (~> 0.6, >= 0.6.3, < 0.8.9)
       json (>= 1.7.7, < 3)
-      oj (~> 2.0, >= 2.0.3)
+      oj (~> 3.0)
       trollop (~> 2.0)
     autoparse (0.3.3)
       addressable (>= 2.3.1)
@@ -87,7 +87,7 @@ GEM
     coffee-script-source (1.12.2)
     concurrent-ruby (1.0.5)
     crass (1.0.4)
-    curb (0.9.4)
+    curb (0.9.6)
     database_cleaner (1.7.0)
     erubis (2.7.0)
     eventmachine (1.2.6)
@@ -179,8 +179,7 @@ GEM
       multi_json (~> 1.3)
       multi_xml (~> 0.5)
       rack (>= 1.2, < 3)
-    oj (2.18.5)
-    oj_mimic_json (1.0.1)
+    oj (3.6.4)
     omniauth (1.4.3)
       hashie (>= 1.2, < 4)
       rack (>= 1.6.2, < 3)
@@ -292,7 +291,7 @@ DEPENDENCIES
   acts_as_api
   andand
   arvados (>= 0.1.20150615153458)
-  arvados-cli (>= 0.1.20161017193526)
+  arvados-cli
   coffee-rails (~> 4.0)
   database_cleaner
   factory_girl_rails
@@ -304,7 +303,6 @@ DEPENDENCIES
   mocha
   multi_json
   oj
-  oj_mimic_json
   omniauth (~> 1.4.0)
   omniauth-oauth2 (~> 1.1)
   passenger
@@ -328,4 +326,4 @@ DEPENDENCIES
   uglifier (~> 2.0)
 
 BUNDLED WITH
-   1.16.1
+   1.16.3
index 169f4d40f78ea635153a37f4186ea9decb708d8f..05b39c1aed5f71a62b3526cc6df52e12e49022b8 100644 (file)
@@ -78,6 +78,7 @@ class ApplicationController < ActionController::Base
     @distinct = nil
     @response_resource_name = nil
     @attrs = nil
+    @extra_included = nil
   end
 
   def default_url_options
@@ -388,7 +389,9 @@ class ApplicationController < ActionController::Base
       req_id = "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
     end
     response.headers['X-Request-Id'] = Thread.current[:request_id] = req_id
-    yield
+    Rails.logger.tagged(req_id) do
+      yield
+    end
     Thread.current[:request_id] = nil
   end
 
@@ -498,6 +501,9 @@ class ApplicationController < ActionController::Base
       :limit => @limit,
       :items => @objects.as_api_response(nil, {select: @select})
     }
+    if @extra_included
+      list[:included] = @extra_included.as_api_response(nil, {select: @select})
+    end
     case params[:count]
     when nil, '', 'exact'
       if @objects.respond_to? :except
index ec3b69ab052506b54798689d168fb136e0e33321..a963d1fc4d875d2d69129ebb6ec6606e7f65d666 100644 (file)
@@ -7,6 +7,9 @@ require "trashable"
 class Arvados::V1::GroupsController < ApplicationController
   include TrashableController
 
+  skip_before_filter :find_object_by_uuid, only: :shared
+  skip_before_filter :render_404_if_no_object, only: :shared
+
   def self._index_requires_parameters
     (super rescue {}).
       merge({
@@ -63,12 +66,70 @@ class Arvados::V1::GroupsController < ApplicationController
     })
   end
 
+  def shared
+    # The purpose of this endpoint is to return the toplevel set of
+    # groups which are *not* reachable through a direct ownership
+    # chain of projects starting from the current user account.  In
+    # other words, groups to which access was granted via a
+    # permission link or chain of links.
+    #
+    # This also returns (in the "included" field) the objects that own
+    # those projects (users or non-project groups).
+    #
+    # select groups that are readable by current user AND
+    #   the owner_uuid is a user (but not the current user) OR
+    #   the owner_uuid is not readable by the current user OR
+    #   the owner_uuid is a group but group_class is not a project
+    #
+    # The intended use of this endpoint is to support clients which
+    # wish to browse those projects which are visible to the user but
+    # are not part of the "home" project.
+
+    load_limit_offset_order_params
+    load_filters_param
+
+    read_parent_check = if current_user.is_admin
+                          ""
+                        else
+                          "NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} WHERE "+
+                            "user_uuid=(:user_uuid) AND target_uuid=groups.owner_uuid AND perm_level >= 1) OR "
+                        end
+
+    @objects = Group.readable_by(*@read_users).where("groups.owner_uuid IN (SELECT users.uuid FROM users WHERE users.uuid != (:user_uuid)) OR "+
+                                                     read_parent_check+
+                                                     "EXISTS(SELECT 1 FROM groups as gp where gp.uuid=groups.owner_uuid and gp.group_class != 'project')",
+                                            user_uuid: current_user.uuid)
+    apply_where_limit_order_params
+
+    owners = @objects.map(&:owner_uuid).to_a
+
+    if params["include"] == "owner_uuid"
+      @extra_included = []
+      [Group, User].each do |klass|
+        @extra_included += klass.readable_by(*@read_users).where(uuid: owners).to_a
+      end
+    end
+
+    index
+  end
+
+  def self._shared_requires_parameters
+    rp = self._index_requires_parameters
+    rp[:include] = { type: 'string', required: false }
+    rp
+  end
+
   protected
 
   def load_searchable_objects
     all_objects = []
     @items_available = 0
 
+    # Reload the orders param, this time without prefixing unqualified
+    # columns ("name" => "groups.name"). Here, unqualified orders
+    # apply to each table being searched, not "groups".
+    load_limit_offset_order_params(fill_table_names: false)
+
     # Trick apply_where_limit_order_params into applying suitable
     # per-table values. *_all are the real ones we'll apply to the
     # aggregate set.
@@ -142,7 +203,7 @@ class Arvados::V1::GroupsController < ApplicationController
       # table_name for the current klass, apply that order.
       # Otherwise, order by recency.
       request_order =
-        request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i } ||
+        request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i || r !~ /\./ } ||
         klass.default_orders.join(", ")
 
       @select = nil
index 594dc436297224463f860fe3d74cb9d718e02bdf..f0992c18314ac22bc60034855a204ab3aedce796 100644 (file)
@@ -25,7 +25,7 @@ class StaticController < ApplicationController
   end
 
   def empty
-    render text: "-"
+    render text: ""
   end
 
 end
index b9edeae06ecf93f0a1b6014eef0772224deed1a1..e43978980125a0fc91b22259ba34d8bb96963c13 100644 (file)
@@ -596,16 +596,24 @@ class ArvadosModel < ActiveRecord::Base
     end
   end
 
-  def self.where_serialized(colname, value)
+  def self.where_serialized(colname, value, md5: false)
+    colsql = colname.to_s
+    if md5
+      colsql = "md5(#{colsql})"
+    end
     if value.empty?
       # rails4 stores as null, rails3 stored as serialized [] or {}
-      sql = "#{colname.to_s} is null or #{colname.to_s} IN (?)"
+      sql = "#{colsql} is null or #{colsql} IN (?)"
       sorted = value
     else
-      sql = "#{colname.to_s} IN (?)"
+      sql = "#{colsql} IN (?)"
       sorted = deep_sort_hash(value)
     end
-    where(sql, [sorted.to_yaml, SafeJSON.dump(sorted)])
+    params = [sorted.to_yaml, SafeJSON.dump(sorted)]
+    if md5
+      params = params.map { |x| Digest::MD5.hexdigest(x) }
+    end
+    where(sql, params)
   end
 
   Serializer = {
index 4772768c8fe086f1e3bc2a25ca7a134cef8d436c..85b12a377b15d5445fa241375b8d6a422977fbee 100644 (file)
@@ -190,22 +190,16 @@ class Collection < ArvadosModel
   end
 
   def manifest_files
+    return '' if !self.manifest_text
+
     names = ''
-    if self.manifest_text
-      self.manifest_text.scan(/ \d+:\d+:(\S+)/) do |name|
-        names << name.first.gsub('\040',' ') + "\n"
-        break if names.length > 2**12
-      end
+    self.manifest_text.scan(/ \d+:\d+:(\S+)/) do |name|
+      names << name.first.gsub('\040',' ') + "\n"
     end
-
-    if self.manifest_text and names.length < 2**12
-      self.manifest_text.scan(/^\.\/(\S+)/m) do |stream_name|
-        names << stream_name.first.gsub('\040',' ') + "\n"
-        break if names.length > 2**12
-      end
+    self.manifest_text.scan(/^\.\/(\S+)/m) do |stream_name|
+      names << stream_name.first.gsub('\040',' ') + "\n"
     end
-
-    names[0,2**12]
+    names
   end
 
   def default_empty_manifest
index 7ec9845bc1983c0819f4d801e5044d8e5765f00f..7176bda926d8aa1cdb0c499958ae4ff6ffc97850 100644 (file)
@@ -228,13 +228,13 @@ class Container < ArvadosModel
 
   def self.find_reusable(attrs)
     log_reuse_info { "starting with #{Container.all.count} container records in database" }
-    candidates = Container.where_serialized(:command, attrs[:command])
+    candidates = Container.where_serialized(:command, attrs[:command], md5: true)
     log_reuse_info(candidates) { "after filtering on command #{attrs[:command].inspect}" }
 
     candidates = candidates.where('cwd = ?', attrs[:cwd])
     log_reuse_info(candidates) { "after filtering on cwd #{attrs[:cwd].inspect}" }
 
-    candidates = candidates.where_serialized(:environment, attrs[:environment])
+    candidates = candidates.where_serialized(:environment, attrs[:environment], md5: true)
     log_reuse_info(candidates) { "after filtering on environment #{attrs[:environment].inspect}" }
 
     candidates = candidates.where('output_path = ?', attrs[:output_path])
@@ -244,13 +244,14 @@ class Container < ArvadosModel
     candidates = candidates.where('container_image = ?', image)
     log_reuse_info(candidates) { "after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})" }
 
-    candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]))
+    candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]), md5: true)
     log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
 
-    candidates = candidates.where('secret_mounts_md5 = ?', Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts]))))
-    log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
+    secret_mounts_md5 = Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts])))
+    candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
+    log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
 
-    candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]))
+    candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]), md5: true)
     log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
 
     log_reuse_info { "checking for state=Complete with readable output and log..." }
index dd3ff767dd4c8f86b523add765afe2f3516fba5d..470388a7c7f6786662b4661a454686d9d48e0d15 100644 (file)
@@ -33,6 +33,7 @@ class ContainerRequest < ArvadosModel
   validates :command, :container_image, :output_path, :cwd, :presence => true
   validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
   validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+  validate :validate_datatypes
   validate :validate_scheduling_parameters
   validate :validate_state_change
   validate :check_update_whitelist
@@ -228,6 +229,43 @@ class ContainerRequest < ArvadosModel
     end
   end
 
+  def validate_datatypes
+    command.each do |c|
+      if !c.is_a? String
+        errors.add(:command, "must be an array of strings but has entry #{c.class}")
+      end
+    end
+    environment.each do |k,v|
+      if !k.is_a?(String) || !v.is_a?(String)
+        errors.add(:environment, "must be a map of String to String but has entry #{k.class} to #{v.class}")
+      end
+    end
+    [:mounts, :secret_mounts].each do |m|
+      self[m].each do |k, v|
+        if !k.is_a?(String) || !v.is_a?(Hash)
+          errors.add(m, "must be an map of String to Hash but is has entry #{k.class} to #{v.class}")
+        end
+        if v["kind"].nil?
+          errors.add(m, "each item must have a 'kind' field")
+        end
+        [[String, ["kind", "portable_data_hash", "uuid", "device_type",
+                   "path", "commit", "repository_name", "git_url"]],
+         [Integer, ["capacity"]]].each do |t, fields|
+          fields.each do |f|
+            if !v[f].nil? && !v[f].is_a?(t)
+              errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+            end
+          end
+        end
+        ["writable", "exclude_from_output"].each do |f|
+          if !v[f].nil? && !v[f].is_a?(TrueClass) && !v[f].is_a?(FalseClass)
+            errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+          end
+        end
+      end
+    end
+  end
+
   def validate_scheduling_parameters
     if self.state == Committed
       if scheduling_parameters.include? 'partitions' and
@@ -321,7 +359,13 @@ class ContainerRequest < ArvadosModel
     c = get_requesting_container()
     if !c.nil?
       self.requesting_container_uuid = c.uuid
-      self.priority = c.priority>0 ? 1 : 0
+      # Set the priority to the highest priority among container
+      # requests targeting the requesting container (0 if none).
+      self.priority = ContainerRequest.
+            where('container_uuid=? and priority>0', self.requesting_container_uuid).
+            map do |cr|
+        cr.priority
+      end.max || 0
     end
   end
 
index c09a8e5e479db6442cab9210b41631453a700748..24fd61871d26e4c16416d254b3d7e7441e17bb68 100644 (file)
@@ -30,6 +30,9 @@ end
 
 module Server
   class Application < Rails::Application
+    # The following is to avoid SafeYAML's warning message
+    SafeYAML::OPTIONS[:default_mode] = :safe
+
     # Settings in config/environments/* take precedence over those specified here.
     # Application configuration should go into files in config/initializers
     # -- all .rb files in that directory are automatically loaded.
diff --git a/services/api/config/initializers/oj_mimic_json.rb b/services/api/config/initializers/oj_mimic_json.rb
new file mode 100644 (file)
index 0000000..ce2d40c
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'oj'
+
+Oj::Rails.set_encoder()
+Oj::Rails.set_decoder()
+Oj::Rails.optimize()
+Oj::Rails.mimic_JSON()
+
index 3d690930ae18a1a4d1956f0872b37eec77d9d228..78cabc87ac7cd5f66a07becb1207be53ff6e2af3 100644 (file)
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+ActiveSupport::JSON::Encoding.time_precision = 9
+
 class ActiveSupport::TimeWithZone
   remove_method :as_json
   def as_json *args
index b0c09840d790db1d634139bd796691d82f2b7c8c..b54c3c5bf170cc431140a0925b9846e7f172b397 100644 (file)
@@ -30,6 +30,7 @@ Server::Application.routes.draw do
       resources :groups do
         get 'contents', on: :collection
         get 'contents', on: :member
+        get 'shared', on: :collection
         post 'trash', on: :member
         post 'untrash', on: :member
       end
diff --git a/services/api/db/migrate/20180806133039_index_all_filenames.rb b/services/api/db/migrate/20180806133039_index_all_filenames.rb
new file mode 100644 (file)
index 0000000..36b155c
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class IndexAllFilenames < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE text'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE varchar(8192)'
+  end
+end
diff --git a/services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb b/services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb
new file mode 100644 (file)
index 0000000..8d1cdf3
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPdhAndTrashIndexToCollections < ActiveRecord::Migration
+  def change
+    add_index :collections, [:portable_data_hash, :trash_at]
+  end
+end
diff --git a/services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb b/services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb
new file mode 100644 (file)
index 0000000..94ca100
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddLockIndexToContainers < ActiveRecord::Migration
+  def change
+    # For the current code in sdk/go/dispatch:
+    add_index :containers, [:locked_by_uuid, :priority]
+    # For future dispatchers that use filters instead of offset for
+    # more predictable paging:
+    add_index :containers, [:locked_by_uuid, :uuid]
+  end
+end
diff --git a/services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb b/services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb
new file mode 100644 (file)
index 0000000..3d757e4
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DropPdhIndexFromCollections < ActiveRecord::Migration
+  def change
+    remove_index :collections, column: :portable_data_hash
+  end
+end
diff --git a/services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb b/services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb
new file mode 100644 (file)
index 0000000..a58932e
--- /dev/null
@@ -0,0 +1,8 @@
+class AddMd5IndexToContainers < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'
+  end
+end
diff --git a/services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb b/services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb
new file mode 100644 (file)
index 0000000..7245108
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddQueueIndexToContainers < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_queued_state on containers (state, (priority > 0))'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_queued_state'
+  end
+end
index a201a05aaf83a8efe52469f349e4c84fb75927f3..32b77ed17aa0a4b511fafb50dd04c7d5f45371fa 100644 (file)
@@ -5,6 +5,7 @@
 SET statement_timeout = 0;
 SET client_encoding = 'UTF8';
 SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
 SET check_function_bodies = false;
 SET client_min_messages = warning;
 
@@ -22,8 +23,6 @@ CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
 -- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
 
 
-SET search_path = public, pg_catalog;
-
 SET default_tablespace = '';
 
 SET default_with_oids = false;
@@ -32,7 +31,7 @@ SET default_with_oids = false;
 -- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE api_client_authorizations (
+CREATE TABLE public.api_client_authorizations (
     id integer NOT NULL,
     api_token character varying(255) NOT NULL,
     api_client_id integer NOT NULL,
@@ -53,7 +52,7 @@ CREATE TABLE api_client_authorizations (
 -- Name: api_client_authorizations_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE api_client_authorizations_id_seq
+CREATE SEQUENCE public.api_client_authorizations_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -65,14 +64,14 @@ CREATE SEQUENCE api_client_authorizations_id_seq
 -- Name: api_client_authorizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE api_client_authorizations_id_seq OWNED BY api_client_authorizations.id;
+ALTER SEQUENCE public.api_client_authorizations_id_seq OWNED BY public.api_client_authorizations.id;
 
 
 --
 -- Name: api_clients; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE api_clients (
+CREATE TABLE public.api_clients (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -91,7 +90,7 @@ CREATE TABLE api_clients (
 -- Name: api_clients_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE api_clients_id_seq
+CREATE SEQUENCE public.api_clients_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -103,14 +102,14 @@ CREATE SEQUENCE api_clients_id_seq
 -- Name: api_clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE api_clients_id_seq OWNED BY api_clients.id;
+ALTER SEQUENCE public.api_clients_id_seq OWNED BY public.api_clients.id;
 
 
 --
 -- Name: authorized_keys; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE authorized_keys (
+CREATE TABLE public.authorized_keys (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -131,7 +130,7 @@ CREATE TABLE authorized_keys (
 -- Name: authorized_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE authorized_keys_id_seq
+CREATE SEQUENCE public.authorized_keys_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -143,14 +142,14 @@ CREATE SEQUENCE authorized_keys_id_seq
 -- Name: authorized_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE authorized_keys_id_seq OWNED BY authorized_keys.id;
+ALTER SEQUENCE public.authorized_keys_id_seq OWNED BY public.authorized_keys.id;
 
 
 --
 -- Name: collections; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE collections (
+CREATE TABLE public.collections (
     id integer NOT NULL,
     owner_uuid character varying(255),
     created_at timestamp without time zone NOT NULL,
@@ -168,7 +167,7 @@ CREATE TABLE collections (
     description character varying(524288),
     properties jsonb,
     delete_at timestamp without time zone,
-    file_names character varying(8192),
+    file_names text,
     trash_at timestamp without time zone,
     is_trashed boolean DEFAULT false NOT NULL,
     storage_classes_desired jsonb DEFAULT '["default"]'::jsonb,
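Buried in the qualification churn is a real type change: collections.file_names goes from character varying(8192) to text, presumably so a collection's searchable file-name list is no longer capped at 8192 characters. The collections full-text index further down is adjusted to match; with a varchar column the whole COALESCE was cast to text, whereas with a text column only the empty-string default needs a cast:

    -- Before (file_names is varchar): cast the COALESCE result.
    (COALESCE(file_names, ''::character varying))::text

    -- After (file_names is text): cast only the '' fallback.
    COALESCE(file_names, (''::character varying)::text)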
@@ -181,7 +180,7 @@ CREATE TABLE collections (
 -- Name: collections_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE collections_id_seq
+CREATE SEQUENCE public.collections_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -193,14 +192,14 @@ CREATE SEQUENCE collections_id_seq
 -- Name: collections_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE collections_id_seq OWNED BY collections.id;
+ALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;
 
 
 --
 -- Name: commit_ancestors; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE commit_ancestors (
+CREATE TABLE public.commit_ancestors (
     id integer NOT NULL,
     repository_name character varying(255),
     descendant character varying(255) NOT NULL,
@@ -215,7 +214,7 @@ CREATE TABLE commit_ancestors (
 -- Name: commit_ancestors_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE commit_ancestors_id_seq
+CREATE SEQUENCE public.commit_ancestors_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -227,14 +226,14 @@ CREATE SEQUENCE commit_ancestors_id_seq
 -- Name: commit_ancestors_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE commit_ancestors_id_seq OWNED BY commit_ancestors.id;
+ALTER SEQUENCE public.commit_ancestors_id_seq OWNED BY public.commit_ancestors.id;
 
 
 --
 -- Name: commits; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE commits (
+CREATE TABLE public.commits (
     id integer NOT NULL,
     repository_name character varying(255),
     sha1 character varying(255),
@@ -248,7 +247,7 @@ CREATE TABLE commits (
 -- Name: commits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE commits_id_seq
+CREATE SEQUENCE public.commits_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -260,14 +259,14 @@ CREATE SEQUENCE commits_id_seq
 -- Name: commits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE commits_id_seq OWNED BY commits.id;
+ALTER SEQUENCE public.commits_id_seq OWNED BY public.commits.id;
 
 
 --
 -- Name: container_requests; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE container_requests (
+CREATE TABLE public.container_requests (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -308,7 +307,7 @@ CREATE TABLE container_requests (
 -- Name: container_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE container_requests_id_seq
+CREATE SEQUENCE public.container_requests_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -320,14 +319,14 @@ CREATE SEQUENCE container_requests_id_seq
 -- Name: container_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE container_requests_id_seq OWNED BY container_requests.id;
+ALTER SEQUENCE public.container_requests_id_seq OWNED BY public.container_requests.id;
 
 
 --
 -- Name: containers; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE containers (
+CREATE TABLE public.containers (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -363,7 +362,7 @@ CREATE TABLE containers (
 -- Name: containers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE containers_id_seq
+CREATE SEQUENCE public.containers_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -375,14 +374,14 @@ CREATE SEQUENCE containers_id_seq
 -- Name: containers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE containers_id_seq OWNED BY containers.id;
+ALTER SEQUENCE public.containers_id_seq OWNED BY public.containers.id;
 
 
 --
 -- Name: groups; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE groups (
+CREATE TABLE public.groups (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -405,7 +404,7 @@ CREATE TABLE groups (
 -- Name: groups_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE groups_id_seq
+CREATE SEQUENCE public.groups_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -417,14 +416,14 @@ CREATE SEQUENCE groups_id_seq
 -- Name: groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE groups_id_seq OWNED BY groups.id;
+ALTER SEQUENCE public.groups_id_seq OWNED BY public.groups.id;
 
 
 --
 -- Name: humans; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE humans (
+CREATE TABLE public.humans (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -441,7 +440,7 @@ CREATE TABLE humans (
 -- Name: humans_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE humans_id_seq
+CREATE SEQUENCE public.humans_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -453,14 +452,14 @@ CREATE SEQUENCE humans_id_seq
 -- Name: humans_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE humans_id_seq OWNED BY humans.id;
+ALTER SEQUENCE public.humans_id_seq OWNED BY public.humans.id;
 
 
 --
 -- Name: job_tasks; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE job_tasks (
+CREATE TABLE public.job_tasks (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -486,7 +485,7 @@ CREATE TABLE job_tasks (
 -- Name: job_tasks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE job_tasks_id_seq
+CREATE SEQUENCE public.job_tasks_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -498,14 +497,14 @@ CREATE SEQUENCE job_tasks_id_seq
 -- Name: job_tasks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE job_tasks_id_seq OWNED BY job_tasks.id;
+ALTER SEQUENCE public.job_tasks_id_seq OWNED BY public.job_tasks.id;
 
 
 --
 -- Name: job_tasks_qsequence_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE job_tasks_qsequence_seq
+CREATE SEQUENCE public.job_tasks_qsequence_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -517,14 +516,14 @@ CREATE SEQUENCE job_tasks_qsequence_seq
 -- Name: job_tasks_qsequence_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE job_tasks_qsequence_seq OWNED BY job_tasks.qsequence;
+ALTER SEQUENCE public.job_tasks_qsequence_seq OWNED BY public.job_tasks.qsequence;
 
 
 --
 -- Name: jobs; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE jobs (
+CREATE TABLE public.jobs (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -566,7 +565,7 @@ CREATE TABLE jobs (
 -- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE jobs_id_seq
+CREATE SEQUENCE public.jobs_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -578,14 +577,14 @@ CREATE SEQUENCE jobs_id_seq
 -- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE jobs_id_seq OWNED BY jobs.id;
+ALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id;
 
 
 --
 -- Name: keep_disks; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE keep_disks (
+CREATE TABLE public.keep_disks (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -612,7 +611,7 @@ CREATE TABLE keep_disks (
 -- Name: keep_disks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE keep_disks_id_seq
+CREATE SEQUENCE public.keep_disks_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -624,14 +623,14 @@ CREATE SEQUENCE keep_disks_id_seq
 -- Name: keep_disks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE keep_disks_id_seq OWNED BY keep_disks.id;
+ALTER SEQUENCE public.keep_disks_id_seq OWNED BY public.keep_disks.id;
 
 
 --
 -- Name: keep_services; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE keep_services (
+CREATE TABLE public.keep_services (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -652,7 +651,7 @@ CREATE TABLE keep_services (
 -- Name: keep_services_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE keep_services_id_seq
+CREATE SEQUENCE public.keep_services_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -664,14 +663,14 @@ CREATE SEQUENCE keep_services_id_seq
 -- Name: keep_services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE keep_services_id_seq OWNED BY keep_services.id;
+ALTER SEQUENCE public.keep_services_id_seq OWNED BY public.keep_services.id;
 
 
 --
 -- Name: links; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE links (
+CREATE TABLE public.links (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -692,7 +691,7 @@ CREATE TABLE links (
 -- Name: links_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE links_id_seq
+CREATE SEQUENCE public.links_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -704,14 +703,14 @@ CREATE SEQUENCE links_id_seq
 -- Name: links_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE links_id_seq OWNED BY links.id;
+ALTER SEQUENCE public.links_id_seq OWNED BY public.links.id;
 
 
 --
 -- Name: logs; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE logs (
+CREATE TABLE public.logs (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -733,7 +732,7 @@ CREATE TABLE logs (
 -- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE logs_id_seq
+CREATE SEQUENCE public.logs_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -745,14 +744,14 @@ CREATE SEQUENCE logs_id_seq
 -- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE logs_id_seq OWNED BY logs.id;
+ALTER SEQUENCE public.logs_id_seq OWNED BY public.logs.id;
 
 
 --
 -- Name: users; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE users (
+CREATE TABLE public.users (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255) NOT NULL,
@@ -778,7 +777,7 @@ CREATE TABLE users (
 -- Name: materialized_permission_view; Type: MATERIALIZED VIEW; Schema: public; Owner: -
 --
 
-CREATE MATERIALIZED VIEW materialized_permission_view AS
+CREATE MATERIALIZED VIEW public.materialized_permission_view AS
  WITH RECURSIVE perm_value(name, val) AS (
          VALUES ('can_read'::text,(1)::smallint), ('can_login'::text,1), ('can_write'::text,2), ('can_manage'::text,3)
         ), perm_edges(tail_uuid, head_uuid, val, follow, trashed) AS (
@@ -788,9 +787,9 @@ CREATE MATERIALIZED VIEW materialized_permission_view AS
             ((pv.val = 3) OR (groups.uuid IS NOT NULL)) AS follow,
             (0)::smallint AS trashed,
             (0)::smallint AS followtrash
-           FROM ((links
+           FROM ((public.links
              LEFT JOIN perm_value pv ON ((pv.name = (links.name)::text)))
-             LEFT JOIN groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))
+             LEFT JOIN public.groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))
           WHERE ((links.link_class)::text = 'permission'::text)
         UNION ALL
          SELECT groups.owner_uuid,
@@ -802,14 +801,14 @@ CREATE MATERIALIZED VIEW materialized_permission_view AS
                     ELSE 0
                 END AS "case",
             1
-           FROM groups
+           FROM public.groups
         ), perm(val, follow, user_uuid, target_uuid, trashed) AS (
          SELECT (3)::smallint AS val,
             true AS follow,
             (users.uuid)::character varying(32) AS user_uuid,
             (users.uuid)::character varying(32) AS target_uuid,
             (0)::smallint AS trashed
-           FROM users
+           FROM public.users
         UNION
          SELECT (LEAST((perm_1.val)::integer, edges.val))::smallint AS val,
             edges.follow,
@@ -840,7 +839,7 @@ CREATE MATERIALIZED VIEW materialized_permission_view AS
 -- Name: nodes; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE nodes (
+CREATE TABLE public.nodes (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -865,7 +864,7 @@ CREATE TABLE nodes (
 -- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE nodes_id_seq
+CREATE SEQUENCE public.nodes_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -877,14 +876,14 @@ CREATE SEQUENCE nodes_id_seq
 -- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE nodes_id_seq OWNED BY nodes.id;
+ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
 
 
 --
 -- Name: permission_refresh_lock; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE permission_refresh_lock (
+CREATE TABLE public.permission_refresh_lock (
     id integer NOT NULL
 );
 
@@ -893,7 +892,7 @@ CREATE TABLE permission_refresh_lock (
 -- Name: permission_refresh_lock_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE permission_refresh_lock_id_seq
+CREATE SEQUENCE public.permission_refresh_lock_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -905,14 +904,14 @@ CREATE SEQUENCE permission_refresh_lock_id_seq
 -- Name: permission_refresh_lock_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE permission_refresh_lock_id_seq OWNED BY permission_refresh_lock.id;
+ALTER SEQUENCE public.permission_refresh_lock_id_seq OWNED BY public.permission_refresh_lock.id;
 
 
 --
 -- Name: pipeline_instances; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE pipeline_instances (
+CREATE TABLE public.pipeline_instances (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -937,7 +936,7 @@ CREATE TABLE pipeline_instances (
 -- Name: pipeline_instances_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE pipeline_instances_id_seq
+CREATE SEQUENCE public.pipeline_instances_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -949,14 +948,14 @@ CREATE SEQUENCE pipeline_instances_id_seq
 -- Name: pipeline_instances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE pipeline_instances_id_seq OWNED BY pipeline_instances.id;
+ALTER SEQUENCE public.pipeline_instances_id_seq OWNED BY public.pipeline_instances.id;
 
 
 --
 -- Name: pipeline_templates; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE pipeline_templates (
+CREATE TABLE public.pipeline_templates (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -975,7 +974,7 @@ CREATE TABLE pipeline_templates (
 -- Name: pipeline_templates_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE pipeline_templates_id_seq
+CREATE SEQUENCE public.pipeline_templates_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -987,14 +986,14 @@ CREATE SEQUENCE pipeline_templates_id_seq
 -- Name: pipeline_templates_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE pipeline_templates_id_seq OWNED BY pipeline_templates.id;
+ALTER SEQUENCE public.pipeline_templates_id_seq OWNED BY public.pipeline_templates.id;
 
 
 --
 -- Name: repositories; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE repositories (
+CREATE TABLE public.repositories (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -1011,7 +1010,7 @@ CREATE TABLE repositories (
 -- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE repositories_id_seq
+CREATE SEQUENCE public.repositories_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1023,14 +1022,14 @@ CREATE SEQUENCE repositories_id_seq
 -- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE repositories_id_seq OWNED BY repositories.id;
+ALTER SEQUENCE public.repositories_id_seq OWNED BY public.repositories.id;
 
 
 --
 -- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE schema_migrations (
+CREATE TABLE public.schema_migrations (
     version character varying(255) NOT NULL
 );
 
@@ -1039,7 +1038,7 @@ CREATE TABLE schema_migrations (
 -- Name: specimens; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE specimens (
+CREATE TABLE public.specimens (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -1057,7 +1056,7 @@ CREATE TABLE specimens (
 -- Name: specimens_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE specimens_id_seq
+CREATE SEQUENCE public.specimens_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1069,14 +1068,14 @@ CREATE SEQUENCE specimens_id_seq
 -- Name: specimens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE specimens_id_seq OWNED BY specimens.id;
+ALTER SEQUENCE public.specimens_id_seq OWNED BY public.specimens.id;
 
 
 --
 -- Name: traits; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE traits (
+CREATE TABLE public.traits (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -1094,7 +1093,7 @@ CREATE TABLE traits (
 -- Name: traits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE traits_id_seq
+CREATE SEQUENCE public.traits_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1106,14 +1105,14 @@ CREATE SEQUENCE traits_id_seq
 -- Name: traits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE traits_id_seq OWNED BY traits.id;
+ALTER SEQUENCE public.traits_id_seq OWNED BY public.traits.id;
 
 
 --
 -- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE users_id_seq
+CREATE SEQUENCE public.users_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1125,14 +1124,14 @@ CREATE SEQUENCE users_id_seq
 -- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE users_id_seq OWNED BY users.id;
+ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
 
 
 --
 -- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE virtual_machines (
+CREATE TABLE public.virtual_machines (
     id integer NOT NULL,
     uuid character varying(255) NOT NULL,
     owner_uuid character varying(255) NOT NULL,
@@ -1149,7 +1148,7 @@ CREATE TABLE virtual_machines (
 -- Name: virtual_machines_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE virtual_machines_id_seq
+CREATE SEQUENCE public.virtual_machines_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1161,14 +1160,14 @@ CREATE SEQUENCE virtual_machines_id_seq
 -- Name: virtual_machines_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE virtual_machines_id_seq OWNED BY virtual_machines.id;
+ALTER SEQUENCE public.virtual_machines_id_seq OWNED BY public.virtual_machines.id;
 
 
 --
 -- Name: workflows; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE workflows (
+CREATE TABLE public.workflows (
     id integer NOT NULL,
     uuid character varying(255),
     owner_uuid character varying(255),
@@ -1187,7 +1186,7 @@ CREATE TABLE workflows (
 -- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
 
-CREATE SEQUENCE workflows_id_seq
+CREATE SEQUENCE public.workflows_id_seq
     START WITH 1
     INCREMENT BY 1
     NO MINVALUE
@@ -1199,196 +1198,196 @@ CREATE SEQUENCE workflows_id_seq
 -- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
 --
 
-ALTER SEQUENCE workflows_id_seq OWNED BY workflows.id;
+ALTER SEQUENCE public.workflows_id_seq OWNED BY public.workflows.id;
 
 
 --
 -- Name: api_client_authorizations id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('api_client_authorizations_id_seq'::regclass);
+ALTER TABLE ONLY public.api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('public.api_client_authorizations_id_seq'::regclass);
 
 
 --
 -- Name: api_clients id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY api_clients ALTER COLUMN id SET DEFAULT nextval('api_clients_id_seq'::regclass);
+ALTER TABLE ONLY public.api_clients ALTER COLUMN id SET DEFAULT nextval('public.api_clients_id_seq'::regclass);
 
 
 --
 -- Name: authorized_keys id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY authorized_keys ALTER COLUMN id SET DEFAULT nextval('authorized_keys_id_seq'::regclass);
+ALTER TABLE ONLY public.authorized_keys ALTER COLUMN id SET DEFAULT nextval('public.authorized_keys_id_seq'::regclass);
 
 
 --
 -- Name: collections id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY collections ALTER COLUMN id SET DEFAULT nextval('collections_id_seq'::regclass);
+ALTER TABLE ONLY public.collections ALTER COLUMN id SET DEFAULT nextval('public.collections_id_seq'::regclass);
 
 
 --
 -- Name: commit_ancestors id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY commit_ancestors ALTER COLUMN id SET DEFAULT nextval('commit_ancestors_id_seq'::regclass);
+ALTER TABLE ONLY public.commit_ancestors ALTER COLUMN id SET DEFAULT nextval('public.commit_ancestors_id_seq'::regclass);
 
 
 --
 -- Name: commits id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY commits ALTER COLUMN id SET DEFAULT nextval('commits_id_seq'::regclass);
+ALTER TABLE ONLY public.commits ALTER COLUMN id SET DEFAULT nextval('public.commits_id_seq'::regclass);
 
 
 --
 -- Name: container_requests id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY container_requests ALTER COLUMN id SET DEFAULT nextval('container_requests_id_seq'::regclass);
+ALTER TABLE ONLY public.container_requests ALTER COLUMN id SET DEFAULT nextval('public.container_requests_id_seq'::regclass);
 
 
 --
 -- Name: containers id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY containers ALTER COLUMN id SET DEFAULT nextval('containers_id_seq'::regclass);
+ALTER TABLE ONLY public.containers ALTER COLUMN id SET DEFAULT nextval('public.containers_id_seq'::regclass);
 
 
 --
 -- Name: groups id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY groups ALTER COLUMN id SET DEFAULT nextval('groups_id_seq'::regclass);
+ALTER TABLE ONLY public.groups ALTER COLUMN id SET DEFAULT nextval('public.groups_id_seq'::regclass);
 
 
 --
 -- Name: humans id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY humans ALTER COLUMN id SET DEFAULT nextval('humans_id_seq'::regclass);
+ALTER TABLE ONLY public.humans ALTER COLUMN id SET DEFAULT nextval('public.humans_id_seq'::regclass);
 
 
 --
 -- Name: job_tasks id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY job_tasks ALTER COLUMN id SET DEFAULT nextval('job_tasks_id_seq'::regclass);
+ALTER TABLE ONLY public.job_tasks ALTER COLUMN id SET DEFAULT nextval('public.job_tasks_id_seq'::regclass);
 
 
 --
 -- Name: jobs id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY jobs ALTER COLUMN id SET DEFAULT nextval('jobs_id_seq'::regclass);
+ALTER TABLE ONLY public.jobs ALTER COLUMN id SET DEFAULT nextval('public.jobs_id_seq'::regclass);
 
 
 --
 -- Name: keep_disks id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY keep_disks ALTER COLUMN id SET DEFAULT nextval('keep_disks_id_seq'::regclass);
+ALTER TABLE ONLY public.keep_disks ALTER COLUMN id SET DEFAULT nextval('public.keep_disks_id_seq'::regclass);
 
 
 --
 -- Name: keep_services id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY keep_services ALTER COLUMN id SET DEFAULT nextval('keep_services_id_seq'::regclass);
+ALTER TABLE ONLY public.keep_services ALTER COLUMN id SET DEFAULT nextval('public.keep_services_id_seq'::regclass);
 
 
 --
 -- Name: links id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY links ALTER COLUMN id SET DEFAULT nextval('links_id_seq'::regclass);
+ALTER TABLE ONLY public.links ALTER COLUMN id SET DEFAULT nextval('public.links_id_seq'::regclass);
 
 
 --
 -- Name: logs id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY logs ALTER COLUMN id SET DEFAULT nextval('logs_id_seq'::regclass);
+ALTER TABLE ONLY public.logs ALTER COLUMN id SET DEFAULT nextval('public.logs_id_seq'::regclass);
 
 
 --
 -- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY nodes ALTER COLUMN id SET DEFAULT nextval('nodes_id_seq'::regclass);
+ALTER TABLE ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
 
 
 --
 -- Name: permission_refresh_lock id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY permission_refresh_lock ALTER COLUMN id SET DEFAULT nextval('permission_refresh_lock_id_seq'::regclass);
+ALTER TABLE ONLY public.permission_refresh_lock ALTER COLUMN id SET DEFAULT nextval('public.permission_refresh_lock_id_seq'::regclass);
 
 
 --
 -- Name: pipeline_instances id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY pipeline_instances ALTER COLUMN id SET DEFAULT nextval('pipeline_instances_id_seq'::regclass);
+ALTER TABLE ONLY public.pipeline_instances ALTER COLUMN id SET DEFAULT nextval('public.pipeline_instances_id_seq'::regclass);
 
 
 --
 -- Name: pipeline_templates id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY pipeline_templates ALTER COLUMN id SET DEFAULT nextval('pipeline_templates_id_seq'::regclass);
+ALTER TABLE ONLY public.pipeline_templates ALTER COLUMN id SET DEFAULT nextval('public.pipeline_templates_id_seq'::regclass);
 
 
 --
 -- Name: repositories id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY repositories ALTER COLUMN id SET DEFAULT nextval('repositories_id_seq'::regclass);
+ALTER TABLE ONLY public.repositories ALTER COLUMN id SET DEFAULT nextval('public.repositories_id_seq'::regclass);
 
 
 --
 -- Name: specimens id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY specimens ALTER COLUMN id SET DEFAULT nextval('specimens_id_seq'::regclass);
+ALTER TABLE ONLY public.specimens ALTER COLUMN id SET DEFAULT nextval('public.specimens_id_seq'::regclass);
 
 
 --
 -- Name: traits id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY traits ALTER COLUMN id SET DEFAULT nextval('traits_id_seq'::regclass);
+ALTER TABLE ONLY public.traits ALTER COLUMN id SET DEFAULT nextval('public.traits_id_seq'::regclass);
 
 
 --
 -- Name: users id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY users ALTER COLUMN id SET DEFAULT nextval('users_id_seq'::regclass);
+ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass);
 
 
 --
 -- Name: virtual_machines id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY virtual_machines ALTER COLUMN id SET DEFAULT nextval('virtual_machines_id_seq'::regclass);
+ALTER TABLE ONLY public.virtual_machines ALTER COLUMN id SET DEFAULT nextval('public.virtual_machines_id_seq'::regclass);
 
 
 --
 -- Name: workflows id; Type: DEFAULT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY workflows ALTER COLUMN id SET DEFAULT nextval('workflows_id_seq'::regclass);
+ALTER TABLE ONLY public.workflows ALTER COLUMN id SET DEFAULT nextval('public.workflows_id_seq'::regclass);
 
 
 --
 -- Name: api_client_authorizations api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY api_client_authorizations
+ALTER TABLE ONLY public.api_client_authorizations
     ADD CONSTRAINT api_client_authorizations_pkey PRIMARY KEY (id);
 
 
@@ -1396,7 +1395,7 @@ ALTER TABLE ONLY api_client_authorizations
 -- Name: api_clients api_clients_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY api_clients
+ALTER TABLE ONLY public.api_clients
     ADD CONSTRAINT api_clients_pkey PRIMARY KEY (id);
 
 
@@ -1404,7 +1403,7 @@ ALTER TABLE ONLY api_clients
 -- Name: authorized_keys authorized_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY authorized_keys
+ALTER TABLE ONLY public.authorized_keys
     ADD CONSTRAINT authorized_keys_pkey PRIMARY KEY (id);
 
 
@@ -1412,7 +1411,7 @@ ALTER TABLE ONLY authorized_keys
 -- Name: collections collections_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY collections
+ALTER TABLE ONLY public.collections
     ADD CONSTRAINT collections_pkey PRIMARY KEY (id);
 
 
@@ -1420,7 +1419,7 @@ ALTER TABLE ONLY collections
 -- Name: commit_ancestors commit_ancestors_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY commit_ancestors
+ALTER TABLE ONLY public.commit_ancestors
     ADD CONSTRAINT commit_ancestors_pkey PRIMARY KEY (id);
 
 
@@ -1428,7 +1427,7 @@ ALTER TABLE ONLY commit_ancestors
 -- Name: commits commits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY commits
+ALTER TABLE ONLY public.commits
     ADD CONSTRAINT commits_pkey PRIMARY KEY (id);
 
 
@@ -1436,7 +1435,7 @@ ALTER TABLE ONLY commits
 -- Name: container_requests container_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY container_requests
+ALTER TABLE ONLY public.container_requests
     ADD CONSTRAINT container_requests_pkey PRIMARY KEY (id);
 
 
@@ -1444,7 +1443,7 @@ ALTER TABLE ONLY container_requests
 -- Name: containers containers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY containers
+ALTER TABLE ONLY public.containers
     ADD CONSTRAINT containers_pkey PRIMARY KEY (id);
 
 
@@ -1452,7 +1451,7 @@ ALTER TABLE ONLY containers
 -- Name: groups groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY groups
+ALTER TABLE ONLY public.groups
     ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
 
 
@@ -1460,7 +1459,7 @@ ALTER TABLE ONLY groups
 -- Name: humans humans_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY humans
+ALTER TABLE ONLY public.humans
     ADD CONSTRAINT humans_pkey PRIMARY KEY (id);
 
 
@@ -1468,7 +1467,7 @@ ALTER TABLE ONLY humans
 -- Name: job_tasks job_tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY job_tasks
+ALTER TABLE ONLY public.job_tasks
     ADD CONSTRAINT job_tasks_pkey PRIMARY KEY (id);
 
 
@@ -1476,7 +1475,7 @@ ALTER TABLE ONLY job_tasks
 -- Name: jobs jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY jobs
+ALTER TABLE ONLY public.jobs
     ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);
 
 
@@ -1484,7 +1483,7 @@ ALTER TABLE ONLY jobs
 -- Name: keep_disks keep_disks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY keep_disks
+ALTER TABLE ONLY public.keep_disks
     ADD CONSTRAINT keep_disks_pkey PRIMARY KEY (id);
 
 
@@ -1492,7 +1491,7 @@ ALTER TABLE ONLY keep_disks
 -- Name: keep_services keep_services_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY keep_services
+ALTER TABLE ONLY public.keep_services
     ADD CONSTRAINT keep_services_pkey PRIMARY KEY (id);
 
 
@@ -1500,7 +1499,7 @@ ALTER TABLE ONLY keep_services
 -- Name: links links_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY links
+ALTER TABLE ONLY public.links
     ADD CONSTRAINT links_pkey PRIMARY KEY (id);
 
 
@@ -1508,7 +1507,7 @@ ALTER TABLE ONLY links
 -- Name: logs logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY logs
+ALTER TABLE ONLY public.logs
     ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
 
 
@@ -1516,7 +1515,7 @@ ALTER TABLE ONLY logs
 -- Name: nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY nodes
+ALTER TABLE ONLY public.nodes
     ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
 
 
@@ -1524,7 +1523,7 @@ ALTER TABLE ONLY nodes
 -- Name: permission_refresh_lock permission_refresh_lock_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY permission_refresh_lock
+ALTER TABLE ONLY public.permission_refresh_lock
     ADD CONSTRAINT permission_refresh_lock_pkey PRIMARY KEY (id);
 
 
@@ -1532,7 +1531,7 @@ ALTER TABLE ONLY permission_refresh_lock
 -- Name: pipeline_instances pipeline_instances_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY pipeline_instances
+ALTER TABLE ONLY public.pipeline_instances
     ADD CONSTRAINT pipeline_instances_pkey PRIMARY KEY (id);
 
 
@@ -1540,7 +1539,7 @@ ALTER TABLE ONLY pipeline_instances
 -- Name: pipeline_templates pipeline_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY pipeline_templates
+ALTER TABLE ONLY public.pipeline_templates
     ADD CONSTRAINT pipeline_templates_pkey PRIMARY KEY (id);
 
 
@@ -1548,7 +1547,7 @@ ALTER TABLE ONLY pipeline_templates
 -- Name: repositories repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY repositories
+ALTER TABLE ONLY public.repositories
     ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);
 
 
@@ -1556,7 +1555,7 @@ ALTER TABLE ONLY repositories
 -- Name: specimens specimens_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY specimens
+ALTER TABLE ONLY public.specimens
     ADD CONSTRAINT specimens_pkey PRIMARY KEY (id);
 
 
@@ -1564,7 +1563,7 @@ ALTER TABLE ONLY specimens
 -- Name: traits traits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY traits
+ALTER TABLE ONLY public.traits
     ADD CONSTRAINT traits_pkey PRIMARY KEY (id);
 
 
@@ -1572,7 +1571,7 @@ ALTER TABLE ONLY traits
 -- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY users
+ALTER TABLE ONLY public.users
     ADD CONSTRAINT users_pkey PRIMARY KEY (id);
 
 
@@ -1580,7 +1579,7 @@ ALTER TABLE ONLY users
 -- Name: virtual_machines virtual_machines_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY virtual_machines
+ALTER TABLE ONLY public.virtual_machines
     ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
 
 
@@ -1588,7 +1587,7 @@ ALTER TABLE ONLY virtual_machines
 -- Name: workflows workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY workflows
+ALTER TABLE ONLY public.workflows
     ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);
 
 
@@ -1596,1190 +1595,1218 @@ ALTER TABLE ONLY workflows
 -- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX api_client_authorizations_search_index ON api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);
+CREATE INDEX api_client_authorizations_search_index ON public.api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);
 
 
 --
 -- Name: api_clients_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX api_clients_search_index ON api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);
+CREATE INDEX api_clients_search_index ON public.api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);
 
 
 --
 -- Name: authorized_keys_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX authorized_keys_search_index ON authorized_keys USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);
+CREATE INDEX authorized_keys_search_index ON public.authorized_keys USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);
 
 
 --
 -- Name: collection_index_on_properties; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX collection_index_on_properties ON collections USING gin (properties);
+CREATE INDEX collection_index_on_properties ON public.collections USING gin (properties);
 
 
 --
 -- Name: collections_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX collections_full_text_search_idx ON collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(file_names, ''::character varying))::text)));
+CREATE INDEX collections_full_text_search_idx ON public.collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, (''::character varying)::text))));
 
 
 --
 -- Name: collections_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX collections_search_index ON collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name);
+CREATE INDEX collections_search_index ON public.collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name);
 
 
 --
 -- Name: container_requests_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
+CREATE INDEX container_requests_full_text_search_idx ON public.container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
 
 
 --
 -- Name: container_requests_index_on_properties; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX container_requests_index_on_properties ON container_requests USING gin (properties);
+CREATE INDEX container_requests_index_on_properties ON public.container_requests USING gin (properties);
 
 
 --
 -- Name: container_requests_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX container_requests_search_index ON container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);
+CREATE INDEX container_requests_search_index ON public.container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);
 
 
 --
 -- Name: containers_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX containers_search_index ON containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
+CREATE INDEX containers_search_index ON public.containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
 
 
 --
 -- Name: group_index_on_properties; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX group_index_on_properties ON groups USING gin (properties);
+CREATE INDEX group_index_on_properties ON public.groups USING gin (properties);
 
 
 --
 -- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))));
+CREATE INDEX groups_full_text_search_idx ON public.groups USING gin (to_tsvector('english'::regconfig, (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))));
 
 
 --
 -- Name: groups_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX groups_search_index ON groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class);
+CREATE INDEX groups_search_index ON public.groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class);
 
 
 --
 -- Name: humans_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX humans_search_index ON humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
+CREATE INDEX humans_search_index ON public.humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
 
 
 --
 -- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_client_authorizations_on_api_client_id ON api_client_authorizations USING btree (api_client_id);
+CREATE INDEX index_api_client_authorizations_on_api_client_id ON public.api_client_authorizations USING btree (api_client_id);
 
 
 --
 -- Name: index_api_client_authorizations_on_api_token; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON api_client_authorizations USING btree (api_token);
+CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON public.api_client_authorizations USING btree (api_token);
 
 
 --
 -- Name: index_api_client_authorizations_on_expires_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_client_authorizations_on_expires_at ON api_client_authorizations USING btree (expires_at);
+CREATE INDEX index_api_client_authorizations_on_expires_at ON public.api_client_authorizations USING btree (expires_at);
 
 
 --
 -- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_client_authorizations_on_user_id ON api_client_authorizations USING btree (user_id);
+CREATE INDEX index_api_client_authorizations_on_user_id ON public.api_client_authorizations USING btree (user_id);
 
 
 --
 -- Name: index_api_client_authorizations_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON api_client_authorizations USING btree (uuid);
+CREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON public.api_client_authorizations USING btree (uuid);
 
 
 --
 -- Name: index_api_clients_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_clients_on_created_at ON api_clients USING btree (created_at);
+CREATE INDEX index_api_clients_on_created_at ON public.api_clients USING btree (created_at);
 
 
 --
 -- Name: index_api_clients_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_clients_on_modified_at ON api_clients USING btree (modified_at);
+CREATE INDEX index_api_clients_on_modified_at ON public.api_clients USING btree (modified_at);
 
 
 --
 -- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_api_clients_on_owner_uuid ON api_clients USING btree (owner_uuid);
+CREATE INDEX index_api_clients_on_owner_uuid ON public.api_clients USING btree (owner_uuid);
 
 
 --
 -- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_api_clients_on_uuid ON api_clients USING btree (uuid);
+CREATE UNIQUE INDEX index_api_clients_on_uuid ON public.api_clients USING btree (uuid);
 
 
 --
 -- Name: index_authkeys_on_user_and_expires_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_authkeys_on_user_and_expires_at ON authorized_keys USING btree (authorized_user_uuid, expires_at);
+CREATE INDEX index_authkeys_on_user_and_expires_at ON public.authorized_keys USING btree (authorized_user_uuid, expires_at);
 
 
 --
 -- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_authorized_keys_on_owner_uuid ON authorized_keys USING btree (owner_uuid);
+CREATE INDEX index_authorized_keys_on_owner_uuid ON public.authorized_keys USING btree (owner_uuid);
 
 
 --
 -- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON authorized_keys USING btree (uuid);
+CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON public.authorized_keys USING btree (uuid);
 
 
 --
 -- Name: index_collections_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_created_at ON collections USING btree (created_at);
+CREATE INDEX index_collections_on_created_at ON public.collections USING btree (created_at);
 
 
 --
 -- Name: index_collections_on_delete_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_delete_at ON collections USING btree (delete_at);
+CREATE INDEX index_collections_on_delete_at ON public.collections USING btree (delete_at);
 
 
 --
 -- Name: index_collections_on_is_trashed; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_is_trashed ON collections USING btree (is_trashed);
+CREATE INDEX index_collections_on_is_trashed ON public.collections USING btree (is_trashed);
 
 
 --
 -- Name: index_collections_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_modified_at ON collections USING btree (modified_at);
+CREATE INDEX index_collections_on_modified_at ON public.collections USING btree (modified_at);
 
 
 --
 -- Name: index_collections_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_modified_at_uuid ON collections USING btree (modified_at DESC, uuid);
+CREATE INDEX index_collections_on_modified_at_uuid ON public.collections USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_owner_uuid ON collections USING btree (owner_uuid);
+CREATE INDEX index_collections_on_owner_uuid ON public.collections USING btree (owner_uuid);
 
 
 --
 -- Name: index_collections_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON collections USING btree (owner_uuid, name) WHERE (is_trashed = false);
+CREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON public.collections USING btree (owner_uuid, name) WHERE (is_trashed = false);
 
 
 --
--- Name: index_collections_on_portable_data_hash; Type: INDEX; Schema: public; Owner: -
+-- Name: index_collections_on_portable_data_hash_and_trash_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_portable_data_hash ON collections USING btree (portable_data_hash);
+CREATE INDEX index_collections_on_portable_data_hash_and_trash_at ON public.collections USING btree (portable_data_hash, trash_at);
 
 
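The plain index on portable_data_hash is replaced here by a composite index on (portable_data_hash, trash_at), so content-hash lookups can evaluate a trash cutoff from the index as well. A hedged sketch of the lookup shape this serves (the server's actual query is not shown in this diff; the hash below is just the well-known empty-collection value used as an example):

    -- Find not-yet-trashed collections with a given content address;
    -- both filter columns are available in
    -- index_collections_on_portable_data_hash_and_trash_at.
    SELECT uuid
      FROM public.collections
     WHERE portable_data_hash = 'd41d8cd98f00b204e9800998ecf8427e+0'
       AND (trash_at IS NULL OR trash_at > current_timestamp);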
 --
 -- Name: index_collections_on_trash_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_collections_on_trash_at ON collections USING btree (trash_at);
+CREATE INDEX index_collections_on_trash_at ON public.collections USING btree (trash_at);
 
 
 --
 -- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_collections_on_uuid ON collections USING btree (uuid);
+CREATE UNIQUE INDEX index_collections_on_uuid ON public.collections USING btree (uuid);
 
 
 --
 -- Name: index_commit_ancestors_on_descendant_and_ancestor; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON commit_ancestors USING btree (descendant, ancestor);
+CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON public.commit_ancestors USING btree (descendant, ancestor);
 
 
 --
 -- Name: index_commits_on_repository_name_and_sha1; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON commits USING btree (repository_name, sha1);
+CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON public.commits USING btree (repository_name, sha1);
 
 
 --
 -- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_container_requests_on_container_uuid ON container_requests USING btree (container_uuid);
+CREATE INDEX index_container_requests_on_container_uuid ON public.container_requests USING btree (container_uuid);
 
 
 --
 -- Name: index_container_requests_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_container_requests_on_modified_at_uuid ON container_requests USING btree (modified_at DESC, uuid);
+CREATE INDEX index_container_requests_on_modified_at_uuid ON public.container_requests USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_container_requests_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_container_requests_on_owner_uuid ON container_requests USING btree (owner_uuid);
+CREATE INDEX index_container_requests_on_owner_uuid ON public.container_requests USING btree (owner_uuid);
 
 
 --
 -- Name: index_container_requests_on_requesting_container_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_container_requests_on_requesting_container_uuid ON container_requests USING btree (requesting_container_uuid);
+CREATE INDEX index_container_requests_on_requesting_container_uuid ON public.container_requests USING btree (requesting_container_uuid);
 
 
 --
 -- Name: index_container_requests_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_container_requests_on_uuid ON container_requests USING btree (uuid);
+CREATE UNIQUE INDEX index_container_requests_on_uuid ON public.container_requests USING btree (uuid);
 
 
 --
 -- Name: index_containers_on_auth_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_containers_on_auth_uuid ON containers USING btree (auth_uuid);
+CREATE INDEX index_containers_on_auth_uuid ON public.containers USING btree (auth_uuid);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_priority; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_priority ON public.containers USING btree (locked_by_uuid, priority);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_uuid ON public.containers USING btree (locked_by_uuid, uuid);
 
 
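Two new indexes on (locked_by_uuid, priority) and (locked_by_uuid, uuid) appear here; plausibly they back dispatcher queries that enumerate the containers a given token currently holds locked, filtered or ordered by priority or uuid. A sketch under that assumption (the token value is made up):

    -- Hypothetical dispatcher query: list the locked containers for one
    -- dispatcher token, highest priority first; the (locked_by_uuid,
    -- priority) index covers both the filter and the sort.
    SELECT uuid, priority
      FROM public.containers
     WHERE locked_by_uuid = 'zzzzz-gj3su-000000000000000'
     ORDER BY priority DESC;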
 --
 -- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at DESC, uuid);
+CREATE INDEX index_containers_on_modified_at_uuid ON public.containers USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_containers_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_containers_on_owner_uuid ON containers USING btree (owner_uuid);
+CREATE INDEX index_containers_on_owner_uuid ON public.containers USING btree (owner_uuid);
+
+
+--
+-- Name: index_containers_on_queued_state; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_queued_state ON public.containers USING btree (state, ((priority > 0)));
+
+
+--
+-- Name: index_containers_on_reuse_columns; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));
 
 
 --
 -- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_containers_on_secret_mounts_md5 ON containers USING btree (secret_mounts_md5);
+CREATE INDEX index_containers_on_secret_mounts_md5 ON public.containers USING btree (secret_mounts_md5);
 
 
 --
 -- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_containers_on_uuid ON containers USING btree (uuid);
+CREATE UNIQUE INDEX index_containers_on_uuid ON public.containers USING btree (uuid);
 
 
 --
 -- Name: index_groups_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_created_at ON groups USING btree (created_at);
+CREATE INDEX index_groups_on_created_at ON public.groups USING btree (created_at);
 
 
 --
 -- Name: index_groups_on_delete_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_delete_at ON groups USING btree (delete_at);
+CREATE INDEX index_groups_on_delete_at ON public.groups USING btree (delete_at);
 
 
 --
 -- Name: index_groups_on_group_class; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_group_class ON groups USING btree (group_class);
+CREATE INDEX index_groups_on_group_class ON public.groups USING btree (group_class);
 
 
 --
 -- Name: index_groups_on_is_trashed; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_is_trashed ON groups USING btree (is_trashed);
+CREATE INDEX index_groups_on_is_trashed ON public.groups USING btree (is_trashed);
 
 
 --
 -- Name: index_groups_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_modified_at ON groups USING btree (modified_at);
+CREATE INDEX index_groups_on_modified_at ON public.groups USING btree (modified_at);
 
 
 --
 -- Name: index_groups_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_modified_at_uuid ON groups USING btree (modified_at DESC, uuid);
+CREATE INDEX index_groups_on_modified_at_uuid ON public.groups USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_owner_uuid ON groups USING btree (owner_uuid);
+CREATE INDEX index_groups_on_owner_uuid ON public.groups USING btree (owner_uuid);
 
 
 --
 -- Name: index_groups_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON groups USING btree (owner_uuid, name) WHERE (is_trashed = false);
+CREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON public.groups USING btree (owner_uuid, name) WHERE (is_trashed = false);
 
 
 --
 -- Name: index_groups_on_trash_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_groups_on_trash_at ON groups USING btree (trash_at);
+CREATE INDEX index_groups_on_trash_at ON public.groups USING btree (trash_at);
 
 
 --
 -- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_groups_on_uuid ON groups USING btree (uuid);
+CREATE UNIQUE INDEX index_groups_on_uuid ON public.groups USING btree (uuid);
 
 
 --
 -- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_humans_on_owner_uuid ON humans USING btree (owner_uuid);
+CREATE INDEX index_humans_on_owner_uuid ON public.humans USING btree (owner_uuid);
 
 
 --
 -- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_humans_on_uuid ON humans USING btree (uuid);
+CREATE UNIQUE INDEX index_humans_on_uuid ON public.humans USING btree (uuid);
 
 
 --
 -- Name: index_job_tasks_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_created_at ON job_tasks USING btree (created_at);
+CREATE INDEX index_job_tasks_on_created_at ON public.job_tasks USING btree (created_at);
 
 
 --
 -- Name: index_job_tasks_on_created_by_job_task_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON job_tasks USING btree (created_by_job_task_uuid);
+CREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON public.job_tasks USING btree (created_by_job_task_uuid);
 
 
 --
 -- Name: index_job_tasks_on_job_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_job_uuid ON job_tasks USING btree (job_uuid);
+CREATE INDEX index_job_tasks_on_job_uuid ON public.job_tasks USING btree (job_uuid);
 
 
 --
 -- Name: index_job_tasks_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_modified_at ON job_tasks USING btree (modified_at);
+CREATE INDEX index_job_tasks_on_modified_at ON public.job_tasks USING btree (modified_at);
 
 
 --
 -- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_owner_uuid ON job_tasks USING btree (owner_uuid);
+CREATE INDEX index_job_tasks_on_owner_uuid ON public.job_tasks USING btree (owner_uuid);
 
 
 --
 -- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_sequence ON job_tasks USING btree (sequence);
+CREATE INDEX index_job_tasks_on_sequence ON public.job_tasks USING btree (sequence);
 
 
 --
 -- Name: index_job_tasks_on_success; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_job_tasks_on_success ON job_tasks USING btree (success);
+CREATE INDEX index_job_tasks_on_success ON public.job_tasks USING btree (success);
 
 
 --
 -- Name: index_job_tasks_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_job_tasks_on_uuid ON job_tasks USING btree (uuid);
+CREATE UNIQUE INDEX index_job_tasks_on_uuid ON public.job_tasks USING btree (uuid);
 
 
 --
 -- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_created_at ON jobs USING btree (created_at);
+CREATE INDEX index_jobs_on_created_at ON public.jobs USING btree (created_at);
 
 
 --
 -- Name: index_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_finished_at ON jobs USING btree (finished_at);
+CREATE INDEX index_jobs_on_finished_at ON public.jobs USING btree (finished_at);
 
 
 --
 -- Name: index_jobs_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_modified_at ON jobs USING btree (modified_at);
+CREATE INDEX index_jobs_on_modified_at ON public.jobs USING btree (modified_at);
 
 
 --
 -- Name: index_jobs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_modified_at_uuid ON jobs USING btree (modified_at DESC, uuid);
+CREATE INDEX index_jobs_on_modified_at_uuid ON public.jobs USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_jobs_on_output; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_output ON jobs USING btree (output);
+CREATE INDEX index_jobs_on_output ON public.jobs USING btree (output);
 
 
 --
 -- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_owner_uuid ON jobs USING btree (owner_uuid);
+CREATE INDEX index_jobs_on_owner_uuid ON public.jobs USING btree (owner_uuid);
 
 
 --
 -- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_script ON jobs USING btree (script);
+CREATE INDEX index_jobs_on_script ON public.jobs USING btree (script);
 
 
 --
 -- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_script_parameters_digest ON jobs USING btree (script_parameters_digest);
+CREATE INDEX index_jobs_on_script_parameters_digest ON public.jobs USING btree (script_parameters_digest);
 
 
 --
 -- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_jobs_on_started_at ON jobs USING btree (started_at);
+CREATE INDEX index_jobs_on_started_at ON public.jobs USING btree (started_at);
 
 
 --
 -- Name: index_jobs_on_submit_id; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_jobs_on_submit_id ON jobs USING btree (submit_id);
+CREATE UNIQUE INDEX index_jobs_on_submit_id ON public.jobs USING btree (submit_id);
 
 
 --
 -- Name: index_jobs_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_jobs_on_uuid ON jobs USING btree (uuid);
+CREATE UNIQUE INDEX index_jobs_on_uuid ON public.jobs USING btree (uuid);
 
 
 --
 -- Name: index_keep_disks_on_filesystem_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_keep_disks_on_filesystem_uuid ON keep_disks USING btree (filesystem_uuid);
+CREATE INDEX index_keep_disks_on_filesystem_uuid ON public.keep_disks USING btree (filesystem_uuid);
 
 
 --
 -- Name: index_keep_disks_on_last_ping_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_keep_disks_on_last_ping_at ON keep_disks USING btree (last_ping_at);
+CREATE INDEX index_keep_disks_on_last_ping_at ON public.keep_disks USING btree (last_ping_at);
 
 
 --
 -- Name: index_keep_disks_on_node_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_keep_disks_on_node_uuid ON keep_disks USING btree (node_uuid);
+CREATE INDEX index_keep_disks_on_node_uuid ON public.keep_disks USING btree (node_uuid);
 
 
 --
 -- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_keep_disks_on_owner_uuid ON keep_disks USING btree (owner_uuid);
+CREATE INDEX index_keep_disks_on_owner_uuid ON public.keep_disks USING btree (owner_uuid);
 
 
 --
 -- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_keep_disks_on_uuid ON keep_disks USING btree (uuid);
+CREATE UNIQUE INDEX index_keep_disks_on_uuid ON public.keep_disks USING btree (uuid);
 
 
 --
 -- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_keep_services_on_owner_uuid ON keep_services USING btree (owner_uuid);
+CREATE INDEX index_keep_services_on_owner_uuid ON public.keep_services USING btree (owner_uuid);
 
 
 --
 -- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_keep_services_on_uuid ON keep_services USING btree (uuid);
+CREATE UNIQUE INDEX index_keep_services_on_uuid ON public.keep_services USING btree (uuid);
 
 
 --
 -- Name: index_links_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_created_at ON links USING btree (created_at);
+CREATE INDEX index_links_on_created_at ON public.links USING btree (created_at);
 
 
 --
 -- Name: index_links_on_head_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_head_uuid ON links USING btree (head_uuid);
+CREATE INDEX index_links_on_head_uuid ON public.links USING btree (head_uuid);
 
 
 --
 -- Name: index_links_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_modified_at ON links USING btree (modified_at);
+CREATE INDEX index_links_on_modified_at ON public.links USING btree (modified_at);
 
 
 --
 -- Name: index_links_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_modified_at_uuid ON links USING btree (modified_at DESC, uuid);
+CREATE INDEX index_links_on_modified_at_uuid ON public.links USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_owner_uuid ON links USING btree (owner_uuid);
+CREATE INDEX index_links_on_owner_uuid ON public.links USING btree (owner_uuid);
 
 
 --
 -- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_links_on_tail_uuid ON links USING btree (tail_uuid);
+CREATE INDEX index_links_on_tail_uuid ON public.links USING btree (tail_uuid);
 
 
 --
 -- Name: index_links_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_links_on_uuid ON links USING btree (uuid);
+CREATE UNIQUE INDEX index_links_on_uuid ON public.links USING btree (uuid);
 
 
 --
 -- Name: index_logs_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_created_at ON logs USING btree (created_at);
+CREATE INDEX index_logs_on_created_at ON public.logs USING btree (created_at);
 
 
 --
 -- Name: index_logs_on_event_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_event_at ON logs USING btree (event_at);
+CREATE INDEX index_logs_on_event_at ON public.logs USING btree (event_at);
 
 
 --
 -- Name: index_logs_on_event_type; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_event_type ON logs USING btree (event_type);
+CREATE INDEX index_logs_on_event_type ON public.logs USING btree (event_type);
 
 
 --
 -- Name: index_logs_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_modified_at ON logs USING btree (modified_at);
+CREATE INDEX index_logs_on_modified_at ON public.logs USING btree (modified_at);
 
 
 --
 -- Name: index_logs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_modified_at_uuid ON logs USING btree (modified_at DESC, uuid);
+CREATE INDEX index_logs_on_modified_at_uuid ON public.logs USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_logs_on_object_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_object_owner_uuid ON logs USING btree (object_owner_uuid);
+CREATE INDEX index_logs_on_object_owner_uuid ON public.logs USING btree (object_owner_uuid);
 
 
 --
 -- Name: index_logs_on_object_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_object_uuid ON logs USING btree (object_uuid);
+CREATE INDEX index_logs_on_object_uuid ON public.logs USING btree (object_uuid);
 
 
 --
 -- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_owner_uuid ON logs USING btree (owner_uuid);
+CREATE INDEX index_logs_on_owner_uuid ON public.logs USING btree (owner_uuid);
 
 
 --
 -- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_logs_on_summary ON logs USING btree (summary);
+CREATE INDEX index_logs_on_summary ON public.logs USING btree (summary);
 
 
 --
 -- Name: index_logs_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_logs_on_uuid ON logs USING btree (uuid);
+CREATE UNIQUE INDEX index_logs_on_uuid ON public.logs USING btree (uuid);
 
 
 --
 -- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_nodes_on_created_at ON nodes USING btree (created_at);
+CREATE INDEX index_nodes_on_created_at ON public.nodes USING btree (created_at);
 
 
 --
 -- Name: index_nodes_on_hostname; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_nodes_on_hostname ON nodes USING btree (hostname);
+CREATE INDEX index_nodes_on_hostname ON public.nodes USING btree (hostname);
 
 
 --
 -- Name: index_nodes_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_nodes_on_modified_at ON nodes USING btree (modified_at);
+CREATE INDEX index_nodes_on_modified_at ON public.nodes USING btree (modified_at);
 
 
 --
 -- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_nodes_on_owner_uuid ON nodes USING btree (owner_uuid);
+CREATE INDEX index_nodes_on_owner_uuid ON public.nodes USING btree (owner_uuid);
 
 
 --
 -- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_nodes_on_slot_number ON nodes USING btree (slot_number);
+CREATE UNIQUE INDEX index_nodes_on_slot_number ON public.nodes USING btree (slot_number);
 
 
 --
 -- Name: index_nodes_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_nodes_on_uuid ON nodes USING btree (uuid);
+CREATE UNIQUE INDEX index_nodes_on_uuid ON public.nodes USING btree (uuid);
 
 
 --
 -- Name: index_pipeline_instances_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_instances_on_created_at ON pipeline_instances USING btree (created_at);
+CREATE INDEX index_pipeline_instances_on_created_at ON public.pipeline_instances USING btree (created_at);
 
 
 --
 -- Name: index_pipeline_instances_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_instances_on_modified_at ON pipeline_instances USING btree (modified_at);
+CREATE INDEX index_pipeline_instances_on_modified_at ON public.pipeline_instances USING btree (modified_at);
 
 
 --
 -- Name: index_pipeline_instances_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_instances_on_modified_at_uuid ON pipeline_instances USING btree (modified_at DESC, uuid);
+CREATE INDEX index_pipeline_instances_on_modified_at_uuid ON public.pipeline_instances USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_instances_on_owner_uuid ON pipeline_instances USING btree (owner_uuid);
+CREATE INDEX index_pipeline_instances_on_owner_uuid ON public.pipeline_instances USING btree (owner_uuid);
 
 
 --
 -- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON pipeline_instances USING btree (uuid);
+CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON public.pipeline_instances USING btree (uuid);
 
 
 --
 -- Name: index_pipeline_templates_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_templates_on_created_at ON pipeline_templates USING btree (created_at);
+CREATE INDEX index_pipeline_templates_on_created_at ON public.pipeline_templates USING btree (created_at);
 
 
 --
 -- Name: index_pipeline_templates_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_templates_on_modified_at ON pipeline_templates USING btree (modified_at);
+CREATE INDEX index_pipeline_templates_on_modified_at ON public.pipeline_templates USING btree (modified_at);
 
 
 --
 -- Name: index_pipeline_templates_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_templates_on_modified_at_uuid ON pipeline_templates USING btree (modified_at DESC, uuid);
+CREATE INDEX index_pipeline_templates_on_modified_at_uuid ON public.pipeline_templates USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_pipeline_templates_on_owner_uuid ON pipeline_templates USING btree (owner_uuid);
+CREATE INDEX index_pipeline_templates_on_owner_uuid ON public.pipeline_templates USING btree (owner_uuid);
 
 
 --
 -- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON pipeline_templates USING btree (uuid);
+CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON public.pipeline_templates USING btree (uuid);
 
 
 --
 -- Name: index_repositories_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_repositories_on_modified_at_uuid ON repositories USING btree (modified_at DESC, uuid);
+CREATE INDEX index_repositories_on_modified_at_uuid ON public.repositories USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_repositories_on_name ON repositories USING btree (name);
+CREATE UNIQUE INDEX index_repositories_on_name ON public.repositories USING btree (name);
 
 
 --
 -- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_repositories_on_owner_uuid ON repositories USING btree (owner_uuid);
+CREATE INDEX index_repositories_on_owner_uuid ON public.repositories USING btree (owner_uuid);
 
 
 --
 -- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_repositories_on_uuid ON repositories USING btree (uuid);
+CREATE UNIQUE INDEX index_repositories_on_uuid ON public.repositories USING btree (uuid);
 
 
 --
 -- Name: index_specimens_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_specimens_on_created_at ON specimens USING btree (created_at);
+CREATE INDEX index_specimens_on_created_at ON public.specimens USING btree (created_at);
 
 
 --
 -- Name: index_specimens_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_specimens_on_modified_at ON specimens USING btree (modified_at);
+CREATE INDEX index_specimens_on_modified_at ON public.specimens USING btree (modified_at);
 
 
 --
 -- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_specimens_on_owner_uuid ON specimens USING btree (owner_uuid);
+CREATE INDEX index_specimens_on_owner_uuid ON public.specimens USING btree (owner_uuid);
 
 
 --
 -- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_specimens_on_uuid ON specimens USING btree (uuid);
+CREATE UNIQUE INDEX index_specimens_on_uuid ON public.specimens USING btree (uuid);
 
 
 --
 -- Name: index_traits_on_name; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_traits_on_name ON traits USING btree (name);
+CREATE INDEX index_traits_on_name ON public.traits USING btree (name);
 
 
 --
 -- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_traits_on_owner_uuid ON traits USING btree (owner_uuid);
+CREATE INDEX index_traits_on_owner_uuid ON public.traits USING btree (owner_uuid);
 
 
 --
 -- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_traits_on_uuid ON traits USING btree (uuid);
+CREATE UNIQUE INDEX index_traits_on_uuid ON public.traits USING btree (uuid);
 
 
 --
 -- Name: index_users_on_created_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_users_on_created_at ON users USING btree (created_at);
+CREATE INDEX index_users_on_created_at ON public.users USING btree (created_at);
 
 
 --
 -- Name: index_users_on_modified_at; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_users_on_modified_at ON users USING btree (modified_at);
+CREATE INDEX index_users_on_modified_at ON public.users USING btree (modified_at);
 
 
 --
 -- Name: index_users_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_users_on_modified_at_uuid ON users USING btree (modified_at DESC, uuid);
+CREATE INDEX index_users_on_modified_at_uuid ON public.users USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_users_on_owner_uuid ON users USING btree (owner_uuid);
+CREATE INDEX index_users_on_owner_uuid ON public.users USING btree (owner_uuid);
 
 
 --
 -- Name: index_users_on_username; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_users_on_username ON users USING btree (username);
+CREATE UNIQUE INDEX index_users_on_username ON public.users USING btree (username);
 
 
 --
 -- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_users_on_uuid ON users USING btree (uuid);
+CREATE UNIQUE INDEX index_users_on_uuid ON public.users USING btree (uuid);
 
 
 --
 -- Name: index_virtual_machines_on_hostname; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_virtual_machines_on_hostname ON virtual_machines USING btree (hostname);
+CREATE INDEX index_virtual_machines_on_hostname ON public.virtual_machines USING btree (hostname);
 
 
 --
 -- Name: index_virtual_machines_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_virtual_machines_on_modified_at_uuid ON virtual_machines USING btree (modified_at DESC, uuid);
+CREATE INDEX index_virtual_machines_on_modified_at_uuid ON public.virtual_machines USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_virtual_machines_on_owner_uuid ON virtual_machines USING btree (owner_uuid);
+CREATE INDEX index_virtual_machines_on_owner_uuid ON public.virtual_machines USING btree (owner_uuid);
 
 
 --
 -- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btree (uuid);
+CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON public.virtual_machines USING btree (uuid);
 
 
 --
 -- Name: index_workflows_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_workflows_on_modified_at_uuid ON workflows USING btree (modified_at DESC, uuid);
+CREATE INDEX index_workflows_on_modified_at_uuid ON public.workflows USING btree (modified_at DESC, uuid);
 
 
 --
 -- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX index_workflows_on_owner_uuid ON workflows USING btree (owner_uuid);
+CREATE INDEX index_workflows_on_owner_uuid ON public.workflows USING btree (owner_uuid);
 
 
 --
 -- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX index_workflows_on_uuid ON workflows USING btree (uuid);
+CREATE UNIQUE INDEX index_workflows_on_uuid ON public.workflows USING btree (uuid);
 
 
 --
 -- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX job_tasks_search_index ON job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
+CREATE INDEX job_tasks_search_index ON public.job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
 
 
 --
 -- Name: jobs_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX jobs_full_text_search_idx ON jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text))));
+CREATE INDEX jobs_full_text_search_idx ON public.jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text))));
 
 
 --
 -- Name: jobs_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX jobs_search_index ON jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);
+CREATE INDEX jobs_search_index ON public.jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);
 
 
 --
 -- Name: keep_disks_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX keep_disks_search_index ON keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);
+CREATE INDEX keep_disks_search_index ON public.keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);
 
 
 --
 -- Name: keep_services_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX keep_services_search_index ON keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
+CREATE INDEX keep_services_search_index ON public.keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
 
 
 --
 -- Name: links_index_on_properties; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX links_index_on_properties ON links USING gin (properties);
+CREATE INDEX links_index_on_properties ON public.links USING gin (properties);
 
 
 --
 -- Name: links_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX links_search_index ON links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);
+CREATE INDEX links_search_index ON public.links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);
 
 
 --
 -- Name: links_tail_name_unique_if_link_class_name; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
+CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON public.links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
 
 
 --
 -- Name: logs_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX logs_search_index ON logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
+CREATE INDEX logs_search_index ON public.logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
 
 
 --
 -- Name: nodes_index_on_info; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX nodes_index_on_info ON nodes USING gin (info);
+CREATE INDEX nodes_index_on_info ON public.nodes USING gin (info);
 
 
 --
 -- Name: nodes_index_on_properties; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX nodes_index_on_properties ON nodes USING gin (properties);
+CREATE INDEX nodes_index_on_properties ON public.nodes USING gin (properties);
 
 
 --
 -- Name: nodes_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX nodes_search_index ON nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
+CREATE INDEX nodes_search_index ON public.nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
 
 
 --
 -- Name: permission_target_trashed; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX permission_target_trashed ON materialized_permission_view USING btree (trashed, target_uuid);
+CREATE INDEX permission_target_trashed ON public.materialized_permission_view USING btree (trashed, target_uuid);
 
 
 --
 -- Name: permission_target_user_trashed_level; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX permission_target_user_trashed_level ON materialized_permission_view USING btree (user_uuid, trashed, perm_level);
+CREATE INDEX permission_target_user_trashed_level ON public.materialized_permission_view USING btree (user_uuid, trashed, perm_level);
 
 
 --
 -- Name: pipeline_instances_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_instances_full_text_search_idx ON public.pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
 
 
 --
 -- Name: pipeline_instances_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX pipeline_instances_search_index ON pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);
+CREATE INDEX pipeline_instances_search_index ON public.pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);
 
 
 --
 -- Name: pipeline_template_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON pipeline_templates USING btree (owner_uuid, name);
+CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON public.pipeline_templates USING btree (owner_uuid, name);
 
 
 --
 -- Name: pipeline_templates_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_templates_full_text_search_idx ON public.pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
 
 
 --
 -- Name: pipeline_templates_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX pipeline_templates_search_index ON pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX pipeline_templates_search_index ON public.pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
 
 
 --
 -- Name: repositories_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX repositories_search_index ON repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX repositories_search_index ON public.repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
 
 
 --
 -- Name: specimens_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX specimens_search_index ON specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);
+CREATE INDEX specimens_search_index ON public.specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);
 
 
 --
 -- Name: traits_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX traits_search_index ON traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX traits_search_index ON public.traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
 
 
 --
 -- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE UNIQUE INDEX unique_schema_migrations ON schema_migrations USING btree (version);
+CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version);
 
 
 --
 -- Name: users_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX users_search_index ON users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
+CREATE INDEX users_search_index ON public.users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
 
 
 --
 -- Name: virtual_machines_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX virtual_machines_search_index ON virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
+CREATE INDEX virtual_machines_search_index ON public.virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
 
 
 --
 -- Name: workflows_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX workflows_full_text_search_idx ON workflows USING gin (to_tsvector('english'::regconfig, (((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text))));
+CREATE INDEX workflows_full_text_search_idx ON public.workflows USING gin (to_tsvector('english'::regconfig, (((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text))));
 
 
 --
 -- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX workflows_search_idx ON workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX workflows_search_idx ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
 
 
 --
@@ -3116,6 +3143,19 @@ INSERT INTO schema_migrations (version) VALUES ('20180501182859');
 
 INSERT INTO schema_migrations (version) VALUES ('20180514135529');
 
+INSERT INTO schema_migrations (version) VALUES ('20180607175050');
+
 INSERT INTO schema_migrations (version) VALUES ('20180608123145');
 
-INSERT INTO schema_migrations (version) VALUES ('20180607175050');
+INSERT INTO schema_migrations (version) VALUES ('20180806133039');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820130357');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820132617');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820135808');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824152014');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824155207');
+
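The structure.sql hunks above make two kinds of change: every index is now schema-qualified (public.containers rather than containers), which matches what newer pg_dump versions emit after the CVE-2018-1058 search_path hardening, and the containers table gains new scheduling and reuse indexes. A minimal Rails-migration sketch of the two novel ones follows; the class name is hypothetical (the shipped work lives in the 20180806133039..20180824155207 migrations recorded above), and raw SQL is used because expression indexes like these are awkward to declare through add_index in this Rails version.

    # Sketch only, not the shipped migration.
    class AddContainerSchedulingIndexes < ActiveRecord::Migration
      def up
        # Expression index: the dispatcher can scan queued containers
        # (state plus "has nonzero priority") without touching finished rows.
        execute "CREATE INDEX index_containers_on_queued_state
                   ON containers (state, (priority > 0))"
        # md5() keeps long text columns within btree row-size limits while
        # still supporting exact-match container-reuse lookups.
        execute "CREATE INDEX index_containers_on_reuse_columns
                   ON containers (md5(command), cwd, md5(environment),
                                  output_path, container_image, md5(mounts),
                                  secret_mounts_md5, md5(runtime_constraints))"
      end

      def down
        execute "DROP INDEX index_containers_on_queued_state"
        execute "DROP INDEX index_containers_on_reuse_columns"
      end
    end
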
index 73ad7606cc879ef58f7569c960196191c7fb7721..449d7d51626a1963ab39e83e3e95998f50d21b1e 100644 (file)
@@ -29,6 +29,7 @@ class CrunchDispatch
     @docker_bin = ENV['CRUNCH_JOB_DOCKER_BIN']
     @docker_run_args = ENV['CRUNCH_JOB_DOCKER_RUN_ARGS']
     @cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
+    @srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
 
     @arvados_internal = Rails.configuration.git_internal_dir
     if not File.exist? @arvados_internal
@@ -419,6 +420,10 @@ class CrunchDispatch
         cmd_args += ['--docker-run-args', @docker_run_args]
       end
 
+      if @srun_sync_timeout
+        cmd_args += ['--srun-sync-timeout', @srun_sync_timeout]
+      end
+
       if have_job_lock?(job)
         cmd_args << "--force-unlock"
       end
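The hunks above thread a new optional knob through crunch_dispatch: if the operator exports CRUNCH_SRUN_SYNC_TIMEOUT, its value is forwarded to crunch-job as --srun-sync-timeout; when unset, no flag is passed and the existing default applies. A hedged sketch of the pattern (the value shown and its units are assumptions, not documented in this diff):

    # Sketch: operator opts in via the environment; unset means "keep default".
    ENV['CRUNCH_SRUN_SYNC_TIMEOUT'] = '300'
    srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
    cmd_args = ['crunch-job']
    # Only append the flag when configured, mirroring the dispatcher code above.
    cmd_args += ['--srun-sync-timeout', srun_sync_timeout] if srun_sync_timeout
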
index 247708be47812d18907c0c6c6a028e85e3338bb3..e7cb21fc77e579dd75bd89543477425f0af1746b 100644 (file)
@@ -50,7 +50,7 @@ module LoadParam
 
   # Load params[:limit], params[:offset] and params[:order]
   # into @limit, @offset, @orders
-  def load_limit_offset_order_params
+  def load_limit_offset_order_params(fill_table_names: true)
     if params[:limit]
       unless params[:limit].to_s.match(/^\d+$/)
         raise ArgumentError.new("Invalid value for limit parameter")
@@ -96,10 +96,14 @@ module LoadParam
         # has used set_table_name to use an alternate table name from the Rails standard.
         # I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
         # would be a place to start if this ever becomes necessary.
-        if attr.match(/^[a-z][_a-z0-9]+$/) and
-            model_class.columns.collect(&:name).index(attr) and
-            ['asc','desc'].index direction.downcase
-          @orders << "#{table_name}.#{attr} #{direction.downcase}"
+        if (attr.match(/^[a-z][_a-z0-9]+$/) &&
+            model_class.columns.collect(&:name).index(attr) &&
+            ['asc','desc'].index(direction.downcase))
+          if fill_table_names
+            @orders << "#{table_name}.#{attr} #{direction.downcase}"
+          else
+            @orders << "#{attr} #{direction.downcase}"
+          end
         elsif attr.match(/^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/) and
             ['asc','desc'].index(direction.downcase) and
             ActiveRecord::Base.connection.tables.include?($1) and
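load_limit_offset_order_params now takes a fill_table_names: keyword so callers can request ORDER BY terms without the table prefix, presumably for queries (such as the new groups#shared listing) built over UNIONs or aliased subqueries where a qualified "collections.name" would not resolve. A sketch of the two behaviors, assuming params[:order] = "name desc" against the collections table:

    # Hypothetical call sites; @orders is the accumulator from the module above.
    load_limit_offset_order_params                           # @orders << "collections.name desc"
    load_limit_offset_order_params(fill_table_names: false)  # @orders << "name desc"
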
index f4da283d746fcbecba6a03ce54fa041f0e3f58de..f78a3d34dc5d00b2d47ac8b4f9634d319c5462bc 100644 (file)
@@ -7,6 +7,12 @@ class SafeJSON
     return Oj.dump(o, mode: :compat)
   end
   def self.load(s)
+    if s.nil? or s == ''
+      # Oj 2.18.5 returned nil for nil/empty input; 3.6.4 does not, and Oj
+      # was upgraded for performance reasons (see #13803 and
+      # https://github.com/ohler55/oj/issues/441), so keep returning nil here.
+      return nil
+    end
     Oj.strict_load(s, symbol_keys: false)
   end
 end
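SafeJSON.load now short-circuits nil and empty input so callers keep the return value they relied on under Oj 2.x. A behavior sketch under the guard above (the parse-error case is assumed from Oj's strict mode, not shown in this diff):

    SafeJSON.load(nil)        #=> nil, as under Oj 2.18.5
    SafeJSON.load('')         #=> nil
    SafeJSON.load('{"a":1}')  #=> {"a"=>1}
    SafeJSON.load('{a:1}')    # expected to raise Oj::ParseError (strict mode)
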
index 92bd7cf872cfeca1c53d38c5ea05d7836e929f4f..2073d8b1bacccfaa0422643a34ddfe5ed0144461 100644 (file)
@@ -275,6 +275,13 @@ user_foo_in_sharing_group:
   api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u
   expires_at: 2038-01-01 00:00:00
 
+user_bar_in_sharing_group:
+  uuid: zzzzz-gj3su-62hryf5fht531mz
+  api_client: untrusted
+  user: user_bar_in_sharing_group
+  api_token: 5vy55akwq85vghh80wc2cuxl4p8psay73lkpqf5c2cxvp6rmm6
+  expires_at: 2038-01-01 00:00:00
+
 user1_with_load:
   uuid: zzzzz-gj3su-357z32aux8dg2s1
   api_client: untrusted
index 43a4a13d4e7964b3562d6a0558b70185af5d0f39..5d3531eead8fb5a90c7ef4b7ef750a937da6ee90 100644 (file)
@@ -26,7 +26,7 @@ running:
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   name: running
   state: Committed
-  priority: 1
+  priority: 501
   created_at: <%= 2.minute.ago.to_s(:db) %>
   updated_at: <%= 1.minute.ago.to_s(:db) %>
   modified_at: <%= 1.minute.ago.to_s(:db) %>
@@ -304,7 +304,7 @@ completed_with_input_mounts:
   container_image: test
   cwd: test
   output_path: test
-  command: ["echo", "hello"]
+  command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
   runtime_constraints:
     vcpus: 1
     ram: 123
index 3f5d3a1e9d4de95640fad9938989b8f86305e608..757adcee1b979af4086d937cc928c1abb5042a1e 100644 (file)
@@ -97,7 +97,7 @@ completed:
   log: ea10d51bcf88862dbcc36eb292017dfd+45
   output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
   output_path: test
-  command: ["echo", "hello"]
+  command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
index 68cc76949afc5b21e0f7586fb777944788a9f6cd..92a1ced52841942b60f3898a58b5818d53b3b14f 100644 (file)
@@ -152,6 +152,14 @@ group_for_sharing_tests:
   description: Users who can share objects with each other
   group_class: role
 
+project_owned_by_foo:
+  uuid: zzzzz-j7d0g-lsjm0ibr0ydwpzx
+  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+  name: project_owned_by_foo
+  group_class: project
+
 empty_project:
   uuid: zzzzz-j7d0g-9otoxmrksam74q6
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
index b2cd366540d81c36fc08c3b662b9cc3ed763ebd2..282e09049e63beab2e591ac71f38b47e9484261d 100644 (file)
@@ -32,7 +32,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
 
     post :create, {
            container_request: minimal_cr.merge(
-             secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}}),
+             secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}}),
          }
     assert_response :success
 
@@ -50,7 +50,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
     patch :update, {
             id: req.uuid,
             container_request: {
-              secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}},
+              secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}},
             },
           }
     assert_response :success
@@ -65,7 +65,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
   test "update without deleting secret_mounts" do
     authorize_with :active
     req = container_requests(:uncommitted)
-    req.update_attributes!(secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}})
+    req.update_attributes!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})
 
     patch :update, {
             id: req.uuid,
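The test fixes above correct the secret_mounts schema: each mount entry is discriminated by a "kind" key, and "type" was never the validated key. A well-formed fragment, following the corrected tests:

    # Matches the corrected tests; keys beyond "kind"/"content" are outside
    # the scope of this diff.
    container_request = {
      secret_mounts: {
        '/foo' => {'kind' => 'json', 'content' => 'bar'},
      },
    }
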
index 3442eda2447aa1e75ecc254b3ffcfb2392853a8f..05bcec2e268f44c9a83bf754b3a51a3796c96b9b 100644 (file)
@@ -116,6 +116,25 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     end
   end
 
+  test "list trashed collections and projects" do
+    authorize_with :active
+    get(:contents, {
+          format: :json,
+          include_trash: true,
+          filters: [
+            ['uuid', 'is_a', ['arvados#collection', 'arvados#group']],
+            ['is_trashed', '=', true],
+          ],
+          limit: 10000,
+        })
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, groups(:trashed_project).uuid
+    refute_includes found_uuids, groups(:aproject).uuid
+    assert_includes found_uuids, collections(:expired_collection).uuid
+    refute_includes found_uuids, collections(:w_a_z_file).uuid
+  end
+
   test "list objects in home project" do
     authorize_with :active
     get :contents, {
@@ -139,64 +158,59 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     assert_includes ids, collections(:baz_file_in_asubproject).uuid
   end
 
-  [['asc', :<=],
-   ['desc', :>=]].each do |order, operator|
-    test "user with project read permission can sort project collections #{order}" do
+  [
+    ['collections.name', 'asc', :<=, "name"],
+    ['collections.name', 'desc', :>=, "name"],
+    ['name', 'asc', :<=, "name"],
+    ['name', 'desc', :>=, "name"],
+    ['collections.created_at', 'asc', :<=, "created_at"],
+    ['collections.created_at', 'desc', :>=, "created_at"],
+    ['created_at', 'asc', :<=, "created_at"],
+    ['created_at', 'desc', :>=, "created_at"],
+  ].each do |column, order, operator, field|
+    test "user with project read permission can sort projects on #{column} #{order}" do
       authorize_with :project_viewer
       get :contents, {
         id: groups(:asubproject).uuid,
         format: :json,
         filters: [['uuid', 'is_a', "arvados#collection"]],
-        order: "collections.name #{order}"
+        order: "#{column} #{order}"
       }
-      sorted_names = json_response['items'].collect { |item| item["name"] }
-      # Here we avoid assuming too much about the database
-      # collation. Both "alice"<"Bob" and "alice">"Bob" can be
-      # correct. Hopefully it _is_ safe to assume that if "a" comes
-      # before "b" in the ascii alphabet, "aX">"bY" is never true for
-      # any strings X and Y.
-      reliably_sortable_names = sorted_names.select do |name|
-        name[0] >= 'a' and name[0] <= 'z'
-      end.uniq do |name|
-        name[0]
-      end
-      # Preserve order of sorted_names. But do not use &=. If
-      # sorted_names has out-of-order duplicates, we want to preserve
-      # them here, so we can detect them and fail the test below.
-      sorted_names.select! do |name|
-        reliably_sortable_names.include? name
-      end
-      actually_checked_anything = false
-      previous = nil
-      sorted_names.each do |entry|
-        if previous
-          assert_operator(previous, operator, entry,
-                          "Entries sorted incorrectly.")
-          actually_checked_anything = true
+      sorted_values = json_response['items'].collect { |item| item[field] }
+      if field == "name"
+        # Here we avoid assuming too much about the database
+        # collation. Both "alice"<"Bob" and "alice">"Bob" can be
+        # correct. Hopefully it _is_ safe to assume that if "a" comes
+        # before "b" in the ascii alphabet, "aX">"bY" is never true for
+        # any strings X and Y.
+        reliably_sortable_names = sorted_values.select do |name|
+          name[0] >= 'a' && name[0] <= 'z'
+        end.uniq do |name|
+          name[0]
+        end
+        # Preserve order of sorted_values. But do not use &=. If
+        # sorted_values has out-of-order duplicates, we want to preserve
+        # them here, so we can detect them and fail the test below.
+        sorted_values.select! do |name|
+          reliably_sortable_names.include? name
         end
-        previous = entry
       end
-      assert actually_checked_anything, "Didn't even find two names to compare."
+      assert_sorted(operator, sorted_values)
     end
   end
 
-  test 'list objects across multiple projects' do
-    authorize_with :project_viewer
-    get :contents, {
-      format: :json,
-      filters: [['uuid', 'is_a', 'arvados#specimen']]
-    }
-    assert_response :success
-    found_uuids = json_response['items'].collect { |i| i['uuid'] }
-    [[:in_aproject, true],
-     [:in_asubproject, true],
-     [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
-      if should_find
-        assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
-      else
-        refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+  def assert_sorted(operator, sorted_items)
+    actually_checked_anything = false
+    previous = nil
+    sorted_items.each do |entry|
+      if !previous.nil?
+        assert_operator(previous, operator, entry,
+                        "Entries sorted incorrectly.")
+        actually_checked_anything = true
       end
+      previous = entry
     end
+    assert actually_checked_anything, "Didn't even find two items to compare."
   end
 
   # Even though the project_viewer tests go through other controllers,
@@ -705,4 +719,61 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
       assert_not_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first
     end
   end
+
+  test 'get shared owned by another user' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], users(:user_foo_in_sharing_group).uuid
+  end
+
+  test 'get shared, owned by unreadable project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 0, json_response['included'].length
+  end
+
+  test 'get shared, owned by non-project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:group_for_sharing_tests).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], groups(:group_for_sharing_tests).uuid
+  end
+
 end
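
Together these three tests pin down the contract of the groups `shared` endpoint with include=owner_uuid: `items` carries the readable projects, and `included` carries each owner object only when the caller can read it (a user or a group), staying empty when the owner is an unreadable project. A hedged sketch of calling it with the Go SDK's RequestAndDecode (the response shape here is assumed from these tests):

    package main

    import (
        "fmt"
        "log"

        "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
        client := arvados.NewClientFromEnv()
        var resp struct {
            Items    []map[string]interface{} `json:"items"`
            Included []map[string]interface{} `json:"included"`
        }
        err := client.RequestAndDecode(&resp, "GET", "arvados/v1/groups/shared", nil,
            map[string]interface{}{
                "filters": [][]interface{}{{"group_class", "=", "project"}},
                "include": "owner_uuid",
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d shared projects, %d readable owners\n", len(resp.Items), len(resp.Included))
    }
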
index ee2f699339f8a66fbc2efc6bd457b33b8c41411f..5109ea46a642a0853528d40986c2314610f050a1 100644 (file)
@@ -36,11 +36,11 @@ class CrossOriginTest < ActionDispatch::IntegrationTest
   ['/arvados/v1/collections',
    '/arvados/v1/users',
    '/arvados/v1/api_client_authorizations'].each do |path|
-    test "CORS headers are set and body is stub at OPTIONS #{path}" do
+    test "CORS headers are set and body is empty at OPTIONS #{path}" do
       options path, {}, {}
       assert_response :success
       assert_cors_headers
-      assert_equal '-', response.body
+      assert_equal '', response.body
     end
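
The renamed test reflects a behavior change elsewhere in this commit: an OPTIONS preflight now answers with CORS headers and a genuinely empty body instead of the old "-" stub. A Go sketch of that contract (not the Rails code; the header values are illustrative):

    package main

    import "net/http"

    // corsPreflight answers an OPTIONS preflight: CORS headers, empty 200 body.
    func corsPreflight(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Access-Control-Allow-Origin", "*")
        w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, PUT, POST, DELETE, OPTIONS")
        // Returning without writing anything yields status 200 and body "".
    }

    func main() {
        http.HandleFunc("/", corsPreflight)
        http.ListenAndServe(":8080", nil)
    }
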
 
     test "CORS headers are set at authenticated GET #{path}" do
index c834250cb6caa89c28ff25ad942978dd14399949..6dbaa7550f55a8e49b035e6092c331304c6e4edb 100644 (file)
@@ -143,7 +143,7 @@ class ActiveSupport::TestCase
   end
 
   def self.slow_test(name, &block)
-    define_method(name, block) unless skip_slow_tests?
+    test(name, &block) unless skip_slow_tests?
   end
 end
 
index f266c096b475ca6306c9086d22029bdc6e22cb3e..81b49ff4fcce525b5e7fba88ff0c6f78087e7686 100644 (file)
@@ -69,7 +69,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     cr.container_image = "img3"
     cr.cwd = "/tmp3"
     cr.environment = {"BUP" => "BOP"}
-    cr.mounts = {"BAR" => "BAZ"}
+    cr.mounts = {"BAR" => {"kind" => "BAZ"}}
     cr.output_path = "/tmp4"
     cr.priority = 2
     cr.runtime_constraints = {"vcpus" => 4}
@@ -81,29 +81,33 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   [
-    {"vcpus" => 1},
-    {"vcpus" => 1, "ram" => nil},
-    {"vcpus" => 0, "ram" => 123},
-    {"vcpus" => "1", "ram" => "123"}
-  ].each do |invalid_constraints|
-    test "Create with #{invalid_constraints}" do
+    {"runtime_constraints" => {"vcpus" => 1}},
+    {"runtime_constraints" => {"vcpus" => 1, "ram" => nil}},
+    {"runtime_constraints" => {"vcpus" => 0, "ram" => 123}},
+    {"runtime_constraints" => {"vcpus" => "1", "ram" => "123"}},
+    {"mounts" => {"FOO" => "BAR"}},
+    {"mounts" => {"FOO" => {}}},
+    {"mounts" => {"FOO" => {"kind" => "tmp", "capacity" => 42.222}}},
+    {"command" => ["echo", 55]},
+    {"environment" => {"FOO" => 55}}
+  ].each do |value|
+    test "Create with invalid #{value}" do
       set_user_from_auth :active
       assert_raises(ActiveRecord::RecordInvalid) do
-        cr = create_minimal_req!(state: "Committed",
-                                 priority: 1,
-                                 runtime_constraints: invalid_constraints)
+        cr = create_minimal_req!({state: "Committed",
+               priority: 1}.merge(value))
         cr.save!
       end
     end
 
-    test "Update with #{invalid_constraints}" do
+    test "Update with invalid #{value}" do
       set_user_from_auth :active
       cr = create_minimal_req!(state: "Uncommitted", priority: 1)
       cr.save!
       assert_raises(ActiveRecord::RecordInvalid) do
         cr = ContainerRequest.find_by_uuid cr.uuid
-        cr.update_attributes!(state: "Committed",
-                              runtime_constraints: invalid_constraints)
+        cr.update_attributes!({state: "Committed",
+                               priority: 1}.merge(value))
       end
     end
   end
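
The expanded table of invalid attributes encodes the validation rules this commit tightens: every mount value must be a hash carrying a string "kind", "capacity" must be an integer, command elements and environment values must be strings, and (per the second test) the same checks now run on update, not just create. The shapes, mirrored as Go literals purely for illustration:

    package main

    import "fmt"

    func main() {
        // Valid: each mount is a hash with a string "kind".
        valid := map[string]interface{}{
            "BAR": map[string]interface{}{"kind": "BAZ"},
        }
        // Invalid: non-integer capacity (42.222), per the cases above.
        invalid := map[string]interface{}{
            "FOO": map[string]interface{}{"kind": "tmp", "capacity": 42.222},
        }
        fmt.Println(valid, invalid)
    }
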
@@ -375,7 +379,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   [
-    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 1],
+    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 501],
     ['active_no_prefs', nil, 0],
   ].each do |token, expected, expected_priority|
     test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
index 7ee5921e0ca37842168608f9eafa63d16dd1d90f..83ab59b607b31a9e09b70e29c0c0792f09e5c559 100644 (file)
@@ -84,7 +84,7 @@ class ContainerTest < ActiveSupport::TestCase
   test "Container create" do
     act_as_system_user do
       c, _ = minimal_new(environment: {},
-                      mounts: {"BAR" => "FOO"},
+                      mounts: {"BAR" => {"kind" => "FOO"}},
                       output_path: "/tmp",
                       priority: 1,
                       runtime_constraints: {"vcpus" => 1, "ram" => 1})
@@ -101,7 +101,7 @@ class ContainerTest < ActiveSupport::TestCase
   test "Container valid priority" do
     act_as_system_user do
       c, _ = minimal_new(environment: {},
-                      mounts: {"BAR" => "FOO"},
+                      mounts: {"BAR" => {"kind" => "FOO"}},
                       output_path: "/tmp",
                       priority: 1,
                       runtime_constraints: {"vcpus" => 1, "ram" => 1})
@@ -133,8 +133,8 @@ class ContainerTest < ActiveSupport::TestCase
 
 
   test "Container serialized hash attributes sorted before save" do
-    env = {"C" => 3, "B" => 2, "A" => 1}
-    m = {"F" => {"kind" => 3}, "E" => {"kind" => 2}, "D" => {"kind" => 1}}
+    env = {"C" => "3", "B" => "2", "A" => "1"}
+    m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
     rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1}
     c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
     assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
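
The fixture change above (integer values become strings) matches the stricter serialized-attribute validation, while the assertion itself covers deep_sort_hash: hash attributes are canonicalized before save so that logically identical containers serialize identically, presumably so they can be compared byte-for-byte (e.g., for reuse lookups). Go's encoding/json gives maps the same property, since Marshal always emits map keys in sorted order; a minimal illustration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        a, _ := json.Marshal(map[string]string{"C": "3", "B": "2", "A": "1"})
        b, _ := json.Marshal(map[string]string{"A": "1", "B": "2", "C": "3"})
        fmt.Println(string(a) == string(b)) // true: keys are emitted sorted
    }
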
index 279327ba18811ba8ad6339600cc124460f2fc35c..fc10393626be103c17b01b5b1bfde615ed470bc9 100644 (file)
@@ -10,7 +10,6 @@ import (
        "context"
        "flag"
        "fmt"
-       "log"
        "os"
        "os/exec"
        "os/signal"
@@ -21,6 +20,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/Sirupsen/logrus"
 )
 
 var version = "dev"
@@ -28,7 +28,7 @@ var version = "dev"
 func main() {
        err := doMain()
        if err != nil {
-               log.Fatalf("%q", err)
+               logrus.Fatalf("%q", err)
        }
 }
 
@@ -40,6 +40,14 @@ var (
 )
 
 func doMain() error {
+       logger := logrus.StandardLogger()
+       if os.Getenv("DEBUG") != "" {
+               logger.SetLevel(logrus.DebugLevel)
+       }
+       logger.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       }
+
        flags := flag.NewFlagSet("crunch-dispatch-local", flag.ExitOnError)
 
        pollInterval := flags.Int(
@@ -66,18 +74,19 @@ func doMain() error {
                return nil
        }
 
-       log.Printf("crunch-dispatch-local %s started", version)
+       logger.Printf("crunch-dispatch-local %s started", version)
 
        runningCmds = make(map[string]*exec.Cmd)
 
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
-               log.Printf("Error making Arvados client: %v", err)
+               logger.Errorf("error making Arvados client: %v", err)
                return err
        }
        arv.Retries = 25
 
        dispatcher := dispatch.Dispatcher{
+               Logger:       logger,
                Arv:          arv,
                RunContainer: run,
                PollPeriod:   time.Duration(*pollInterval) * time.Second,
@@ -92,7 +101,7 @@ func doMain() error {
        c := make(chan os.Signal, 1)
        signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
        sig := <-c
-       log.Printf("Received %s, shutting down", sig)
+       logger.Printf("Received %s, shutting down", sig)
        signal.Stop(c)
 
        cancel()
@@ -138,7 +147,7 @@ func run(dispatcher *dispatch.Dispatcher,
                cmd.Stderr = os.Stderr
                cmd.Stdout = os.Stderr
 
-               log.Printf("Starting container %v", uuid)
+               dispatcher.Logger.Printf("starting container %v", uuid)
 
                // Add this crunch job to the list of runningCmds only if we
                // succeed in starting crunch-run.
@@ -146,7 +155,7 @@ func run(dispatcher *dispatch.Dispatcher,
                runningCmdsMutex.Lock()
                if err := startCmd(container, cmd); err != nil {
                        runningCmdsMutex.Unlock()
-                       log.Printf("Error starting %v for %v: %q", *crunchRunCommand, uuid, err)
+                       dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
                        dispatcher.UpdateState(uuid, dispatch.Cancelled)
                } else {
                        runningCmds[uuid] = cmd
@@ -157,9 +166,9 @@ func run(dispatcher *dispatch.Dispatcher,
 
                        go func() {
                                if _, err := cmd.Process.Wait(); err != nil {
-                                       log.Printf("Error while waiting for crunch job to finish for %v: %q", uuid, err)
+                                       dispatcher.Logger.Warnf("error while waiting for crunch job to finish for %v: %q", uuid, err)
                                }
-                               log.Printf("sending done")
+                               dispatcher.Logger.Debugf("sending done")
                                done <- struct{}{}
                        }()
 
@@ -171,14 +180,14 @@ func run(dispatcher *dispatch.Dispatcher,
                                case c := <-status:
                                        // Interrupt the child process if priority changes to 0
                                        if (c.State == dispatch.Locked || c.State == dispatch.Running) && c.Priority == 0 {
-                                               log.Printf("Sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
+                                               dispatcher.Logger.Printf("sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
                                                cmd.Process.Signal(os.Interrupt)
                                        }
                                }
                        }
                        close(done)
 
-                       log.Printf("Finished container run for %v", uuid)
+                       dispatcher.Logger.Printf("finished container run for %v", uuid)
 
                        // Remove the crunch job from runningCmds
                        runningCmdsMutex.Lock()
@@ -191,11 +200,11 @@ func run(dispatcher *dispatch.Dispatcher,
        // If the container is not finalized, then change it to "Cancelled".
        err := dispatcher.Arv.Get("containers", uuid, nil, &container)
        if err != nil {
-               log.Printf("Error getting final container state: %v", err)
+               dispatcher.Logger.Warnf("error getting final container state: %v", err)
        }
        if container.State == dispatch.Locked || container.State == dispatch.Running {
-               log.Printf("After %s process termination, container state for %v is %q.  Updating it to %q",
-                       *crunchRunCommand, container.State, uuid, dispatch.Cancelled)
+               dispatcher.Logger.Warnf("after %q process termination, container state for %v is %q; updating it to %q",
+                       *crunchRunCommand, uuid, container.State, dispatch.Cancelled)
                dispatcher.UpdateState(uuid, dispatch.Cancelled)
        }
 
@@ -203,5 +212,5 @@ func run(dispatcher *dispatch.Dispatcher,
        for range status {
        }
 
-       log.Printf("Finalized container %v", uuid)
+       dispatcher.Logger.Printf("finalized container %v", uuid)
 }
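
The conversion above establishes the pattern repeated in crunch-dispatch-slurm below: build one logrus logger with a JSON formatter and nanosecond timestamps, raise it to debug level when DEBUG is set, and hand the same logger to the dispatch library so every component logs through one sink. The setup in isolation, using only calls that appear in this diff:

    package main

    import (
        "os"

        "github.com/Sirupsen/logrus"
    )

    func newLogger() *logrus.Logger {
        logger := logrus.StandardLogger()
        if os.Getenv("DEBUG") != "" {
            logger.SetLevel(logrus.DebugLevel)
        }
        logger.Formatter = &logrus.JSONFormatter{
            // Go reference time, rendered with nanosecond precision and zone offset.
            TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
        }
        return logger
    }

    func main() {
        newLogger().Printf("dispatcher example started")
    }
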
index 1a2787c25c625d3af04ab51655879ab13c9cbf82..6da17ea86cc06ccdbce21c37d2b703b1f937fded 100644 (file)
@@ -8,12 +8,11 @@ import (
        "bytes"
        "context"
        "io"
-       "log"
        "net/http"
        "net/http/httptest"
        "os"
        "os/exec"
-       "strings"
+       "regexp"
        "testing"
        "time"
 
@@ -21,6 +20,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/Sirupsen/logrus"
        . "gopkg.in/check.v1"
 )
 
@@ -41,6 +41,7 @@ func (s *TestSuite) SetUpSuite(c *C) {
        initialArgs = os.Args
        arvadostest.StartAPI()
        runningCmds = make(map[string]*exec.Cmd)
+       logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
 }
 
 func (s *TestSuite) TearDownSuite(c *C) {
@@ -110,7 +111,7 @@ func (s *MockArvadosServerSuite) Test_APIErrorGettingContainers(c *C) {
        apiStubResponses := make(map[string]arvadostest.StubResponse)
        apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
 
-       testWithServerStub(c, apiStubResponses, "echo", "Error getting list of containers")
+       testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
 }
 
 func (s *MockArvadosServerSuite) Test_APIErrorUpdatingContainerState(c *C) {
@@ -133,7 +134,7 @@ func (s *MockArvadosServerSuite) Test_ContainerStillInRunningAfterRun(c *C) {
                arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Running", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
 
        testWithServerStub(c, apiStubResponses, "echo",
-               `After echo process termination, container state for Running is "zzzzz-dz642-xxxxxxxxxxxxxx2".  Updating it to "Cancelled"`)
+               `after \\"echo\\" process termination, container state for zzzzz-dz642-xxxxxxxxxxxxxx2 is \\"Running\\"; updating it to \\"Cancelled\\"`)
 }
 
 func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
@@ -144,7 +145,7 @@ func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
        apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3/lock"] =
                arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Locked", "priority":1}`)}
 
-       testWithServerStub(c, apiStubResponses, "nosuchcommand", "Error starting nosuchcommand for zzzzz-dz642-xxxxxxxxxxxxxx3")
+       testWithServerStub(c, apiStubResponses, "nosuchcommand", `error starting \\"nosuchcommand\\" for zzzzz-dz642-xxxxxxxxxxxxxx3`)
 }
 
 func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
@@ -165,15 +166,15 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        }
 
        buf := bytes.NewBuffer(nil)
-       log.SetOutput(io.MultiWriter(buf, os.Stderr))
-       defer log.SetOutput(os.Stderr)
+       logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+       defer logrus.SetOutput(os.Stderr)
 
        *crunchRunCommand = crunchCmd
 
        ctx, cancel := context.WithCancel(context.Background())
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
-               PollPeriod: time.Duration(1) * time.Second,
+               PollPeriod: time.Second / 20,
                RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
                        run(d, c, s)
                        cancel()
@@ -186,8 +187,9 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
                return cmd.Start()
        }
 
+       re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
        go func() {
-               for i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {
+               for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
                        time.Sleep(100 * time.Millisecond)
                }
                cancel()
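
Because the JSON formatter escapes quotes in log output, the expected strings above became regexp fragments in which \\" matches an escaped quote; the test then polls the captured log buffer until the pattern shows up or the attempt budget runs out. That waiting loop, extracted as a sketch:

    package main

    import (
        "bytes"
        "fmt"
        "regexp"
        "time"
    )

    // waitForLog mirrors the polling loop above: give the dispatcher up to
    // ~8 seconds (80 x 100ms) to emit a line matching pattern.
    func waitForLog(buf *bytes.Buffer, pattern string) bool {
        re := regexp.MustCompile(`(?ms).*` + pattern + `.*`)
        for i := 0; i < 80; i++ {
            if re.MatchString(buf.String()) {
                return true
            }
            time.Sleep(100 * time.Millisecond)
        }
        return false
    }

    func main() {
        buf := &bytes.Buffer{}
        buf.WriteString(`{"msg":"finalized container zzzzz-dz642-xxxxxxxxxxxxxx2"}`)
        fmt.Println(waitForLog(buf, "finalized container")) // true
    }
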
index d1f19dd7b5702e2431471bc7ce2164f553a8cb11..084700d39bfad76b109078f29e81ecf82c40c5be 100644 (file)
@@ -23,9 +23,15 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/config"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/Sirupsen/logrus"
        "github.com/coreos/go-systemd/daemon"
 )
 
+type logger interface {
+       dispatch.Logger
+       Fatalf(string, ...interface{})
+}
+
 const initialNiceValue int64 = 10000
 
 var (
@@ -35,6 +41,7 @@ var (
 
 type Dispatcher struct {
        *dispatch.Dispatcher
+       logger  logrus.FieldLogger
        cluster *arvados.Cluster
        sqCheck *SqueueChecker
        slurm   Slurm
@@ -57,13 +64,23 @@ type Dispatcher struct {
 
        // Minimum time between two attempts to run the same container
        MinRetryPeriod arvados.Duration
+
+       // Batch size for container queries
+       BatchSize int64
 }
 
 func main() {
-       disp := &Dispatcher{}
+       logger := logrus.StandardLogger()
+       if os.Getenv("DEBUG") != "" {
+               logger.SetLevel(logrus.DebugLevel)
+       }
+       logger.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       }
+       disp := &Dispatcher{logger: logger}
        err := disp.Run(os.Args[0], os.Args[1:])
        if err != nil {
-               log.Fatal(err)
+               logrus.Fatalf("%s", err)
        }
 }
 
@@ -101,7 +118,7 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
                return nil
        }
 
-       log.Printf("crunch-dispatch-slurm %s started", version)
+       disp.logger.Printf("crunch-dispatch-slurm %s started", version)
 
        err := disp.readConfig(*configPath)
        if err != nil {
@@ -129,7 +146,7 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
                os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
                os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
        } else {
-               log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+               disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
        }
 
        if *dumpConfig {
@@ -138,7 +155,7 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
 
        siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
        if os.IsNotExist(err) {
-               log.Printf("warning: no cluster config (%s), proceeding with no node types defined", err)
+               disp.logger.Warnf("no cluster config (%s), proceeding with no node types defined", err)
        } else if err != nil {
                return fmt.Errorf("error loading config: %s", err)
        } else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
@@ -150,20 +167,26 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
 
 // setup() initializes private fields after configure().
 func (disp *Dispatcher) setup() {
+       if disp.logger == nil {
+               disp.logger = logrus.StandardLogger()
+       }
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
-               log.Fatalf("Error making Arvados client: %v", err)
+               disp.logger.Fatalf("Error making Arvados client: %v", err)
        }
        arv.Retries = 25
 
-       disp.slurm = &slurmCLI{}
+       disp.slurm = NewSlurmCLI()
        disp.sqCheck = &SqueueChecker{
+               Logger:         disp.logger,
                Period:         time.Duration(disp.PollPeriod),
                PrioritySpread: disp.PrioritySpread,
                Slurm:          disp.slurm,
        }
        disp.Dispatcher = &dispatch.Dispatcher{
                Arv:            arv,
+               Logger:         disp.logger,
+               BatchSize:      disp.BatchSize,
                RunContainer:   disp.runContainer,
                PollPeriod:     time.Duration(disp.PollPeriod),
                MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
@@ -252,9 +275,6 @@ func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []s
        crArgs = append(crArgs, container.UUID)
        crScript := strings.NewReader(execScript(crArgs))
 
-       disp.sqCheck.L.Lock()
-       defer disp.sqCheck.L.Unlock()
-
        sbArgs, err := disp.sbatchArgs(container)
        if err != nil {
                return err
@@ -324,7 +344,7 @@ func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
                case <-ctx.Done():
                        // Disappeared from squeue
                        if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
-                               log.Printf("Error getting final container state for %s: %s", ctr.UUID, err)
+                               log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
                        }
                        switch ctr.State {
                        case dispatch.Running:
@@ -355,10 +375,7 @@ func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
        }
 }
 func (disp *Dispatcher) scancel(ctr arvados.Container) {
-       disp.sqCheck.L.Lock()
        err := disp.slurm.Cancel(ctr.UUID)
-       disp.sqCheck.L.Unlock()
-
        if err != nil {
                log.Printf("scancel: %s", err)
                time.Sleep(time.Second)
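
The small `logger` interface added at the top of this file composes dispatch.Logger with Fatalf, so configure/setup can both log and abort through a single value while tests can substitute something tamer. Assuming dispatch.Logger's method set is the Printf/Warnf/Debugf family this diff calls on it, *logrus.Logger satisfies the composition:

    package main

    import (
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
        "github.com/Sirupsen/logrus"
    )

    // logger: everything dispatch.Logger requires, plus Fatalf for
    // unrecoverable errors, as defined in this diff.
    type logger interface {
        dispatch.Logger
        Fatalf(string, ...interface{})
    }

    func main() {
        // Compile-time and runtime demonstration that *logrus.Logger fits.
        var l logger = logrus.StandardLogger()
        l.Printf("dispatcher starting")
    }
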
index 23a8a0ca01124df89575c5724a4b6cb6650527fb..b76ece314d47806afcfb328ba12970b9171b58d5 100644 (file)
@@ -11,7 +11,6 @@ import (
        "fmt"
        "io"
        "io/ioutil"
-       "log"
        "net/http"
        "net/http/httptest"
        "os"
@@ -25,6 +24,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/Sirupsen/logrus"
        . "gopkg.in/check.v1"
 )
 
@@ -55,11 +55,12 @@ func (s *IntegrationSuite) TearDownTest(c *C) {
 }
 
 type slurmFake struct {
-       didBatch   [][]string
-       didCancel  []string
-       didRelease []string
-       didRenice  [][]string
-       queue      string
+       didBatch      [][]string
+       didCancel     []string
+       didRelease    []string
+       didRenice     [][]string
+       queue         string
+       rejectNice10K bool
        // If non-nil, run this func during the 2nd+ call to Cancel()
        onCancel func()
        // Error returned by Batch()
@@ -82,6 +83,9 @@ func (sf *slurmFake) Release(name string) error {
 
 func (sf *slurmFake) Renice(name string, nice int64) error {
        sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
+       if sf.rejectNice10K && nice > 10000 {
+               return errors.New("scontrol: error: Invalid nice value, must be between -10000 and 10000")
+       }
        return nil
 }
 
@@ -112,7 +116,7 @@ func (s *IntegrationSuite) integrationTest(c *C,
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
        c.Check(err, IsNil)
-       c.Check(len(containers.Items), Equals, 1)
+       c.Assert(len(containers.Items), Equals, 1)
 
        s.disp.CrunchRunCommand = []string{"echo"}
 
@@ -134,7 +138,11 @@ func (s *IntegrationSuite) integrationTest(c *C,
        }
 
        s.disp.slurm = &s.slurm
-       s.disp.sqCheck = &SqueueChecker{Period: 500 * time.Millisecond, Slurm: s.disp.slurm}
+       s.disp.sqCheck = &SqueueChecker{
+               Logger: logrus.StandardLogger(),
+               Period: 500 * time.Millisecond,
+               Slurm:  s.disp.slurm,
+       }
 
        err = s.disp.Dispatcher.Run(ctx)
        <-doneRun
@@ -242,7 +250,7 @@ func (s *StubbedSuite) TestAPIErrorGettingContainers(c *C) {
        apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
        apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
 
-       s.testWithServerStub(c, apiStubResponses, "echo", "Error getting list of containers")
+       s.testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
 }
 
 func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
@@ -260,8 +268,8 @@ func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arva
        }
 
        buf := bytes.NewBuffer(nil)
-       log.SetOutput(io.MultiWriter(buf, os.Stderr))
-       defer log.SetOutput(os.Stderr)
+       logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+       defer logrus.SetOutput(os.Stderr)
 
        s.disp.CrunchRunCommand = []string{crunchCmd}
 
index 9e9f45270f82d3450d27e380fe63924177e5501d..782be7d8c4e7226ced947bcd26c983ecb6b31de3 100644 (file)
@@ -20,7 +20,15 @@ type Slurm interface {
        Renice(name string, nice int64) error
 }
 
-type slurmCLI struct{}
+type slurmCLI struct {
+       runSemaphore chan bool
+}
+
+func NewSlurmCLI() *slurmCLI {
+       return &slurmCLI{
+               runSemaphore: make(chan bool, 3),
+       }
+}
 
 func (scli *slurmCLI) Batch(script io.Reader, args []string) error {
        return scli.run(script, "sbatch", args)
@@ -64,6 +72,8 @@ func (scli *slurmCLI) Renice(name string, nice int64) error {
 }
 
 func (scli *slurmCLI) run(stdin io.Reader, prog string, args []string) error {
+       scli.runSemaphore <- true
+       defer func() { <-scli.runSemaphore }()
        cmd := exec.Command(prog, args...)
        cmd.Stdin = stdin
        out, err := cmd.CombinedOutput()
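
NewSlurmCLI replaces the old mutex serialization (note the sqCheck.L.Lock/Unlock pairs deleted from submit and scancel earlier in this commit) with a counting semaphore: a buffered channel whose capacity, 3, bounds how many sbatch/scancel/scontrol processes can run at once. The idiom on its own:

    package main

    import (
        "fmt"
        "sync"
    )

    var sem = make(chan bool, 3) // capacity = max concurrent slurm commands

    func withSlot(id int) {
        sem <- true              // acquire: blocks while all 3 slots are in use
        defer func() { <-sem }() // release when the command returns
        fmt.Println("running", id)
    }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func(i int) { defer wg.Done(); withSlot(i) }(i)
        }
        wg.Wait()
    }
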
index 742943f197580e186e7fd1f7b8084a1357f3661d..5aee7e087b2658945b2eebe1f2f309d67c351d16 100644 (file)
@@ -7,30 +7,34 @@ package main
 import (
        "bytes"
        "fmt"
-       "log"
        "sort"
        "strings"
        "sync"
        "time"
 )
 
+const slurm15NiceLimit int64 = 10000
+
 type slurmJob struct {
        uuid         string
        wantPriority int64
        priority     int64 // current slurm priority (incorporates nice value)
        nice         int64 // current slurm nice value
+       hitNiceLimit bool
 }
 
 // Squeue implements asynchronous polling monitor of the SLURM queue using the
 // command 'squeue'.
 type SqueueChecker struct {
+       Logger         logger
        Period         time.Duration
        PrioritySpread int64
        Slurm          Slurm
        queue          map[string]*slurmJob
        startOnce      sync.Once
        done           chan struct{}
-       sync.Cond
+       lock           sync.RWMutex
+       notify         sync.Cond
 }
 
 // HasUUID checks if a given container UUID is in the slurm queue.
@@ -39,11 +43,11 @@ type SqueueChecker struct {
 func (sqc *SqueueChecker) HasUUID(uuid string) bool {
        sqc.startOnce.Do(sqc.start)
 
-       sqc.L.Lock()
-       defer sqc.L.Unlock()
+       sqc.lock.RLock()
+       defer sqc.lock.RUnlock()
 
        // block until next squeue broadcast signaling an update.
-       sqc.Wait()
+       sqc.notify.Wait()
        _, exists := sqc.queue[uuid]
        return exists
 }
@@ -52,26 +56,31 @@ func (sqc *SqueueChecker) HasUUID(uuid string) bool {
 // container.
 func (sqc *SqueueChecker) SetPriority(uuid string, want int64) {
        sqc.startOnce.Do(sqc.start)
-       sqc.L.Lock()
-       defer sqc.L.Unlock()
-       job, ok := sqc.queue[uuid]
-       if !ok {
+
+       sqc.lock.RLock()
+       job := sqc.queue[uuid]
+       if job == nil {
                // Wait in case the slurm job was just submitted and
                // will appear in the next squeue update.
-               sqc.Wait()
-               if job, ok = sqc.queue[uuid]; !ok {
-                       return
-               }
+               sqc.notify.Wait()
+               job = sqc.queue[uuid]
+       }
+       needUpdate := job != nil && job.wantPriority != want
+       sqc.lock.RUnlock()
+
+       if needUpdate {
+               sqc.lock.Lock()
+               job.wantPriority = want
+               sqc.lock.Unlock()
        }
-       job.wantPriority = want
 }
 
 // adjust slurm job nice values as needed to ensure slurm priority
 // order matches Arvados priority order.
 func (sqc *SqueueChecker) reniceAll() {
-       sqc.L.Lock()
-       defer sqc.L.Unlock()
-
+       // This is slow (it shells out to scontrol many times) and no
+       // other goroutines update sqc.queue or any of the job fields
+       // we use here, so we don't acquire a lock.
        jobs := make([]*slurmJob, 0, len(sqc.queue))
        for _, j := range sqc.queue {
                if j.wantPriority == 0 {
@@ -79,7 +88,7 @@ func (sqc *SqueueChecker) reniceAll() {
                        // (perhaps it's not an Arvados job)
                        continue
                }
-               if j.priority == 0 {
+               if j.priority <= 2*slurm15NiceLimit {
                        // SLURM <= 15.x implements "hold" by setting
                        // priority to 0. If we include held jobs
                        // here, we'll end up trying to push other
@@ -103,10 +112,18 @@ func (sqc *SqueueChecker) reniceAll() {
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
-               if renice[i] == job.nice {
+               niceNew := renice[i]
+               if job.hitNiceLimit && niceNew > slurm15NiceLimit {
+                       niceNew = slurm15NiceLimit
+               }
+               if niceNew == job.nice {
                        continue
                }
-               sqc.Slurm.Renice(job.uuid, renice[i])
+               err := sqc.Slurm.Renice(job.uuid, niceNew)
+               if err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), "Invalid nice value") {
+                       sqc.Logger.Warnf("container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15", job.uuid, slurm15NiceLimit)
+                       job.hitNiceLimit = true
+               }
        }
 }
 
@@ -122,18 +139,11 @@ func (sqc *SqueueChecker) Stop() {
 // queued). If it succeeds, it updates sqc.queue and wakes up any
 // goroutines that are waiting in HasUUID() or All().
 func (sqc *SqueueChecker) check() {
-       // Mutex between squeue sync and running sbatch or scancel.  This
-       // establishes a sequence so that squeue doesn't run concurrently with
-       // sbatch or scancel; the next update of squeue will occur only after
-       // sbatch or scancel has completed.
-       sqc.L.Lock()
-       defer sqc.L.Unlock()
-
        cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
        stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
        cmd.Stdout, cmd.Stderr = stdout, stderr
        if err := cmd.Run(); err != nil {
-               log.Printf("Error running %q %q: %s %q", cmd.Path, cmd.Args, err, stderr.String())
+               sqc.Logger.Warnf("Error running %q %q: %s %q", cmd.Path, cmd.Args, err, stderr.String())
                return
        }
 
@@ -146,9 +156,13 @@ func (sqc *SqueueChecker) check() {
                var uuid, state, reason string
                var n, p int64
                if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
-                       log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
+                       sqc.Logger.Warnf("ignoring unparsed line in squeue output: %q", line)
                        continue
                }
+
+               // No other goroutines write to jobs' priority or nice
+               // fields, so we can read and write them without
+               // locks.
                replacing, ok := sqc.queue[uuid]
                if !ok {
                        replacing = &slurmJob{uuid: uuid}
@@ -157,14 +171,17 @@ func (sqc *SqueueChecker) check() {
                replacing.nice = n
                newq[uuid] = replacing
 
-               if state == "PENDING" && ((reason == "BadConstraints" && p == 0) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
+               if state == "PENDING" && ((reason == "BadConstraints" && p <= 2*slurm15NiceLimit) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
                        // When using SLURM 14.x or 15.x, our queued
                        // jobs land in this state when "scontrol
                        // reconfigure" invalidates their feature
                        // constraints by clearing all node features.
                        // They stay in this state even after the
                        // features reappear, until we run "scontrol
-                       // release {jobid}".
+                       // release {jobid}". Priority is usually 0 in
+                       // this state, but sometimes (due to a race
+                       // with nice adjustments?) it's a small
+                       // positive value.
                        //
                        // "scontrol release" is silent and successful
                        // regardless of whether the features have
@@ -175,20 +192,22 @@ func (sqc *SqueueChecker) check() {
                        // "launch failed requeued held" seems to be
                        // another manifestation of this problem,
                        // resolved the same way.
-                       log.Printf("releasing held job %q", uuid)
+                       sqc.Logger.Printf("releasing held job %q (priority=%d, state=%q, reason=%q)", uuid, p, state, reason)
                        sqc.Slurm.Release(uuid)
-               } else if p < 1<<20 && replacing.wantPriority > 0 {
-                       log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
+               } else if state != "RUNNING" && p <= 2*slurm15NiceLimit && replacing.wantPriority > 0 {
+                       sqc.Logger.Warnf("job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
                }
        }
+       sqc.lock.Lock()
        sqc.queue = newq
-       sqc.Broadcast()
+       sqc.lock.Unlock()
+       sqc.notify.Broadcast()
 }
 
 // Initialize, and start a goroutine to call check() once per
 // squeue.Period until terminated by calling Stop().
 func (sqc *SqueueChecker) start() {
-       sqc.L = &sync.Mutex{}
+       sqc.notify.L = sqc.lock.RLocker()
        sqc.done = make(chan struct{})
        go func() {
                ticker := time.NewTicker(sqc.Period)
@@ -200,6 +219,15 @@ func (sqc *SqueueChecker) start() {
                        case <-ticker.C:
                                sqc.check()
                                sqc.reniceAll()
+                               select {
+                               case <-ticker.C:
+                                       // If this iteration took
+                                       // longer than sqc.Period,
+                                       // consume the next tick and
+                                       // wait. Otherwise we would
+                                       // starve other goroutines.
+                               default:
+                               }
                        }
                }
        }()
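
The nested select deserves a note: time.Ticker's channel buffers exactly one tick, so if check() plus reniceAll() overruns the period, a tick is already waiting and the loop would otherwise fire again immediately. Draining that stale tick makes the loop wait out a full period instead. The pattern in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    func pollLoop(period time.Duration, done <-chan struct{}, doWork func()) {
        ticker := time.NewTicker(period)
        defer ticker.Stop()
        for {
            select {
            case <-done:
                return
            case <-ticker.C:
                doWork() // may take longer than period
                select {
                case <-ticker.C:
                    // Consume the tick that piled up while doWork ran, so
                    // the next iteration waits instead of running back-to-back.
                default:
                }
            }
        }
    }

    func main() {
        done := make(chan struct{})
        time.AfterFunc(time.Second, func() { close(done) })
        pollLoop(200*time.Millisecond, done, func() { fmt.Print(".") })
    }
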
@@ -209,9 +237,9 @@ func (sqc *SqueueChecker) start() {
 // names reported by squeue.
 func (sqc *SqueueChecker) All() []string {
        sqc.startOnce.Do(sqc.start)
-       sqc.L.Lock()
-       defer sqc.L.Unlock()
-       sqc.Wait()
+       sqc.lock.RLock()
+       defer sqc.lock.RUnlock()
+       sqc.notify.Wait()
        var uuids []string
        for u := range sqc.queue {
                uuids = append(uuids, u)
index c9329fdf95bf87028346fb727b8521dc8edfa1cd..de674a1397a0fcc24a64d2c417b262e18e9554ae 100644 (file)
@@ -7,6 +7,7 @@ package main
 import (
        "time"
 
+       "github.com/Sirupsen/logrus"
        . "gopkg.in/check.v1"
 )
 
@@ -24,6 +25,7 @@ func (s *SqueueSuite) TestReleasePending(c *C) {
                queue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING BadConstraints\n",
        }
        sqc := &SqueueChecker{
+               Logger: logrus.StandardLogger(),
                Slurm:  slurm,
                Period: time.Hour,
        }
@@ -88,6 +90,7 @@ func (s *SqueueSuite) TestReniceAll(c *C) {
                        queue: test.squeue,
                }
                sqc := &SqueueChecker{
+                       Logger:         logrus.StandardLogger(),
                        Slurm:          slurm,
                        PrioritySpread: test.spread,
                        Period:         time.Hour,
@@ -103,6 +106,51 @@ func (s *SqueueSuite) TestReniceAll(c *C) {
        }
 }
 
+// If a limited nice range prevents desired priority adjustments, give
+// up and clamp nice to 10K.
+func (s *SqueueSuite) TestReniceInvalidNiceValue(c *C) {
+       uuids := []string{"zzzzz-dz642-fake0fake0fake0", "zzzzz-dz642-fake1fake1fake1", "zzzzz-dz642-fake2fake2fake2"}
+       slurm := &slurmFake{
+               queue:         uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 0 4294555222 PENDING Resources\n",
+               rejectNice10K: true,
+       }
+       sqc := &SqueueChecker{
+               Logger:         logrus.StandardLogger(),
+               Slurm:          slurm,
+               PrioritySpread: 1,
+               Period:         time.Hour,
+       }
+       sqc.startOnce.Do(sqc.start)
+       sqc.check()
+       sqc.SetPriority(uuids[0], 2)
+       sqc.SetPriority(uuids[1], 1)
+
+       // First attempt should renice to 555001, which will fail
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}})
+
+       // Next attempt should renice to 10K, which will succeed
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}})
+       // ...so we'll change the squeue response to reflect the
+       // updated priority+nice, and make sure sqc sees that...
+       slurm.queue = uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 10000 4294545222 PENDING Resources\n"
+       sqc.check()
+
+       // Next attempt should leave nice alone because it's already
+       // at the 10K limit
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}})
+
+       // Back to normal if desired nice value falls below 10K
+       slurm.queue = uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n"
+       sqc.check()
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}, {uuids[1], "9890"}})
+
+       sqc.Stop()
+}
+
 // If the given UUID isn't in the slurm queue yet, SetPriority()
 // should wait for it to appear on the very next poll, then give up.
 func (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {
@@ -111,6 +159,7 @@ func (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {
 
        slurm := &slurmFake{}
        sqc := &SqueueChecker{
+               Logger: logrus.StandardLogger(),
                Slurm:  slurm,
                Period: time.Hour,
        }
index 032d86284d5e0a9fc8a3d712a0283597ec29d765..bcfa5b8a39ed7c8680a5ec1ffed6b583193f1caf 100644 (file)
@@ -22,6 +22,7 @@ var exampleConfigFile = []byte(`
        "PollPeriod": "10s",
        "SbatchArguments": ["--partition=foo", "--exclude=node13"],
        "ReserveExtraRAM": 268435456,
+       "BatchSize": 10000
     }`)
 
 func usage(fs *flag.FlagSet) {
index 098c53f8a6a587816703ad6997ceb51eec7f0232..0a980b9ce9359cde4529928e78eba06ac74644f3 100644 (file)
@@ -742,6 +742,7 @@ func (runner *ContainerRunner) startCrunchstat() error {
                CgroupParent: runner.expectCgroupParent,
                CgroupRoot:   runner.cgroupRoot,
                PollPeriod:   runner.statInterval,
+               TempDir:      runner.parentTemp,
        }
        runner.statReporter.Start()
        return nil
index ad433bb3b532fea36610e975a14b6ca750f5b353..7e2dc01271f0f08e09129772badc8402cc1b786e 100644 (file)
@@ -107,7 +107,7 @@ func runCommand(argv []string, logger *log.Logger) error {
        }
 
        // Funnel stderr through our channel
-       stderr_pipe, err := cmd.StderrPipe()
+       stderrPipe, err := cmd.StderrPipe()
        if err != nil {
                logger.Fatalln("error in StderrPipe:", err)
        }
@@ -121,7 +121,7 @@ func runCommand(argv []string, logger *log.Logger) error {
        os.Stdin.Close()
        os.Stdout.Close()
 
-       copyPipeToChildLog(stderr_pipe, log.New(os.Stderr, "", 0))
+       copyPipeToChildLog(stderrPipe, log.New(os.Stderr, "", 0))
 
        return cmd.Wait()
 }
index 769771e7beb52cafd5fa11033921973de501e54c..2d58012fa805b506215d3ce5f1ee4e6699efb3b1 100644 (file)
@@ -925,7 +925,7 @@ class ProjectDirectory(Directory):
         with llfuse.lock_released:
             if not self._current_user:
                 self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
-            return self._current_user["uuid"] in self.project_object["writable_by"]
+            return self._current_user["uuid"] in self.project_object.get("writable_by", [])
 
     def persisted(self):
         return True
@@ -1049,35 +1049,55 @@ class SharedDirectory(Directory):
                 if not self.stale():
                     return
 
-                all_projects = arvados.util.list_all(
-                    self.api.groups().list, self.num_retries,
-                    filters=[['group_class','=','project']],
-                    select=["uuid", "owner_uuid"])
-                objects = {}
-                for ob in all_projects:
-                    objects[ob['uuid']] = ob
-
+                contents = {}
                 roots = []
                 root_owners = set()
-                current_uuid = self.current_user['uuid']
-                for ob in all_projects:
-                    if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
-                        roots.append(ob['uuid'])
-                        root_owners.add(ob['owner_uuid'])
-
-                lusers = arvados.util.list_all(
-                    self.api.users().list, self.num_retries,
-                    filters=[['uuid','in', list(root_owners)]])
-                lgroups = arvados.util.list_all(
-                    self.api.groups().list, self.num_retries,
-                    filters=[['uuid','in', list(root_owners)+roots]])
-
-                for l in lusers:
-                    objects[l["uuid"]] = l
-                for l in lgroups:
-                    objects[l["uuid"]] = l
+                objects = {}
+
+                methods = self.api._rootDesc.get('resources')["groups"]['methods']
+                if 'httpMethod' in methods.get('shared', {}):
+                    page = []
+                    while True:
+                        resp = self.api.groups().shared(filters=[['group_class', '=', 'project']]+page,
+                                                        order="uuid",
+                                                        limit=10000,
+                                                        count="none",
+                                                        include="owner_uuid").execute()
+                        if not resp["items"]:
+                            break
+                        page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
+                        for r in resp["items"]:
+                            objects[r["uuid"]] = r
+                            roots.append(r["uuid"])
+                        for r in resp["included"]:
+                            objects[r["uuid"]] = r
+                            root_owners.add(r["uuid"])
+                else:
+                    all_projects = arvados.util.list_all(
+                        self.api.groups().list, self.num_retries,
+                        filters=[['group_class','=','project']],
+                        select=["uuid", "owner_uuid"])
+                    for ob in all_projects:
+                        objects[ob['uuid']] = ob
+
+                    current_uuid = self.current_user['uuid']
+                    for ob in all_projects:
+                        if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
+                            roots.append(ob['uuid'])
+                            root_owners.add(ob['owner_uuid'])
+
+                    lusers = arvados.util.list_all(
+                        self.api.users().list, self.num_retries,
+                        filters=[['uuid','in', list(root_owners)]])
+                    lgroups = arvados.util.list_all(
+                        self.api.groups().list, self.num_retries,
+                        filters=[['uuid','in', list(root_owners)+roots]])
+
+                    for l in lusers:
+                        objects[l["uuid"]] = l
+                    for l in lgroups:
+                        objects[l["uuid"]] = l
 
-                contents = {}
                 for r in root_owners:
                     if r in objects:
                         obr = objects[r]
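
The new branch pages through groups().shared with keyset pagination rather than offsets: results are ordered by uuid, count="none" skips the expensive total-row count, and each following page filters on uuid strictly greater than the last one seen, terminating on an empty page. The same loop as a Go sketch (fetchPage is a hypothetical stand-in for the API call):

    package main

    import "fmt"

    type item struct{ UUID string }

    // fetchPage stands in for the groups().shared call above: one page
    // ordered by uuid, limit 10000, count="none".
    func fetchPage(filters [][]interface{}) ([]item, error) { return nil, nil }

    // listShared walks shared projects with keyset pagination: each page
    // filters on uuid strictly greater than the last uuid already handled.
    func listShared(handle func(item)) error {
        page := [][]interface{}{}
        for {
            filters := append([][]interface{}{{"group_class", "=", "project"}}, page...)
            items, err := fetchPage(filters)
            if err != nil || len(items) == 0 {
                return err
            }
            for _, it := range items {
                handle(it)
            }
            page = [][]interface{}{{"uuid", ">", items[len(items)-1].UUID}}
        }
    }

    func main() {
        _ = listShared(func(it item) { fmt.Println(it.UUID) })
    }
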
index 9ee99903c8d1e537d487a67d1c77d848fc93c807..8336b78f9ea9614af2796211d9ed89d58da741e8 100644 (file)
@@ -6,14 +6,16 @@ package main
 
 import (
        "sync"
-       "sync/atomic"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "github.com/hashicorp/golang-lru"
+       "github.com/prometheus/client_golang/prometheus"
 )
 
+const metricsUpdateInterval = time.Second / 10
+
 type cache struct {
        TTL                  arvados.Duration
        UUIDTTL              arvados.Duration
@@ -22,21 +24,74 @@ type cache struct {
        MaxPermissionEntries int
        MaxUUIDEntries       int
 
-       stats       cacheStats
+       registry    *prometheus.Registry
+       metrics     cacheMetrics
        pdhs        *lru.TwoQueueCache
        collections *lru.TwoQueueCache
        permissions *lru.TwoQueueCache
        setupOnce   sync.Once
 }
 
-type cacheStats struct {
-       Requests          uint64 `json:"Cache.Requests"`
-       CollectionBytes   uint64 `json:"Cache.CollectionBytes"`
-       CollectionEntries int    `json:"Cache.CollectionEntries"`
-       CollectionHits    uint64 `json:"Cache.CollectionHits"`
-       PDHHits           uint64 `json:"Cache.UUIDHits"`
-       PermissionHits    uint64 `json:"Cache.PermissionHits"`
-       APICalls          uint64 `json:"Cache.APICalls"`
+type cacheMetrics struct {
+       requests          prometheus.Counter
+       collectionBytes   prometheus.Gauge
+       collectionEntries prometheus.Gauge
+       collectionHits    prometheus.Counter
+       pdhHits           prometheus.Counter
+       permissionHits    prometheus.Counter
+       apiCalls          prometheus.Counter
+}
+
+func (m *cacheMetrics) setup(reg *prometheus.Registry) {
+       m.requests = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "requests",
+               Help:      "Number of targetID-to-manifest lookups handled.",
+       })
+       reg.MustRegister(m.requests)
+       m.collectionHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "hits",
+               Help:      "Number of pdh-to-manifest cache hits.",
+       })
+       reg.MustRegister(m.collectionHits)
+       m.pdhHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "pdh_hits",
+               Help:      "Number of uuid-to-pdh cache hits.",
+       })
+       reg.MustRegister(m.pdhHits)
+       m.permissionHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "permission_hits",
+               Help:      "Number of targetID-to-permission cache hits.",
+       })
+       reg.MustRegister(m.permissionHits)
+       m.apiCalls = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "api_calls",
+               Help:      "Number of outgoing API calls made by cache.",
+       })
+       reg.MustRegister(m.apiCalls)
+       m.collectionBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "cached_manifest_bytes",
+               Help:      "Total size of all manifests in cache.",
+       })
+       reg.MustRegister(m.collectionBytes)
+       m.collectionEntries = prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "cached_manifests",
+               Help:      "Number of manifests in cache.",
+       })
+       reg.MustRegister(m.collectionEntries)
 }
 
 type cachedPDH struct {
@@ -67,23 +122,26 @@ func (c *cache) setup() {
        if err != nil {
                panic(err)
        }
+
+       reg := c.registry
+       if reg == nil {
+               reg = prometheus.NewRegistry()
+       }
+       c.metrics.setup(reg)
+       go func() {
+               for range time.Tick(metricsUpdateInterval) {
+                       c.updateGauges()
+               }
+       }()
 }
 
-var selectPDH = map[string]interface{}{
-       "select": []string{"portable_data_hash"},
+func (c *cache) updateGauges() {
+       c.metrics.collectionBytes.Set(float64(c.collectionBytes()))
+       c.metrics.collectionEntries.Set(float64(c.collections.Len()))
 }
 
-func (c *cache) Stats() cacheStats {
-       c.setupOnce.Do(c.setup)
-       return cacheStats{
-               Requests:          atomic.LoadUint64(&c.stats.Requests),
-               CollectionBytes:   c.collectionBytes(),
-               CollectionEntries: c.collections.Len(),
-               CollectionHits:    atomic.LoadUint64(&c.stats.CollectionHits),
-               PDHHits:           atomic.LoadUint64(&c.stats.PDHHits),
-               PermissionHits:    atomic.LoadUint64(&c.stats.PermissionHits),
-               APICalls:          atomic.LoadUint64(&c.stats.APICalls),
-       }
+var selectPDH = map[string]interface{}{
+       "select": []string{"portable_data_hash"},
 }
 
 // Update saves a modified version (fs) to an existing collection
@@ -99,7 +157,7 @@ func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvad
        }
        var updated arvados.Collection
        defer c.pdhs.Remove(coll.UUID)
-       err := client.RequestAndDecode(&updated, "PATCH", "/arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
+       err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
        if err == nil {
                c.collections.Add(client.AuthToken+"\000"+coll.PortableDataHash, &cachedCollection{
                        expire:     time.Now().Add(time.Duration(c.TTL)),
@@ -111,8 +169,7 @@ func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvad
 
 func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceReload bool) (*arvados.Collection, error) {
        c.setupOnce.Do(c.setup)
-
-       atomic.AddUint64(&c.stats.Requests, 1)
+       c.metrics.requests.Inc()
 
        permOK := false
        permKey := arv.ApiToken + "\000" + targetID
@@ -123,7 +180,7 @@ func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceRelo
                        c.permissions.Remove(permKey)
                } else {
                        permOK = true
-                       atomic.AddUint64(&c.stats.PermissionHits, 1)
+                       c.metrics.permissionHits.Inc()
                }
        }
 
@@ -136,7 +193,7 @@ func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceRelo
                        c.pdhs.Remove(targetID)
                } else {
                        pdh = ent.pdh
-                       atomic.AddUint64(&c.stats.PDHHits, 1)
+                       c.metrics.pdhHits.Inc()
                }
        }
 
@@ -152,7 +209,7 @@ func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceRelo
                // likely, the cached PDH is still correct; if so,
                // _and_ the current token has permission, we can
                // use our cached manifest.
-               atomic.AddUint64(&c.stats.APICalls, 1)
+               c.metrics.apiCalls.Inc()
                var current arvados.Collection
                err := arv.Get("collections", targetID, selectPDH, &current)
                if err != nil {
@@ -180,7 +237,7 @@ func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceRelo
        }
 
        // Collection manifest is not cached.
-       atomic.AddUint64(&c.stats.APICalls, 1)
+       c.metrics.apiCalls.Inc()
        err := arv.Get("collections", targetID, nil, &collection)
        if err != nil {
                return nil, err
@@ -261,16 +318,15 @@ func (c *cache) collectionBytes() uint64 {
 }
 
 func (c *cache) lookupCollection(key string) *arvados.Collection {
-       if ent, cached := c.collections.Get(key); !cached {
+       e, cached := c.collections.Get(key)
+       if !cached {
+               return nil
+       }
+       ent := e.(*cachedCollection)
+       if ent.expire.Before(time.Now()) {
+               c.collections.Remove(key)
                return nil
-       } else {
-               ent := ent.(*cachedCollection)
-               if ent.expire.Before(time.Now()) {
-                       c.collections.Remove(key)
-                       return nil
-               } else {
-                       atomic.AddUint64(&c.stats.CollectionHits, 1)
-                       return ent.collection
-               }
        }
+       c.metrics.collectionHits.Inc()
+       return ent.collection
 }
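
With the counters and gauges registered on a private prometheus.Registry, the one piece not shown in this diff is serving them; the stock client_golang route is promhttp.HandlerFor. A minimal sketch (the endpoint path and wiring are assumptions, not taken from this commit):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        // ... MustRegister collectors here, as cacheMetrics.setup does ...
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":9000", nil))
    }
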
index cddeaf489763500b9e7230a75c2b19a4c25f40cf..d147573eec72d402faec43c21da86a010f13dc94 100644 (file)
@@ -5,17 +5,36 @@
 package main
 
 import (
+       "bytes"
+
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/common/expfmt"
        "gopkg.in/check.v1"
 )
 
+func (s *UnitSuite) checkCacheMetrics(c *check.C, reg *prometheus.Registry, regs ...string) {
+       mfs, err := reg.Gather()
+       c.Check(err, check.IsNil)
+       buf := &bytes.Buffer{}
+       enc := expfmt.NewEncoder(buf, expfmt.FmtText)
+       for _, mf := range mfs {
+               c.Check(enc.Encode(mf), check.IsNil)
+       }
+       mm := buf.String()
+       for _, reg := range regs {
+               c.Check(mm, check.Matches, `(?ms).*collectioncache_`+reg+`\n.*`)
+       }
+}
+
 func (s *UnitSuite) TestCache(c *check.C) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, check.Equals, nil)
 
        cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
 
        // Hit the same collection 5 times using the same token. Only
        // the first req should cause an API call; the next 4 should
@@ -29,11 +48,12 @@ func (s *UnitSuite) TestCache(c *check.C) {
                c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooPdh)
                c.Check(coll.ManifestText[:2], check.Equals, ". ")
        }
-       c.Check(cache.Stats().Requests, check.Equals, uint64(5))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(4))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(1))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 5",
+               "hits 4",
+               "permission_hits 4",
+               "pdh_hits 4",
+               "api_calls 1")
 
        // Hit the same collection 2 more times, this time requesting
        // it by PDH and using a different token. The first req should
@@ -49,11 +69,12 @@ func (s *UnitSuite) TestCache(c *check.C) {
        c.Check(coll2.ManifestText[:2], check.Equals, ". ")
        c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
 
-       c.Check(cache.Stats().Requests, check.Equals, uint64(5+1))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+0))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+0))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 6",
+               "hits 4",
+               "permission_hits 4",
+               "pdh_hits 4",
+               "api_calls 2")
 
        coll2, err = cache.Get(arv, arvadostest.FooPdh, false)
        c.Check(err, check.Equals, nil)
@@ -61,11 +82,12 @@ func (s *UnitSuite) TestCache(c *check.C) {
        c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
        c.Check(coll2.ManifestText[:2], check.Equals, ". ")
 
-       c.Check(cache.Stats().Requests, check.Equals, uint64(5+2))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+1))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+1))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 7",
+               "hits 5",
+               "permission_hits 5",
+               "pdh_hits 4",
+               "api_calls 2")
 
        // Alternating between two collections N times should produce
        // only 2 more API calls.
@@ -80,11 +102,12 @@ func (s *UnitSuite) TestCache(c *check.C) {
                _, err := cache.Get(arv, target, false)
                c.Check(err, check.Equals, nil)
        }
-       c.Check(cache.Stats().Requests, check.Equals, uint64(5+2+20))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+1+18))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+1+18))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0+18))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1+2))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 27",
+               "hits 23",
+               "permission_hits 23",
+               "pdh_hits 22",
+               "api_calls 4")
 }
 
 func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
@@ -92,17 +115,19 @@ func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
        c.Assert(err, check.Equals, nil)
 
        cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
 
        for _, forceReload := range []bool{false, true, false, true} {
                _, err := cache.Get(arv, arvadostest.FooPdh, forceReload)
                c.Check(err, check.Equals, nil)
        }
 
-       c.Check(cache.Stats().Requests, check.Equals, uint64(4))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(3))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(1))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(0))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(3))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 4",
+               "hits 3",
+               "permission_hits 1",
+               "pdh_hits 0",
+               "api_calls 3")
 }
 
 func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
@@ -110,15 +135,17 @@ func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
        c.Assert(err, check.Equals, nil)
 
        cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
 
        for _, forceReload := range []bool{false, true, false, true} {
                _, err := cache.Get(arv, arvadostest.FooCollection, forceReload)
                c.Check(err, check.Equals, nil)
        }
 
-       c.Check(cache.Stats().Requests, check.Equals, uint64(4))
-       c.Check(cache.Stats().CollectionHits, check.Equals, uint64(3))
-       c.Check(cache.Stats().PermissionHits, check.Equals, uint64(1))
-       c.Check(cache.Stats().PDHHits, check.Equals, uint64(3))
-       c.Check(cache.Stats().APICalls, check.Equals, uint64(3))
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 4",
+               "hits 3",
+               "permission_hits 1",
+               "pdh_hits 3",
+               "api_calls 3")
 }
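
The checkCacheMetrics helper above renders a registry in the Prometheus text exposition format before pattern-matching on it. The gather-and-encode step in isolation, with an illustrative counter standing in for the cache metrics:

    package main

    import (
            "bytes"
            "fmt"

            "github.com/prometheus/client_golang/prometheus"
            "github.com/prometheus/common/expfmt"
    )

    func main() {
            reg := prometheus.NewRegistry()
            requests := prometheus.NewCounter(prometheus.CounterOpts{
                    Name: "collectioncache_requests", Help: "example counter"})
            reg.MustRegister(requests)
            requests.Inc()

            mfs, err := reg.Gather() // []*dto.MetricFamily
            if err != nil {
                    panic(err)
            }
            var buf bytes.Buffer
            enc := expfmt.NewEncoder(&buf, expfmt.FmtText)
            for _, mf := range mfs {
                    if err := enc.Encode(mf); err != nil {
                            panic(err)
                    }
            }
            // Output includes a line like "collectioncache_requests 1"
            fmt.Print(buf.String())
    }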
index 3814a459d53c46c8b92d7dc40d8fd8cd13ee6ae4..0e2f17c35b85df02b98df4d3e29a974d18deb17d 100644 (file)
@@ -74,7 +74,7 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
        var newCollection arvados.Collection
        arv := arvados.NewClientFromEnv()
        arv.AuthToken = arvadostest.ActiveToken
-       err = arv.RequestAndDecode(&newCollection, "POST", "/arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
+       err = arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
        c.Assert(err, check.IsNil)
 
        readPath, writePath, pdhPath := pathFunc(newCollection)
index 89cd26ac49a8b76fcf0053633ca26917477c9478..d65156f98781f99cd3fbc4a20b2f0ba144ea8f97 100644 (file)
 // avoids redirecting requests to keep-web if they depend on
 // TrustAllContent being enabled.
 //
+// Metrics
+//
+// Keep-web exposes request metrics in Prometheus text-based format at
+// /metrics. The same information is also available as JSON at
+// /metrics.json.
+//
 package main
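
A quick way to exercise both endpoints described above, assuming keep-web is listening on localhost:9002 (the address is an assumption for illustration):

    package main

    import (
            "fmt"
            "io/ioutil"
            "net/http"
    )

    func main() {
            for _, path := range []string{"/metrics", "/metrics.json"} {
                    resp, err := http.Get("http://localhost:9002" + path)
                    if err != nil {
                            panic(err)
                    }
                    body, _ := ioutil.ReadAll(resp.Body)
                    resp.Body.Close()
                    fmt.Printf("GET %s -> %s, %d bytes\n", path, resp.Status, len(body))
            }
    }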
index 517ec1a2a26e96967ad50bec925a65b1f6149f6a..912398fa64db5d8b18605178f14a77884e234f1d 100644 (file)
@@ -31,6 +31,7 @@ import (
 
 type handler struct {
        Config        *Config
+       MetricsAPI    http.Handler
        clientPool    *arvadosclient.ClientPool
        setupOnce     sync.Once
        healthHandler http.Handler
@@ -90,14 +91,7 @@ func (h *handler) setup() {
 }
 
 func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
-       status := struct {
-               cacheStats
-               Version string
-       }{
-               cacheStats: h.Config.Cache.Stats(),
-               Version:    version,
-       }
-       json.NewEncoder(w).Encode(status)
+       json.NewEncoder(w).Encode(struct{ Version string }{version})
 }
 
 // updateOnSuccess wraps httpserver.ResponseWriter. If the handler
@@ -141,6 +135,11 @@ func (uos *updateOnSuccess) WriteHeader(code int) {
 }
 
 var (
+       corsAllowHeadersHeader = strings.Join([]string{
+               "Authorization", "Content-Type", "Range",
+               // WebDAV request headers:
+               "Depth", "Destination", "If", "Lock-Token", "Overwrite", "Timeout",
+       }, ", ")
        writeMethod = map[string]bool{
                "COPY":   true,
                "DELETE": true,
@@ -183,6 +182,9 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
                remoteAddr = xff + "," + remoteAddr
        }
+       if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
+               r.URL.Scheme = xfp
+       }
 
        w := httpserver.WrapResponseWriter(wOrig)
        defer func() {
@@ -209,7 +211,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                        statusCode = http.StatusMethodNotAllowed
                        return
                }
-               w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Range")
+               w.Header().Set("Access-Control-Allow-Headers", corsAllowHeadersHeader)
                w.Header().Set("Access-Control-Allow-Methods", "COPY, DELETE, GET, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PUT, RMCOL")
                w.Header().Set("Access-Control-Allow-Origin", "*")
                w.Header().Set("Access-Control-Max-Age", "86400")
@@ -256,6 +258,9 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        } else if r.URL.Path == "/status.json" {
                h.serveStatus(w, r)
                return
+       } else if strings.HasPrefix(r.URL.Path, "/metrics") {
+               h.MetricsAPI.ServeHTTP(w, r)
+               return
        } else if siteFSDir[pathParts[0]] {
                useSiteFS = true
        } else if len(pathParts) >= 1 && strings.HasPrefix(pathParts[0], "c=") {
@@ -773,6 +778,7 @@ func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, loc
                u = newu
        }
        redir := (&url.URL{
+               Scheme:   r.URL.Scheme,
                Host:     r.Host,
                Path:     u.Path,
                RawQuery: redirQuery.Encode(),
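
Taken together, the X-Forwarded-Proto hunk and the new Scheme field above let token-stripping redirects preserve the protocol the client actually used at the reverse proxy. A reduced sketch of the same idea (handler name and path are illustrative):

    package main

    import (
            "net/http"
            "net/url"
    )

    // redirectSelf redirects to a fixed path on the same host, keeping
    // whatever scheme a proxy reported via X-Forwarded-Proto.
    func redirectSelf(w http.ResponseWriter, r *http.Request) {
            scheme := "http"
            if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" {
                    scheme = xfp
            }
            redir := (&url.URL{Scheme: scheme, Host: r.Host, Path: "/redirected"}).String()
            http.Redirect(w, r, redir, http.StatusSeeOther)
    }

    func main() {
            http.HandleFunc("/", redirectSelf)
            http.ListenAndServe(":8080", nil)
    }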
index f86f81bfa15e5a1c20fed2f68a796f029ae3a966..bced67ed208012dfafa69fdd7a3e6dd2395b641a 100644 (file)
@@ -29,7 +29,7 @@ type UnitSuite struct{}
 
 func (s *UnitSuite) TestCORSPreflight(c *check.C) {
        h := handler{Config: DefaultConfig()}
-       u, _ := url.Parse("http://keep-web.example/c=" + arvadostest.FooCollection + "/foo")
+       u := mustParseURL("http://keep-web.example/c=" + arvadostest.FooCollection + "/foo")
        req := &http.Request{
                Method:     "OPTIONS",
                Host:       u.Host,
@@ -48,7 +48,7 @@ func (s *UnitSuite) TestCORSPreflight(c *check.C) {
        c.Check(resp.Body.String(), check.Equals, "")
        c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
        c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "COPY, DELETE, GET, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PUT, RMCOL")
-       c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range")
+       c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout")
 
        // Check preflight for a disallowed request
        resp = httptest.NewRecorder()
@@ -70,8 +70,7 @@ func (s *UnitSuite) TestInvalidUUID(c *check.C) {
                "http://" + bogusID + ".keep-web/t=" + token + "/" + bogusID + "/foo",
        } {
                c.Log(trial)
-               u, err := url.Parse(trial)
-               c.Assert(err, check.IsNil)
+               u := mustParseURL(trial)
                req := &http.Request{
                        Method:     "GET",
                        Host:       u.Host,
@@ -513,7 +512,7 @@ func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, ho
        if resp.Code != http.StatusSeeOther {
                return resp
        }
-       c.Check(resp.Body.String(), check.Matches, `.*href="//`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
+       c.Check(resp.Body.String(), check.Matches, `.*href="http://`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
        cookies := (&http.Response{Header: resp.Header()}).Cookies()
 
        u, _ = u.Parse(resp.Header().Get("Location"))
index e51376c3bc35cc10a92bf5a6f7c646a18bea3476..68ff8a7b013c2d685299eae2dc7c7da1d84f5606 100644 (file)
@@ -5,7 +5,10 @@
 package main
 
 import (
+       "net/http"
+
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
 )
 
 type server struct {
@@ -14,7 +17,12 @@ type server struct {
 }
 
 func (srv *server) Start() error {
-       srv.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(nil, &handler{Config: srv.Config}))
+       h := &handler{Config: srv.Config}
+       reg := prometheus.NewRegistry()
+       h.Config.Cache.registry = reg
+       mh := httpserver.Instrument(reg, nil, httpserver.AddRequestIDs(httpserver.LogRequests(nil, h)))
+       h.MetricsAPI = mh.ServeAPI(http.NotFoundHandler())
+       srv.Handler = mh
        srv.Addr = srv.Config.Listen
        return srv.Server.Start()
 }
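
httpserver.Instrument and ServeAPI are Arvados SDK helpers; the keepstore hunks below delete a hand-built equivalent. As a rough sketch of what such a wrapper does, built directly on the upstream promhttp package (the summary options and mux layout here are assumptions, not the SDK implementation):

    package main

    import (
            "net/http"

            "github.com/prometheus/client_golang/prometheus"
            "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func instrument(reg *prometheus.Registry, next http.Handler) http.Handler {
            reqDuration := prometheus.NewSummaryVec(prometheus.SummaryOpts{
                    Name: "request_duration_seconds",
                    Help: "Summary of request duration.",
            }, []string{"code", "method"})
            reg.MustRegister(reqDuration)

            mux := http.NewServeMux()
            // Observe every request's duration, then serve metrics at /metrics.
            mux.Handle("/", promhttp.InstrumentHandlerDuration(reqDuration, next))
            mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
            return mux
    }

    func main() {
            reg := prometheus.NewRegistry()
            http.ListenAndServe(":8080", instrument(reg, http.NotFoundHandler()))
    }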
index ee585ad5b212af1f12f2bad3f162f8c1c11f3a2f..7e738cb9f3467a63c5da91cbac253429f0dc5cad 100644 (file)
@@ -6,10 +6,12 @@ package main
 
 import (
        "crypto/md5"
+       "encoding/json"
        "fmt"
        "io"
        "io/ioutil"
        "net"
+       "net/http"
        "os"
        "os/exec"
        "strings"
@@ -294,6 +296,101 @@ func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...
        return
 }
 
+func (s *IntegrationSuite) TestMetrics(c *check.C) {
+       origin := "http://" + s.testServer.Addr
+       req, _ := http.NewRequest("GET", origin+"/notfound", nil)
+       _, err := http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       req, _ = http.NewRequest("GET", origin+"/by_id/", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp, err := http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       for i := 0; i < 2; i++ {
+               req, _ = http.NewRequest("GET", origin+"/foo", nil)
+               req.Host = arvadostest.FooCollection + ".example.com"
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp, err = http.DefaultClient.Do(req)
+               c.Assert(err, check.IsNil)
+               c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+               buf, _ := ioutil.ReadAll(resp.Body)
+               c.Check(buf, check.DeepEquals, []byte("foo"))
+               resp.Body.Close()
+       }
+
+       s.testServer.Config.Cache.updateGauges()
+
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       type summary struct {
+               SampleCount string  `json:"sample_count"`
+               SampleSum   float64 `json:"sample_sum"`
+               Quantile    []struct {
+                       Quantile float64
+                       Value    float64
+               }
+       }
+       type counter struct {
+               Value int64
+       }
+       type gauge struct {
+               Value float64
+       }
+       var ents []struct {
+               Name   string
+               Help   string
+               Type   string
+               Metric []struct {
+                       Label []struct {
+                               Name  string
+                               Value string
+                       }
+                       Counter counter
+                       Gauge   gauge
+                       Summary summary
+               }
+       }
+       json.NewDecoder(resp.Body).Decode(&ents)
+       summaries := map[string]summary{}
+       gauges := map[string]gauge{}
+       counters := map[string]counter{}
+       for _, e := range ents {
+               for _, m := range e.Metric {
+                       labels := map[string]string{}
+                       for _, lbl := range m.Label {
+                               labels[lbl.Name] = lbl.Value
+                       }
+                       summaries[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Summary
+                       counters[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Counter
+                       gauges[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Gauge
+               }
+       }
+       c.Check(summaries["request_duration_seconds/get/200"].SampleSum, check.Not(check.Equals), 0)
+       c.Check(summaries["request_duration_seconds/get/200"].SampleCount, check.Equals, "3")
+       c.Check(summaries["request_duration_seconds/get/404"].SampleCount, check.Equals, "1")
+       c.Check(summaries["time_to_status_seconds/get/404"].SampleCount, check.Equals, "1")
+       c.Check(counters["arvados_keepweb_collectioncache_requests//"].Value, check.Equals, int64(2))
+       c.Check(counters["arvados_keepweb_collectioncache_api_calls//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_hits//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_pdh_hits//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_permission_hits//"].Value, check.Equals, int64(1))
+       c.Check(gauges["arvados_keepweb_collectioncache_cached_manifests//"].Value, check.Equals, float64(1))
+       // FooCollection's cached manifest size is 45 ("1f4b0....+45") plus one 51-byte blob signature
+       c.Check(gauges["arvados_keepweb_collectioncache_cached_manifest_bytes//"].Value, check.Equals, float64(45+51))
+
+       // If the Host header indicates a collection, /metrics.json
+       // refers to a file in the collection -- the metrics handler
+       // must not intercept that route.
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       req.Host = strings.Replace(arvadostest.FooCollectionPDH, "+", "-", -1) + ".example.com"
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
 func (s *IntegrationSuite) SetUpSuite(c *check.C) {
        arvadostest.StartAPI()
        arvadostest.StartKeep(2, true)
index 0a2b9eb988dce96c4791f7cbbaf0c602d5b16980..62db198dd9b9ef27618b9bfd04262b32ac2736f0 100644 (file)
@@ -30,7 +30,6 @@ func (s *UnitSuite) TestStatus(c *check.C) {
        var status map[string]interface{}
        err := json.NewDecoder(resp.Body).Decode(&status)
        c.Check(err, check.IsNil)
-       c.Check(status["Cache.Requests"], check.Equals, float64(0))
        c.Check(status["Version"], check.Not(check.Equals), "")
 }
 
index e87fa4afd0db660c16af8a7ec78e68027620c531..dc70d968e2992a16581694ac70bbf42ba92f93ba 100644 (file)
@@ -617,7 +617,7 @@ func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
        kc := runProxy(c, nil, false)
        defer closeListener()
 
-       // Point keepproxy to a non-existant keepstore
+       // Point keepproxy at a non-existent keepstore
        locals := map[string]string{
                TestProxyUUID: "http://localhost:12345",
        }
index 3db20e29ce64eaca4d8c5ddd58389595d2f9ce16..1f8c7e31a2997ac2884ae2936ea174a0d859e017 100644 (file)
@@ -9,17 +9,11 @@ import (
        "encoding/json"
        "fmt"
        "io/ioutil"
-       "net/http"
-       "strconv"
        "strings"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
-       "git.curoverse.com/arvados.git/sdk/go/stats"
        "github.com/Sirupsen/logrus"
-       "github.com/golang/protobuf/jsonpb"
-       "github.com/prometheus/client_golang/prometheus"
-       "github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 type Config struct {
@@ -54,8 +48,6 @@ type Config struct {
 
        ManagementToken string `doc: The secret key that must be provided by monitoring services
 wishing to access the health check endpoint (/_health).`
-
-       metrics
 }
 
 var (
@@ -161,62 +153,6 @@ func (cfg *Config) Start() error {
        return nil
 }
 
-type metrics struct {
-       registry     *prometheus.Registry
-       reqDuration  *prometheus.SummaryVec
-       timeToStatus *prometheus.SummaryVec
-       exportProm   http.Handler
-}
-
-func (*metrics) Levels() []logrus.Level {
-       return logrus.AllLevels
-}
-
-func (m *metrics) Fire(ent *logrus.Entry) error {
-       if tts, ok := ent.Data["timeToStatus"].(stats.Duration); !ok {
-       } else if method, ok := ent.Data["reqMethod"].(string); !ok {
-       } else if code, ok := ent.Data["respStatusCode"].(int); !ok {
-       } else {
-               m.timeToStatus.WithLabelValues(strconv.Itoa(code), strings.ToLower(method)).Observe(time.Duration(tts).Seconds())
-       }
-       return nil
-}
-
-func (m *metrics) setup() {
-       m.registry = prometheus.NewRegistry()
-       m.timeToStatus = prometheus.NewSummaryVec(prometheus.SummaryOpts{
-               Name: "time_to_status_seconds",
-               Help: "Summary of request TTFB.",
-       }, []string{"code", "method"})
-       m.reqDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
-               Name: "request_duration_seconds",
-               Help: "Summary of request duration.",
-       }, []string{"code", "method"})
-       m.registry.MustRegister(m.timeToStatus)
-       m.registry.MustRegister(m.reqDuration)
-       m.exportProm = promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{
-               ErrorLog: log,
-       })
-       log.AddHook(m)
-}
-
-func (m *metrics) exportJSON(w http.ResponseWriter, req *http.Request) {
-       jm := jsonpb.Marshaler{Indent: "  "}
-       mfs, _ := m.registry.Gather()
-       w.Write([]byte{'['})
-       for i, mf := range mfs {
-               if i > 0 {
-                       w.Write([]byte{','})
-               }
-               jm.Marshal(w, mf)
-       }
-       w.Write([]byte{']'})
-}
-
-func (m *metrics) Instrument(next http.Handler) http.Handler {
-       return promhttp.InstrumentHandlerDuration(m.reqDuration, next)
-}
-
 // VolumeTypes is built up by init() funcs in the source files that
 // define the volume types.
 var VolumeTypes = []func() VolumeWithExamples{}
index fb327a386b0f33fdae30f1e0d3e4f880c8d0bfa1..c31ab9c2e38fde497f451e95c2a99437735e4455 100644 (file)
@@ -86,17 +86,11 @@ func MakeRESTRouter() http.Handler {
        // 400 Bad Request.
        rtr.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
 
-       theConfig.metrics.setup()
-
        rtr.limiter = httpserver.NewRequestLimiter(theConfig.MaxRequests, rtr)
 
-       mux := http.NewServeMux()
-       mux.Handle("/", theConfig.metrics.Instrument(
-               httpserver.AddRequestIDs(httpserver.LogRequests(nil, rtr.limiter))))
-       mux.HandleFunc("/metrics.json", theConfig.metrics.exportJSON)
-       mux.Handle("/metrics", theConfig.metrics.exportProm)
-
-       return mux
+       stack := httpserver.Instrument(nil, nil,
+               httpserver.AddRequestIDs(httpserver.LogRequests(nil, rtr.limiter)))
+       return stack.ServeAPI(stack)
 }
 
 // BadRequestHandler is a HandleFunc to address bad requests.
index bdab58927bdc243605b8cf1d7e95b34d2f610272..f78084dbcf0ce0c9ae5cf2c9f02724217f9ff2bb 100644 (file)
@@ -235,6 +235,10 @@ func (v *S3Volume) Start() error {
        }
 
        client := s3.New(auth, region)
+       if region.EC2Endpoint.Signer == aws.V4Signature {
+               // Currently affects only eu-central-1
+               client.Signature = aws.V4Signature
+       }
        client.ConnectTimeout = time.Duration(v.ConnectTimeout)
        client.ReadTimeout = time.Duration(v.ReadTimeout)
        v.bucket = &s3bucket{
index 4fb31a742e91ea76372f6fa8986748dde7414d21..f2c5735985a7131129c38469e2183ffb70ef10f6 100644 (file)
@@ -24,7 +24,7 @@ Gem::Specification.new do |s|
   s.files       = ["bin/arvados-login-sync", "agpl-3.0.txt"]
   s.executables << "arvados-login-sync"
   s.required_ruby_version = '>= 2.1.0'
-  s.add_runtime_dependency 'arvados', '~> 0.1', '>= 0.1.20150615153458'
+  s.add_runtime_dependency 'arvados', '~> 1.2.0', '>= 1.2.0'
   s.homepage    =
     'https://arvados.org'
 end
index b4fec5096d5a8e2767169fce3910f45136ddaee3..77c515d565e8113681c2dad610d103fae4156a15 100644 (file)
@@ -243,12 +243,15 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         return super(ComputeNodeShutdownActor, self)._finished()
 
     def cancel_shutdown(self, reason, **kwargs):
+        if not self.cancellable:
+            return False
         if self.cancel_reason is not None:
             # already cancelled
-            return
+            return False
         self.cancel_reason = reason
         self._logger.info("Shutdown cancelled: %s.", reason)
         self._finished(success_flag=False)
+        return True
 
     def _cancel_on_exception(orig_func):
         @functools.wraps(orig_func)
@@ -282,6 +285,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         self._logger.info("Starting shutdown")
         arv_node = self._arvados_node()
         if self._cloud.destroy_node(self.cloud_node):
+            self.cancellable = False
             self._logger.info("Shutdown success")
             if arv_node:
                 self._later.clean_arvados_node(arv_node)
@@ -335,7 +339,7 @@ class ComputeNodeMonitorActor(config.actor_class):
     def __init__(self, cloud_node, cloud_node_start_time, shutdown_timer,
                  timer_actor, update_actor, cloud_client,
                  arvados_node=None, poll_stale_after=600, node_stale_after=3600,
-                 boot_fail_after=1800
+                 boot_fail_after=1800, consecutive_idle_count=0
     ):
         super(ComputeNodeMonitorActor, self).__init__()
         self._later = self.actor_ref.tell_proxy()
@@ -350,6 +354,8 @@ class ComputeNodeMonitorActor(config.actor_class):
         self.boot_fail_after = boot_fail_after
         self.subscribers = set()
         self.arvados_node = None
+        self.consecutive_idle_count = consecutive_idle_count
+        self.consecutive_idle = 0
         self._later.update_arvados_node(arvados_node)
         self.last_shutdown_opening = None
         self._later.consider_shutdown()
@@ -456,8 +462,14 @@ class ComputeNodeMonitorActor(config.actor_class):
         else:
             boot_grace = "boot exceeded"
 
-        # API server side not implemented yet.
-        idle_grace = 'idle exceeded'
+        if crunch_worker_state == "idle":
+            # Must report as "idle" at least "consecutive_idle_count" times
+            if self.consecutive_idle < self.consecutive_idle_count:
+                idle_grace = 'idle wait'
+            else:
+                idle_grace = 'idle exceeded'
+        else:
+            idle_grace = 'not idle'
 
         node_state = (crunch_worker_state, window, boot_grace, idle_grace)
         t = transitions[node_state]
@@ -517,4 +529,8 @@ class ComputeNodeMonitorActor(config.actor_class):
         if arvados_node is not None:
             self.arvados_node = arvados_node
             self._update.sync_node(self.cloud_node, self.arvados_node)
+            if self.arvados_node['crunch_worker_state'] == "idle":
+                self.consecutive_idle += 1
+            else:
+                self.consecutive_idle = 0
             self._later.consider_shutdown()
index 9e38d13eb7f4788d8af485a7e5b4b6589c9f324c..48d19f592bbdb0b87d905bac377c849000b59ef1 100644 (file)
@@ -35,8 +35,10 @@ class BaseComputeNodeDriver(RetryMixin):
         return driver_class(**auth_kwargs)
 
     @RetryMixin._retry()
-    def _set_sizes(self):
-        self.sizes = {sz.id: sz for sz in self.real.list_sizes()}
+    def sizes(self):
+        if self._sizes is None:
+            self._sizes = {sz.id: sz for sz in self.real.list_sizes()}
+        return self._sizes
 
     def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
                  driver_class, retry_wait=1, max_retry_wait=180):
@@ -73,7 +75,7 @@ class BaseComputeNodeDriver(RetryMixin):
                 if new_pair is not None:
                     self.create_kwargs[new_pair[0]] = new_pair[1]
 
-        self._set_sizes()
+        self._sizes = None
 
     def _init_ping_host(self, ping_host):
         self.ping_host = ping_host
index ae554327ca20d929a92b595da54e32ba05e6485f..35c8b5a8c97db40b4d15a7ad20e20acaefdc605a 100644 (file)
@@ -89,8 +89,8 @@ echo %s > /var/tmp/arv-node-data/meta-data/instance-type
         for n in nodes:
             # Need to populate Node.size
             if not n.size:
-                n.size = self.sizes[n.extra["properties"]["hardwareProfile"]["vmSize"]]
-            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size')
+                n.size = self.sizes()[n.extra["properties"]["hardwareProfile"]["vmSize"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size') or n.size.id
         return nodes
 
     def broken(self, cloud_node):
index 2829b9c0b1bead892aa83d10cf01f6aaa4f7e9a3..14845ac12fe31414e84749190593556516b6b224 100644 (file)
@@ -41,7 +41,7 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         nodelist = super(ComputeNodeDriver, self).list_nodes()
         for node in nodelist:
             self._ensure_private_ip(node)
-            node.size = self.sizes["1"]
+            node.size = self.sizes()["1"]
         return nodelist
 
     def create_node(self, size, arvados_node):
index 2b1564279717d0d0159bd3c07b307b4b5675c98f..418a9f9d85499b64e592d614cfa793af70e694ca 100644 (file)
@@ -110,8 +110,8 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         nodes = super(ComputeNodeDriver, self).list_nodes()
         for n in nodes:
             if not n.size:
-                n.size = self.sizes[n.extra["instance_type"]]
-            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size')
+                n.size = self.sizes()[n.extra["instance_type"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size') or n.size.id
         return nodes
 
     @classmethod
index be39ecba6bf4b3cfb4ef6e0e5dd7c1168dc86ddd..23a1017316656cfe4323646ac9bba5e793f915cb 100644 (file)
@@ -38,7 +38,6 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         super(ComputeNodeDriver, self).__init__(
             auth_kwargs, list_kwargs, create_kwargs,
             driver_class)
-        self._sizes_by_id = {sz.id: sz for sz in self.sizes.itervalues()}
         self._disktype_links = {dt.name: self._object_link(dt)
                                 for dt in self.real.ex_list_disktypes()}
 
@@ -120,9 +119,9 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             # It's supposed to be the actual size object.  Check that it's not,
             # and monkeypatch the results when that's the case.
             if not hasattr(node.size, 'id'):
-                node.size = self._sizes_by_id[node.size]
+                node.size = self.sizes()[node.size]
             # Get arvados-assigned cloud size id
-            node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size')
+            node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size') or node.size.id
         return nodelist
 
     @classmethod
index 8c6757e51c451c253e5f16f57570d85bb52f3d7a..4857e891a77b010987221142ce7bb12ebaa048e8 100644 (file)
@@ -51,13 +51,17 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
             'Daemon': {'min_nodes': '0',
                        'max_nodes': '1',
                        'poll_time': '60',
+                       'cloudlist_poll_time': '0',
+                       'nodelist_poll_time': '0',
+                       'wishlist_poll_time': '0',
                        'max_poll_time': '300',
                        'poll_stale_after': '600',
                        'max_total_price': '0',
                        'boot_fail_after': str(sys.maxint),
                        'node_stale_after': str(60 * 60 * 2),
                        'watchdog': '600',
-                       'node_mem_scaling': '0.95'},
+                       'node_mem_scaling': '0.95',
+                       'consecutive_idle_count': '2'},
             'Manage': {'address': '127.0.0.1',
                        'port': '-1',
                        'ManagementToken': ''},
index 911798e08f937ded2d10e30b8b8fe7d64edd8f6b..1edf4dc4792e5b7a9f638a17599c66c76410ab84 100644 (file)
@@ -112,7 +112,8 @@ class NodeManagerDaemonActor(actor_class):
                  node_setup_class=dispatch.ComputeNodeSetupActor,
                  node_shutdown_class=dispatch.ComputeNodeShutdownActor,
                  node_actor_class=dispatch.ComputeNodeMonitorActor,
-                 max_total_price=0):
+                 max_total_price=0,
+                 consecutive_idle_count=1):
         super(NodeManagerDaemonActor, self).__init__()
         self._node_setup = node_setup_class
         self._node_shutdown = node_shutdown_class
@@ -133,6 +134,7 @@ class NodeManagerDaemonActor(actor_class):
         self.poll_stale_after = poll_stale_after
         self.boot_fail_after = boot_fail_after
         self.node_stale_after = node_stale_after
+        self.consecutive_idle_count = consecutive_idle_count
         self.last_polls = {}
         for poll_name in ['server_wishlist', 'arvados_nodes', 'cloud_nodes']:
             poll_actor = locals()[poll_name + '_actor']
@@ -173,7 +175,8 @@ class NodeManagerDaemonActor(actor_class):
             poll_stale_after=self.poll_stale_after,
             node_stale_after=self.node_stale_after,
             cloud_client=self._cloud_driver,
-            boot_fail_after=self.boot_fail_after)
+            boot_fail_after=self.boot_fail_after,
+            consecutive_idle_count=self.consecutive_idle_count)
         actorTell = actor.tell_proxy()
         actorTell.subscribe(self._later.node_can_shutdown)
         self._cloud_nodes_actor.subscribe_to(cloud_node.id,
@@ -390,22 +393,25 @@ class NodeManagerDaemonActor(actor_class):
         nodes_wanted = self._nodes_wanted(cloud_size)
         if nodes_wanted < 1:
             return None
-        arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
-        self._logger.info("Want %i more %s nodes.  Booting a node.",
-                          nodes_wanted, cloud_size.id)
-        new_setup = self._node_setup.start(
-            timer_actor=self._timer,
-            arvados_client=self._new_arvados(),
-            arvados_node=arvados_node,
-            cloud_client=self._new_cloud(),
-            cloud_size=self.server_calculator.find_size(cloud_size.id)).proxy()
-        self.booting[new_setup.actor_ref.actor_urn] = new_setup
-        self.sizes_booting[new_setup.actor_ref.actor_urn] = cloud_size
-
-        if arvados_node is not None:
-            self.arvados_nodes[arvados_node['uuid']].assignment_time = (
-                time.time())
-        new_setup.subscribe(self._later.node_setup_finished)
+
+        if not self.cancel_node_shutdown(cloud_size):
+            arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
+            self._logger.info("Want %i more %s nodes.  Booting a node.",
+                              nodes_wanted, cloud_size.id)
+            new_setup = self._node_setup.start(
+                timer_actor=self._timer,
+                arvados_client=self._new_arvados(),
+                arvados_node=arvados_node,
+                cloud_client=self._new_cloud(),
+                cloud_size=self.server_calculator.find_size(cloud_size.id))
+            self.booting[new_setup.actor_urn] = new_setup.proxy()
+            self.sizes_booting[new_setup.actor_urn] = cloud_size
+
+            if arvados_node is not None:
+                self.arvados_nodes[arvados_node['uuid']].assignment_time = (
+                    time.time())
+            new_setup.tell_proxy().subscribe(self._later.node_setup_finished)
+
         if nodes_wanted > 1:
             self._later.start_node(cloud_size)
 
@@ -456,13 +462,28 @@ class NodeManagerDaemonActor(actor_class):
         if (nodes_excess < 1) or not self.booting:
             return None
         for key, node in self.booting.iteritems():
-            if node and node.cloud_size.get().id == size.id and node.stop_if_no_cloud_node().get():
-                del self.booting[key]
-                del self.sizes_booting[key]
+            try:
+                if node and node.cloud_size.get().id == size.id and node.stop_if_no_cloud_node().get(2):
+                    del self.booting[key]
+                    del self.sizes_booting[key]
+                    if nodes_excess > 1:
+                        self._later.stop_booting_node(size)
+                    return
+            except pykka.Timeout:
+                pass
 
-                if nodes_excess > 1:
-                    self._later.stop_booting_node(size)
-                break
+    @_check_poll_freshness
+    def cancel_node_shutdown(self, size):
+        # Go through shutdown actors and see if there are any of the appropriate size that can be cancelled
+        for record in self.cloud_nodes.nodes.itervalues():
+            try:
+                if (record.shutdown_actor is not None and
+                    record.cloud_node.size.id == size.id and
+                    record.shutdown_actor.cancel_shutdown("Node size is in wishlist").get(2)):
+                        return True
+            except (pykka.ActorDeadError, pykka.Timeout) as e:
+                pass
+        return False
 
     def _begin_node_shutdown(self, node_actor, cancellable):
         cloud_node_obj = node_actor.cloud_node.get()
index 1020b4a80ced597911b886c40789dea39f1d5598..7ca9c9553721f0fa1291273bfeff5f5f9f7d0e78 100644 (file)
@@ -37,7 +37,10 @@ class ServerCalculator(object):
             self.scratch = 0
             self.cores = 0
             self.bandwidth = 0
-            self.price = 9999999
+            # The price is multiplied by 1000 to get the node weight.
+            # The maximum node weight is                  4294967280,
+            # so use the invalid node weight 4294967 * 1000 = 4294967000.
+            self.price = 4294967
             self.preemptible = False
             self.extra = {}
 
index f65e0806ec56df96f81c5bed87f657b15355fdec..34ea9adb3da4dd17c8aa48505dff72b12cb8b936 100644 (file)
@@ -83,16 +83,20 @@ def launch_pollers(config, server_calculator):
     poll_time = config.getfloat('Daemon', 'poll_time')
     max_poll_time = config.getint('Daemon', 'max_poll_time')
 
+    cloudlist_poll_time = config.getfloat('Daemon', 'cloudlist_poll_time') or poll_time
+    nodelist_poll_time = config.getfloat('Daemon', 'nodelist_poll_time') or poll_time
+    wishlist_poll_time = config.getfloat('Daemon', 'wishlist_poll_time') or poll_time
+
     timer = TimedCallBackActor.start(poll_time / 10.0).tell_proxy()
     cloud_node_poller = CloudNodeListMonitorActor.start(
-        config.new_cloud_client(), timer, server_calculator, poll_time, max_poll_time).tell_proxy()
+        config.new_cloud_client(), timer, server_calculator, cloudlist_poll_time, max_poll_time).tell_proxy()
     arvados_node_poller = ArvadosNodeListMonitorActor.start(
-        config.new_arvados_client(), timer, poll_time, max_poll_time).tell_proxy()
+        config.new_arvados_client(), timer, nodelist_poll_time, max_poll_time).tell_proxy()
     job_queue_poller = JobQueueMonitorActor.start(
         config.new_arvados_client(), timer, server_calculator,
         config.getboolean('Arvados', 'jobs_queue'),
         config.getboolean('Arvados', 'slurm_queue'),
-        poll_time, max_poll_time
+        wishlist_poll_time, max_poll_time
     ).tell_proxy()
     return timer, cloud_node_poller, arvados_node_poller, job_queue_poller
 
@@ -144,7 +148,8 @@ def main(args=None):
             config.getint('Daemon', 'boot_fail_after'),
             config.getint('Daemon', 'node_stale_after'),
             node_setup, node_shutdown, node_monitor,
-            max_total_price=config.getfloat('Daemon', 'max_total_price')).tell_proxy()
+            max_total_price=config.getfloat('Daemon', 'max_total_price'),
+            consecutive_idle_count=config.getint('Daemon', 'consecutive_idle_count'),).tell_proxy()
 
         watchdog = WatchdogActor.start(config.getint('Daemon', 'watchdog'),
                             cloud_node_poller.actor_ref,
index efd2445175589f761165aa7ff5746be7ab4b6f44..8ba68018d5840466698cf8a0cf19546887bf143b 100644 (file)
@@ -65,6 +65,15 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (so a node reported as idle may have been idle for as little
+# as 1 second).  With a 60-second poll period, three consecutive
+# "idle" status updates suggest the node has been idle for at least
+# 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
@@ -74,6 +83,7 @@ node_mem_scaling = 0.95
 # File path for Certificate Authorities
 certs_file = /etc/ssl/certs/ca-certificates.crt
 
+
 [Logging]
 # Log file path
 file = /var/log/arvados/node-manager.log
index 117f9b224bff2f4ca567b64e09de81b4ed34c692..f5329ebe16213ad1d7fa37aff09212efce299603 100644 (file)
@@ -65,6 +65,15 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (so a node reported as idle may have been idle for as little
+# as 1 second).  With a 60-second poll period, three consecutive
+# "idle" status updates suggest the node has been idle for at least
+# 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
index 8a244a444487052cd1543d9135448703741ca3e0..acd3fd1e3e6ab6a36720670f439cc2061f2c574f 100644 (file)
@@ -54,6 +54,15 @@ poll_stale_after = 600
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (so a node reported as idle may have been idle for as little
+# as 1 second).  With a 60-second poll period, three consecutive
+# "idle" status updates suggest the node has been idle for at least
+# 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
index a11a6d807ef9348d9a17deac9e0c2092ed929f46..e5deac85d257057292466eadf6fae1e7c5edb8c3 100644 (file)
 # a snapshot of internal state.
 
 # Management server listening address (default 127.0.0.1)
-#address = 0.0.0.0
+address = 0.0.0.0
 
 # Management server port number (default -1, server is disabled)
-#port = 8989
+port = 8989
+
+ManagementToken = xxx
 
 [Daemon]
 # The dispatcher can customize the start and stop procedure for
index 69a29019e78cb1ab7de2a8fdc41de85c8abc645a..1ba2957ee5544c9346bbb00bc3f0e2ad9e51276a 100755 (executable)
@@ -115,7 +115,10 @@ def node_shutdown(g):
     global compute_nodes
     if g.group(1) in compute_nodes:
         del compute_nodes[g.group(1)]
-    return 0
+        return 0
+    else:
+        return 1
+
 
 def jobs_req(g):
     global all_jobs
index 778c9aeaf5ffdbbcecaf90ac8072ace7210ce4a5..aee3cbdac8928cb8237357b9250d595bba349ba9 100644 (file)
@@ -424,7 +424,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.make_actor()
         self.shutdowns._set_state(True, 600)
         self.assertEquals(self.node_actor.shutdown_eligible().get(self.TIMEOUT),
-                          (False, "node state is ('unpaired', 'open', 'boot wait', 'idle exceeded')"))
+                          (False, "node state is ('unpaired', 'open', 'boot wait', 'not idle')"))
 
     def test_shutdown_when_invalid_cloud_node_size(self):
         self.make_mocks(1)
@@ -438,7 +438,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
     def test_shutdown_without_arvados_node(self):
         self.make_actor(start_time=0)
         self.shutdowns._set_state(True, 600)
-        self.assertEquals((True, "node state is ('down', 'open', 'boot exceeded', 'idle exceeded')"),
+        self.assertEquals((True, "node state is ('down', 'open', 'boot exceeded', 'not idle')"),
                           self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_shutdown_missing(self):
@@ -447,7 +447,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
                                               last_ping_at='1970-01-01T01:02:03.04050607Z')
         self.make_actor(10, arv_node)
         self.shutdowns._set_state(True, 600)
-        self.assertEquals((True, "node state is ('down', 'open', 'boot wait', 'idle exceeded')"),
+        self.assertEquals((True, "node state is ('down', 'open', 'boot wait', 'not idle')"),
                           self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_shutdown_running_broken(self):
@@ -456,7 +456,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.make_actor(12, arv_node)
         self.shutdowns._set_state(True, 600)
         self.cloud_client.broken.return_value = True
-        self.assertEquals((True, "node state is ('down', 'open', 'boot wait', 'idle exceeded')"),
+        self.assertEquals((True, "node state is ('down', 'open', 'boot wait', 'not idle')"),
                           self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_shutdown_missing_broken(self):
@@ -466,7 +466,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.make_actor(11, arv_node)
         self.shutdowns._set_state(True, 600)
         self.cloud_client.broken.return_value = True
-        self.assertEquals(self.node_actor.shutdown_eligible().get(self.TIMEOUT), (True, "node state is ('down', 'open', 'boot wait', 'idle exceeded')"))
+        self.assertEquals(self.node_actor.shutdown_eligible().get(self.TIMEOUT), (True, "node state is ('down', 'open', 'boot wait', 'not idle')"))
 
     def test_no_shutdown_when_window_closed(self):
         self.make_actor(3, testutil.arvados_node_mock(3, job_uuid=None))
@@ -476,7 +476,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
     def test_no_shutdown_when_node_running_job(self):
         self.make_actor(4, testutil.arvados_node_mock(4, job_uuid=True))
         self.shutdowns._set_state(True, 600)
-        self.assertEquals((False, "node state is ('busy', 'open', 'boot wait', 'idle exceeded')"),
+        self.assertEquals((False, "node state is ('busy', 'open', 'boot wait', 'not idle')"),
                           self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_shutdown_when_node_state_unknown(self):
@@ -490,7 +490,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.make_actor(5, testutil.arvados_node_mock(
             5, crunch_worker_state='fail'))
         self.shutdowns._set_state(True, 600)
-        self.assertEquals((True, "node state is ('fail', 'open', 'boot wait', 'idle exceeded')"),
+        self.assertEquals((True, "node state is ('fail', 'open', 'boot wait', 'not idle')"),
                           self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_no_shutdown_when_node_state_stale(self):
index d09cbf72359610ac08afa428e39f024d3086835c..1b6e4ca8da4aa24bfb45f8382e7b5d7700cd2bf2 100644 (file)
@@ -620,10 +620,26 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         monitor = self.monitor_list()[0].proxy()
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.assertTrue(self.node_shutdown.start.called)
+        getmock = mock.MagicMock()
+        getmock.get.return_value = False
+        self.last_shutdown.cancel_shutdown.return_value = getmock
         self.daemon.update_server_wishlist(
             [testutil.MockSize(6)]).get(self.TIMEOUT)
         self.busywait(lambda: self.node_setup.start.called)
 
+    def test_nodes_shutting_down_cancelled(self):
+        size = testutil.MockSize(6)
+        cloud_node = testutil.cloud_node_mock(6, size=size)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(6, crunch_worker_state='down')],
+                         avail_sizes=[(size, {"cores":1})])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(6)]).get(self.TIMEOUT)
+        self.busywait(lambda: self.last_shutdown.cancel_shutdown.called)
+
     def test_nodes_shutting_down_not_replaced_at_max_nodes(self):
         cloud_node = testutil.cloud_node_mock(7)
         self.make_daemon([cloud_node], [testutil.arvados_node_mock(7)],
index d4cf5114399576387eddd8f169005d5a142a8d0f..ead1ec20c6a1de471e82f2e82ad0c24b5e3a4b93 100644 (file)
@@ -12,7 +12,7 @@ import (
 
 type wsConfig struct {
        Client       arvados.Client
-       Postgres     pgConfig
+       Postgres     arvados.PostgreSQLConnection
        PostgresPool int
        Listen       string
        LogLevel     string
@@ -30,7 +30,7 @@ func defaultConfig() wsConfig {
                Client: arvados.Client{
                        APIHost: "localhost:443",
                },
-               Postgres: pgConfig{
+               Postgres: arvados.PostgreSQLConnection{
                        "dbname":                    "arvados_production",
                        "user":                      "arvados",
                        "password":                  "xyzzy",
index 9acfca50e4db639c04dda22a7040d2e91a1c1c4c..309dab7a403e54cc5cb24daaf312dc1b5baa72f2 100644 (file)
@@ -8,7 +8,6 @@ import (
        "context"
        "database/sql"
        "strconv"
-       "strings"
        "sync"
        "sync/atomic"
        "time"
@@ -17,21 +16,6 @@ import (
        "github.com/lib/pq"
 )
 
-type pgConfig map[string]string
-
-func (c pgConfig) ConnectionString() string {
-       s := ""
-       for k, v := range c {
-               s += k
-               s += "='"
-               s += strings.Replace(
-                       strings.Replace(v, `\`, `\\`, -1),
-                       `'`, `\'`, -1)
-               s += "' "
-       }
-       return s
-}
-
 type pgEventSource struct {
        DataSource   string
        MaxOpenConns int
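
The deleted pgConfig helper assembled a libpq connection string by hand, quoting each value; the ws service now delegates that to arvados.PostgreSQLConnection's String method, as the server and test hunks that follow show. A usage sketch (all credentials are placeholders):

    package main

    import (
            "database/sql"

            "git.curoverse.com/arvados.git/sdk/go/arvados"
            _ "github.com/lib/pq"
    )

    func main() {
            cfg := arvados.PostgreSQLConnection{
                    "dbname":   "arvados_production",
                    "user":     "arvados",
                    "password": "xyzzy", // placeholder
                    "host":     "localhost",
            }
            db, err := sql.Open("postgres", cfg.String())
            if err != nil {
                    panic(err)
            }
            defer db.Close()
    }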
index ea6063a0c3a718dde7baa52a7a9aa5504b0e5f16..ac5d130d61bdd85dfc568bf91c37b983994ae40c 100644 (file)
@@ -7,10 +7,12 @@ package main
 import (
        "database/sql"
        "fmt"
+       "os"
+       "path/filepath"
        "sync"
        "time"
 
-       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        check "gopkg.in/check.v1"
 )
 
@@ -18,30 +20,20 @@ var _ = check.Suite(&eventSourceSuite{})
 
 type eventSourceSuite struct{}
 
-func testDBConfig() pgConfig {
-       var railsDB struct {
-               Test struct {
-                       Database string
-                       Username string
-                       Password string
-                       Host     string
-               }
-       }
-       err := config.LoadFile(&railsDB, "../api/config/database.yml")
+func testDBConfig() arvados.PostgreSQLConnection {
+       cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
        if err != nil {
                panic(err)
        }
-       cfg := pgConfig{
-               "dbname":   railsDB.Test.Database,
-               "host":     railsDB.Test.Host,
-               "password": railsDB.Test.Password,
-               "user":     railsDB.Test.Username,
+       cc, err := cfg.GetCluster("zzzzz")
+       if err != nil {
+               panic(err)
        }
-       return cfg
+       return cc.PostgreSQL.Connection
 }
 
 func testDB() *sql.DB {
-       db, err := sql.Open("postgres", testDBConfig().ConnectionString())
+       db, err := sql.Open("postgres", testDBConfig().String())
        if err != nil {
                panic(err)
        }
@@ -52,7 +44,7 @@ func (*eventSourceSuite) TestEventSource(c *check.C) {
        cfg := testDBConfig()
        db := testDB()
        pges := &pgEventSource{
-               DataSource: cfg.ConnectionString(),
+               DataSource: cfg.String(),
                QueueSize:  4,
        }
        go pges.Run()
index 36ce7ae59f15cf9ec2a6fd1609aad1fdd2acd23d..eda7ff2a486a0f9ae59ddc12bf696e3e7a8059c5 100644 (file)
@@ -48,7 +48,7 @@ func (srv *server) setup() {
 
        srv.listener = ln
        srv.eventSource = &pgEventSource{
-               DataSource:   srv.wsConfig.Postgres.ConnectionString(),
+               DataSource:   srv.wsConfig.Postgres.String(),
                MaxOpenConns: srv.wsConfig.PostgresPool,
                QueueSize:    srv.wsConfig.ServerEventQueue,
        }
index 1ac0e76c373cd3240175a5c3c81c00aeb44b138e..374692689a7027544bd26e4233c4b65dd4e00189 100644 (file)
@@ -89,7 +89,7 @@ ADD fuse.conf /etc/
 ADD crunch-setup.sh gitolite.rc \
     keep-setup.sh common.sh createusers.sh \
     logger runsu.sh waitforpostgres.sh \
-    application_yml_override.py api-setup.sh \
+    yml_override.py api-setup.sh \
     go-setup.sh \
     /usr/local/lib/arvbox/
 
index 1618c11e42d4e2378cc7ca076f10b2ba67a9b44f..6dd6a65695559a1e0024a0d2af4693632bf6da2e 100755 (executable)
@@ -33,6 +33,11 @@ if ! test -s /var/lib/arvados/blob_signing_key ; then
 fi
 blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
 
+if ! test -s /var/lib/arvados/management_token ; then
+    ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/management_token
+fi
+management_token=$(cat /var/lib/arvados/management_token)
+
 # self signed key will be created by SSO server script.
 test -s /var/lib/arvados/self-signed.key
 
@@ -66,9 +71,10 @@ $RAILS_ENV:
   default_collection_replication: 1
   docker_image_formats: ["v2"]
   keep_web_service_url: http://$localip:${services[keep-web]}/
+  ManagementToken: $management_token
 EOF
 
-(cd config && /usr/local/lib/arvbox/application_yml_override.py)
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
 
 if ! test -f /var/lib/arvados/api_database_pw ; then
     ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
index 319889baef28152a1a933bdc25cab7aa85491e1a..a82a964ea9c2f7cec5f16fd474664e89acc2a45c 100644 (file)
@@ -20,7 +20,9 @@ fi
 declare -A services
 services=(
   [workbench]=80
-  [api]=8000
+  [api]=8004
+  [controller]=8003
+  [controller-ssl]=8000
   [sso]=8900
   [composer]=4200
   [arv-git-httpd]=9001
index b3ec5cd10441f695522c50500a2e64fd3f6d8f5d..a36e5891bcef2c717ff4a0e1a2b51b3036428ed9 100755 (executable)
@@ -19,7 +19,7 @@ else
   RAILS_ENV=development
 fi
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /usr/src/arvados/services/api/superuser_token)
 export CRUNCH_JOB_BIN=/usr/src/arvados/sdk/cli/bin/crunch-job
index 8ef66a60687ce817e46308311dbcd4d80c6691ad..f16cb44b7f56de46ab0e4be35ade64a4f3693ff1 100755 (executable)
@@ -19,7 +19,7 @@ fi
 
 mkdir -p /var/lib/arvados/$1
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 
@@ -42,13 +42,20 @@ else
     echo $UUID > /var/lib/arvados/$1-uuid
 fi
 
+management_token=$(cat /var/lib/arvados/management_token)
+
 set +e
 killall -HUP keepproxy
 
-exec /usr/local/bin/keepstore \
-     -listen=:$2 \
-     -enforce-permissions=true \
-     -blob-signing-key-file=/var/lib/arvados/blob_signing_key \
-     -data-manager-token-file=/var/lib/arvados/superuser_token \
-     -max-buffers=20 \
-     -volume=/var/lib/arvados/$1
+cat >/var/lib/arvados/$1.yml <<EOF
+Listen: ":$2"
+BlobSigningKeyFile: /var/lib/arvados/blob_signing_key
+SystemAuthTokenFile: /var/lib/arvados/superuser_token
+ManagementToken: $management_token
+MaxBuffers: 20
+Volumes:
+  - Type: Directory
+    Root: /var/lib/arvados/$1
+EOF
+
+exec /usr/local/bin/keepstore -config=/var/lib/arvados/$1.yml
index f7ab6be6a03549fb84ead7628ebd98648f4e5750..f052b5d636cf6095ce12b004d40ec87d4fd2812c 100755 (executable)
@@ -31,6 +31,4 @@ if test "$1" = "--only-setup" ; then
     exit
 fi
 
-exec bundle exec passenger start --port=${services[api]} \
-                  --ssl --ssl-certificate=/var/lib/arvados/self-signed.pem \
-                  --ssl-certificate-key=/var/lib/arvados/self-signed.key
+exec bundle exec passenger start --port=${services[api]}
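
With TLS termination moved to the new nginx service, Passenger serves plain HTTP on the internal Rails port and the self-signed certificate arguments are gone. Inside the container the Rails server can still be reached directly, e.g.:

    # Plain-HTTP check against the internal Rails port (no TLS here anymore):
    curl -s "http://localhost:${services[api]}/discovery/v1/apis/arvados/v1/rest" >/dev/null && echo rails up
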
index 1383f7140f4ed961637d8c8ef160bfb3b575d317..9339f2328c6a9ee8a5e3058e537cb212ddbd0c00 100755 (executable)
@@ -16,7 +16,7 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export PATH="$PATH:/var/lib/arvados/git/bin"
 cd ~git
index abd350f073c0f449b37b25362185b9b24a963136..f00b7f776ae5748a944cf44006b29e25a023ceda 100755 (executable)
@@ -18,5 +18,5 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-echo "apiEndPoint: https://${localip}:${services[api]}" > /usr/src/composer/src/composer.yml
+echo "apiEndPoint: https://${localip}:${services[controller-ssl]}" > /usr/src/composer/src/composer.yml
 exec node_modules/.bin/ng serve --host 0.0.0.0 --port 4200 --env=webdev
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/log/run b/tools/arvbox/lib/arvbox/docker/service/controller/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/run b/tools/arvbox/lib/arvbox/docker/service/controller/run
new file mode 100755 (executable)
index 0000000..c2afc17
--- /dev/null
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/cmd/arvados-server"
+install $GOPATH/bin/arvados-server /usr/local/bin
+(cd /usr/local/bin && ln -sf arvados-server arvados-controller)
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+mkdir -p /etc/arvados
+
+cat >/var/lib/arvados/cluster_config.yml <<EOF
+Clusters:
+  ${uuid_prefix}:
+    NodeProfiles:
+      "*":
+        arvados-controller:
+          Listen: ":${services[controller]}" # choose a port
+        arvados-api-server:
+          Listen: ":${services[api]}" # must match Rails server port in your Nginx config
+    PostgreSQL:
+      ConnectionPool: 32 # max concurrent connections per arvados server daemon
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        Host: localhost
+        User: arvados
+        Password: ${database_pw}
+        DBName: arvados_development
+        client_encoding: utf8
+EOF
+
+/usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
+
+cp /var/lib/arvados/cluster_config.yml /etc/arvados/config.yml
+
+exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-controller
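
Because yml_override.py runs before the config is copied into /etc/arvados, a local cluster_config.yml.override can adjust the generated settings without editing this script; the helper merges mappings recursively (see its diff below), taking effect on the next service start. For example:

    # Example override, merged key-by-key into cluster_config.yml:
    cat >/var/lib/arvados/cluster_config.yml.override <<EOF
    Clusters:
      ${uuid_prefix}:
        PostgreSQL:
          ConnectionPool: 64
    EOF
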
index decbccddeeecce662a0e353da0dd01c26ce91021..87c427cd29ae0140b34d086f788a2df6e7aa4a48 100755 (executable)
@@ -23,7 +23,7 @@ exec /usr/local/bin/crunch-run -container-enable-networking=always -container-ne
 EOF
 chmod +x /usr/local/bin/crunch-run.sh
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 
deleted file mode 120000 (symlink)
index a388c8b67bf16bbb16601007540e58f1372ebc85..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
new file mode 100755 (executable)
index 0000000000000000000000000000000000000000..e83db3f169c6379b4778aff6c2dd9b3a160ca913
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+sv stop doc
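
This run/run-service split (repeated for the ready service below) lets a one-shot task live under runit: the wrapper runs its $0-service sibling once under runsu.sh, then stops its own supervised service so runit does not respawn it. Afterwards the supervisor should report the service as intentionally stopped:

    # After the one-shot finishes, runit reports the service as down:
    sv status doc    # expected output begins with "down: doc:"
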
index 183ff2abfd5e4e162c5b0102c298991adeb33cdf..ea66cfd7a2155d3cb7db85e064e54d915bf7eb84 100755 (executable)
@@ -18,34 +18,5 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-set -u
-
-cat <<EOF >/var/lib/arvados/doc-nginx.conf
-worker_processes auto;
-pid /var/lib/arvados/doc-nginx.pid;
-
-error_log stderr;
-daemon off;
-
-events {
-       worker_connections 64;
-}
-
-http {
-     access_log off;
-     include /etc/nginx/mime.types;
-     default_type application/octet-stream;
-     server {
-            listen ${services[doc]} default_server;
-            listen [::]:${services[doc]} default_server;
-            root /usr/src/arvados/doc/.site;
-            index index.html;
-            server_name _;
-     }
-}
-EOF
-
 cd /usr/src/arvados/doc
-bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[api]} arvados_workbench_host=http://$localip
-
-exec nginx -c /var/lib/arvados/doc-nginx.conf
+bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
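
The doc service no longer runs its own nginx; it only builds the static site, which the shared nginx service (added below) serves on the doc port. A quick hedged check that the build produced the site root nginx expects:

    # The shared nginx server block points at this directory:
    test -f /usr/src/arvados/doc/.site/index.html && echo doc site built
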
index a38e49a0deadb9b36e771e69f6ca98f6e9b87ff4..eea0e120b29917d31f25016da47e94394804a8c5 100755 (executable)
@@ -10,7 +10,7 @@ set -eux -o pipefail
 
 mkdir -p /var/lib/arvados/git
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 
@@ -112,7 +112,7 @@ cat > config/arvados-clients.yml <<EOF
 $RAILS_ENV:
   gitolite_url: /var/lib/arvados/git/repositories/gitolite-admin.git
   gitolite_tmp: /var/lib/arvados/git
-  arvados_api_host: $localip:${services[api]}
+  arvados_api_host: $localip:${services[controller-ssl]}
   arvados_api_token: "$ARVADOS_API_TOKEN"
   arvados_api_host_insecure: true
   gitolite_arvados_git_user_key: "$git_user_key"
index 70f2470b9fe7decd8a03efdfb09d5da8ab52f372..b539b6ae1eb5405d88e6e65044a73a34c548b721 100755 (executable)
@@ -16,7 +16,7 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 
index 199247b7a0e2bfc6dcabdd929dc5177275f730bc..bf802d45f3d8bdb9f13868bb39f66136ab34f42c 100755 (executable)
@@ -17,7 +17,7 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/log/run b/tools/arvbox/lib/arvbox/docker/service/nginx/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/run b/tools/arvbox/lib/arvbox/docker/service/nginx/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/run-service b/tools/arvbox/lib/arvbox/docker/service/nginx/run-service
new file mode 100755 (executable)
index 0000000..a55660e
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cat <<EOF >/var/lib/arvados/nginx.conf
+worker_processes auto;
+pid /var/lib/arvados/nginx.pid;
+
+error_log stderr;
+daemon off;
+
+events {
+       worker_connections 64;
+}
+
+http {
+     access_log off;
+     include /etc/nginx/mime.types;
+     default_type application/octet-stream;
+     server {
+            listen ${services[doc]} default_server;
+            listen [::]:${services[doc]} default_server;
+            root /usr/src/arvados/doc/.site;
+            index index.html;
+            server_name _;
+     }
+
+  upstream controller {
+    server localhost:${services[controller]};
+  }
+  server {
+    listen *:${services[controller-ssl]} ssl default_server;
+    server_name controller;
+    ssl_certificate "/var/lib/arvados/self-signed.pem";
+    ssl_certificate_key "/var/lib/arvados/self-signed.key";
+    location  / {
+      proxy_pass http://controller;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+}
+
+EOF
+
+exec nginx -c /var/lib/arvados/nginx.conf
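
The new nginx service takes over both roles: serving the generated docs and terminating TLS for the controller with the arvbox self-signed certificate, setting X-Forwarded-Proto https so the backends generate correct https URLs. A hedged smoke test from inside the container:

    # -k because the certificate is self-signed; any HTTP response proves the
    # proxy is listening and forwarding to the controller upstream.
    curl -skI "https://$localip:${services[controller-ssl]}/" >/dev/null && echo proxy up
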
deleted file mode 120000 (symlink)
index a388c8b67bf16bbb16601007540e58f1372ebc85..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
new file mode 100755 (executable)
index 0000000000000000000000000000000000000000..904476af742fdaa43b3a6266be590c79c8bc53de
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+sv stop ready
index 6d5fe243e09d6ec639f166ba618001e94bc7f4d8..7766fb7ec77b687c7339bfe04ca9d15677ac089a 100755 (executable)
@@ -45,7 +45,7 @@ if ! (ps x | grep -v grep | grep "crunch-dispatch") > /dev/null ; then
     waiting="$waiting crunch-dispatch"
 fi
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 
 vm_ok=0
@@ -93,5 +93,3 @@ echo "Your Arvados-in-a-box is ready!"
 echo "Workbench is running at http://$localip"
 
 rm -r /tmp/arvbox-ready
-
-sv stop ready >/dev/null
index ab20d5758c96a5f298e3ce25e5248611a3446e21..28140594926be5381737bd85adef390d5fb6f209 100755 (executable)
@@ -47,7 +47,7 @@ $RAILS_ENV:
   allow_account_registration: true
 EOF
 
-(cd config && /usr/local/lib/arvbox/application_yml_override.py)
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
 
 if ! test -f /var/lib/arvados/sso_database_pw ; then
     ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/sso_database_pw
index 2b571a820abe2bcb572977dd9ab62e62f413b5b2..863de73410236941e98e9b6a4f8fd747c84e8ae0 100755 (executable)
@@ -14,7 +14,7 @@ git config --system "credential.http://$localip:${services[arv-git-httpd]}/.help
 
 cd /usr/src/arvados/services/login-sync
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 export ARVADOS_VIRTUAL_MACHINE_UUID=$(cat /var/lib/arvados/vm-uuid)
index 134f767dc03da690542f7ff4e7204b65f6db6e11..065c557011c482c2c646b864d774dfccc6ad72b0 100755 (executable)
@@ -18,7 +18,7 @@ fi
 
 set -u
 
-export ARVADOS_API_HOST=$localip:${services[api]}
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
 export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
 export ARVADOS_VIRTUAL_MACHINE_UUID=$(cat /var/lib/arvados/vm-uuid)
index 2d01d907985c0c9ca6e0cf1e39969e1b4ce2d7fd..ebdf266c6b0a981710fa598f87968a2022047149 100755 (executable)
@@ -27,7 +27,7 @@ database_pw=$(cat /var/lib/arvados/api_database_pw)
 
 cat >/var/lib/arvados/arvados-ws.yml <<EOF
 Client:
-  APIHost: $localip:${services[api]}
+  APIHost: $localip:${services[controller-ssl]}
   Insecure: true
 Postgres:
   dbname: arvados_$RAILS_ENV
index 09d77e01d0f6a28548b32e44787a38a5b8a610ad..366096ace7a24b28f7286f24d13d941bde368846 100755 (executable)
@@ -40,8 +40,8 @@ fi
 cat >config/application.yml <<EOF
 $RAILS_ENV:
   secret_token: $secret_token
-  arvados_login_base: https://$localip:${services[api]}/login
-  arvados_v1_base: https://$localip:${services[api]}/arvados/v1
+  arvados_login_base: https://$localip:${services[controller-ssl]}/login
+  arvados_v1_base: https://$localip:${services[controller-ssl]}/arvados/v1
   arvados_insecure_https: true
   keep_web_download_url: http://$localip:${services[keep-web]}/c=%{uuid_or_pdh}
   keep_web_url: http://$localip:${services[keep-web]}/c=%{uuid_or_pdh}
@@ -52,4 +52,4 @@ EOF
 
 bundle exec rake assets:precompile
 
-(cd config && /usr/local/lib/arvbox/application_yml_override.py)
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
similarity index 79%
rename from tools/arvbox/lib/arvbox/docker/application_yml_override.py
rename to tools/arvbox/lib/arvbox/docker/yml_override.py
index bec067a8843fa6225239ae11d74d9d6a09a54b2f..b44acf4c3ab1fd9a3b4da433c936c6c079cebf6b 100755 (executable)
@@ -4,14 +4,17 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 import yaml
+import sys
+
+fn = sys.argv[1]
 
 try:
-    with open("application.yml.override") as f:
+    with open(fn+".override") as f:
         b = yaml.load(f)
 except IOError:
     exit()
 
-with open("application.yml") as f:
+with open(fn) as f:
     a = yaml.load(f)
 
 def recursiveMerge(a, b):
@@ -23,5 +26,5 @@ def recursiveMerge(a, b):
     else:
         return b
 
-with open("application.yml", "w") as f:
+with open(fn, "w") as f:
     yaml.dump(recursiveMerge(a, b), f)
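
The override helper is now generic: it takes the target file as its first argument, looks for <file>.override beside it, and merges recursively, so mappings merge key-by-key while any non-mapping value in the override replaces the original. A small worked example:

    # demo.yml ends up as "a: {x: 1, y: 3}": the mapping under "a" is merged,
    # and the scalar y is replaced by the override's value.
    cat >demo.yml <<EOF
    a:
      x: 1
      y: 2
    EOF
    cat >demo.yml.override <<EOF
    a:
      y: 3
    EOF
    /usr/local/lib/arvbox/yml_override.py demo.yml
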
index f18d4e464cdf34ed86c0be1a4631aadc598179df..aa6b2d773dfb1e47969794dd816a81b179539163 100644 (file)
                        "revision": "7a0fa49edf48165190530c675167e2f319a05268",
                        "revisionTime": "2018-06-25T08:58:08Z"
                },
+               {
+                       "checksumSHA1": "8UEp6v0Dczw/SlasE0DivB0mAHA=",
+                       "path": "github.com/gogo/protobuf/jsonpb",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
+               },
                {
                        "checksumSHA1": "wn2shNJMwRZpvuvkf1s7h0wvqHI=",
                        "path": "github.com/gogo/protobuf/proto",
                        "revisionTime": "2018-01-04T10:21:28Z"
                },
                {
-                       "checksumSHA1": "iVfdaLxIDjfk2KLP8dCMIbsxZZM=",
-                       "path": "github.com/golang/protobuf/jsonpb",
-                       "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
-                       "revisionTime": "2017-11-13T18:07:20Z"
+                       "checksumSHA1": "HPVQZu059/Rfw2bAWM538bVTcUc=",
+                       "path": "github.com/gogo/protobuf/sortkeys",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
                },
                {
-                       "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=",
-                       "path": "github.com/golang/protobuf/proto",
-                       "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
-                       "revisionTime": "2017-11-13T18:07:20Z"
+                       "checksumSHA1": "SkxU1+wPGUJyLyQENrZtr2/OUBs=",
+                       "path": "github.com/gogo/protobuf/types",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
                },
                {
-                       "checksumSHA1": "Ylq6kq3KWBy6mu68oyEwenhNMdg=",
-                       "path": "github.com/golang/protobuf/ptypes/struct",
+                       "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=",
+                       "path": "github.com/golang/protobuf/proto",
                        "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
                        "revisionTime": "2017-11-13T18:07:20Z"
                },