Merge branch 'master' of git.curoverse.com:arvados into 13076-r-autogen-api
author    Fuad Muhic <fmuhic@capeannenterprises.com>
          Mon, 26 Mar 2018 15:07:07 +0000 (17:07 +0200)
committer Fuad Muhic <fmuhic@capeannenterprises.com>
          Mon, 26 Mar 2018 15:22:16 +0000 (17:22 +0200)
Arvados-DCO-1.1-Signed-off-by: Fuad Muhic <fmuhic@capeannenterprises.com>

92 files changed:
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/models/container_work_unit.rb
apps/workbench/app/views/pipeline_instances/_running_component.html.erb
apps/workbench/app/views/work_units/_component_detail.html.erb
apps/workbench/config/application.default.yml
apps/workbench/config/initializers/mime_types.rb
apps/workbench/test/controllers/disabled_api_test.rb
apps/workbench/test/controllers/healthcheck_controller_test.rb
apps/workbench/test/integration/anonymous_access_test.rb
apps/workbench/test/test_helper.rb
build/build.list
build/package-testing/test-package-python-cwltest.sh [new symlink]
build/package-testing/test-package-python27-python-cwltest.sh [new file with mode: 0755]
build/package-testing/test-packages-debian9.sh [changed from file to symlink]
build/run-build-packages-one-target.sh
build/run-build-packages.sh
build/run-tests.sh
doc/_config.yml
doc/_includes/_tutorial_expectations.liquid
doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
doc/install/migrate-docker19.html.textile.liquid
doc/sdk/python/arvados-fuse.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/cookbook.html.textile.liquid
doc/user/cwl/cwl-extensions.html.textile.liquid
doc/user/tutorials/tutorial-keep-mount.html.textile.liquid
lib/dispatchcloud/node_size.go
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arv-cwl-schema.yml
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/crunch_script.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/secret_test_job.yml [new file with mode: 0644]
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/wf/secret_job.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/secret_wf.cwl [new file with mode: 0644]
sdk/go/arvados/keep_service.go
sdk/python/arvados/__init__.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/migrate19.py
sdk/python/arvados/commands/put.py
sdk/python/tests/test_arv_put.py
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/container_requests_controller.rb
services/api/app/controllers/arvados/v1/containers_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/models/arvados_model.rb
services/api/app/models/blob.rb
services/api/app/models/commit.rb
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/app/models/job.rb
services/api/config/initializers/lograge.rb
services/api/config/routes.rb
services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180313180114_change_container_priority_bigint.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/request_error.rb [new file with mode: 0644]
services/api/test/fixtures/containers.yml
services/api/test/functional/arvados/v1/container_requests_controller_test.rb
services/api/test/functional/arvados/v1/containers_controller_test.rb
services/api/test/helpers/container_test_helper.rb [new file with mode: 0644]
services/api/test/unit/container_request_test.rb
services/api/test/unit/container_test.rb
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-dispatch-slurm/slurm.go
services/crunch-dispatch-slurm/squeue.go
services/crunch-dispatch-slurm/squeue_test.go
services/crunch-run/crunchrun.go
services/crunch-run/crunchrun_test.go
services/crunch-run/logging_test.go
services/crunch-run/upload.go
services/crunch-run/upload_test.go
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keep-balance/block_state.go
services/keep-balance/keep_service.go
services/keepstore/azure_blob_volume.go
services/keepstore/handlers.go
services/keepstore/mounts_test.go
services/keepstore/pull_worker_test.go
services/keepstore/volume.go
services/nodemanager/arvnodeman/jobqueue.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/service/workbench/run
tools/arvbox/lib/arvbox/docker/service/workbench/run-service

index 788dcf65764b75d345e4be5a7a408259307edd19..1e9a5cc70d8ece81b21d6c15233499dd31a35002 100644 (file)
@@ -4,13 +4,16 @@
 
 source 'https://rubygems.org'
 
-gem 'rails', '~> 4.1'
+# A '~> 4.1' constraint lets rails 4.2.10 be installed even though the intent
+# was to stay on 4.1.x, so pin to '< 4.2' instead.
+gem 'rails', '< 4.2'
 gem 'arvados', '>= 0.1.20150511150219'
 
 gem 'activerecord-nulldb-adapter'
 gem 'multi_json'
 gem 'oj'
 gem 'sass'
+gem 'mime-types'
 
 # Note: keeping this out of the "group :assets" section "may" allow us
 # to use Coffescript for UJS responses. It also prevents a
@@ -37,11 +40,13 @@ group :development do
 end
 
 group :test, :diagnostics, :performance do
-  gem 'minitest', '~> 5.0'
-  gem 'selenium-webdriver'
-  gem 'capybara'
-  gem 'poltergeist'
-  gem 'headless'
+  gem 'minitest', '~> 5.10.3'
+  # Selenium-webdriver 3.x is producing problems like the one described here:
+  # https://stackoverflow.com/questions/41310586/ruby-selenium-webdriver-unable-to-find-mozilla-geckodriver
+  gem 'selenium-webdriver', '~> 2.53.1'
+  gem 'capybara', '~> 2.5.0'
+  gem 'poltergeist', '~> 1.5.1'
+  gem 'headless', '~> 1.0.2'
 end
 
 group :test, :performance do
@@ -61,7 +66,7 @@ gem 'bootstrap-sass', '~> 3.1.0'
 gem 'bootstrap-x-editable-rails'
 gem 'bootstrap-tab-history-rails'
 
-gem 'angularjs-rails'
+gem 'angularjs-rails', '~> 1.3.8'
 
 gem 'less'
 gem 'less-rails'
index 8ffc1d33b50c8175fe3804f281390dc09f67d2f8..93a92ce11cb85f57d43237c49d4fec7f07f9842f 100644 (file)
@@ -8,45 +8,46 @@ GIT
 GEM
   remote: https://rubygems.org/
   specs:
-    RedCloth (4.2.9)
-    actionmailer (4.1.12)
-      actionpack (= 4.1.12)
-      actionview (= 4.1.12)
+    RedCloth (4.3.2)
+    actionmailer (4.1.16)
+      actionpack (= 4.1.16)
+      actionview (= 4.1.16)
       mail (~> 2.5, >= 2.5.4)
-    actionpack (4.1.12)
-      actionview (= 4.1.12)
-      activesupport (= 4.1.12)
+    actionpack (4.1.16)
+      actionview (= 4.1.16)
+      activesupport (= 4.1.16)
       rack (~> 1.5.2)
       rack-test (~> 0.6.2)
-    actionview (4.1.12)
-      activesupport (= 4.1.12)
+    actionview (4.1.16)
+      activesupport (= 4.1.16)
       builder (~> 3.1)
       erubis (~> 2.7.0)
-    activemodel (4.1.12)
-      activesupport (= 4.1.12)
+    activemodel (4.1.16)
+      activesupport (= 4.1.16)
       builder (~> 3.1)
-    activerecord (4.1.12)
-      activemodel (= 4.1.12)
-      activesupport (= 4.1.12)
+    activerecord (4.1.16)
+      activemodel (= 4.1.16)
+      activesupport (= 4.1.16)
       arel (~> 5.0.0)
-    activerecord-nulldb-adapter (0.3.1)
+    activerecord-nulldb-adapter (0.3.8)
       activerecord (>= 2.0.0)
-    activesupport (4.1.12)
+    activesupport (4.1.16)
       i18n (~> 0.6, >= 0.6.9)
       json (~> 1.7, >= 1.7.7)
       minitest (~> 5.1)
       thread_safe (~> 0.1)
       tzinfo (~> 1.1)
-    addressable (2.4.0)
+    addressable (2.5.2)
+      public_suffix (>= 2.0.2, < 4.0)
     andand (1.3.3)
-    angularjs-rails (1.3.8)
+    angularjs-rails (1.3.15)
     arel (5.0.1.20140414130214)
-    arvados (0.1.20160420143004)
-      activesupport (>= 3, < 4.2.6)
+    arvados (0.1.20180302192246)
+      activesupport (>= 3)
       andand (~> 1.3, >= 1.3.3)
-      google-api-client (>= 0.7, < 0.9)
+      google-api-client (>= 0.7, < 0.8.9)
       i18n (~> 0)
-      json (~> 1.7, >= 1.7.7)
+      json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
     autoparse (0.3.3)
       addressable (>= 2.3.1)
@@ -59,11 +60,8 @@ GEM
     bootstrap-x-editable-rails (1.5.1.1)
       railties (>= 3.0)
     builder (3.2.3)
-    byebug (3.5.1)
-      columnize (~> 0.8)
-      debugger-linecache (~> 1.2)
-      slop (~> 3.6)
-    capistrano (2.15.5)
+    byebug (10.0.0)
+    capistrano (2.15.9)
       highline
       net-scp (>= 1.0.0)
       net-sftp (>= 2.0.0)
@@ -75,36 +73,29 @@ GEM
       rack (>= 1.0.0)
       rack-test (>= 0.5.4)
       xpath (~> 2.0)
-    childprocess (0.5.6)
+    childprocess (0.8.0)
       ffi (~> 1.0, >= 1.0.11)
     cliver (0.3.2)
-    coffee-rails (4.1.0)
+    coffee-rails (4.2.2)
       coffee-script (>= 2.2.0)
-      railties (>= 4.0.0, < 5.0)
-    coffee-script (2.3.0)
+      railties (>= 4.0.0)
+    coffee-script (2.4.1)
       coffee-script-source
       execjs
-    coffee-script-source (1.8.0)
-    columnize (0.9.0)
+    coffee-script-source (1.12.2)
     commonjs (0.2.7)
     concurrent-ruby (1.0.5)
-    daemon_controller (1.2.0)
-    debugger-linecache (1.2.0)
-    deep_merge (1.0.1)
+    deep_merge (1.2.1)
     docile (1.1.5)
     erubis (2.7.0)
     execjs (2.7.0)
     extlib (0.9.16)
-    faraday (0.9.2)
+    faraday (0.14.0)
       multipart-post (>= 1.2, < 3)
-    fast_stack (0.1.0)
-      rake
-      rake-compiler
-    ffi (1.9.10)
-    flamegraph (0.1.0)
-      fast_stack
-    google-api-client (0.8.6)
-      activesupport (>= 3.2)
+    ffi (1.9.23)
+    flamegraph (0.9.5)
+    google-api-client (0.8.7)
+      activesupport (>= 3.2, < 5.0)
       addressable (~> 2.3)
       autoparse (~> 0.3)
       extlib (~> 0.9)
@@ -114,70 +105,76 @@ GEM
       multi_json (~> 1.10)
       retriable (~> 1.4)
       signet (~> 0.6)
-    googleauth (0.5.1)
-      faraday (~> 0.9)
-      jwt (~> 1.4)
+    googleauth (0.6.2)
+      faraday (~> 0.12)
+      jwt (>= 1.4, < 3.0)
       logging (~> 2.0)
       memoist (~> 0.12)
       multi_json (~> 1.11)
       os (~> 0.9)
       signet (~> 0.7)
+    grease (0.3.1)
     headless (1.0.2)
-    highline (1.6.21)
-    httpclient (2.8.2.4)
-    i18n (0.9.0)
+    highline (1.7.10)
+    httpclient (2.8.3)
+    i18n (0.9.5)
       concurrent-ruby (~> 1.0)
-    jquery-rails (3.1.2)
+    jquery-rails (3.1.4)
       railties (>= 3.0, < 5.0)
       thor (>= 0.14, < 2.0)
     json (1.8.6)
-    jwt (1.5.4)
+    jwt (1.5.6)
     launchy (2.4.3)
       addressable (~> 2.3)
     less (2.6.0)
       commonjs (~> 0.2.7)
-    less-rails (2.6.0)
-      actionpack (>= 3.1)
+    less-rails (3.0.0)
+      actionpack (>= 4.0)
+      grease
       less (~> 2.6.0)
-    libv8 (3.16.14.7)
+      sprockets (> 2, < 4)
+      tilt
+    libv8 (3.16.14.19)
     little-plugger (1.1.4)
-    logging (2.1.0)
+    logging (2.2.2)
       little-plugger (~> 1.1)
       multi_json (~> 1.10)
-    lograge (0.7.1)
-      actionpack (>= 4, < 5.2)
-      activesupport (>= 4, < 5.2)
-      railties (>= 4, < 5.2)
+    lograge (0.9.0)
+      actionpack (>= 4)
+      activesupport (>= 4)
+      railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    mail (2.6.3)
-      mime-types (>= 1.16, < 3)
-    memoist (0.14.0)
+    mail (2.7.0)
+      mini_mime (>= 0.1.1)
+    memoist (0.16.0)
     metaclass (0.0.4)
-    mime-types (2.99)
-    mini_portile (0.6.2)
+    mime-types (3.1)
+      mime-types-data (~> 3.2015)
+    mime-types-data (3.2016.0521)
+    mini_mime (1.0.0)
+    mini_portile2 (2.3.0)
     minitest (5.10.3)
-    mocha (1.1.0)
+    mocha (1.3.0)
       metaclass (~> 0.0.1)
-    morrisjs-rails (0.5.1)
-      railties (> 3.1, < 5)
-    multi_json (1.12.1)
+    morrisjs-rails (0.5.1.2)
+      railties (> 3.1, < 6)
+    multi_json (1.13.1)
     multipart-post (2.0.0)
     net-scp (1.2.1)
       net-ssh (>= 2.6.5)
     net-sftp (2.1.2)
       net-ssh (>= 2.6.5)
-    net-ssh (2.9.2)
-    net-ssh-gateway (1.2.0)
-      net-ssh (>= 2.6.5)
-    nokogiri (1.6.6.4)
-      mini_portile (~> 0.6.0)
+    net-ssh (4.2.0)
+    net-ssh-gateway (2.0.0)
+      net-ssh (>= 4.0.0)
+    nokogiri (1.8.2)
+      mini_portile2 (~> 2.3.0)
     npm-rails (0.2.1)
       rails (>= 3.2)
-    oj (2.11.2)
+    oj (3.5.0)
     os (0.9.6)
-    passenger (4.0.57)
-      daemon_controller (>= 1.2.0)
+    passenger (5.2.1)
       rack
       rake (>= 0.8.1)
     piwik_analytics (1.0.2)
@@ -189,89 +186,95 @@ GEM
       cliver (~> 0.3.1)
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
+    public_suffix (3.0.2)
     rack (1.5.5)
-    rack-mini-profiler (0.9.2)
-      rack (>= 1.1.3)
+    rack-mini-profiler (0.10.7)
+      rack (>= 1.2.0)
     rack-test (0.6.3)
       rack (>= 1.0)
-    rails (4.1.12)
-      actionmailer (= 4.1.12)
-      actionpack (= 4.1.12)
-      actionview (= 4.1.12)
-      activemodel (= 4.1.12)
-      activerecord (= 4.1.12)
-      activesupport (= 4.1.12)
+    rails (4.1.16)
+      actionmailer (= 4.1.16)
+      actionpack (= 4.1.16)
+      actionview (= 4.1.16)
+      activemodel (= 4.1.16)
+      activerecord (= 4.1.16)
+      activesupport (= 4.1.16)
       bundler (>= 1.3.0, < 2.0)
-      railties (= 4.1.12)
+      railties (= 4.1.16)
       sprockets-rails (~> 2.0)
-    rails-perftest (0.0.5)
-    railties (4.1.12)
-      actionpack (= 4.1.12)
-      activesupport (= 4.1.12)
+    rails-perftest (0.0.7)
+    railties (4.1.16)
+      actionpack (= 4.1.16)
+      activesupport (= 4.1.16)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.2.1)
-    rake-compiler (0.9.5)
-      rake
+    rake (12.3.0)
     raphael-rails (2.1.2)
-    ref (1.0.5)
-    request_store (1.3.2)
+    rb-fsevent (0.10.3)
+    rb-inotify (0.9.10)
+      ffi (>= 0.5.0, < 2)
+    ref (2.0.0)
+    request_store (1.4.0)
+      rack (>= 1.4)
     retriable (1.4.1)
     ruby-debug-passenger (0.2.0)
-    ruby-prof (0.15.2)
-    rubyzip (1.1.7)
-    rvm-capistrano (1.5.5)
+    ruby-prof (0.17.0)
+    rubyzip (1.2.1)
+    rvm-capistrano (1.5.6)
       capistrano (~> 2.15.4)
     safe_yaml (1.0.4)
-    sass (3.4.9)
-    sass-rails (5.0.1)
-      railties (>= 4.0.0, < 5.0)
+    sass (3.5.5)
+      sass-listen (~> 4.0.0)
+    sass-listen (4.0.0)
+      rb-fsevent (~> 0.9, >= 0.9.4)
+      rb-inotify (~> 0.9, >= 0.9.7)
+    sass-rails (5.0.7)
+      railties (>= 4.0.0, < 6)
       sass (~> 3.1)
       sprockets (>= 2.8, < 4.0)
       sprockets-rails (>= 2.0, < 4.0)
-      tilt (~> 1.1)
-    selenium-webdriver (2.53.1)
+      tilt (>= 1.1, < 3)
+    selenium-webdriver (2.53.4)
       childprocess (~> 0.5)
-      multi_json (~> 1.0)
       rubyzip (~> 1.0)
       websocket (~> 1.0)
-    signet (0.7.2)
+    signet (0.8.1)
       addressable (~> 2.3)
       faraday (~> 0.9)
-      jwt (~> 1.5)
+      jwt (>= 1.5, < 3.0)
       multi_json (~> 1.10)
-    simplecov (0.9.1)
+    simplecov (0.15.1)
       docile (~> 1.1.0)
-      multi_json (~> 1.0)
-      simplecov-html (~> 0.8.0)
-    simplecov-html (0.8.0)
+      json (>= 1.8, < 3)
+      simplecov-html (~> 0.10.0)
+    simplecov-html (0.10.2)
     simplecov-rcov (0.2.3)
       simplecov (>= 0.4.1)
-    slop (3.6.0)
-    sprockets (3.2.0)
-      rack (~> 1.0)
-    sprockets-rails (2.3.2)
+    sprockets (3.7.1)
+      concurrent-ruby (~> 1.0)
+      rack (> 1, < 3)
+    sprockets-rails (2.3.3)
       actionpack (>= 3.0)
       activesupport (>= 3.0)
       sprockets (>= 2.8, < 4.0)
-    sshkey (1.6.1)
-    therubyracer (0.12.1)
-      libv8 (~> 3.16.14.0)
+    sshkey (1.9.0)
+    therubyracer (0.12.3)
+      libv8 (~> 3.16.14.15)
       ref
     thor (0.20.0)
     thread_safe (0.3.6)
-    tilt (1.4.1)
-    tzinfo (1.2.4)
+    tilt (2.0.8)
+    tzinfo (1.2.5)
       thread_safe (~> 0.1)
     uglifier (2.7.2)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
-    websocket (1.2.2)
-    websocket-driver (0.5.1)
+    websocket (1.2.5)
+    websocket-driver (0.7.0)
       websocket-extensions (>= 0.1.0)
-    websocket-extensions (0.1.1)
+    websocket-extensions (0.1.3)
     wiselinks (1.2.1)
-    xpath (2.0.0)
+    xpath (2.1.0)
       nokogiri (~> 1.3)
 
 PLATFORMS
@@ -281,24 +284,25 @@ DEPENDENCIES
   RedCloth
   activerecord-nulldb-adapter
   andand
-  angularjs-rails
+  angularjs-rails (~> 1.3.8)
   arvados (>= 0.1.20150511150219)
   bootstrap-sass (~> 3.1.0)
   bootstrap-tab-history-rails
   bootstrap-x-editable-rails
   byebug
-  capybara
+  capybara (~> 2.5.0)
   coffee-rails
   deep_merge
   flamegraph
-  headless
+  headless (~> 1.0.2)
   httpclient (~> 2.5)
   jquery-rails
   less
   less-rails
   lograge
   logstash-event
-  minitest (~> 5.0)
+  mime-types
+  minitest (~> 5.10.3)
   mocha
   morrisjs-rails
   multi_json
@@ -306,9 +310,9 @@ DEPENDENCIES
   oj
   passenger
   piwik_analytics
-  poltergeist
+  poltergeist (~> 1.5.1)
   rack-mini-profiler
-  rails (~> 4.1)
+  rails (< 4.2)
   rails-perftest
   raphael-rails
   ruby-debug-passenger
@@ -317,7 +321,7 @@ DEPENDENCIES
   safe_yaml
   sass
   sass-rails
-  selenium-webdriver
+  selenium-webdriver (~> 2.53.1)
   simplecov (~> 0.7)
   simplecov-rcov
   sshkey
@@ -327,4 +331,4 @@ DEPENDENCIES
   wiselinks
 
 BUNDLED WITH
-   1.16.0
+   1.16.1
index dbc81c52a376940231094dbf4415e5625016814f..7f1052ebcd6d279a0d6afec10ce78af3a4fd5902 100644 (file)
@@ -23,7 +23,7 @@ class ContainerWorkUnit < ProxyWorkUnit
     items = []
     container_uuid = if @proxied.is_a?(Container) then uuid else get(:container_uuid) end
     if container_uuid
-      cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts)
+      cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts secret_mounts)
       my_children = @child_proxies || ContainerRequest.select(cols).where(requesting_container_uuid: container_uuid).results if !my_children
       my_child_containers = my_children.map(&:container_uuid).compact.uniq
       grandchildren = {}
index 10030e58e896479d12afa262f7f51b2c7d63c9b1..6e8785aa83d1a85710926e88dad69b8d11576d1f 100644 (file)
@@ -80,9 +80,9 @@ SPDX-License-Identifier: AGPL-3.0 %>
             <%# column offset 8 %>
             <div class="col-md-4 text-overflow-ellipsis">
               <% if pj[:output_uuid] %>
-                <%= link_to_arvados_object_if_readable(pj[:output_uuid], 'Output data not available', friendly_name: true) %>
+                <%= link_to_arvados_object_if_readable(pj[:output_uuid], "#{pj[:output_uuid]} (Unavailable)", friendly_name: true) %>
               <% elsif current_job[:output] %>
-                <%= link_to_arvados_object_if_readable(current_job[:output], 'Output data not available', link_text: "Output of #{pj[:name]}") %>
+                <%= link_to_arvados_object_if_readable(current_job[:output], "#{current_job[:output]} (Unavailable)", link_text: "Output of #{pj[:name]}") %>
               <% else %>
                 No output.
               <% end %>
index 20e3d4d2cfb129268d251fa5cb3e6cba39753253..e48a91ec1b767a24e98c789bbe9907d79026514b 100644 (file)
@@ -66,7 +66,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
                       <%= render_localized_date(val) %>
                     <% elsif k == :outputs and val.any? %>
                       <% if val.size == 1 %>
-                        <%= link_to_arvados_object_if_readable(val[0], 'Output data not available', friendly_name: true) %>
+                        <%= link_to_arvados_object_if_readable(val[0], "#{val[0]} (Unavailable)", friendly_name: true) %>
                       <% else %>
                         <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
                       <% end %>
index 76f7a3081751df228bb32b810b50208902aac8bc..137bba09e8365188a9d2cb8ac5ed49bb59f1b48c 100644 (file)
@@ -212,7 +212,7 @@ common:
   # would be enabled in a collection's show page.
   # It is sufficient to list only applications here.
   # No need to list text and image types.
-  application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, sh, vnd.realvnc.bed, xml, xsl]
+  application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
 
   # the maximum number of bytes to load in the log viewer
   log_viewer_max_bytes: 1000000
index 0e7b1c798106854c7786ee3832f33ddb06df0847..69781a1bee0b1c376e3857684fb94b6b49bda25b 100644 (file)
@@ -17,6 +17,7 @@ include MIME
   %w(go),
   %w(r),
   %w(sam),
+  %w(python py),
 ].each do |suffixes|
   if (MIME::Types.type_for(suffixes[0]).first.nil?)
     MIME::Types.add(MIME::Type.new(["application/#{suffixes[0]}", suffixes]))
index 90cb23b90daf7b74a124048e57ba7348e2b6a41e..913f2b972834fce585bab576ddab67b576344221 100644 (file)
@@ -6,6 +6,9 @@ require 'test_helper'
 require 'helpers/share_object_helper'
 
 class DisabledApiTest < ActionController::TestCase
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, false
+
   test "dashboard recent processes when pipeline_instance index API is disabled" do
     @controller = ProjectsController.new
 
index 9a63a29e8f9677ec8a53426b374e42d255996c56..45726e5a646f8aea9f7b8d855ab880cde1135d74 100644 (file)
@@ -5,6 +5,9 @@
 require 'test_helper'
 
 class HealthcheckControllerTest < ActionController::TestCase
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, false
+
   [
     [false, nil, 404, 'disabled'],
     [true, nil, 401, 'authorization required'],
index 6971c39f3385f59674a681e7ccb38cf1f0d9f2b4..8d772b087f54f02d0f2172d6820bec3823d36baf 100644 (file)
@@ -259,7 +259,7 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
       if objects_readable
         assert_selector 'a[href="#Log"]', text: 'Log'
         assert_no_selector 'a[data-toggle="disabled"]', text: 'Log'
-        assert_no_text 'Output data not available'
+        assert_no_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
         if pipeline_page
           assert_text 'This pipeline was created from'
           job_id = object['components']['foo']['job']['uuid']
@@ -274,7 +274,7 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
         end
       else
         assert_selector 'a[data-toggle="disabled"]', text: 'Log'
-        assert_text 'Output data not available'
+        assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
         assert_text object['job']
         if pipeline_page
           assert_no_text 'This pipeline was created from'  # template is not readable
@@ -282,7 +282,7 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
           assert_text 'Log unavailable'
         end
         find(:xpath, "//a[@href='#Log']").click
-        assert_text 'Output data not available'
+        assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
         assert_no_text expect_log_text
       end
     end
index 503db0818dbed0f5d15a47023a5cfba5430141d0..60dadec61d86fc74b3ea6769c48248e709643252 100644 (file)
@@ -85,9 +85,11 @@ module ApiFixtureLoader
         file = IO.read(path)
         trim_index = file.index('# Test Helper trims the rest of the file')
         file = file[0, trim_index] if trim_index
-        YAML.load(file)
+        YAML.load(file).each do |name, ob|
+          ob.reject! { |k, v| k.start_with?('secret_') }
+        end
       end
-      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key].deep_dup }
+      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key] }.deep_dup
     end
   end
   def api_fixture(name, *keys)
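The fixture loader change above does two things: it scrubs any secret_* attributes from every fixture object at parse time, and it defers the deep_dup to the final lookup result so callers still get a private copy of the cached fixture. A rough Python translation of the same idea (the Ruby original is in the hunk; names and file handling here are illustrative):

    import copy
    import yaml  # PyYAML, assumed available

    def load_fixtures(path):
        with open(path) as f:
            fixtures = yaml.safe_load(f)
        for name, ob in fixtures.items():
            # Drop secret_* attributes so tests never see secret material.
            for k in [k for k in ob if k.startswith("secret_")]:
                del ob[k]
        return fixtures

    def api_fixture(fixtures, *keys):
        ob = fixtures
        for k in keys:
            ob = ob[k]
        # Dup only the final result, as in the patched inject call.
        return copy.deepcopy(ob)
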
index 84ef784d44c77fde399cf8e2fb53dcd03b2e1ded..da7c0305c77bcdba3493dae43d0b3081fd8fd417 100644 (file)
@@ -5,6 +5,7 @@
 #distribution(s)|name|version|iteration|type|architecture|extra fpm arguments
 debian8,debian9,ubuntu1204,centos7|python-gflags|2.0|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
+debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|apache-libcloud|2.3.0|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
@@ -13,7 +14,7 @@ debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|uritemplate|3.0.0|2|pyt
 debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
 debian8,debian9,ubuntu1204,centos7|ws4py|0.3.5|2|python|all
 debian8,debian9,ubuntu1204,centos7|pykka|1.2.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404|six|1.10.0|2|python|all
+debian8,debian9,ubuntu1204,ubuntu1404,centos7|six|1.10.0|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.3|3|python|amd64
 debian8,debian9,ubuntu1204,centos7|pycrypto|2.6.1|3|python|amd64
 debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
@@ -40,8 +41,9 @@ centos7|pbr|0.11.1|2|python|all
 centos7|pyparsing|2.1.10|2|python|all
 centos7|keepalive|0.5|2|python|all
 debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
+debian8,ubuntu1404,centos7|subprocess32|3.2.7|2|python|all
 all|ruamel.yaml|0.13.7|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
-all|cwltest|1.0.20170809112706|3|python|all|--depends 'python-futures >= 3.0.5'
+all|cwltest|1.0.20180209171722|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32'
 all|junit-xml|1.7|3|python|all
 all|rdflib-jsonld|0.4.0|2|python|all
 all|futures|3.0.5|2|python|all
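Each non-comment row in build.list is pipe-separated: distribution list, package name, version, iteration, package type, architecture, and optional extra fpm arguments, per the header comment in the hunk above. A small illustrative Python parser for that format (a sketch, not part of the build system):

    def parse_build_list(text):
        """Parse build.list rows into dicts; field names follow the
        #distribution(s)|name|version|iteration|type|architecture|... header."""
        rows = []
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            fields = line.split("|")
            rows.append({
                "distros": fields[0].split(","),
                "name": fields[1],
                "version": fields[2],
                "iteration": fields[3],
                "type": fields[4],
                "arch": fields[5],
                "fpm_args": fields[6] if len(fields) > 6 else "",
            })
        return rows

    print(parse_build_list("all|cwltest|1.0.20180209171722|4|python|all|"
                           "--depends 'python-subprocess32'"))
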
diff --git a/build/package-testing/test-package-python-cwltest.sh b/build/package-testing/test-package-python-cwltest.sh
new file mode 120000 (symlink)
index 0000000..9b6545b
--- /dev/null
@@ -0,0 +1 @@
+test-package-python27-python-cwltest.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python27-python-cwltest.sh b/build/package-testing/test-package-python27-python-cwltest.sh
new file mode 100755 (executable)
index 0000000..395cefc
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec python <<EOF
+import cwltest
+EOF
deleted file mode 100755 (executable)
index b4ea35c574b20a776960aeefad4d4e4d324a347e..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-set -eu
-
-# Multiple .deb based distros symlink to this script, so extract the target
-# from the invocation path.
-target=$(echo $0 | sed 's/.*test-packages-\([^.]*\)\.sh.*/\1/')
-
-export ARV_PACKAGES_DIR="/arvados/packages/$target"
-
-dpkg-query --show > "$ARV_PACKAGES_DIR/$1.before"
-
-apt-get -qq update
-apt-get --assume-yes --allow-unauthenticated install "$1"
-
-dpkg-query --show > "$ARV_PACKAGES_DIR/$1.after"
-
-set +e
-diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" > "$ARV_PACKAGES_DIR/$1.diff"
-set -e
-
-mkdir -p /tmp/opts
-cd /tmp/opts
-
-export ARV_PACKAGES_DIR="/arvados/packages/$target"
-
-dpkg-deb -x $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) .
-
-while read so && [ -n "$so" ]; do
-    echo
-    echo "== Packages dependencies for $so =="
-    ldd "$so" | awk '($3 ~ /^\//){print $3}' | sort -u | xargs dpkg -S | cut -d: -f1 | sort -u
-done <<EOF
-$(find -name '*.so')
-EOF
-
-exec /jenkins/package-testing/common-test-packages.sh "$1"
new file mode 120000 (symlink)
index 0000000000000000000000000000000000000000..54ce94c357439331502e2926fc95dfed001b9edd
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
index c981b2a9ef679509bb24f7ecd25d005fb9d50920..31a546fd350d60cf65bdb0237f93f2b17335eede 100755 (executable)
@@ -21,6 +21,8 @@ Syntax:
     Build only a specific package
 --only-test <package>
     Test only a specific package
+--force-test
+    Test even if there is no new untested package
 --build-version <string>
     Version to build (default:
     \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
@@ -49,7 +51,7 @@ if ! [[ -d "$WORKSPACE" ]]; then
 fi
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,debug,test-packages,target:,command:,only-test:,only-build:,build-version: \
+    help,debug,test-packages,target:,command:,only-test:,force-test,only-build:,build-version: \
     -- "" "$@")
 if [ $? -ne 0 ]; then
     exit 1
@@ -74,6 +76,9 @@ while [ $# -gt 0 ]; do
             test_packages=1
             packages="$2"; shift
             ;;
+        --force-test)
+            FORCE_TEST=true
+            ;;
         --only-build)
             ONLY_BUILD="$2"; shift
             ;;
@@ -204,6 +209,14 @@ if [[ -n "$test_packages" ]]; then
         if [[ -n "$ONLY_BUILD" ]] && [[ "$p" != "$ONLY_BUILD" ]]; then
             continue
         fi
+        if [[ -e "${WORKSPACE}/packages/.last_test_${TARGET}" ]] && [[ -z "$FORCE_TEST" ]]; then
+          MATCH=`find ${WORKSPACE}/packages/ -newer ${WORKSPACE}/packages/.last_test_${TARGET} -regex .*${TARGET}/$p.*`
+          if [[ "$MATCH" == "" ]]; then
+            # No new package has been built that needs testing
+            echo "Skipping $p test because no new package was built since the last test."
+            continue
+          fi
+        fi
         echo
         echo "START: $p test on $IMAGE" >&2
         if docker run --rm \
@@ -220,6 +233,8 @@ if [[ -n "$test_packages" ]]; then
             echo "ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
         fi
     done
+
+    touch ${WORKSPACE}/packages/.last_test_${TARGET}
 else
     echo
     echo "START: build packages on $IMAGE" >&2
index c56b74088f070c3c252b7c9f84e92eb5e3fea78c..497545dfacf7f3cb92fe11e393c581af50ebcf61 100755 (executable)
@@ -486,23 +486,8 @@ if [[ "$?" == "0" ]]; then
   fpm_build $WORKSPACE/tools/crunchstat-summary ${PYTHON2_PKG_PREFIX}-crunchstat-summary 'Curoverse, Inc.' 'python' "$crunchstat_summary_version" "--url=https://arvados.org" "--description=Crunchstat-summary reads Arvados Crunch log files and summarize resource usage" --iteration "$iteration"
 fi
 
-# Forked libcloud
-if test_package_presence "$PYTHON2_PKG_PREFIX"-apache-libcloud "$LIBCLOUD_PIN" python 2
-then
-  LIBCLOUD_DIR=$(mktemp -d)
-  (
-      cd $LIBCLOUD_DIR
-      git clone $DASHQ_UNLESS_DEBUG https://github.com/curoverse/libcloud.git .
-      git checkout $DASHQ_UNLESS_DEBUG apache-libcloud-$LIBCLOUD_PIN
-      # libcloud is absurdly noisy without -q, so force -q here
-      OLD_DASHQ_UNLESS_DEBUG=$DASHQ_UNLESS_DEBUG
-      DASHQ_UNLESS_DEBUG=-q
-      handle_python_package
-      DASHQ_UNLESS_DEBUG=$OLD_DASHQ_UNLESS_DEBUG
-  )
-  fpm_build $LIBCLOUD_DIR "$PYTHON2_PKG_PREFIX"-apache-libcloud "" python "" --iteration 2
-  rm -rf $LIBCLOUD_DIR
-fi
+## if libcloud becomes our own fork see
+## https://dev.arvados.org/issues/12268#note-27
 
 # Python 2 dependencies
 declare -a PIP_DOWNLOAD_SWITCHES=(--no-deps)
index 9081d99bb545cac5ff14272e63e8f842292585da..b89c8d9e5bc51c6f0acd1b1938ad1cd540192f15 100755 (executable)
@@ -492,12 +492,12 @@ setup_virtualenv() {
     fi
     if [[ $("$venvdest/bin/python" --version 2>&1) =~ \ 3\.[012]\. ]]; then
         # pip 8.0.0 dropped support for python 3.2, e.g., debian wheezy
-        "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7,<8'
+        "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7,<8'
     else
-        "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7'
+        "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
     fi
     # ubuntu1404 can't seem to install mock via tests_require, but it can do this.
-    "$venvdest/bin/pip" install 'mock>=1.0' 'pbr<1.7.0'
+    "$venvdest/bin/pip" install --no-cache-dir 'mock>=1.0' 'pbr<1.7.0'
 }
 
 export PERLINSTALLBASE
@@ -751,8 +751,8 @@ do_install_once() {
         cd "$WORKSPACE/$1" \
             && "${3}python" setup.py sdist rotate --keep=1 --match .tar.gz \
             && cd "$WORKSPACE" \
-            && "${3}pip" install --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
-            && "${3}pip" install --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
+            && "${3}pip" install --no-cache-dir --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
+            && "${3}pip" install --no-cache-dir --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
     elif [[ "$2" != "" ]]
     then
         "install_$2"
index 9be14fdd50eff031ff8818038fba9ad383e29177..011be51062e0bf2352d150531555dbde1292242d 100644 (file)
@@ -93,6 +93,7 @@ navbar:
       - sdk/python/example.html.textile.liquid
       - sdk/python/python.html.textile.liquid
       - sdk/python/crunch-utility-libraries.html.textile.liquid
+      - sdk/python/arvados-fuse.html.textile.liquid
       - sdk/python/events.html.textile.liquid
       - sdk/python/cookbook.html.textile.liquid
     - CLI:
index 38b7657fe758f6d08812e371a0f4de777c8be48b..6c4fbeb1f3adf32f6ed3a365aeb511a5d4de3d04 100644 (file)
@@ -5,5 +5,5 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin' %}
-This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "FUSE Driver":{{site.baseurl}}/sdk/python/arvados-fuse.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
 {% include 'notebox_end' %}
index 027f813f6304ebadfdebc9b9672acc6c48ced69a..d8a62a9c962fd8bd0e7d7fb5b397627e6d17aaac 100644 (file)
@@ -26,7 +26,7 @@ On Red Hat-based systems:
 On Debian-based systems:
 
 <notextile>
-<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client crunch-run arvados-docker-cleaner</span>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
 </code></pre>
 </notextile>
 
index 76e2d13acac70fae75f0a7183cc0a1084f13926c..688850c2922869dbffa8e2c0a902c81c9dd1382c 100644 (file)
@@ -20,7 +20,7 @@ Usage:
 # Install arvados/migrate-docker19 image: @docker pull arvados/migrate-docker19:1.0@. If you're unable to do this, you can run @arvados/docker/migrate-docker19/build.sh@ to create @arvados/migrate-docker19@ Docker image.
 # Make sure you have the right modules installed: @sudo modprobe overlayfs bridge br_netfilter nf_nat@
 # Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
-# Your temporary directory should have the size of all layers of the biggest image in the cluster, this is hard to estimate, but you can start with five times that size. You can set up a different directory by using the @--tmp-dir@ switch. Make sure that the user running the docker daemon has permissions to write in that directory.
+# Your temporary directory should be large enough to hold all layers of the biggest image in the cluster. This is hard to estimate, but five times the size of that image is a reasonable starting point. You can select a different directory with the @--tempdir@ switch. Make sure that the user running the docker daemon has permission to write to that directory.
 # Run @arv-migrate-docker19 --dry-run@ from the Arvados Python SDK on the host (not in a container). This will print out some information useful for the migration.
 # Finally to make the migration run @arv-migrate-docker19@ from the Arvados Python SDK on the host (not in a container).
 
diff --git a/doc/sdk/python/arvados-fuse.html.textile.liquid b/doc/sdk/python/arvados-fuse.html.textile.liquid
new file mode 100644 (file)
index 0000000..77852fd
--- /dev/null
@@ -0,0 +1,64 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Arvados FUSE driver
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados FUSE driver is a Python utility that presents the Keep service as a normal filesystem, so that data can be accessed using standard tools. This driver requires the Python SDK to be installed in order to access Arvados services.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the @arv-mount@ utility should already be installed.
+
+To use the FUSE driver elsewhere, you can install from a distribution package, PyPI, or source.
+
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7.
+{% include 'notebox_end' %}
+
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% assign rh_version = "6" %}
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse</code>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse</code>
+</code></pre>
+</notextile>
+
+h4. Option 2: Install with pip
+
+Run @pip-2.7 install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+
+h4. Option 3: Install from source
+
+Install the @python-setuptools@ package from your distribution.  Then run the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/services/fuse</span>
+~/arvados/services/fuse$ <span class="userinput">python2.7 setup.py install</span>
+</code></pre>
+</notextile>
+
+h3. Usage
+
+Please refer to the "Mounting Keep as a filesystem":{{site.baseurl}}/user/tutorials/tutorial-keep-mount.html tutorial for more information.
\ No newline at end of file
index a046654a64c7027afff48213a56303720e0fad3b..4a6c453cae89e3039a526d268382c89be28cc55e 100644 (file)
@@ -164,3 +164,45 @@ for u in collection_uuids:
 newcol = arvados.collection.Collection(combined_manifest)
 newcol.save_new(name="My combined collection", owner_uuid=project_uuid)
 {% endcodeblock %}
+
+h2. Upload a file into a new collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+project_uuid = "qr1hi-j7d0g-zzzzzzzzzzzzzzz"
+collection_name = "My collection"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.Collection()
+with open(filename, "rb") as reader:
+    with c.open(filename, "wb") as writer:
+        content = reader.read(128*1024)
+        while content:
+            writer.write(content)
+            content = reader.read(128*1024)
+c.save_new(name=collection_name, owner_uuid=project_uuid)
+print("Saved %s to %s" % (collection_name, c.manifest_locator()))
+{% endcodeblock %}
+
+h2. Download a file from a collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+collection_uuid = "qr1hi-4zz18-zzzzzzzzzzzzzzz"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.CollectionReader(collection_uuid)
+with c.open(filename, "rb") as reader:
+    with open(filename, "wb") as writer:
+        content = reader.read(128*1024)
+        while content:
+            writer.write(content)
+            content = reader.read(128*1024)
+print("Finished downloading %s" % filename)
+{% endcodeblock %}
index 95422b6bd32206019a8703e8a94a6c57735b92dd..cf25639b14defda47456d6610458285a06aaecce 100644 (file)
@@ -26,7 +26,7 @@ hints:
   arv:RunInSingleContainer: {}
   arv:RuntimeConstraints:
     keep_cache: 123456
-    keep_output_dir: local_output_dir
+    outputDirType: keep_output_dir
   arv:PartitionRequirement:
     partition: dev_partition
   arv:APIRequirement: {}
@@ -36,6 +36,8 @@ hints:
     outputTTL: 3600
   arv:ReuseRequirement:
     enableReuse: false
+  cwltool:Secrets:
+    secrets: [input1, input2]
 </pre>
 
 The one exception to this is @arv:APIRequirement@, see note below.
@@ -101,3 +103,11 @@ Enable/disable work reuse for current process.  Default true (work reuse enabled
 table(table table-bordered table-condensed).
 |_. Field |_. Type |_. Description |
 |enableReuse|boolean|Enable/disable work reuse for current process.  Default true (work reuse enabled).|
+
+h2. cwltool:Secrets
+
+Indicate that one or more input parameters are "secret".  Must be applied at the top-level Workflow.  Secret parameters are not stored in Keep, are hidden from logs and API responses, and are wiped from the database after the workflow completes.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|secrets|array<string>|Input parameters which are considered "secret".  Must be strings.|
index 984e25ff6ba2466c3e07b36f45164a43c9e66cd4..f9e86cc17773a83a830c4a61bef3eac9ff821682 100644 (file)
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This tutoral describes how to access Arvados collections using traditional filesystem tools by mounting Keep as a read-only file system using @arv-mount@.
+This tutorial describes how to access Arvados collections using traditional filesystem tools by mounting Keep as a file system using @arv-mount@.
 
 {% include 'tutorial_expectations' %}
 
index 41c6ff425121515101bdaf1d7aa5b4004acef57d..2ca405060390c65df2f961f7c7a83e5a278d0687 100644 (file)
@@ -5,7 +5,6 @@
 package dispatchcloud
 
 import (
-       "bytes"
        "errors"
        "log"
        "os/exec"
@@ -88,35 +87,31 @@ func SlurmNodeTypeFeatureKludge(cc *arvados.Cluster) {
        }
        for {
                slurmKludge(features)
-               time.Sleep(time.Minute)
+               time.Sleep(2 * time.Second)
        }
 }
 
-var (
-       slurmDummyNode     = "compute0"
-       slurmErrBadFeature = "Invalid feature"
-       slurmErrNoNodes    = "node configuration is not available"
-)
+const slurmDummyNode = "compute0"
 
 func slurmKludge(features []string) {
-       cmd := exec.Command("srun", "--test-only", "--constraint="+strings.Join(features, "&"), "false")
-       out, err := cmd.CombinedOutput()
-       switch {
-       case err == nil || bytes.Contains(out, []byte(slurmErrNoNodes)):
-               // Evidently our node-type feature names are all valid.
+       allFeatures := strings.Join(features, ",")
 
-       case bytes.Contains(out, []byte(slurmErrBadFeature)):
-               log.Printf("temporarily configuring node %q with all node type features", slurmDummyNode)
-               for _, nodeFeatures := range []string{strings.Join(features, ","), ""} {
-                       cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+nodeFeatures)
-                       log.Printf("running: %q %q", cmd.Path, cmd.Args)
-                       out, err := cmd.CombinedOutput()
-                       if err != nil {
-                               log.Printf("error: scontrol: %s (output was %q)", err, out)
-                       }
-               }
+       cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
+       out, err := cmd.CombinedOutput()
+       if err != nil {
+               log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
+               return
+       }
+       if string(out) == allFeatures+"\n" {
+               // Already configured correctly, nothing to do.
+               return
+       }
 
-       default:
-               log.Printf("warning: expected srun error %q, %q, or success, but output was %q", slurmErrBadFeature, slurmErrNoNodes, out)
+       log.Printf("configuring node %q with all node type features", slurmDummyNode)
+       cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
+       log.Printf("running: %q %q", cmd.Path, cmd.Args)
+       out, err = cmd.CombinedOutput()
+       if err != nil {
+               log.Printf("error: scontrol: %s (output was %q)", err, out)
        }
 }
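The rewritten slurmKludge is a plain reconcile loop: every two seconds it reads the dummy node's current feature list with sinfo and rewrites it with scontrol only when it differs from the desired set, instead of probing with srun and matching error strings. A hedged Python sketch of the same pattern (the sinfo/scontrol invocations mirror the Go code above; everything else is illustrative):

    import subprocess
    import time

    DUMMY_NODE = "compute0"

    def reconcile_features(features):
        want = ",".join(features)
        out = subprocess.check_output(
            ["sinfo", "--nodes=" + DUMMY_NODE, "--format=%f", "--noheader"])
        if out.decode().strip() == want:
            return  # already configured correctly, nothing to do
        subprocess.check_call(
            ["scontrol", "update", "NodeName=" + DUMMY_NODE, "Features=" + want])

    def run_forever(features, interval=2):
        while True:
            try:
                reconcile_features(features)
            except (OSError, subprocess.CalledProcessError) as e:
                print("slurm feature kludge failed: %s" % e)
            time.sleep(interval)
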
index 628b6aea69913da1944abea1d886a14d63db3d7a..e4f5ceab7465cb17372466b265d417e02a6d4637 100644 (file)
@@ -23,6 +23,7 @@ import cwltool.main
 import cwltool.workflow
 import cwltool.process
 from schema_salad.sourceline import SourceLine
+import schema_salad.validate as validate
 
 import arvados
 import arvados.config
@@ -53,6 +54,8 @@ arvados.log_handler.setFormatter(logging.Formatter(
         '%(asctime)s %(name)s %(levelname)s: %(message)s',
         '%Y-%m-%d %H:%M:%S'))
 
+DEFAULT_PRIORITY = 500
+
 class ArvCwlRunner(object):
     """Execute a CWL tool or workflow, submit work (using either jobs or
     containers API), wait for them to complete, and report output.
@@ -234,6 +237,8 @@ class ArvCwlRunner(object):
                     if not obj.get("dockerOutputDirectory").startswith('/'):
                         raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
                             "Option 'dockerOutputDirectory' must be an absolute path.")
+            if obj.get("class") == "http://commonwl.org/cwltool#Secrets" and self.work_api != "containers":
+                raise SourceLine(obj, "class", UnsupportedRequirement).makeError("Secrets not supported with --api=jobs")
             for v in obj.itervalues():
                 self.check_features(v)
         elif isinstance(obj, list):
@@ -351,7 +356,7 @@ class ArvCwlRunner(object):
         make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
                                                                  collection_cache=self.collection_cache)
         self.fs_access = make_fs_access(kwargs["basedir"])
-
+        self.secret_store = kwargs.get("secret_store")
 
         self.trash_intermediate = kwargs["trash_intermediate"]
         if self.trash_intermediate and self.work_api != "containers":
@@ -415,16 +420,21 @@ class ArvCwlRunner(object):
 
         if self.work_api == "containers":
             if self.ignore_docker_for_reuse:
-                raise validate.ValidationException("--ignore-docker-for-reuse not supported with containers API.")
+                raise Exception("--ignore-docker-for-reuse not supported with containers API.")
             kwargs["outdir"] = "/var/spool/cwl"
             kwargs["docker_outdir"] = "/var/spool/cwl"
             kwargs["tmpdir"] = "/tmp"
             kwargs["docker_tmpdir"] = "/tmp"
         elif self.work_api == "jobs":
+            if kwargs["priority"] != DEFAULT_PRIORITY:
+                raise Exception("--priority not implemented for jobs API.")
             kwargs["outdir"] = "$(task.outdir)"
             kwargs["docker_outdir"] = "$(task.outdir)"
             kwargs["tmpdir"] = "$(task.tmpdir)"
 
+        if kwargs["priority"] < 1 or kwargs["priority"] > 1000:
+            raise Exception("--priority must be in the range 1..1000.")
+
         runnerjob = None
         if kwargs.get("submit"):
             # Submit a runner job to run the workflow for us.
@@ -443,7 +453,9 @@ class ArvCwlRunner(object):
                                                 on_error=kwargs.get("on_error"),
                                                 submit_runner_image=kwargs.get("submit_runner_image"),
                                                 intermediate_output_ttl=kwargs.get("intermediate_output_ttl"),
-                                                merged_map=merged_map)
+                                                merged_map=merged_map,
+                                                priority=kwargs.get("priority"),
+                                                secret_store=self.secret_store)
             elif self.work_api == "jobs":
                 runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
                                       self.output_name,
@@ -581,7 +593,7 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--print-dot", action="store_true",
                          help="Print workflow visualization in graphviz format and exit")
-    exgroup.add_argument("--version", action="store_true", help="Print version and exit")
+    exgroup.add_argument("--version", action="version", help="Print version and exit", version=versionstring())
     exgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
 
     exgroup = parser.add_mutually_exclusive_group()
@@ -663,6 +675,10 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         help="If N > 0, intermediate output collections will be trashed N seconds after creation.  Default is 0 (don't trash).",
                         default=0)
 
+    parser.add_argument("--priority", type=int,
+                        help="Workflow priority (range 1..1000, higher has precedence over lower, containers api only)",
+                        default=DEFAULT_PRIORITY)
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--trash-intermediate", action="store_true",
                         default=False, dest="trash_intermediate",
@@ -671,7 +687,7 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default=False, dest="trash_intermediate",
                         help="Do not trash intermediate outputs (default).")
 
-    parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
+    parser.add_argument("workflow", type=str, default=None, help="The workflow to execute")
     parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
 
     return parser
@@ -699,10 +715,6 @@ def main(args, stdout, stderr, api_client=None, keep_client=None):
     job_order_object = None
     arvargs = parser.parse_args(args)
 
-    if arvargs.version:
-        print versionstring()
-        return
-
     if arvargs.update_workflow:
         if arvargs.update_workflow.find('-7fd4e-') == 5:
             want_api = 'containers'
index 7ae2239e2e81dd4f9345e4b2a47743eb585987e9..2ab96c94f0b3e54b42ae51b9b9f42eca6c7071fc 100644 (file)
@@ -27,6 +27,26 @@ $graph:
           name: LoadListingEnum
           symbols: [no_listing, shallow_listing, deep_listing]
 
+- name: cwltool:Secrets
+  type: record
+  inVocab: false
+  extends: cwl:ProcessRequirement
+  fields:
+    class:
+      type: string
+      doc: "Always 'Secrets'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    secrets:
+      type: string[]
+      doc: |
+        List one or more input parameters that are sensitive (such as passwords)
+        and will be deliberately obscured from logging.
+      jsonldPredicate:
+        "_type": "@id"
+        refScope: 0
+
 - name: RunInSingleContainer
   type: record
   extends: cwl:ProcessRequirement
index a2aaa8d49e176a1795c5a5f1d7c17f4a84b658ad..56281e3c75f16592bd77ca575c8cd921dde94957 100644 (file)
@@ -9,6 +9,7 @@ import urllib
 import time
 import datetime
 import ciso8601
+import uuid
 
 import ruamel.yaml as yaml
 
@@ -41,18 +42,31 @@ class ArvadosContainer(object):
         pass
 
     def run(self, dry_run=False, pull_image=True, **kwargs):
+        # ArvadosCommandTool subclasses from cwltool.CommandLineTool,
+        # which calls makeJobRunner() to get a new ArvadosContainer
+        # object.  The fields that define execution such as
+        # command_line, environment, etc are set on the
+        # ArvadosContainer object by CommandLineTool.job() before
+        # run() is called.
+
         container_request = {
             "command": self.command_line,
             "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
             "output_path": self.outdir,
             "cwd": self.outdir,
-            "priority": 1,
+            "priority": kwargs.get("priority"),
             "state": "Committed",
             "properties": {},
         }
         runtime_constraints = {}
 
+        if self.arvrunner.secret_store.has_secret(self.command_line):
+            raise WorkflowException("Secret material leaked on command line, only file literals may contain secrets")
+
+        if self.arvrunner.secret_store.has_secret(self.environment):
+            raise WorkflowException("Secret material leaked in environment, only file literals may contain secrets")
+
         resources = self.builder.resources
         if resources is not None:
             runtime_constraints["vcpus"] = resources.get("cores", 1)
@@ -68,6 +82,7 @@ class ArvadosContainer(object):
                 "capacity": resources.get("tmpdirSize", 0) * 2**20
             }
         }
+        secret_mounts = {}
         scheduling_parameters = {}
 
         rf = [self.pathmapper.mapper(f) for f in self.pathmapper.referenced_files]
@@ -105,10 +120,12 @@ class ArvadosContainer(object):
                 generatemapper = NoFollowPathMapper([self.generatefiles], "", "",
                                                     separateDirs=False)
 
-                logger.debug("generatemapper is %s", generatemapper._pathmap)
+                sorteditems = sorted(generatemapper.items(), None, key=lambda n: n[1].target)
+
+                logger.debug("generatemapper is %s", sorteditems)
 
                 with Perf(metrics, "createfiles %s" % self.name):
-                    for f, p in generatemapper.items():
+                    for f, p in sorteditems:
                         if not p.target:
                             pass
                         elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
@@ -118,8 +135,14 @@ class ArvadosContainer(object):
                                 source, path = self.arvrunner.fs_access.get_collection(p.resolved)
                                 vwd.copy(path, p.target, source_collection=source)
                         elif p.type == "CreateFile":
-                            with vwd.open(p.target, "w") as n:
-                                n.write(p.resolved.encode("utf-8"))
+                            if self.arvrunner.secret_store.has_secret(p.resolved):
+                                secret_mounts["%s/%s" % (self.outdir, p.target)] = {
+                                    "kind": "text",
+                                    "content": self.arvrunner.secret_store.retrieve(p.resolved)
+                                }
+                            else:
+                                with vwd.open(p.target, "w") as n:
+                                    n.write(p.resolved.encode("utf-8"))
 
                 def keepemptydirs(p):
                     if isinstance(p, arvados.collection.RichCollectionBase):
@@ -134,8 +157,10 @@ class ArvadosContainer(object):
                 with Perf(metrics, "generatefiles.save_new %s" % self.name):
                     vwd.save_new()
 
-                for f, p in generatemapper.items():
-                    if not p.target:
+                prev = None
+                for f, p in sorteditems:
+                    if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or
+                        (prev is not None and p.target.startswith(prev))):
                         continue
                     mountpoint = "%s/%s" % (self.outdir, p.target)
                     mounts[mountpoint] = {"kind": "collection",
@@ -143,6 +168,7 @@ class ArvadosContainer(object):
                                           "path": p.target}
                     if p.type.startswith("Writable"):
                         mounts[mountpoint]["writable"] = True
+                    prev = p.target + "/"
 
         container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
         if self.environment:
@@ -200,10 +226,11 @@ class ArvadosContainer(object):
             self.output_ttl = self.arvrunner.intermediate_output_ttl
 
         if self.output_ttl < 0:
-            raise WorkflowError("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
+            raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
 
         container_request["output_ttl"] = self.output_ttl
         container_request["mounts"] = mounts
+        container_request["secret_mounts"] = secret_mounts
         container_request["runtime_constraints"] = runtime_constraints
         container_request["scheduling_parameters"] = scheduling_parameters
 
@@ -306,12 +333,22 @@ class RunnerContainer(Runner):
         visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
         visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)
 
+        secret_mounts = {}
+        for param in sorted(self.job_order.keys()):
+            if self.secret_store.has_secret(self.job_order[param]):
+                mnt = "/secrets/s%d" % len(secret_mounts)
+                secret_mounts[mnt] = {
+                    "kind": "text",
+                    "content": self.secret_store.retrieve(self.job_order[param])
+                }
+                self.job_order[param] = {"$include": mnt}
+
         container_req = {
             "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
             "output_path": "/var/spool/cwl",
             "cwd": "/var/spool/cwl",
-            "priority": 1,
+            "priority": self.priority,
             "state": "Committed",
             "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
             "mounts": {
@@ -328,6 +365,7 @@ class RunnerContainer(Runner):
                     "writable": True
                 }
             },
+            "secret_mounts": secret_mounts,
             "runtime_constraints": {
                 "vcpus": 1,
                 "ram": 1024*1024 * self.submit_runner_ram,
index 12d74a05c6aa855df8086d216f1e706df82ed680..fec728f5b785ef4897e05a334c920b93c12f7146 100644 (file)
@@ -128,6 +128,7 @@ def run():
         args.make_fs_access = make_fs_access
         args.trash_intermediate = False
         args.intermediate_output_ttl = 0
+        args.priority = arvados_cwl.DEFAULT_PRIORITY
 
         runner.arv_executor(t, job_order_object, **vars(args))
     except Exception as e:
index 9b79d458501dfd92e31584ae57aacbde6b513a7a..053c99502bf06e0c5829b67a4563bfd7544b8c1a 100644 (file)
@@ -316,7 +316,8 @@ class Runner(object):
     def __init__(self, runner, tool, job_order, enable_reuse,
                  output_name, output_tags, submit_runner_ram=0,
                  name=None, on_error=None, submit_runner_image=None,
-                 intermediate_output_ttl=0, merged_map=None):
+                 intermediate_output_ttl=0, merged_map=None, priority=None,
+                 secret_store=None):
         self.arvrunner = runner
         self.tool = tool
         self.job_order = job_order
@@ -336,6 +337,8 @@ class Runner(object):
         self.on_error = on_error
         self.jobs_image = submit_runner_image or "arvados/jobs:"+__version__
         self.intermediate_output_ttl = intermediate_output_ttl
+        self.priority = priority
+        self.secret_store = secret_store
 
         if submit_runner_ram:
             self.submit_runner_ram = submit_runner_ram
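
The `secret_store` threaded through here is cwltool's `SecretStore`. Its contract, as exercised by the tests later in this commit, is roughly: `store()` swaps a job-order value for an opaque token, `has_secret()` recognizes that token, and `retrieve()` returns the original value. A minimal sketch using only those methods:

    import cwltool.secrets

    secrets = cwltool.secrets.SecretStore()
    job_order = {"pw": "blorp"}
    secrets.store(["pw"], job_order)  # replaces the value with an opaque token
    assert secrets.has_secret(job_order["pw"])
    assert secrets.retrieve(job_order["pw"]) == "blorp"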
index 5b1d7370e8a2afcb692f568b34de67b443fe5dfd..da5154040d54262e81a5de85f1e1ce3b30a3cbcb 100644 (file)
@@ -41,7 +41,7 @@ setup(name='arvados-cwl-runner',
       # Note that arvados/build/run-build-packages.sh looks at this
       # file to determine what version of cwltool and schema-salad to build.
       install_requires=[
-          'cwltool==1.0.20180225105849',
+          'cwltool==1.0.20180322194411',
           'schema-salad==2.6.20171201034858',
           'typing==3.5.3.0',
           'ruamel.yaml==0.13.7',
index d99cf6c189dc16761f73e5c1699ceed68baf9e9b..ea6477cfe8b2c58facd80602c01786da1f0a9677 100644 (file)
     out: out
   tool: wf/runin-with-ttl-wf.cwl
   doc: "RunInSingleContainer respects outputTTL"
+
+- job: secret_test_job.yml
+  output: {
+    "out": {
+        "class": "File",
+        "location": "hashed_example.txt",
+        "size": 47,
+        "checksum": "sha1$f45341c7f03b4dd10646c402908d1aea0d580f5d"
+    }
+  }
+  tool: wf/secret_wf.cwl
+  doc: "Test secret input parameters"
diff --git a/sdk/cwl/tests/secret_test_job.yml b/sdk/cwl/tests/secret_test_job.yml
new file mode 100644 (file)
index 0000000..883d24e
--- /dev/null
@@ -0,0 +1 @@
+pw: blorp
index fea21e9e0fb1d47c36e4cb25c93faab84f325148..522946a4f49ee2acd68588c6100d45bcb097cbe8 100644 (file)
@@ -10,6 +10,7 @@ import unittest
 import os
 import functools
 import cwltool.process
+import cwltool.secrets
 from schema_salad.ref_resolver import Loader
 from schema_salad.sourceline import cmap
 
@@ -33,6 +34,7 @@ class TestContainer(unittest.TestCase):
             runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
             runner.ignore_docker_for_reuse = False
             runner.intermediate_output_ttl = 0
+            runner.secret_store = cwltool.secrets.SecretStore()
 
             keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
             runner.api.collections().get().execute.return_value = {
@@ -55,7 +57,7 @@ class TestContainer(unittest.TestCase):
             arvtool.formatgraph = None
             for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_"+str(enable_reuse),
                                  make_fs_access=make_fs_access, tmpdir="/tmp"):
-                j.run(enable_reuse=enable_reuse)
+                j.run(enable_reuse=enable_reuse, priority=500)
                 runner.api.container_requests().create.assert_called_with(
                     body=JsonDiffMatcher({
                         'environment': {
@@ -68,7 +70,7 @@ class TestContainer(unittest.TestCase):
                             'ram': 1073741824
                         },
                         'use_existing': enable_reuse,
-                        'priority': 1,
+                        'priority': 500,
                         'mounts': {
                             '/tmp': {'kind': 'tmp',
                                      "capacity": 1073741824
@@ -85,6 +87,7 @@ class TestContainer(unittest.TestCase):
                         'cwd': '/var/spool/cwl',
                         'scheduling_parameters': {},
                         'properties': {},
+                        'secret_mounts': {}
                     }))
 
     # The test passes some fields in builder.resources
@@ -96,6 +99,8 @@ class TestContainer(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 3600
+        runner.secret_store = cwltool.secrets.SecretStore()
+
         document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
@@ -138,7 +143,7 @@ class TestContainer(unittest.TestCase):
         arvtool.formatgraph = None
         for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_resource_requirements",
                              make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(enable_reuse=True)
+            j.run(enable_reuse=True, priority=500)
 
         call_args, call_kwargs = runner.api.container_requests().create.call_args
 
@@ -155,7 +160,7 @@ class TestContainer(unittest.TestCase):
                 'API': True
             },
             'use_existing': False,
-            'priority': 1,
+            'priority': 500,
             'mounts': {
                 '/tmp': {'kind': 'tmp',
                          "capacity": 4194304000 },
@@ -172,7 +177,8 @@ class TestContainer(unittest.TestCase):
             'scheduling_parameters': {
                 'partitions': ['blurb']
             },
-            'properties': {}
+            'properties': {},
+            'secret_mounts': {}
         }
 
         call_body = call_kwargs.get('body', None)
@@ -191,6 +197,8 @@ class TestContainer(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
         document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
@@ -247,7 +255,7 @@ class TestContainer(unittest.TestCase):
         arvtool.formatgraph = None
         for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_initial_work_dir",
                              make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run()
+            j.run(priority=500)
 
         call_args, call_kwargs = runner.api.container_requests().create.call_args
 
@@ -267,7 +275,7 @@ class TestContainer(unittest.TestCase):
                 'ram': 1073741824
             },
             'use_existing': True,
-            'priority': 1,
+            'priority': 500,
             'mounts': {
                 '/tmp': {'kind': 'tmp',
                          "capacity": 1073741824 },
@@ -303,7 +311,8 @@ class TestContainer(unittest.TestCase):
             'cwd': '/var/spool/cwl',
             'scheduling_parameters': {
             },
-            'properties': {}
+            'properties': {},
+            'secret_mounts': {}
         }
 
         call_body = call_kwargs.get('body', None)
@@ -321,6 +330,7 @@ class TestContainer(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -346,7 +356,7 @@ class TestContainer(unittest.TestCase):
         arvtool.formatgraph = None
         for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_redirect",
                              make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run()
+            j.run(priority=500)
             runner.api.container_requests().create.assert_called_with(
                 body=JsonDiffMatcher({
                     'environment': {
@@ -359,7 +369,7 @@ class TestContainer(unittest.TestCase):
                         'ram': 1073741824
                     },
                     'use_existing': True,
-                    'priority': 1,
+                    'priority': 500,
                     'mounts': {
                         '/tmp': {'kind': 'tmp',
                                  "capacity": 1073741824 },
@@ -388,6 +398,7 @@ class TestContainer(unittest.TestCase):
                     'cwd': '/var/spool/cwl',
                     'scheduling_parameters': {},
                     'properties': {},
+                    'secret_mounts': {}
                 }))
 
     @mock.patch("arvados.collection.Collection")
@@ -400,6 +411,7 @@ class TestContainer(unittest.TestCase):
         runner.num_retries = 0
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
 
         runner.api.containers().get().execute.return_value = {"state":"Complete",
                                                               "output": "abc+123",
@@ -443,6 +455,7 @@ class TestContainer(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -484,7 +497,7 @@ class TestContainer(unittest.TestCase):
         }
         for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_run_mounts",
                              make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run()
+            j.run(priority=500)
             runner.api.container_requests().create.assert_called_with(
                 body=JsonDiffMatcher({
                     'environment': {
@@ -497,7 +510,7 @@ class TestContainer(unittest.TestCase):
                         'ram': 1073741824
                     },
                     'use_existing': True,
-                    'priority': 1,
+                    'priority': 500,
                     'mounts': {
                         "/keep/99999999999999999999999999999994+44": {
                             "kind": "collection",
@@ -517,4 +530,102 @@ class TestContainer(unittest.TestCase):
                     'cwd': '/var/spool/cwl',
                     'scheduling_parameters': {},
                     'properties': {},
+                    'secret_mounts': {}
+                }))
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_secrets(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        tool = cmap({"arguments": ["md5sum", "example.conf"],
+                     "class": "CommandLineTool",
+                     "hints": [
+                         {
+                             "class": "http://commonwl.org/cwltool#Secrets",
+                             "secrets": [
+                                 "#secret_job.cwl/pw"
+                             ]
+                         }
+                     ],
+                     "id": "#secret_job.cwl",
+                     "inputs": [
+                         {
+                             "id": "#secret_job.cwl/pw",
+                             "type": "string"
+                         }
+                     ],
+                     "outputs": [
+                     ],
+                     "requirements": [
+                         {
+                             "class": "InitialWorkDirRequirement",
+                             "listing": [
+                                 {
+                                     "entry": "username: user\npassword: $(inputs.pw)\n",
+                                     "entryname": "example.conf"
+                                 }
+                             ]
+                         }
+                     ]})
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                     collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
+                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+        arvtool.formatgraph = None
+
+        job_order = {"pw": "blorp"}
+        runner.secret_store.store(["pw"], job_order)
+
+        for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_secrets",
+                             make_fs_access=make_fs_access, tmpdir="/tmp"):
+            j.run(enable_reuse=True, priority=500)
+            runner.api.container_requests().create.assert_called_with(
+                body=JsonDiffMatcher({
+                    'environment': {
+                        'HOME': '/var/spool/cwl',
+                        'TMPDIR': '/tmp'
+                    },
+                    'name': 'test_secrets',
+                    'runtime_constraints': {
+                        'vcpus': 1,
+                        'ram': 1073741824
+                    },
+                    'use_existing': True,
+                    'priority': 500,
+                    'mounts': {
+                        '/tmp': {'kind': 'tmp',
+                                 "capacity": 1073741824
+                             },
+                        '/var/spool/cwl': {'kind': 'tmp',
+                                           "capacity": 1073741824 }
+                    },
+                    'state': 'Committed',
+                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                    'output_path': '/var/spool/cwl',
+                    'output_ttl': 0,
+                    'container_image': 'arvados/jobs',
+                    'command': ['md5sum', 'example.conf'],
+                    'cwd': '/var/spool/cwl',
+                    'scheduling_parameters': {},
+                    'properties': {},
+                    "secret_mounts": {
+                        "/var/spool/cwl/example.conf": {
+                            "content": "username: user\npassword: blorp\n",
+                            "kind": "text"
+                        }
+                    }
                 }))
index c0b74fe4d3e1bd310afaa2b9bc906293c8c41498..45f8a05eed7495d84ef93dadde45efda43225d7d 100644 (file)
@@ -194,7 +194,7 @@ def stubs(func):
             expect_packed_workflow = yaml.round_trip_load(f)
 
         stubs.expect_container_spec = {
-            'priority': 1,
+            'priority': 500,
             'mounts': {
                 '/var/spool/cwl': {
                     'writable': True,
@@ -231,6 +231,7 @@ def stubs(func):
                     'kind': 'json'
                 }
             },
+            'secret_mounts': {},
             'state': 'Committed',
             'owner_uuid': None,
             'command': ['arvados-cwl-runner', '--local', '--api=containers', '--no-log-timestamps',
@@ -246,7 +247,8 @@ def stubs(func):
                 'ram': 1024*1024*1024
             },
             'use_existing': True,
-            'properties': {}
+            'properties': {},
+            'secret_mounts': {}
         }
 
         stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
@@ -709,7 +711,7 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(exited, 0)
 
         expect_container = {
-            'priority': 1,
+            'priority': 500,
             'mounts': {
                 '/var/spool/cwl': {
                     'writable': True,
@@ -744,7 +746,8 @@ class TestSubmit(unittest.TestCase):
                 'ram': 1073741824
             },
             'use_existing': True,
-            'properties': {}
+            'properties': {},
+            'secret_mounts': {}
         }
 
         stubs.api.container_requests().create.assert_called_with(
@@ -792,7 +795,7 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(exited, 0)
 
         expect_container = {
-            'priority': 1,
+            'priority': 500,
             'mounts': {
                 '/var/spool/cwl': {
                     'writable': True,
@@ -862,7 +865,8 @@ class TestSubmit(unittest.TestCase):
             'use_existing': True,
             'properties': {
                 "template_uuid": "962eh-7fd4e-gkbzl62qqtfig37"
-            }
+            },
+            'secret_mounts': {}
         }
 
         stubs.api.container_requests().create.assert_called_with(
@@ -980,6 +984,26 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(capture_stdout.getvalue(),
                          stubs.expect_container_request_uuid + '\n')
 
+    @stubs
+    def test_submit_priority(self, stubs):
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--submit", "--no-wait", "--api=containers", "--debug", "--priority=669",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        stubs.expect_container_spec["priority"] = 669
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
 
     @mock.patch("arvados.commands.keepdocker.find_one_image_hash")
     @mock.patch("cwltool.docker.DockerCommandLineJob.get_image")
@@ -1013,6 +1037,172 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual("arvados/jobs:"+arvados_cwl.__version__,
                          arvados_cwl.runner.arvados_jobs_image(arvrunner, "arvados/jobs:"+arvados_cwl.__version__))
 
+    @stubs
+    def test_submit_secrets(self, stubs):
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--submit", "--no-wait", "--api=containers", "--debug",
+                 "tests/wf/secret_wf.cwl", "tests/secret_test_job.yml"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+
+        expect_container = {
+            "command": [
+                "arvados-cwl-runner",
+                "--local",
+                "--api=containers",
+                "--no-log-timestamps",
+                "--enable-reuse",
+                "--on-error=continue",
+                "--eval-timeout=20",
+                "/var/lib/cwl/workflow.json#main",
+                "/var/lib/cwl/cwl.input.json"
+            ],
+            "container_image": "arvados/jobs:"+arvados_cwl.__version__,
+            "cwd": "/var/spool/cwl",
+            "mounts": {
+                "/var/lib/cwl/cwl.input.json": {
+                    "content": {
+                        "pw": {
+                            "$include": "/secrets/s0"
+                        }
+                    },
+                    "kind": "json"
+                },
+                "/var/lib/cwl/workflow.json": {
+                    "content": {
+                        "$graph": [
+                            {
+                                "$namespaces": {
+                                    "cwltool": "http://commonwl.org/cwltool#"
+                                },
+                                "arguments": [
+                                    "md5sum",
+                                    "example.conf"
+                                ],
+                                "class": "CommandLineTool",
+                                "hints": [
+                                    {
+                                        "class": "http://commonwl.org/cwltool#Secrets",
+                                        "secrets": [
+                                            "#secret_job.cwl/pw"
+                                        ]
+                                    }
+                                ],
+                                "id": "#secret_job.cwl",
+                                "inputs": [
+                                    {
+                                        "id": "#secret_job.cwl/pw",
+                                        "type": "string"
+                                    }
+                                ],
+                                "outputs": [
+                                    {
+                                        "id": "#secret_job.cwl/out",
+                                        "type": "stdout"
+                                    }
+                                ],
+                                "stdout": "hashed_example.txt",
+                                "requirements": [
+                                    {
+                                        "class": "InitialWorkDirRequirement",
+                                        "listing": [
+                                            {
+                                                "entry": "username: user\npassword: $(inputs.pw)\n",
+                                                "entryname": "example.conf"
+                                            }
+                                        ]
+                                    }
+                                ]
+                            },
+                            {
+                                "class": "Workflow",
+                                "hints": [
+                                    {
+                                        "class": "DockerRequirement",
+                                        "dockerPull": "debian:8"
+                                    },
+                                    {
+                                        "class": "http://commonwl.org/cwltool#Secrets",
+                                        "secrets": [
+                                            "#main/pw"
+                                        ]
+                                    }
+                                ],
+                                "id": "#main",
+                                "inputs": [
+                                    {
+                                        "id": "#main/pw",
+                                        "type": "string"
+                                    }
+                                ],
+                                "outputs": [
+                                    {
+                                        "id": "#main/out",
+                                        "outputSource": "#main/step1/out",
+                                        "type": "File"
+                                    }
+                                ],
+                                "steps": [
+                                    {
+                                        "id": "#main/step1",
+                                        "in": [
+                                            {
+                                                "id": "#main/step1/pw",
+                                                "source": "#main/pw"
+                                            }
+                                        ],
+                                        "out": [
+                                            "#main/step1/out"
+                                        ],
+                                        "run": "#secret_job.cwl"
+                                    }
+                                ]
+                            }
+                        ],
+                        "cwlVersion": "v1.0"
+                    },
+                    "kind": "json"
+                },
+                "/var/spool/cwl": {
+                    "kind": "collection",
+                    "writable": True
+                },
+                "stdout": {
+                    "kind": "file",
+                    "path": "/var/spool/cwl/cwl.output.json"
+                }
+            },
+            "name": "secret_wf.cwl",
+            "output_path": "/var/spool/cwl",
+            "owner_uuid": None,
+            "priority": 500,
+            "properties": {},
+            "runtime_constraints": {
+                "API": True,
+                "ram": 1073741824,
+                "vcpus": 1
+            },
+            "secret_mounts": {
+                "/secrets/s0": {
+                    "content": "blorp",
+                    "kind": "text"
+                }
+            },
+            "state": "Committed",
+            "use_existing": True
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
+
 class TestCreateTemplate(unittest.TestCase):
     existing_template_uuid = "zzzzz-d1hrv-validworkfloyml"
 
diff --git a/sdk/cwl/tests/wf/secret_job.cwl b/sdk/cwl/tests/wf/secret_job.cwl
new file mode 100644 (file)
index 0000000..0ddeb64
--- /dev/null
@@ -0,0 +1,20 @@
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+hints:
+  "cwltool:Secrets":
+    secrets: [pw]
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: example.conf
+        entry: |
+          username: user
+          password: $(inputs.pw)
+inputs:
+  pw: string
+outputs:
+  out: stdout
+stdout: hashed_example.txt
+arguments: [md5sum, example.conf]
diff --git a/sdk/cwl/tests/wf/secret_wf.cwl b/sdk/cwl/tests/wf/secret_wf.cwl
new file mode 100644 (file)
index 0000000..17c92d6
--- /dev/null
@@ -0,0 +1,21 @@
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+hints:
+  "cwltool:Secrets":
+    secrets: [pw]
+  DockerRequirement:
+    dockerPull: debian:8
+inputs:
+  pw: string
+outputs:
+  out:
+    type: File
+    outputSource: step1/out
+steps:
+  step1:
+    in:
+      pw: pw
+    out: [out]
+    run: secret_job.cwl
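
Together, the two CWL files above form the end-to-end secrets fixture: the workflow declares `cwltool:Secrets` for `pw`, and the tool writes it into `example.conf` at runtime. A sketch of submitting it, using the same entry point and flags as test_submit_secrets (assumes Arvados API credentials are configured in the environment):

    import sys
    import arvados_cwl

    # Submit the secret workflow; the "pw" value from secret_test_job.yml is
    # delivered via the container request's secret_mounts, not stored in Keep.
    exit_code = arvados_cwl.main(
        ["--submit", "--no-wait", "--api=containers",
         "tests/wf/secret_wf.cwl", "tests/secret_test_job.yml"],
        sys.stdout, sys.stderr)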
index 7f9d869e0cead2fdc766fb9deac95f2a9b99e420..9797440205cf3d8396d14ec389e380b2260486b2 100644 (file)
@@ -22,6 +22,14 @@ type KeepService struct {
        ReadOnly       bool   `json:"read_only"`
 }
 
+type KeepMount struct {
+       UUID           string   `json:"uuid"`
+       DeviceID       string   `json:"device_id"`
+       ReadOnly       bool     `json:"read_only"`
+       Replication    int      `json:"replication"`
+       StorageClasses []string `json:"storage_classes"`
+}
+
 // KeepServiceList is an arvados#keepServiceList record
 type KeepServiceList struct {
        Items          []KeepService `json:"items"`
@@ -77,10 +85,32 @@ func (s *KeepService) String() string {
        return s.UUID
 }
 
+func (s *KeepService) Mounts(c *Client) ([]KeepMount, error) {
+       url := s.url("mounts")
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               return nil, err
+       }
+       var mounts []KeepMount
+       err = c.DoAndDecode(&mounts, req)
+       if err != nil {
+               return nil, fmt.Errorf("GET %v: %v", url, err)
+       }
+       return mounts, nil
+}
+
+// IndexMount returns an unsorted list of blocks at the given mount point.
+func (s *KeepService) IndexMount(c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {
+       return s.index(c, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
+}
+
 // Index returns an unsorted list of blocks that can be retrieved from
 // this server.
 func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry, error) {
-       url := s.url("index/" + prefix)
+       return s.index(c, s.url("index/"+prefix))
+}
+
+func (s *KeepService) index(c *Client, url string) ([]KeepServiceIndexEntry, error) {
        req, err := http.NewRequest("GET", url, nil)
        if err != nil {
                return nil, fmt.Errorf("NewRequest(%v): %v", url, err)
@@ -89,7 +119,7 @@ func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry,
        if err != nil {
                return nil, fmt.Errorf("Do(%v): %v", url, err)
        } else if resp.StatusCode != 200 {
-               return nil, fmt.Errorf("%v: %v", url, resp.Status)
+               return nil, fmt.Errorf("%v: %d %v", url, resp.StatusCode, resp.Status)
        }
        defer resp.Body.Close()
 
index 06a4f07a798e274094160dc9ce836ca6507b4b4d..bb97f3c1d8186adb0da84f541997157f149c0c1a 100644 (file)
@@ -41,10 +41,10 @@ import arvados.util as util
 
 # Set up Arvados logging based on the user's configuration.
 # All Arvados code should log under the arvados hierarchy.
+log_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'
+log_date_format = '%Y-%m-%d %H:%M:%S'
 log_handler = logging.StreamHandler()
-log_handler.setFormatter(logging.Formatter(
-        '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
-        '%Y-%m-%d %H:%M:%S'))
+log_handler.setFormatter(logging.Formatter(log_format, log_date_format))
 logger = logging.getLogger('arvados')
 logger.addHandler(log_handler)
 logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
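
Factoring the format strings out to module level lets other tools emit log lines that match the SDK's own output. A sketch (the 'myapp' logger name is hypothetical):

    import logging
    import arvados

    # Build a handler whose output matches the SDK's log lines, using the
    # format strings this change exposes at module level.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(arvados.log_format, arvados.log_date_format))
    logging.getLogger('myapp').addHandler(handler)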
index 33333ee86558c4b0244917a9ffb2c75645d321fa..8fb90c944396967e6863a38daee27ffe3cb8b9ec 100644 (file)
@@ -34,6 +34,8 @@ from arvados.retry import retry_method
 _logger = logging.getLogger('arvados.collection')
 
 class CollectionBase(object):
+    """Abstract base class for Collection classes."""
+
     def __enter__(self):
         return self
 
@@ -91,6 +93,8 @@ class _WriterFile(_FileLikeObjectBase):
 
 
 class CollectionWriter(CollectionBase):
+    """Deprecated, use Collection instead."""
+
     def __init__(self, api_client=None, num_retries=0, replication=None):
         """Instantiate a CollectionWriter.
 
@@ -396,6 +400,8 @@ class CollectionWriter(CollectionBase):
 
 
 class ResumableCollectionWriter(CollectionWriter):
+    """Deprecated, use Collection instead."""
+
     STATE_PROPS = ['_current_stream_files', '_current_stream_length',
                    '_current_stream_locators', '_current_stream_name',
                    '_current_file_name', '_current_file_pos', '_close_file',
index 716b456ab5e69e54bd11bbd3e72dfac85e056a4b..3ce47b20660bc68c51833b981cdfdda17c6672e8 100644 (file)
@@ -168,7 +168,7 @@ def main(arguments=None):
     ln = df_out.splitlines()[1]
     filesystem, blocks, used, available, use_pct, mounted = re.match(r"^([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+)", ln).groups(1)
     if int(available) <= will_need:
-        logger.warn("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, will_need>>20)
+        logger.warn("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, int(will_need)>>20)
         if not args.force:
             exit(1)
         else:
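
For context, `available` and `mounted` come from matching a line of `df` output against the regex a few lines up; a standalone sketch of that parse (the sample df line is illustrative):

    import re

    ln = "/dev/sda1  103081248  23090920  74728744  24% /tmp"
    m = re.match(r"^([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+)", ln)
    # groups() yields the six whitespace-separated df columns as strings.
    filesystem, blocks, used, available, use_pct, mounted = m.groups()
    assert mounted == "/tmp" and int(available) > 0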
index 97ff8c6f3fe12eaa5d936a4dcdeb418d4eb036c3..5dde8e53c933d05b2facbf8df284941635da3b42 100644 (file)
@@ -309,6 +309,24 @@ class FileUploadList(list):
         super(FileUploadList, self).append(other)
 
 
+# Appends the X-Request-Id to the first log message emitted at ERROR or DEBUG level
+class ArvPutLogFormatter(logging.Formatter):
+    std_fmtr = logging.Formatter(arvados.log_format, arvados.log_date_format)
+    err_fmtr = None
+    request_id_informed = False
+
+    def __init__(self, request_id):
+        self.err_fmtr = logging.Formatter(
+            arvados.log_format+' (X-Request-Id: {})'.format(request_id),
+            arvados.log_date_format)
+
+    def format(self, record):
+        if (not self.request_id_informed) and (record.levelno in (logging.DEBUG, logging.ERROR)):
+            self.request_id_informed = True
+            return self.err_fmtr.format(record)
+        return self.std_fmtr.format(record)
+
+
 class ResumeCache(object):
     CACHE_DIR = '.cache/arvados/arv-put'
 
@@ -887,7 +905,7 @@ class ArvPutUploadJob(object):
         m = self._my_collection().stripped_manifest().encode()
         local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))
         if pdh != local_pdh:
-            logger.warning("\n".join([
+            self.logger.warning("\n".join([
                 "arv-put: API server provided PDH differs from local manifest.",
                 "         This should not happen; showing API server version."]))
         return pdh
@@ -961,6 +979,7 @@ def progress_writer(progress_func, outfile=sys.stderr):
     return write_progress
 
 def exit_signal_handler(sigcode, frame):
+    logging.getLogger('arvados.arv_put').error("Caught signal {}, exiting.".format(sigcode))
     sys.exit(-sigcode)
 
 def desired_project_uuid(api_client, project_uuid, num_retries):
@@ -986,11 +1005,18 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     status = 0
 
     request_id = arvados.util.new_request_id()
-    logger.info('X-Request-Id: '+request_id)
+
+    formatter = ArvPutLogFormatter(request_id)
+    logging.getLogger('arvados').handlers[0].setFormatter(formatter)
 
     if api_client is None:
         api_client = arvados.api('v1', request_id=request_id)
 
+    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
+    # the originals.
+    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
+                            for sigcode in CAUGHT_SIGNALS}
+
     # Determine the name to use
     if args.name:
         if args.stream or args.raw:
@@ -1107,11 +1133,6 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
             "arv-put: %s" % str(error)]))
         sys.exit(1)
 
-    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
-    # the originals.
-    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
-                            for sigcode in CAUGHT_SIGNALS}
-
     if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
         logger.warning("\n".join([
             "arv-put: Resuming previous upload from last checkpoint.",
index 0c01619b4bd26798d8c74ba8bc4e584b626956de..4b1f69477e5823502b2a5396a586db82a56e6ff7 100644 (file)
@@ -18,11 +18,12 @@ import os
 import pwd
 import random
 import re
+import select
 import shutil
+import signal
 import subprocess
 import sys
 import tempfile
-import threading
 import time
 import unittest
 import uuid
@@ -575,6 +576,47 @@ class ArvadosPutReportTest(ArvadosBaseTestCase):
                                       arv_put.human_progress(count, None)))
 
 
+class ArvPutLogFormatterTest(ArvadosBaseTestCase):
+    matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)'
+
+    def setUp(self):
+        super(ArvPutLogFormatterTest, self).setUp()
+        self.stderr = tutil.StringIO()
+        self.loggingHandler = logging.StreamHandler(self.stderr)
+        self.loggingHandler.setFormatter(
+            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
+        self.logger = logging.getLogger()
+        self.logger.addHandler(self.loggingHandler)
+        self.logger.setLevel(logging.DEBUG)
+
+    def tearDown(self):
+        self.logger.removeHandler(self.loggingHandler)
+        self.stderr.close()
+        self.stderr = None
+        super(ArvPutLogFormatterTest, self).tearDown()
+
+    def test_request_id_logged_only_once_on_error(self):
+        self.logger.error('Ooops, something bad happened.')
+        self.logger.error('Another bad thing just happened.')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(2, len(log_lines))
+        self.assertRegex(log_lines[0], self.matcher)
+        self.assertNotRegex(log_lines[1], self.matcher)
+
+    def test_request_id_logged_only_once_on_debug(self):
+        self.logger.debug('This is just a debug message.')
+        self.logger.debug('Another message, move along.')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(2, len(log_lines))
+        self.assertRegex(log_lines[0], self.matcher)
+        self.assertNotRegex(log_lines[1], self.matcher)
+
+    def test_request_id_not_logged_on_info(self):
+        self.logger.info('This should be a useful message')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(1, len(log_lines))
+        self.assertNotRegex(log_lines[0], self.matcher)
+
 class ArvadosPutTest(run_test_server.TestCaseWithServers,
                      ArvadosBaseTestCase,
                      tutil.VersionChecker):
@@ -604,7 +646,8 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers,
         self.main_stdout = tutil.StringIO()
         self.main_stderr = tutil.StringIO()
         self.loggingHandler = logging.StreamHandler(self.main_stderr)
-        self.loggingHandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+        self.loggingHandler.setFormatter(
+            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
         logging.getLogger().addHandler(self.loggingHandler)
 
     def tearDown(self):
@@ -706,14 +749,17 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers,
             self.assertLess(0, coll_save_mock.call_count)
             self.assertEqual("", self.main_stdout.getvalue())
 
-    def test_request_id_logging(self):
-        matcher = r'INFO: X-Request-Id: req-[a-z0-9]{20}\n'
-
-        self.call_main_on_test_file()
-        self.assertRegex(self.main_stderr.getvalue(), matcher)
-
-        self.call_main_on_test_file(['--silent'])
-        self.assertNotRegex(self.main_stderr.getvalue(), matcher)
+    def test_request_id_logging_on_error(self):
+        matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)\n'
+        coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')
+        coll_save_mock.side_effect = arvados.errors.ApiError(
+            fake_httplib2_response(403), b'{}')
+        with mock.patch('arvados.collection.Collection.save_new',
+                        new=coll_save_mock):
+            with self.assertRaises(SystemExit) as exc_test:
+                self.call_main_with_args(['/dev/null'])
+            self.assertRegex(
+                self.main_stderr.getvalue(), matcher)
 
 
 class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
@@ -812,6 +858,30 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
         self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11',
                       pipe.stdout.read().decode())
 
+    def test_sigint_logs_request_id(self):
+        # Start arv-put, give it a chance to start up, send SIGINT,
+        # and check that its output includes the X-Request-Id.
+        input_stream = subprocess.Popen(
+            ['sleep', '10'],
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__, '--stream'],
+            stdin=input_stream.stdout, stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT, env=self.ENVIRON)
+        # Wait for arv-put child process to print something (i.e., a
+        # log message) so we know its signal handler is installed.
+        select.select([pipe.stdout], [], [], 10)
+        pipe.send_signal(signal.SIGINT)
+        deadline = time.time() + 5
+        while (pipe.poll() is None) and (time.time() < deadline):
+            time.sleep(.1)
+        returncode = pipe.poll()
+        input_stream.terminate()
+        if returncode is None:
+            pipe.terminate()
+            self.fail("arv-put did not exit within 5 seconds")
+        self.assertRegex(pipe.stdout.read().decode(), r'\(X-Request-Id: req-[a-z0-9]{20}\)')
+
     def test_ArvPutSignedManifest(self):
         # ArvPutSignedManifest runs "arv-put foo" and then attempts to get
         # the newly created manifest from the API server, testing to confirm
index 4cb5671e1801fc75107057da949653482cbf8430..7d4d4bba176591a97e2d166c6e291e4588dbe99e 100644 (file)
@@ -4,7 +4,7 @@
 
 source 'https://rubygems.org'
 
-gem 'rails', '~> 4.0'
+gem 'rails', '~> 4.2'
 gem 'responders', '~> 2.0'
 gem 'protected_attributes'
 
@@ -21,8 +21,14 @@ group :test, :development do
   gem 'mocha', require: false
 end
 
+# We'll need to update related code prior to Rails 5.
+# See: https://github.com/rails/activerecord-deprecated_finders
+gem 'activerecord-deprecated_finders', require: 'active_record/deprecated_finders'
+
 # pg is the only supported database driver.
-gem 'pg'
+# Note: Rails 4.2 is not compatible with pg 1.0
+#       (See: https://github.com/rails/rails/pull/31671)
+gem 'pg', '~> 0.18'
 
 gem 'multi_json'
 gem 'oj'
index b2de3f51f2851efe374613292ef99f827032a5b3..30cd8a559fb5c0d58177f59907edf7e0c716ac6f 100644 (file)
@@ -8,57 +8,57 @@ GIT
 GEM
   remote: https://rubygems.org/
   specs:
-    actionmailer (4.2.5.2)
-      actionpack (= 4.2.5.2)
-      actionview (= 4.2.5.2)
-      activejob (= 4.2.5.2)
+    actionmailer (4.2.10)
+      actionpack (= 4.2.10)
+      actionview (= 4.2.10)
+      activejob (= 4.2.10)
       mail (~> 2.5, >= 2.5.4)
       rails-dom-testing (~> 1.0, >= 1.0.5)
-    actionpack (4.2.5.2)
-      actionview (= 4.2.5.2)
-      activesupport (= 4.2.5.2)
+    actionpack (4.2.10)
+      actionview (= 4.2.10)
+      activesupport (= 4.2.10)
       rack (~> 1.6)
       rack-test (~> 0.6.2)
       rails-dom-testing (~> 1.0, >= 1.0.5)
       rails-html-sanitizer (~> 1.0, >= 1.0.2)
-    actionview (4.2.5.2)
-      activesupport (= 4.2.5.2)
+    actionview (4.2.10)
+      activesupport (= 4.2.10)
       builder (~> 3.1)
       erubis (~> 2.7.0)
       rails-dom-testing (~> 1.0, >= 1.0.5)
-      rails-html-sanitizer (~> 1.0, >= 1.0.2)
-    activejob (4.2.5.2)
-      activesupport (= 4.2.5.2)
+      rails-html-sanitizer (~> 1.0, >= 1.0.3)
+    activejob (4.2.10)
+      activesupport (= 4.2.10)
       globalid (>= 0.3.0)
-    activemodel (4.2.5.2)
-      activesupport (= 4.2.5.2)
+    activemodel (4.2.10)
+      activesupport (= 4.2.10)
       builder (~> 3.1)
-    activerecord (4.2.5.2)
-      activemodel (= 4.2.5.2)
-      activesupport (= 4.2.5.2)
+    activerecord (4.2.10)
+      activemodel (= 4.2.10)
+      activesupport (= 4.2.10)
       arel (~> 6.0)
-    activesupport (4.2.5.2)
+    activerecord-deprecated_finders (1.0.4)
+    activesupport (4.2.10)
       i18n (~> 0.7)
-      json (~> 1.7, >= 1.7.7)
       minitest (~> 5.1)
       thread_safe (~> 0.3, >= 0.3.4)
       tzinfo (~> 1.1)
-    acts_as_api (1.0.0)
+    acts_as_api (1.0.1)
       activemodel (>= 3.0.0)
       activesupport (>= 3.0.0)
       rack (>= 1.1.0)
-    addressable (2.5.1)
-      public_suffix (~> 2.0, >= 2.0.2)
+    addressable (2.5.2)
+      public_suffix (>= 2.0.2, < 4.0)
     andand (1.3.3)
     arel (6.0.4)
-    arvados (0.1.20170629115132)
-      activesupport (>= 3, < 4.2.6)
+    arvados (0.1.20180302192246)
+      activesupport (>= 3)
       andand (~> 1.3, >= 1.3.3)
       google-api-client (>= 0.7, < 0.8.9)
       i18n (~> 0)
       json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
-    arvados-cli (0.1.20170817171636)
+    arvados-cli (0.1.20171211220040)
       activesupport (>= 3.2.13, < 5)
       andand (~> 1.3, >= 1.3.3)
       arvados (~> 0.1, >= 0.1.20150128223554)
@@ -78,33 +78,33 @@ GEM
       net-sftp (>= 2.0.0)
       net-ssh (>= 2.0.14)
       net-ssh-gateway (>= 1.1.0)
-    coffee-rails (4.2.1)
+    coffee-rails (4.2.2)
       coffee-script (>= 2.2.0)
-      railties (>= 4.0.0, < 5.2.x)
+      railties (>= 4.0.0)
     coffee-script (2.4.1)
       coffee-script-source
       execjs
     coffee-script-source (1.12.2)
     concurrent-ruby (1.0.5)
-    crass (1.0.2)
-    curb (0.9.3)
-    database_cleaner (1.5.3)
+    crass (1.0.3)
+    curb (0.9.4)
+    database_cleaner (1.6.2)
     erubis (2.7.0)
-    eventmachine (1.2.3)
+    eventmachine (1.2.5)
     execjs (2.7.0)
     extlib (0.9.16)
-    factory_girl (4.8.0)
+    factory_girl (4.9.0)
       activesupport (>= 3.0.0)
-    factory_girl_rails (4.8.0)
-      factory_girl (~> 4.8.0)
+    factory_girl_rails (4.9.0)
+      factory_girl (~> 4.9.0)
       railties (>= 3.0.0)
-    faraday (0.11.0)
+    faraday (0.12.2)
       multipart-post (>= 1.2, < 3)
     faye-websocket (0.10.7)
       eventmachine (>= 0.12.0)
       websocket-driver (>= 0.5.1)
-    globalid (0.3.7)
-      activesupport (>= 4.1.0)
+    globalid (0.4.1)
+      activesupport (>= 4.2.0)
     google-api-client (0.8.7)
       activesupport (>= 3.2, < 5.0)
       addressable (~> 2.3)
@@ -116,25 +116,25 @@ GEM
       multi_json (~> 1.10)
       retriable (~> 1.4)
       signet (~> 0.6)
-    googleauth (0.5.1)
-      faraday (~> 0.9)
-      jwt (~> 1.4)
+    googleauth (0.6.2)
+      faraday (~> 0.12)
+      jwt (>= 1.4, < 3.0)
       logging (~> 2.0)
       memoist (~> 0.12)
       multi_json (~> 1.11)
       os (~> 0.9)
       signet (~> 0.7)
-    hashie (3.5.5)
-    highline (1.7.8)
+    hashie (3.5.7)
+    highline (1.7.10)
     hike (1.2.3)
     httpclient (2.8.3)
-    i18n (0.9.0)
+    i18n (0.9.5)
       concurrent-ruby (~> 1.0)
-    jquery-rails (4.2.2)
+    jquery-rails (4.3.1)
       rails-dom-testing (>= 1, < 3)
       railties (>= 4.2.0)
       thor (>= 0.14, < 2.0)
-    json (1.8.6)
+    json (2.1.0)
     jwt (1.5.6)
     launchy (2.4.3)
       addressable (~> 2.3)
@@ -143,97 +143,97 @@ GEM
     logging (2.2.2)
       little-plugger (~> 1.1)
       multi_json (~> 1.10)
-    lograge (0.7.1)
-      actionpack (>= 4, < 5.2)
-      activesupport (>= 4, < 5.2)
-      railties (>= 4, < 5.2)
+    lograge (0.9.0)
+      actionpack (>= 4)
+      activesupport (>= 4)
+      railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    loofah (2.1.1)
+    loofah (2.2.0)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
-    mail (2.6.4)
-      mime-types (>= 1.16, < 4)
+    mail (2.7.0)
+      mini_mime (>= 0.1.1)
     memoist (0.16.0)
     metaclass (0.0.4)
-    mime-types (3.1)
-      mime-types-data (~> 3.2015)
-    mime-types-data (3.2016.0521)
+    mini_mime (1.0.0)
     mini_portile2 (2.3.0)
-    minitest (5.10.3)
-    mocha (1.2.1)
+    minitest (5.11.3)
+    mocha (1.3.0)
       metaclass (~> 0.0.1)
-    multi_json (1.12.1)
+    multi_json (1.13.1)
     multi_xml (0.6.0)
     multipart-post (2.0.0)
     net-scp (1.2.1)
       net-ssh (>= 2.6.5)
     net-sftp (2.1.2)
       net-ssh (>= 2.6.5)
-    net-ssh (4.1.0)
+    net-ssh (4.2.0)
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
-    nokogiri (1.8.1)
+    nokogiri (1.8.2)
       mini_portile2 (~> 2.3.0)
-    oauth2 (1.3.1)
-      faraday (>= 0.8, < 0.12)
+    oauth2 (1.4.0)
+      faraday (>= 0.8, < 0.13)
       jwt (~> 1.0)
       multi_json (~> 1.3)
       multi_xml (~> 0.5)
       rack (>= 1.2, < 3)
     oj (2.18.5)
     oj_mimic_json (1.0.1)
-    omniauth (1.4.2)
+    omniauth (1.4.3)
       hashie (>= 1.2, < 4)
-      rack (>= 1.0, < 3)
-    omniauth-oauth2 (1.4.0)
-      oauth2 (~> 1.0)
+      rack (>= 1.6.2, < 3)
+    omniauth-oauth2 (1.5.0)
+      oauth2 (~> 1.1)
       omniauth (~> 1.2)
     os (0.9.6)
-    passenger (5.1.2)
+    passenger (5.2.1)
       rack
       rake (>= 0.8.1)
-    pg (0.20.0)
-    power_assert (1.0.1)
-    protected_attributes (1.1.3)
+    pg (0.21.0)
+    power_assert (1.1.1)
+    protected_attributes (1.1.4)
       activemodel (>= 4.0.1, < 5.0)
-    public_suffix (2.0.5)
-    rack (1.6.8)
+    public_suffix (3.0.2)
+    rack (1.6.9)
     rack-test (0.6.3)
       rack (>= 1.0)
-    rails (4.2.5.2)
-      actionmailer (= 4.2.5.2)
-      actionpack (= 4.2.5.2)
-      actionview (= 4.2.5.2)
-      activejob (= 4.2.5.2)
-      activemodel (= 4.2.5.2)
-      activerecord (= 4.2.5.2)
-      activesupport (= 4.2.5.2)
+    rails (4.2.10)
+      actionmailer (= 4.2.10)
+      actionpack (= 4.2.10)
+      actionview (= 4.2.10)
+      activejob (= 4.2.10)
+      activemodel (= 4.2.10)
+      activerecord (= 4.2.10)
+      activesupport (= 4.2.10)
       bundler (>= 1.3.0, < 2.0)
-      railties (= 4.2.5.2)
+      railties (= 4.2.10)
       sprockets-rails
     rails-deprecated_sanitizer (1.0.3)
       activesupport (>= 4.2.0.alpha)
-    rails-dom-testing (1.0.8)
-      activesupport (>= 4.2.0.beta, < 5.0)
+    rails-dom-testing (1.0.9)
+      activesupport (>= 4.2.0, < 5.0)
       nokogiri (~> 1.6)
       rails-deprecated_sanitizer (>= 1.0.1)
     rails-html-sanitizer (1.0.3)
       loofah (~> 2.0)
-    rails-observers (0.1.2)
-      activemodel (~> 4.0)
-    railties (4.2.5.2)
-      actionpack (= 4.2.5.2)
-      activesupport (= 4.2.5.2)
+    rails-observers (0.1.5)
+      activemodel (>= 4.0)
+    railties (4.2.10)
+      actionpack (= 4.2.10)
+      activesupport (= 4.2.10)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.2.1)
+    rake (12.3.0)
     ref (2.0.0)
-    request_store (1.3.2)
-    responders (2.3.0)
-      railties (>= 4.2.0, < 5.1)
+    request_store (1.4.0)
+      rack (>= 1.4)
+    responders (2.4.0)
+      actionpack (>= 4.2.0, < 5.3)
+      railties (>= 4.2.0, < 5.3)
     retriable (1.4.1)
-    ruby-prof (0.16.2)
+    ruby-prof (0.17.0)
     rvm-capistrano (1.5.6)
       capistrano (~> 2.15.4)
     safe_yaml (1.0.4)
@@ -243,10 +243,10 @@ GEM
       sass (~> 3.2.2)
       sprockets (~> 2.8, < 3.0)
       sprockets-rails (~> 2.0)
-    signet (0.7.3)
+    signet (0.8.1)
       addressable (~> 2.3)
       faraday (~> 0.9)
-      jwt (~> 1.5)
+      jwt (>= 1.5, < 3.0)
       multi_json (~> 1.10)
     simplecov (0.7.1)
       multi_json (~> 1.0)
@@ -264,7 +264,7 @@ GEM
       activesupport (>= 3.0)
       sprockets (>= 2.8, < 4.0)
     sshkey (1.9.0)
-    test-unit (3.2.3)
+    test-unit (3.2.7)
       power_assert
     test_after_commit (1.1.0)
       activerecord (>= 3.2)
@@ -275,19 +275,20 @@ GEM
     thread_safe (0.3.6)
     tilt (1.4.1)
     trollop (2.1.2)
-    tzinfo (1.2.4)
+    tzinfo (1.2.5)
       thread_safe (~> 0.1)
     uglifier (2.7.2)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
-    websocket-driver (0.6.5)
+    websocket-driver (0.7.0)
       websocket-extensions (>= 0.1.0)
-    websocket-extensions (0.1.2)
+    websocket-extensions (0.1.3)
 
 PLATFORMS
   ruby
 
 DEPENDENCIES
+  activerecord-deprecated_finders
   acts_as_api
   andand
   arvados (>= 0.1.20150615153458)
@@ -307,9 +308,9 @@ DEPENDENCIES
   omniauth (~> 1.4.0)
   omniauth-oauth2 (~> 1.1)
   passenger
-  pg
+  pg (~> 0.18)
   protected_attributes
-  rails (~> 4.0)
+  rails (~> 4.2)
   rails-observers
   responders (~> 2.0)
   ruby-prof
@@ -327,4 +328,4 @@ DEPENDENCIES
   uglifier (~> 2.0)
 
 BUNDLED WITH
-   1.16.0
+   1.16.1
index c4f64f6039b3683127d2b5735ae11064446d10cb..ba7c07d27266f26ef3bcdbcfc01940d406cec9b1 100644 (file)
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'safe_json'
+require 'request_error'
 
 module ApiTemplateOverride
   def allowed_to_render?(fieldset, field, model, options)
@@ -137,7 +138,7 @@ class ApplicationController < ActionController::Base
 
   def render_error(e)
     logger.error e.inspect
-    if e.respond_to? :backtrace and e.backtrace
+    if !e.is_a? RequestError and (e.respond_to? :backtrace and e.backtrace)
       logger.error e.backtrace.collect { |x| x + "\n" }.join('')
     end
     if (@object.respond_to? :errors and
index c5da06e36758ecccf001f6d0050b16fef5fa5166..47ea16e34200a3a382e9059812495288709c5e83 100644 (file)
@@ -9,4 +9,5 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
   accept_attribute_as_json :command, Array
   accept_attribute_as_json :filters, Array
   accept_attribute_as_json :scheduling_parameters, Hash
+  accept_attribute_as_json :secret_mounts, Hash
 end
index 8c63ea7f5b756f39f88cccba6812f26aa9290490..6ec92b0ba66f9a59bc978844563a68d84d20f417 100644 (file)
@@ -60,4 +60,14 @@ class Arvados::V1::ContainersController < ApplicationController
       end
     end
   end
+
+  def secret_mounts
+    if @object &&
+       @object.auth_uuid &&
+       @object.auth_uuid == Thread.current[:api_client_authorization].uuid
+      send_json({"secret_mounts" => @object.secret_mounts})
+    else
+      send_error("Token is not associated with this container.", status: 403)
+    end
+  end
 end
index 91685f59988bf1a852a7a25a2aa46f47a3f32300..adac9960c41a06fff4da68da67e87a0ebf6facd6 100644 (file)
@@ -123,7 +123,7 @@ class Arvados::V1::SchemaController < ApplicationController
         end
         object_properties = {}
         k.columns.
-          select { |col| col.name != 'id' }.
+          select { |col| col.name != 'id' && !col.name.start_with?('secret_') }.
           collect do |col|
           if k.serialized_attributes.has_key? col.name
             object_properties[col.name] = {
index 05deba7bc153b50f5af256dd7a3cc97f1e942454..6a59d3bbaa976b1e554b214159b7a8b4d73044b9 100644 (file)
@@ -5,6 +5,7 @@
 require 'has_uuid'
 require 'record_filters'
 require 'serializers'
+require 'request_error'
 
 class ArvadosModel < ActiveRecord::Base
   self.abstract_class = true
@@ -38,37 +39,37 @@ class ArvadosModel < ActiveRecord::Base
            class_name: 'Link',
            primary_key: :uuid)
 
-  class PermissionDeniedError < StandardError
+  class PermissionDeniedError < RequestError
     def http_status
       403
     end
   end
 
-  class AlreadyLockedError < StandardError
+  class AlreadyLockedError < RequestError
     def http_status
       422
     end
   end
 
-  class LockFailedError < StandardError
+  class LockFailedError < RequestError
     def http_status
       422
     end
   end
 
-  class InvalidStateTransitionError < StandardError
+  class InvalidStateTransitionError < RequestError
     def http_status
       422
     end
   end
 
-  class UnauthorizedError < StandardError
+  class UnauthorizedError < RequestError
     def http_status
       401
     end
   end
 
-  class UnresolvableContainerError < StandardError
+  class UnresolvableContainerError < RequestError
     def http_status
       422
     end
@@ -256,56 +257,66 @@ class ArvadosModel < ActiveRecord::Base
     sql_table = kwargs.fetch(:table_name, table_name)
     include_trash = kwargs.fetch(:include_trash, false)
 
-    sql_conds = []
+    sql_conds = nil
     user_uuids = users_list.map { |u| u.uuid }
 
-    exclude_trashed_records = if !include_trash and (sql_table == "groups" or sql_table == "collections") then
-                                # Only include records that are not explicitly trashed
-                                "AND #{sql_table}.is_trashed = false"
-                              else
-                                ""
-                              end
+    exclude_trashed_records = ""
+    if !include_trash and (sql_table == "groups" or sql_table == "collections") then
+      # Only include records that are not explicitly trashed
+      exclude_trashed_records = "AND #{sql_table}.is_trashed = false"
+    end
 
     if users_list.select { |u| u.is_admin }.any?
+      # Admins skip most permission checks, but we still want to filter out trashed items.
       if !include_trash
         if sql_table != "api_client_authorizations"
-          # Exclude rows where the owner is trashed
-          sql_conds.push "NOT EXISTS(SELECT 1 "+
-                  "FROM #{PERMISSION_VIEW} "+
-                  "WHERE trashed = 1 AND "+
-                  "(#{sql_table}.owner_uuid = target_uuid)) "+
-                  exclude_trashed_records
+          # Only include records where the owner is not trashed
+          sql_conds = "NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+                      "WHERE trashed = 1 AND "+
+                      "(#{sql_table}.owner_uuid = target_uuid)) #{exclude_trashed_records}"
         end
       end
     else
-      trashed_check = if !include_trash then
-                        "AND trashed = 0"
-                      else
-                        ""
-                      end
-
-      owner_check = if sql_table != "api_client_authorizations" and sql_table != "groups" then
-                      "OR (target_uuid = #{sql_table}.owner_uuid AND target_owner_uuid IS NOT NULL)"
-                    else
-                      ""
-                    end
-
-      sql_conds.push "EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
-                     "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND (target_uuid = #{sql_table}.uuid #{owner_check})) "+
-                     exclude_trashed_records
+      trashed_check = ""
+      if !include_trash then
+        trashed_check = "AND trashed = 0"
+      end
+
+      # Note: it is possible to combine the direct_check and
+      # owner_check into a single EXISTS() clause, but it turns out
+      # the query optimizer doesn't like that and forces a sequential
+      # table scan.  Constructing the query with separate EXISTS()
+      # clauses enables it to use the index.
+      #
+      # See issue 13208 for details.
+
+      # Match a direct read permission link from the user to the record uuid
+      direct_check = "EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+                     "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND target_uuid = #{sql_table}.uuid)"
+
+      # Match a read permission link from the user to the record's owner_uuid
+      owner_check = ""
+      if sql_table != "api_client_authorizations" and sql_table != "groups" then
+        owner_check = "OR EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+          "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND target_uuid = #{sql_table}.owner_uuid AND target_owner_uuid IS NOT NULL) "
+      end
 
+      links_cond = ""
       if sql_table == "links"
         # Match any permission link that gives one of the authorized
         # users some permission _or_ gives anyone else permission to
         # view one of the authorized users.
-        sql_conds.push "(#{sql_table}.link_class IN (:permission_link_classes) AND "+
+        links_cond = "OR (#{sql_table}.link_class IN (:permission_link_classes) AND "+
                        "(#{sql_table}.head_uuid IN (:user_uuids) OR #{sql_table}.tail_uuid IN (:user_uuids)))"
       end
+
+      sql_conds = "(#{direct_check} #{owner_check} #{links_cond}) #{exclude_trashed_records}"
+
     end
 
-    self.where(sql_conds.join(' OR '),
-                    user_uuids: user_uuids,
-                    permission_link_classes: ['permission', 'resources'])
+    self.where(sql_conds,
+               user_uuids: user_uuids,
+               permission_link_classes: ['permission', 'resources'])
   end
 
   def save_with_unique_name!
index 3cf106328a31c2244e192e94814cd4d9874f86bd..55a257856c989faa18bc70dfe075cc7d534b90e1 100644 (file)
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+require 'request_error'
+
 class Blob
   extend DbCurrentTime
 
@@ -25,8 +27,8 @@ class Blob
   #     locator_hash +A blob_signature @ timestamp
   # where the timestamp is a Unix time expressed as a hexadecimal value,
   # and the blob_signature is the signed locator_hash + API token + timestamp.
-  # 
-  class InvalidSignatureError < StandardError
+  #
+  class InvalidSignatureError < RequestError
   end
 
   # Blob.sign_locator: return a signed and timestamped blob locator.
index 19254ce8846a326d747d49928d4b7a21baaa9011..921c690cd00f78f6fc2b46bbace23fff89992db8 100644 (file)
@@ -2,10 +2,12 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+require 'request_error'
+
 class Commit < ActiveRecord::Base
   extend CurrentApiClient
 
-  class GitError < StandardError
+  class GitError < RequestError
     def http_status
       422
     end
index b013776b98d3690db6cd5921bc8a3c11e6ce4ad4..3765266405d86f41a1a3a10372b79d1c17aea14f 100644 (file)
@@ -20,17 +20,20 @@ class Container < ArvadosModel
   serialize :runtime_constraints, Hash
   serialize :command, Array
   serialize :scheduling_parameters, Hash
+  serialize :secret_mounts, Hash
 
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :set_timestamps
-  validates :command, :container_image, :output_path, :cwd, :priority, :presence => true
-  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+  validates :command, :container_image, :output_path, :cwd, :priority, { presence: true }
+  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
   validate :validate_state_change
   validate :validate_change
   validate :validate_lock
   validate :validate_output
   after_validation :assign_auth
   before_save :sort_serialized_attrs
+  before_save :update_secret_mounts_md5
+  before_save :scrub_secret_mounts
   after_save :handle_completed
   after_save :propagate_priority
 
@@ -79,33 +82,56 @@ class Container < ArvadosModel
     ["mounts"]
   end
 
+  def self.full_text_searchable_columns
+    super - ["secret_mounts", "secret_mounts_md5"]
+  end
+
+  def self.searchable_columns *args
+    super - ["secret_mounts_md5"]
+  end
+
+  def logged_attributes
+    super.except('secret_mounts')
+  end
+
   def state_transitions
     State_transitions
   end
 
+  # Container priority is the highest "computed priority" of any
+  # matching request. The computed priority of a container-submitted
+  # request is the priority of the submitting container. The computed
+  # priority of a user-submitted request is a function of
+  # user-assigned priority and request creation time.
   def update_priority!
-    if [Queued, Locked, Running].include? self.state
-      # Update the priority of this container to the maximum priority of any of
-      # its committed container requests and save the record.
-      self.priority = ContainerRequest.
-        where(container_uuid: uuid,
-              state: ContainerRequest::Committed).
-        maximum('priority') || 0
-      self.save!
-    end
+    return if ![Queued, Locked, Running].include?(state)
+    p = ContainerRequest.
+        where('container_uuid=? and priority>0', uuid).
+        includes(:requesting_container).
+        lock(true).
+        map do |cr|
+      if cr.requesting_container
+        cr.requesting_container.priority
+      else
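+        # User-submitted request: shift the user-assigned priority
+        # into the high-order bits, then subtract the creation time
+        # in milliseconds so that, at equal priority, earlier
+        # requests sort higher.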
+        (cr.priority << 50) - (cr.created_at.to_time.to_f * 1000).to_i
+      end
+    end.max || 0
+    update_attributes!(priority: p)
   end
 
   def propagate_priority
-    if self.priority_changed?
-      act_as_system_user do
-         # Update the priority of child container requests to match new priority
-         # of the parent container.
-         ContainerRequest.where(requesting_container_uuid: self.uuid,
-                                state: ContainerRequest::Committed).each do |cr|
-           cr.priority = self.priority
-           cr.save
-         end
-       end
+    return true unless priority_changed?
+    act_as_system_user do
+      # Update the priority of child container requests to match new
+      # priority of the parent container (ignoring requests with no
+      # container assigned, because their priority doesn't matter).
+      ContainerRequest.
+        where(requesting_container_uuid: self.uuid,
+              state: ContainerRequest::Committed).
+        where('container_uuid is not null').
+        includes(:container).
+        map(&:container).
+        map(&:update_priority!)
     end
   end
 
@@ -121,6 +147,7 @@ class Container < ArvadosModel
       mounts: resolve_mounts(req.mounts),
       runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
       scheduling_parameters: req.scheduling_parameters,
+      secret_mounts: req.secret_mounts,
     }
     act_as_system_user do
       if req.use_existing && (reusable = find_reusable(c_attrs))
@@ -217,6 +244,9 @@ class Container < ArvadosModel
     candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]))
     log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
 
+    candidates = candidates.where('secret_mounts_md5 = ?', Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts]))))
+    log_reuse_info(candidates) { "after filtering on secret_mounts_md5" }
+
     candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]))
     log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
 
@@ -378,7 +408,8 @@ class Container < ArvadosModel
     if self.new_record?
       permitted.push(:owner_uuid, :command, :container_image, :cwd,
                      :environment, :mounts, :output_path, :priority,
-                     :runtime_constraints, :scheduling_parameters)
+                     :runtime_constraints, :scheduling_parameters,
+                     :secret_mounts)
     end
 
     case self.state
@@ -487,6 +518,22 @@ class Container < ArvadosModel
     end
   end
 
+  def update_secret_mounts_md5
+    if self.secret_mounts_changed?
+      self.secret_mounts_md5 = Digest::MD5.hexdigest(
+        SafeJSON.dump(self.class.deep_sort_hash(self.secret_mounts)))
+    end
+  end
+
+  def scrub_secret_mounts
+    # This runs after update_secret_mounts_md5, so secret_mounts_md5
+    # still reflects the secrets that are being scrubbed here.
+    if self.state_changed? && self.final?
+      self.secret_mounts = {}
+    end
+  end
+
   def handle_completed
     # This container is finished so finalize any associated container requests
     # that are associated with this container.
index bcca40700bd9efbaf57c74332a94f3325763299a..3587cf046a3adb1028ac97f5717b229f1f4cdded 100644 (file)
@@ -11,6 +11,11 @@ class ContainerRequest < ArvadosModel
   include WhitelistUpdate
 
   belongs_to :container, foreign_key: :container_uuid, primary_key: :uuid
+  belongs_to :requesting_container, {
+               class_name: 'Container',
+               foreign_key: :requesting_container_uuid,
+               primary_key: :uuid,
+             }
 
   serialize :properties, Hash
   serialize :environment, Hash
@@ -18,6 +23,7 @@ class ContainerRequest < ArvadosModel
   serialize :runtime_constraints, Hash
   serialize :command, Array
   serialize :scheduling_parameters, Hash
+  serialize :secret_mounts, Hash
 
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :validate_runtime_constraints
@@ -28,6 +34,8 @@ class ContainerRequest < ArvadosModel
   validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
   validate :validate_state_change
   validate :check_update_whitelist
+  validate :secret_mounts_key_conflict
+  before_save :scrub_secret_mounts
   after_save :update_priority
   after_save :finalize_if_needed
   before_create :set_requesting_container_uuid
@@ -79,12 +87,16 @@ class ContainerRequest < ArvadosModel
   :container_image, :cwd, :environment, :filters, :mounts,
   :output_path, :priority, :properties, :requesting_container_uuid,
   :runtime_constraints, :state, :container_uuid, :use_existing,
-  :scheduling_parameters, :output_name, :output_ttl]
+  :scheduling_parameters, :secret_mounts, :output_name, :output_ttl]
 
   def self.limit_index_columns_read
     ["mounts"]
   end
 
+  def logged_attributes
+    super.except('secret_mounts')
+  end
+
   def state_transitions
     State_transitions
   end
@@ -145,7 +157,7 @@ class ContainerRequest < ArvadosModel
   end
 
   def self.full_text_searchable_columns
-    super - ["mounts"]
+    super - ["mounts", "secret_mounts", "secret_mounts_md5"]
   end
 
   protected
@@ -216,7 +228,7 @@ class ContainerRequest < ArvadosModel
 
     if self.new_record? || self.state_was == Uncommitted
       # Allow create-and-commit in a single operation.
-      permitted.push *AttrsPermittedBeforeCommit
+      permitted.push(*AttrsPermittedBeforeCommit)
     end
 
     case self.state
@@ -253,19 +265,31 @@ class ContainerRequest < ArvadosModel
     super(permitted)
   end
 
-  def update_priority
-    if self.state_changed? or
-        self.priority_changed? or
-        self.container_uuid_changed?
-      act_as_system_user do
-        Container.
-          where('uuid in (?)',
-                [self.container_uuid_was, self.container_uuid].compact).
-          map(&:update_priority!)
+  def secret_mounts_key_conflict
+    secret_mounts.each do |k, v|
+      if mounts.has_key?(k)
+        errors.add(:secret_mounts, 'conflict with non-secret mounts')
+        return false
       end
     end
   end
 
+  def scrub_secret_mounts
+    if self.state == Final
+      self.secret_mounts = {}
+    end
+  end
+
+  def update_priority
+    return unless state_changed? || priority_changed? || container_uuid_changed?
+    act_as_system_user do
+      Container.
+        where('uuid in (?)', [self.container_uuid_was, self.container_uuid].compact).
+        lock(true).
+        map(&:update_priority!)
+    end
+  end
+
   def set_priority_zero
     self.update_attributes!(priority: 0) if self.state != Final
   end
@@ -277,7 +301,7 @@ class ContainerRequest < ArvadosModel
     container = Container.where('auth_uuid=?', token_uuid).order('created_at desc').first
     if container
       self.requesting_container_uuid = container.uuid
-      self.priority = container.priority
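+      # Just record whether the requesting container is runnable;
+      # the effective priority of a container-submitted request is
+      # computed from the requesting container's priority in
+      # Container#update_priority!.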
+      self.priority = container.priority > 0 ? 1 : 0
     end
     true
   end
index 8de3897eab0125bd712d2858e4a3d078e9ac1849..7508ead5d5c31bd397d2b0958eac72f4712d2062 100644 (file)
@@ -33,7 +33,7 @@ class Job < ArvadosModel
   has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
   has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
 
-  class SubmitIdReused < StandardError
+  class SubmitIdReused < RequestError
   end
 
   api_accessible :user, extend: :common do |t|
index 564f31ad8be11a90461e23f6d3ef28cf68bd6f62..db9b2255c2e92cb2d5b346d12f35fbb9a43bb95a 100644 (file)
@@ -15,6 +15,18 @@ Server::Application.configure do
     }
     exceptions = %w(controller action format id)
     params = event.payload[:params].except(*exceptions)
+
+    # Omit secret_mounts field if supplied in create/update request
+    # body.
+    [
+      ['container', 'secret_mounts'],
+      ['container_request', 'secret_mounts'],
+    ].each do |resource, field|
+      if params[resource].is_a? Hash
+        params[resource] = params[resource].except(field)
+      end
+    end
+
     params_s = SafeJSON.dump(params)
     if params_s.length > Rails.configuration.max_request_log_params_size
       payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
index fcd5c34212d16187207e64abc8372e43ecd09af8..ad2406ae45a7be049c8122920204ab90542b9f67 100644 (file)
@@ -39,6 +39,7 @@ Server::Application.routes.draw do
         get 'auth', on: :member
         post 'lock', on: :member
         post 'unlock', on: :member
+        get 'secret_mounts', on: :member
         get 'current', on: :collection
       end
       resources :container_requests
diff --git a/services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb b/services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb
new file mode 100644 (file)
index 0000000..c56b7dc
--- /dev/null
@@ -0,0 +1,8 @@
+class AddSecretMountsToContainers < ActiveRecord::Migration
+  def change
+    add_column :container_requests, :secret_mounts, :jsonb, default: {}
+    add_column :containers, :secret_mounts, :jsonb, default: {}
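+    # The default below is MD5("{}"), the digest of an empty
+    # secret_mounts hash.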
+    add_column :containers, :secret_mounts_md5, :string, default: "99914b932bd37a50b983c5e7c90ae93b"
+    add_index :containers, :secret_mounts_md5
+  end
+end
diff --git a/services/api/db/migrate/20180313180114_change_container_priority_bigint.rb b/services/api/db/migrate/20180313180114_change_container_priority_bigint.rb
new file mode 100644 (file)
index 0000000..d577cbb
--- /dev/null
@@ -0,0 +1,5 @@
+class ChangeContainerPriorityBigint < ActiveRecord::Migration
+  def change
+    change_column :containers, :priority, :integer, limit: 8
+  end
+end
index 357e95c564f885ff6da725091b0711dac0cbaffe..27511145e9002f6abf8370d38872af99cf18922c 100644 (file)
@@ -299,7 +299,8 @@ CREATE TABLE container_requests (
     output_uuid character varying(255),
     log_uuid character varying(255),
     output_name character varying(255) DEFAULT NULL::character varying,
-    output_ttl integer DEFAULT 0 NOT NULL
+    output_ttl integer DEFAULT 0 NOT NULL,
+    secret_mounts jsonb DEFAULT '{}'::jsonb
 );
 
 
@@ -347,12 +348,14 @@ CREATE TABLE containers (
     output character varying(255),
     container_image character varying(255),
     progress double precision,
-    priority integer,
+    priority bigint,
     updated_at timestamp without time zone NOT NULL,
     exit_code integer,
     auth_uuid character varying(255),
     locked_by_uuid character varying(255),
-    scheduling_parameters text
+    scheduling_parameters text,
+    secret_mounts jsonb DEFAULT '{}'::jsonb,
+    secret_mounts_md5 character varying DEFAULT '99914b932bd37a50b983c5e7c90ae93b'::character varying
 );
 
 
@@ -1888,6 +1891,13 @@ CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (mod
 CREATE INDEX index_containers_on_owner_uuid ON containers USING btree (owner_uuid);
 
 
+--
+-- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_secret_mounts_md5 ON containers USING btree (secret_mounts_md5);
+
+
 --
 -- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -
 --
@@ -3054,3 +3064,7 @@ INSERT INTO schema_migrations (version) VALUES ('20171212153352');
 
 INSERT INTO schema_migrations (version) VALUES ('20180216203422');
 
+INSERT INTO schema_migrations (version) VALUES ('20180228220311');
+
+INSERT INTO schema_migrations (version) VALUES ('20180313180114');
+
diff --git a/services/api/lib/request_error.rb b/services/api/lib/request_error.rb
new file mode 100644 (file)
index 0000000..cd9f9f8
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RequestError < StandardError
+end
index 39957916d3079be18da8c205b1d58bb928b30b37..3f5d3a1e9d4de95640fad9938989b8f86305e608 100644 (file)
@@ -39,6 +39,11 @@ running:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts:
+    /secret/6x9:
+      kind: text
+      content: "42\n"
+  secret_mounts_md5: <%= Digest::MD5.hexdigest(SafeJSON.dump({'/secret/6x9' => {'content' => "42\n", 'kind' => 'text'}})) %>
   auth_uuid: zzzzz-gj3su-077z32aux8dg2s2
 
 running_older:
@@ -56,6 +61,8 @@ running_older:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 locked:
   uuid: zzzzz-dz642-lockedcontainer
@@ -72,6 +79,8 @@ locked:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 completed:
   uuid: zzzzz-dz642-compltcontainer
@@ -92,6 +101,8 @@ completed:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 completed_older:
   uuid: zzzzz-dz642-compltcontainr2
@@ -111,6 +122,8 @@ completed_older:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 requester:
   uuid: zzzzz-dz642-requestingcntnr
@@ -128,6 +141,8 @@ requester:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 requester_container:
   uuid: zzzzz-dz642-requestercntnr1
@@ -145,6 +160,8 @@ requester_container:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 failed_container:
   uuid: zzzzz-dz642-failedcontainr1
@@ -162,6 +179,8 @@ failed_container:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 ancient_container_with_logs:
   uuid: zzzzz-dz642-logscontainer01
@@ -181,6 +200,8 @@ ancient_container_with_logs:
   finished_at: <%= 2.year.ago.to_s(:db) %>
   log: ea10d51bcf88862dbcc36eb292017dfd+45
   output: test
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 previous_container_with_logs:
   uuid: zzzzz-dz642-logscontainer02
@@ -200,6 +221,8 @@ previous_container_with_logs:
   finished_at: <%= 1.month.ago.to_s(:db) %>
   log: ea10d51bcf88862dbcc36eb292017dfd+45
   output: test
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 running_container_with_logs:
   uuid: zzzzz-dz642-logscontainer03
@@ -215,6 +238,8 @@ running_container_with_logs:
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
 
 running_to_be_deleted:
   uuid: zzzzz-dz642-runnincntrtodel
@@ -232,3 +257,5 @@ running_to_be_deleted:
     ram: 12000000000
     vcpus: 4
   auth_uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
index 4ebda977a14ce80153b3ff5b93303aed7bb1934a..b2cd366540d81c36fc08c3b662b9cc3ed763ebd2 100644 (file)
@@ -5,22 +5,80 @@
 require 'test_helper'
 
 class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
+  def minimal_cr
+    {
+      command: ['echo', 'hello'],
+      container_image: 'test',
+      output_path: 'test',
+    }
+  end
+
   test 'create with scheduling parameters' do
-    authorize_with :system_user
+    authorize_with :active
 
     sp = {'partitions' => ['test1', 'test2']}
     post :create, {
-      container_request: {
-        command: ['echo', 'hello'],
-        container_image: 'test',
-        output_path: 'test',
-        scheduling_parameters: sp,
-      },
-    }
+           container_request: minimal_cr.merge(scheduling_parameters: sp.dup)
+         }
     assert_response :success
 
     cr = JSON.parse(@response.body)
     assert_not_nil cr, 'Expected container request'
     assert_equal sp, cr['scheduling_parameters']
   end
+
+  test "secret_mounts not in #create responses" do
+    authorize_with :active
+
+    post :create, {
+           container_request: minimal_cr.merge(
+             secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}}),
+         }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req = ContainerRequest.where(uuid: resp['uuid']).first
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
+
+  test "update with secret_mounts" do
+    authorize_with :active
+    req = container_requests(:uncommitted)
+
+    patch :update, {
+            id: req.uuid,
+            container_request: {
+              secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}},
+            },
+          }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req.reload
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
+
+  test "update without deleting secret_mounts" do
+    authorize_with :active
+    req = container_requests(:uncommitted)
+    req.update_attributes!(secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}})
+
+    patch :update, {
+            id: req.uuid,
+            container_request: {
+              command: ['echo', 'test'],
+            },
+          }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req.reload
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
 end
index 5510e903a29af544f5bc46208fbbe38e356e4b70..8e2002c75919a68f27b64718e50279907339ce7d 100644 (file)
@@ -45,13 +45,14 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
     assert_equal 'arvados#apiClientAuthorization', json_response['kind']
   end
 
-  test 'no auth in container response' do
+  test 'no auth or secret_mounts in container response' do
     authorize_with :dispatch1
     c = containers(:queued)
     assert c.lock, show_errors(c)
     get :show, id: c.uuid
     assert_response :success
     assert_nil json_response['auth']
+    assert_nil json_response['secret_mounts']
   end
 
   test "lock container" do
@@ -106,7 +107,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
     [:running, :lock, 422, 'Running'],
     [:running, :unlock, 422, 'Running'],
   ].each do |fixture, action, response, state|
-    test "state transitions from #{fixture } to #{action}" do
+    test "state transitions from #{fixture} to #{action}" do
       authorize_with :dispatch1
       uuid = containers(fixture).uuid
       post action, {id: uuid}
@@ -133,4 +134,21 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
     assert_response 401
   end
 
+  [
+    [true, :running_container_auth],
+    [false, :dispatch2],
+    [false, :admin],
+    [false, :active],
+  ].each do |expect_success, auth|
+    test "get secret_mounts with #{auth} token" do
+      authorize_with auth
+      get :secret_mounts, {id: containers(:running).uuid}
+      if expect_success
+        assert_response :success
+        assert_equal "42\n", json_response["secret_mounts"]["/secret/6x9"]["content"]
+      else
+        assert_response 403
+      end
+    end
+  end
 end
diff --git a/services/api/test/helpers/container_test_helper.rb b/services/api/test/helpers/container_test_helper.rb
new file mode 100644 (file)
index 0000000..88de724
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ContainerTestHelper
+  def secret_string
+    'UNGU3554BL3'
+  end
+
+  def assert_no_secrets_logged
+    Log.all.map(&:properties).each do |props|
+      refute_match /secret\/6x9|#{secret_string}/, SafeJSON.dump(props)
+    end
+  end
+end
index 0edc0f45938cc07924150c365a77585ade65fc55..70ad11e0f47b32a78597b4f98dd42dc5a5750870 100644 (file)
@@ -3,11 +3,30 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'test_helper'
+require 'helpers/container_test_helper'
 require 'helpers/docker_migration_helper'
 
 class ContainerRequestTest < ActiveSupport::TestCase
   include DockerMigrationHelper
   include DbCurrentTime
+  include ContainerTestHelper
+
+  def with_container_auth(ctr)
+    auth_was = Thread.current[:api_client_authorization]
+    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(ctr.auth_uuid)
+    begin
+      yield
+    ensure
+      Thread.current[:api_client_authorization] = auth_was
+    end
+  end
+
+  def lock_and_run(ctr)
+    act_as_system_user do
+      ctr.update_attributes!(state: Container::Locked)
+      ctr.update_attributes!(state: Container::Running)
+    end
+  end
 
   def create_minimal_req! attrs={}
     defaults = {
@@ -141,7 +160,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
     assert_equal "/out", c.output_path
     assert_equal({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}, c.runtime_constraints)
-    assert_equal 1, c.priority
+    assert_operator 0, :<, c.priority
 
     assert_raises(ActiveRecord::RecordInvalid) do
       cr.priority = nil
@@ -157,50 +176,17 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal 0, c.priority
   end
 
-
-  test "Container request max priority" do
-    set_user_from_auth :active
-    cr = create_minimal_req!(priority: 5, state: "Committed")
-
-    c = Container.find_by_uuid cr.container_uuid
-    assert_equal 5, c.priority
-
-    cr2 = create_minimal_req!
-    cr2.priority = 10
-    cr2.state = "Committed"
-    cr2.container_uuid = cr.container_uuid
-    act_as_system_user do
-      cr2.save!
-    end
-
-    # cr and cr2 have priority 5 and 10, and are being satisfied by
-    # the same container c, so c's priority should be
-    # max(priority)=10.
-    c.reload
-    assert_equal 10, c.priority
-
-    cr2.update_attributes!(priority: 0)
-
-    c.reload
-    assert_equal 5, c.priority
-
-    cr.update_attributes!(priority: 0)
-
-    c.reload
-    assert_equal 0, c.priority
-  end
-
-
   test "Independent container requests" do
     set_user_from_auth :active
     cr1 = create_minimal_req!(command: ["foo", "1"], priority: 5, state: "Committed")
     cr2 = create_minimal_req!(command: ["foo", "2"], priority: 10, state: "Committed")
 
     c1 = Container.find_by_uuid cr1.container_uuid
-    assert_equal 5, c1.priority
+    assert_operator 0, :<, c1.priority
 
     c2 = Container.find_by_uuid cr2.container_uuid
-    assert_equal 10, c2.priority
+    assert_operator c1.priority, :<, c2.priority
+    c2priority_was = c2.priority
 
     cr1.update_attributes!(priority: 0)
 
@@ -208,7 +194,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal 0, c1.priority
 
     c2.reload
-    assert_equal 10, c2.priority
+    assert_equal c2priority_was, c2.priority
   end
 
   test "Request is finalized when its container is cancelled" do
@@ -270,14 +256,14 @@ class ContainerRequestTest < ActiveSupport::TestCase
     cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 1)
 
     c = Container.find_by_uuid cr.container_uuid
-    assert_equal 5, c.priority
+    assert_operator 0, :<, c.priority
 
     cr2 = create_minimal_req!
     cr2.update_attributes!(priority: 10, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"], container_count_max: 1)
     cr2.reload
 
     c2 = Container.find_by_uuid cr2.container_uuid
-    assert_equal 10, c2.priority
+    assert_operator 0, :<, c2.priority
 
     act_as_system_user do
       c.state = "Cancelled"
@@ -294,37 +280,94 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal 0, c2.priority
   end
 
+  test "child container priority follows same ordering as corresponding top-level ancestors" do
+    findctr = lambda { |cr| Container.find_by_uuid(cr.container_uuid) }
 
-  test "Container makes container request, then changes priority" do
     set_user_from_auth :active
-    cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 1)
-
-    c = Container.find_by_uuid cr.container_uuid
-    assert_equal 5, c.priority
-
-    cr2 = create_minimal_req!
-    cr2.update_attributes!(priority: 5, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"], container_count_max: 1)
-    cr2.reload
 
-    c2 = Container.find_by_uuid cr2.container_uuid
-    assert_equal 5, c2.priority
+    toplevel_crs = [
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "0"}),
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "1"}),
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "2"}),
+    ]
+    parents = toplevel_crs.map(&findctr)
+
+    children = parents.map do |parent|
+      lock_and_run(parent)
+      with_container_auth(parent) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"child" => parent.environment["workflow"]})
+      end
+    end.map(&findctr)
+
+    grandchildren = children.reverse.map do |child|
+      lock_and_run(child)
+      with_container_auth(child) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"grandchild" => child.environment["child"]})
+      end
+    end.reverse.map(&findctr)
 
-    act_as_system_user do
-      c.priority = 10
-      c.save!
-    end
+    shared_grandchildren = children.map do |child|
+      with_container_auth(child) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"grandchild" => "shared"})
+      end
+    end.map(&findctr)
 
-    cr.reload
+    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[1].uuid
+    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[2].uuid
+    shared_grandchild = shared_grandchildren[0]
 
-    cr2.reload
-    assert_equal 10, cr2.priority
+    set_user_from_auth :active
 
-    c2.reload
-    assert_equal 10, c2.priority
+    # parents should be prioritized by submit time.
+    assert_operator parents[0].priority, :>, parents[1].priority
+    assert_operator parents[1].priority, :>, parents[2].priority
+
+    # children should be prioritized in same order as their respective
+    # parents.
+    assert_operator children[0].priority, :>, children[1].priority
+    assert_operator children[1].priority, :>, children[2].priority
+
+    # grandchildren should also be prioritized in the same order,
+    # despite having been submitted in the opposite order.
+    assert_operator grandchildren[0].priority, :>, grandchildren[1].priority
+    assert_operator grandchildren[1].priority, :>, grandchildren[2].priority
+
+    # shared grandchild container should be prioritized above
+    # everything that isn't needed by parents[0], but not above
+    # earlier-submitted descendants of parents[0]
+    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+    assert_operator shared_grandchild.priority, :>, children[1].priority
+    assert_operator shared_grandchild.priority, :>, parents[1].priority
+    assert_operator shared_grandchild.priority, :<=, grandchildren[0].priority
+    assert_operator shared_grandchild.priority, :<=, children[0].priority
+    assert_operator shared_grandchild.priority, :<=, parents[0].priority
+
+    # increasing priority of the most recent toplevel container should
+    # reprioritize all of its descendants (including the shared
+    # grandchild) above everything else.
+    toplevel_crs[2].update_attributes!(priority: 72)
+    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+    assert_operator shared_grandchild.priority, :>, grandchildren[0].priority
+    assert_operator shared_grandchild.priority, :>, children[0].priority
+    assert_operator shared_grandchild.priority, :>, parents[0].priority
+    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+    assert_operator shared_grandchild.priority, :>, children[1].priority
+    assert_operator shared_grandchild.priority, :>, parents[1].priority
+    # ...but the shared container should not have higher priority than
+    # the earlier-submitted descendants of the high-priority workflow.
+    assert_operator shared_grandchild.priority, :<=, grandchildren[2].priority
+    assert_operator shared_grandchild.priority, :<=, children[2].priority
+    assert_operator shared_grandchild.priority, :<=, parents[2].priority
   end
 
   [
-    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 12],
+    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 1],
     ['active_no_prefs', nil, 0],
   ].each do |token, expected, expected_priority|
     test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
@@ -790,8 +833,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
       c = Container.find_by_uuid(cr.container_uuid)
       assert_equal 1, c.priority
 
-      # destroy the cr
-      assert_nothing_raised {cr.destroy}
+      cr.destroy
 
       # the cr's container now has priority of 0
       c = Container.find_by_uuid(cr.container_uuid)
@@ -836,4 +878,71 @@ class ContainerRequestTest < ActiveSupport::TestCase
     end
   end
 
+  # Note: some of these tests might look redundant because they test
+  # that out-of-order spellings of hashes are still considered equal
+  # regardless of whether the existing (container) or new (container
+  # request) hash needs to be re-ordered.
+  secrets = {"/foo" => {"kind" => "text", "content" => "xyzzy"}}
+  same_secrets = {"/foo" => {"content" => "xyzzy", "kind" => "text"}}
+  different_secrets = {"/foo" => {"kind" => "text", "content" => "something completely different"}}
+  [
+    [true, nil, nil],
+    [true, nil, {}],
+    [true, {}, nil],
+    [true, {}, {}],
+    [true, secrets, same_secrets],
+    [true, same_secrets, secrets],
+    [false, nil, secrets],
+    [false, {}, secrets],
+    [false, secrets, {}],
+    [false, secrets, nil],
+    [false, secrets, different_secrets],
+  ].each do |expect_reuse, sm1, sm2|
+    test "container reuse secret_mounts #{sm1.inspect}, #{sm2.inspect}" do
+      set_user_from_auth :active
+      cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm1)
+      cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm2)
+      assert_not_nil cr1.container_uuid
+      assert_not_nil cr2.container_uuid
+      if expect_reuse
+        assert_equal cr1.container_uuid, cr2.container_uuid
+      else
+        assert_not_equal cr1.container_uuid, cr2.container_uuid
+      end
+    end
+  end
+
+  test "scrub secret_mounts but reuse container for request with identical secret_mounts" do
+    set_user_from_auth :active
+    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+    cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+    run_container(cr1)
+    cr1.reload
+
+    # secret_mounts scrubbed from db
+    c = Container.where(uuid: cr1.container_uuid).first
+    assert_equal({}, c.secret_mounts)
+    assert_equal({}, cr1.secret_mounts)
+
+    # can reuse container if secret_mounts match
+    cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+    assert_equal cr1.container_uuid, cr2.container_uuid
+
+    # don't reuse container if secret_mounts don't match
+    cr3 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: {})
+    assert_not_equal cr1.container_uuid, cr3.container_uuid
+
+    assert_no_secrets_logged
+  end
+
+  test "conflicting key in mounts and secret_mounts" do
+    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+    set_user_from_auth :active
+    cr = create_minimal_req!
+    assert_equal false, cr.update_attributes(state: "Committed",
+                                             priority: 1,
+                                             mounts: cr.mounts.merge(sm),
+                                             secret_mounts: sm)
+    assert_equal [:secret_mounts], cr.errors.messages.keys
+  end
 end
index 0e13ee950c3aa57a4eab704d1003b378e2b4f4d2..7ee5921e0ca37842168608f9eafa63d16dd1d90f 100644 (file)
@@ -3,9 +3,11 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'test_helper'
+require 'helpers/container_test_helper'
 
 class ContainerTest < ActiveSupport::TestCase
   include DbCurrentTime
+  include ContainerTestHelper
 
   DEFAULT_ATTRS = {
     command: ['echo', 'foo'],
@@ -30,6 +32,7 @@ class ContainerTest < ActiveSupport::TestCase
     environment: {
       "var" => "val",
     },
+    secret_mounts: {},
   }
 
   def minimal_new attrs={}
@@ -123,10 +126,8 @@ class ContainerTest < ActiveSupport::TestCase
       c.priority = 1000
       c.save!
 
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.priority = 1001
-        c.save!
-      end
+      c.priority = 1000 << 50
+      c.save!
     end
   end
 
@@ -621,4 +622,25 @@ class ContainerTest < ActiveSupport::TestCase
     end
   end
 
+  [
+    {state: Container::Complete, exit_code: 0, output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'},
+    {state: Container::Cancelled},
+  ].each do |final_attrs|
+    test "secret_mounts is null after container is #{final_attrs[:state]}" do
+      c, cr = minimal_new(secret_mounts: {'/secret' => {'kind' => 'text', 'content' => 'foo'}},
+                          container_count_max: 1)
+      set_user_from_auth :dispatch1
+      c.lock
+      c.update_attributes!(state: Container::Running)
+      c.reload
+      assert c.secret_mounts.has_key?('/secret')
+
+      c.update_attributes!(final_attrs)
+      c.reload
+      assert_equal({}, c.secret_mounts)
+      cr.reload
+      assert_equal({}, cr.secret_mounts)
+      assert_no_secrets_logged
+    end
+  end
 end
index 6852fc4be81978dd557870a2b629d5e56e16fc47..4211026a67a4d0bb0a8dfe200fdef7322ea3daf7 100644 (file)
@@ -55,10 +55,11 @@ func (s *IntegrationSuite) TearDownTest(c *C) {
 }
 
 type slurmFake struct {
-       didBatch  [][]string
-       didCancel []string
-       didRenice [][]string
-       queue     string
+       didBatch   [][]string
+       didCancel  []string
+       didRelease []string
+       didRenice  [][]string
+       queue      string
        // If non-nil, run this func during the 2nd+ call to Cancel()
        onCancel func()
        // Error returned by Batch()
@@ -74,6 +75,11 @@ func (sf *slurmFake) QueueCommand(args []string) *exec.Cmd {
        return exec.Command("echo", sf.queue)
 }
 
+func (sf *slurmFake) Release(name string) error {
+       sf.didRelease = append(sf.didRelease, name)
+       return nil
+}
+
 func (sf *slurmFake) Renice(name string, nice int64) error {
        sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
        return nil
@@ -151,7 +157,7 @@ func (s *IntegrationSuite) integrationTest(c *C,
 }
 
 func (s *IntegrationSuite) TestNormal(c *C) {
-       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100\n"}
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
        container := s.integrationTest(c,
                nil,
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
@@ -163,7 +169,7 @@ func (s *IntegrationSuite) TestNormal(c *C) {
 }
 
 func (s *IntegrationSuite) TestCancel(c *C) {
-       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100\n"}
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
        readyToCancel := make(chan bool)
        s.slurm.onCancel = func() { <-readyToCancel }
        container := s.integrationTest(c,
index 735e057e2542c42cf52940fcd3279e3b82ae28fe..9e9f45270f82d3450d27e380fe63924177e5501d 100644 (file)
@@ -13,10 +13,11 @@ import (
 )
 
 type Slurm interface {
+       Batch(script io.Reader, args []string) error
        Cancel(name string) error
-       Renice(name string, nice int64) error
        QueueCommand(args []string) *exec.Cmd
-       Batch(script io.Reader, args []string) error
+       Release(name string) error
+       Renice(name string, nice int64) error
 }
 
 type slurmCLI struct{}
@@ -54,6 +55,10 @@ func (scli *slurmCLI) QueueCommand(args []string) *exec.Cmd {
        return exec.Command("squeue", args...)
 }
 
+func (scli *slurmCLI) Release(name string) error {
+       return scli.run(nil, "scontrol", []string{"release", "Name=" + name})
+}
+
 func (scli *slurmCLI) Renice(name string, nice int64) error {
        return scli.run(nil, "scontrol", []string{"update", "JobName=" + name, fmt.Sprintf("Nice=%d", nice)})
 }
index b8e3108c7c1a235e7c7e28ffc10974652a35cc6a..ee79c6f774c1ca4cb277f1c356ebca792d790f49 100644 (file)
@@ -79,18 +79,33 @@ func (sqc *SqueueChecker) reniceAll() {
                        // (perhaps it's not an Arvados job)
                        continue
                }
+               if j.priority == 0 {
+                       // SLURM <= 15.x implements "hold" by setting
+                       // priority to 0. If we include held jobs
+                       // here, we'll end up trying to push other
+                       // jobs below them using negative priority,
+                       // which won't help anything.
+                       continue
+               }
                jobs = append(jobs, j)
        }
 
        sort.Slice(jobs, func(i, j int) bool {
-               return jobs[i].wantPriority > jobs[j].wantPriority
+               if jobs[i].wantPriority != jobs[j].wantPriority {
+                       return jobs[i].wantPriority > jobs[j].wantPriority
+               } else {
+                       // break ties with container uuid --
+                       // otherwise, the ordering would change from
+                       // one interval to the next, and we'd do many
+                       // pointless slurm queue rearrangements.
+                       return jobs[i].uuid > jobs[j].uuid
+               }
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
                if renice[i] == job.nice {
                        continue
                }
-               log.Printf("updating slurm priority for %q: nice %d => %d", job.uuid, job.nice, renice[i])
                sqc.Slurm.Renice(job.uuid, renice[i])
        }
 }
@@ -114,7 +129,7 @@ func (sqc *SqueueChecker) check() {
        sqc.L.Lock()
        defer sqc.L.Unlock()
 
-       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--format=%j %y %Q"})
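+       // squeue format: job name (%j), nice (%y), priority (%Q),
+       // state (%T), reason (%r).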
+       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
        stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
        cmd.Stdout, cmd.Stderr = stdout, stderr
        if err := cmd.Run(); err != nil {
@@ -128,9 +143,9 @@ func (sqc *SqueueChecker) check() {
                if line == "" {
                        continue
                }
-               var uuid string
+               var uuid, state, reason string
                var n, p int64
-               if _, err := fmt.Sscan(line, &uuid, &n, &p); err != nil {
+               if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
                        log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
                        continue
                }
@@ -141,6 +156,23 @@ func (sqc *SqueueChecker) check() {
                replacing.priority = p
                replacing.nice = n
                newq[uuid] = replacing
+
+               if state == "PENDING" && reason == "BadConstraints" && p == 0 && replacing.wantPriority > 0 {
+                       // When using SLURM 14.x or 15.x, our queued
+                       // jobs land in this state when "scontrol
+                       // reconfigure" invalidates their feature
+                       // constraints by clearing all node features.
+                       // They stay in this state even after the
+                       // features reappear, until we run "scontrol
+                       // release {jobid}".
+                       //
+                       // "scontrol release" is silent and successful
+                       // regardless of whether the features have
+                       // reappeared, so rather than second-guessing
+                       // whether SLURM is ready, we just keep trying
+                       // this until it works.
+                       sqc.Slurm.Release(uuid)
+               }
        }
        sqc.queue = newq
        sqc.Broadcast()
index 694a4d6f36c1da35ce830edf708a8c3df56bd7b5..c9329fdf95bf87028346fb727b8521dc8edfa1cd 100644 (file)
@@ -14,6 +14,36 @@ var _ = Suite(&SqueueSuite{})
 
 type SqueueSuite struct{}
 
+func (s *SqueueSuite) TestReleasePending(c *C) {
+       uuids := []string{
+               "zzzzz-dz642-fake0fake0fake0",
+               "zzzzz-dz642-fake1fake1fake1",
+               "zzzzz-dz642-fake2fake2fake2",
+       }
+       slurm := &slurmFake{
+               queue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING BadConstraints\n",
+       }
+       sqc := &SqueueChecker{
+               Slurm:  slurm,
+               Period: time.Hour,
+       }
+       sqc.startOnce.Do(sqc.start)
+       defer sqc.Stop()
+
+       done := make(chan struct{})
+       go func() {
+               for _, u := range uuids {
+                       sqc.SetPriority(u, 1)
+               }
+               close(done)
+       }()
+       callUntilReady(sqc.check, done)
+
+       slurm.didRelease = nil
+       sqc.check()
+       c.Check(slurm.didRelease, DeepEquals, []string{uuids[2]})
+}
+
 func (s *SqueueSuite) TestReniceAll(c *C) {
        uuids := []string{"zzzzz-dz642-fake0fake0fake0", "zzzzz-dz642-fake1fake1fake1", "zzzzz-dz642-fake2fake2fake2"}
        for _, test := range []struct {
@@ -24,28 +54,34 @@ func (s *SqueueSuite) TestReniceAll(c *C) {
        }{
                {
                        spread: 1,
-                       squeue: uuids[0] + " 10000 4294000000\n",
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n",
                        want:   map[string]int64{uuids[0]: 1},
                        expect: [][]string{{uuids[0], "0"}},
                },
                { // fake0 priority is too high
                        spread: 1,
-                       squeue: uuids[0] + " 10000 4294000777\n" + uuids[1] + " 10000 4294000444\n",
+                       squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
                        want:   map[string]int64{uuids[0]: 1, uuids[1]: 999},
                        expect: [][]string{{uuids[1], "0"}, {uuids[0], "334"}},
                },
                { // specify spread
                        spread: 100,
-                       squeue: uuids[0] + " 10000 4294000777\n" + uuids[1] + " 10000 4294000444\n",
+                       squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
                        want:   map[string]int64{uuids[0]: 1, uuids[1]: 999},
                        expect: [][]string{{uuids[1], "0"}, {uuids[0], "433"}},
                },
                { // ignore fake2 because SetPriority() not called
                        spread: 1,
-                       squeue: uuids[0] + " 10000 4294000000\n" + uuids[1] + " 10000 4294000111\n" + uuids[2] + " 10000 4294000222\n",
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 4294000222 PENDING Resources\n",
                        want:   map[string]int64{uuids[0]: 999, uuids[1]: 1},
                        expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
                },
+               { // ignore fake2 because slurm priority=0
+                       spread: 1,
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 999, uuids[1]: 1, uuids[2]: 997},
+                       expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
+               },
        } {
                c.Logf("spread=%d squeue=%q want=%v -> expect=%v", test.spread, test.squeue, test.want, test.expect)
                slurm := &slurmFake{
@@ -97,7 +133,7 @@ func (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {
        for {
                select {
                case <-tick.C:
-                       slurm.queue = uuidGood + " 0 12345\n"
+                       slurm.queue = uuidGood + " 0 12345 PENDING Resources\n"
                        sqc.check()
 
                        // Avoid immediately selecting this case again
@@ -117,3 +153,16 @@ func (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {
                }
        }
 }
+
+func callUntilReady(fn func(), done <-chan struct{}) {
+       tick := time.NewTicker(time.Millisecond)
+       defer tick.Stop()
+       for {
+               select {
+               case <-done:
+                       return
+               case <-tick.C:
+                       fn()
+               }
+       }
+}
index 653e0b4949da882cbcc185894ffdaee369c19318..53815cbe1c8222d4e6c9614ce889d649224af7e1 100644 (file)
@@ -111,6 +111,8 @@ type ContainerRunner struct {
        OutputPDH     *string
        SigChan       chan os.Signal
        ArvMountExit  chan error
+       SecretMounts  map[string]arvados.Mount
+       MkArvClient   func(token string) (IArvadosClient, error)
        finalState    string
        parentTemp    string
 
@@ -396,10 +398,24 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        for bind := range runner.Container.Mounts {
                binds = append(binds, bind)
        }
+       for bind := range runner.SecretMounts {
+               if _, ok := runner.Container.Mounts[bind]; ok {
+                       return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+               }
+               if runner.SecretMounts[bind].Kind != "json" &&
+                       runner.SecretMounts[bind].Kind != "text" {
+                       return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+                               bind, runner.SecretMounts[bind].Kind)
+               }
+               binds = append(binds, bind)
+       }
        sort.Strings(binds)
 
        for _, bind := range binds {
-               mnt := runner.Container.Mounts[bind]
+               mnt, ok := runner.Container.Mounts[bind]
+               if !ok {
+                       mnt = runner.SecretMounts[bind]
+               }
                if bind == "stdout" || bind == "stderr" {
                        // Is it a "file" mount kind?
                        if mnt.Kind != "file" {
@@ -428,8 +444,8 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                }
 
                if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
-                       if mnt.Kind != "collection" {
-                               return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+                       if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
+                               return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
                        }
                }
 
@@ -503,26 +519,35 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                runner.HostOutputDir = tmpdir
                        }
 
-               case mnt.Kind == "json":
-                       jsondata, err := json.Marshal(mnt.Content)
-                       if err != nil {
-                               return fmt.Errorf("encoding json data: %v", err)
+               case mnt.Kind == "json" || mnt.Kind == "text":
+                       var filedata []byte
+                       if mnt.Kind == "json" {
+                               filedata, err = json.Marshal(mnt.Content)
+                               if err != nil {
+                                       return fmt.Errorf("encoding json data: %v", err)
+                               }
+                       } else {
+                               text, ok := mnt.Content.(string)
+                               if !ok {
+                                       return fmt.Errorf("content for mount %q must be a string", bind)
+                               }
+                               filedata = []byte(text)
                        }
-                       // Create a tempdir with a single file
-                       // (instead of just a tempfile): this way we
-                       // can ensure the file is world-readable
-                       // inside the container, without having to
-                       // make it world-readable on the docker host.
-                       tmpdir, err := runner.MkTempDir(runner.parentTemp, "json")
+
+                       tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
                        if err != nil {
                                return fmt.Errorf("creating temp dir: %v", err)
                        }
-                       tmpfn := filepath.Join(tmpdir, "mountdata.json")
-                       err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+                       tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
+                       err = ioutil.WriteFile(tmpfn, filedata, 0444)
                        if err != nil {
                                return fmt.Errorf("writing temp file: %v", err)
                        }
-                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+                       if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                               copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+                       } else {
+                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+                       }
 
                case mnt.Kind == "git_tree":
                        tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
@@ -1259,6 +1284,16 @@ func (runner *ContainerRunner) CaptureOutput() error {
        }
        sort.Strings(binds)
 
+       // Delete secret mounts so they don't get saved to the output collection.
+       for bind := range runner.SecretMounts {
+               if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                       err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
+                       if err != nil {
+                               return fmt.Errorf("Unable to remove secret mount: %v", err)
+                       }
+               }
+       }
+
        var manifestText string
 
        collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
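
[note] CaptureOutput above strips any secret file that was materialized under the output path out of the host output directory before the manifest is computed, so secrets never reach the output collection. A sketch of the path rule, with illustrative paths:

    package main

    import (
            "fmt"
            "strings"
    )

    // secretsToRemove maps secret binds under outputPath to the host-side
    // files that must be deleted before the manifest is written.
    func secretsToRemove(secretBinds []string, outputPath, hostOutputDir string) []string {
            var doomed []string
            for _, bind := range secretBinds {
                    if strings.HasPrefix(bind, outputPath+"/") {
                            doomed = append(doomed, hostOutputDir+bind[len(outputPath):])
                    }
            }
            return doomed
    }

    func main() {
            fmt.Println(secretsToRemove(
                    []string{"/tmp/secret.conf", "/etc/key.pem"},
                    "/tmp", "/host/outdir",
            )) // [/host/outdir/secret.conf]
    }
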
@@ -1702,6 +1737,31 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
        if err != nil {
                return fmt.Errorf("error decoding container record: %v", err)
        }
+
+       var sm struct {
+               SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+       }
+
+       containerToken, err := runner.ContainerToken()
+       if err != nil {
+               return fmt.Errorf("error getting container token: %v", err)
+       }
+
+       containerClient, err := runner.MkArvClient(containerToken)
+       if err != nil {
+               return fmt.Errorf("error creating container API client: %v", err)
+       }
+
+       err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+       if err != nil {
+               if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
+                       return fmt.Errorf("error fetching secret_mounts: %v", err)
+               }
+               // ok && apierr.HttpStatusCode == 404, which means
+               // secret_mounts isn't supported by this API server.
+       }
+       runner.SecretMounts = sm.SecretMounts
+
        return nil
 }
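
[note] The 404 special case above keeps crunch-run compatible with API servers that predate the secret_mounts endpoint. A self-contained sketch of the same fallback shape; APIError below is a stand-in for arvadosclient.APIServerError:

    package main

    import "fmt"

    // APIError is a stand-in for arvadosclient.APIServerError.
    type APIError struct{ HttpStatusCode int }

    func (e APIError) Error() string { return fmt.Sprintf("HTTP %d", e.HttpStatusCode) }

    // fetchSecretMounts tolerates exactly one failure mode: a 404,
    // meaning the API server is too old to know about secret_mounts.
    func fetchSecretMounts(call func() error) error {
            if err := call(); err != nil {
                    if apierr, ok := err.(APIError); !ok || apierr.HttpStatusCode != 404 {
                            return fmt.Errorf("error fetching secret_mounts: %v", err)
                    }
                    // 404: proceed with an empty secret mount set.
            }
            return nil
    }

    func main() {
            fmt.Println(fetchSecretMounts(func() error { return APIError{404} })) // <nil>
            fmt.Println(fetchSecretMounts(func() error { return APIError{500} })) // error
    }
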
 
@@ -1715,6 +1775,14 @@ func NewContainerRunner(api IArvadosClient,
        cr.NewLogWriter = cr.NewArvLogWriter
        cr.RunArvMount = cr.ArvMountCmd
        cr.MkTempDir = ioutil.TempDir
+       cr.MkArvClient = func(token string) (IArvadosClient, error) {
+               cl, err := arvadosclient.MakeArvadosClient()
+               if err != nil {
+                       return nil, err
+               }
+               cl.ApiToken = token
+               return cl, nil
+       }
        cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        cr.Container.UUID = containerUUID
        cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
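
[note] Making MkArvClient a settable field (like NewLogWriter and RunArvMount) is what lets the tests below substitute a stub client for the container-token connection. A sketch of that injection pattern with illustrative names, not the real crunch-run types:

    package main

    import "fmt"

    // Client stands in for crunch-run's IArvadosClient.
    type Client interface {
            Call(path string) (string, error)
    }

    type realClient struct{ token string }

    func (c realClient) Call(path string) (string, error) {
            return "", fmt.Errorf("no API server in this sketch (token %q)", c.token)
    }

    type stubClient struct{}

    func (stubClient) Call(path string) (string, error) {
            return `{"secret_mounts":{}}`, nil
    }

    // runner mirrors the shape of ContainerRunner's MkArvClient field.
    type runner struct {
            MkClient func(token string) (Client, error)
    }

    func newRunner() *runner {
            return &runner{MkClient: func(token string) (Client, error) {
                    return realClient{token: token}, nil // production default
            }}
    }

    func main() {
            r := newRunner()
            // A test overrides the factory, never touching the network:
            r.MkClient = func(string) (Client, error) { return stubClient{}, nil }
            c, _ := r.MkClient("fake-container-token")
            fmt.Println(c.Call("containers/zzzzz-dz642-xxxxxxxxxxxxxxx/secret_mounts"))
    }
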
index 94b713355dd10a13e2563f701589f39874d8ac12..48da8edd86c521b007e0ba127136fb050e4c2773 100644 (file)
@@ -57,7 +58,8 @@ type ArvTestClient struct {
        Calls   int
        Content []arvadosclient.Dict
        arvados.Container
-       Logs map[string]*bytes.Buffer
+       secretMounts []byte
+       Logs         map[string]*bytes.Buffer
        sync.Mutex
        WasSetRunning bool
        callraw       bool
@@ -240,6 +242,12 @@ func (client *ArvTestClient) Call(method, resourceType, uuid, action string, par
                        "uuid": "`+fakeAuthUUID+`",
                        "api_token": "`+fakeAuthToken+`"
                        }`), output)
+       case method == "GET" && resourceType == "containers" && action == "secret_mounts":
+               if client.secretMounts != nil {
+                       return json.Unmarshal(client.secretMounts, output)
+               } else {
+                       return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
+               }
        default:
                return fmt.Errorf("Not found")
        }
@@ -356,6 +364,10 @@ func (client *KeepTestClient) PutHB(hash string, buf []byte) (string, int, error
 func (*KeepTestClient) ClearBlockCache() {
 }
 
+func (client *KeepTestClient) Close() {
+       client.Content = nil
+}
+
 type FileWrapper struct {
        io.ReadCloser
        len int64
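
[note] Close gives tests an explicit way to release the block content buffered by KeepTestClient; the many `defer kc.Close()` additions below follow the usual acquire/defer-release shape. A trivial sketch, with Content standing in for the buffered data:

    package main

    import "fmt"

    // KeepTestClient's Content stands in for the block data a test
    // client accumulates; Close drops it so the run doesn't hold it alive.
    type KeepTestClient struct{ Content []byte }

    func (c *KeepTestClient) Close() { c.Content = nil }

    func main() {
            kc := &KeepTestClient{Content: []byte("stored block")}
            defer kc.Close() // paired with construction, as in the tests below
            fmt.Println(len(kc.Content))
    }
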
@@ -400,6 +412,7 @@ func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename s
 
 func (s *TestSuite) TestLoadImage(c *C) {
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        _, err := cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
@@ -506,7 +519,9 @@ func (KeepReadErrorTestClient) ManifestFileReader(m manifest.Manifest, filename
 
 func (s *TestSuite) TestLoadImageArvError(c *C) {
        // (1) Arvados error
-       cr := NewContainerRunner(ArvErrorTestClient{}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.Container.ContainerImage = hwPDH
 
        err := cr.LoadImage()
@@ -577,7 +592,9 @@ func (s *TestSuite) TestRunContainer(c *C) {
                t.logWriter.Write(dockerLog(1, "Hello world\n"))
                t.logWriter.Close()
        }
-       cr := NewContainerRunner(&ArvTestClient{}, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        var logs TestLogs
        cr.NewLogWriter = logs.NewTestLoggingWriter
@@ -602,6 +619,7 @@ func (s *TestSuite) TestRunContainer(c *C) {
 func (s *TestSuite) TestCommitLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
@@ -622,6 +640,7 @@ func (s *TestSuite) TestCommitLogs(c *C) {
 func (s *TestSuite) TestUpdateContainerRunning(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        err := cr.UpdateContainerRunning()
@@ -633,6 +652,7 @@ func (s *TestSuite) TestUpdateContainerRunning(c *C) {
 func (s *TestSuite) TestUpdateContainerComplete(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        cr.LogsPDH = new(string)
@@ -653,6 +673,7 @@ func (s *TestSuite) TestUpdateContainerComplete(c *C) {
 func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.cCancelled = true
        cr.finalState = "Cancelled"
@@ -672,13 +693,24 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
        err := json.Unmarshal([]byte(record), &rec)
        c.Check(err, IsNil)
 
+       var sm struct {
+               SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+       }
+       err = json.Unmarshal([]byte(record), &sm)
+       c.Check(err, IsNil)
+       secretMounts, err := json.Marshal(sm)
+       c.Check(err, IsNil)
+
        s.docker.exitCode = exitCode
        s.docker.fn = fn
        s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
 
        api = &ArvTestClient{Container: rec}
        s.docker.api = api
-       cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.statInterval = 100 * time.Millisecond
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
@@ -700,6 +732,9 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
                }
                return d, err
        }
+       cr.MkArvClient = func(token string) (IArvadosClient, error) {
+               return &ArvTestClient{secretMounts: secretMounts}, nil
+       }
 
        if extraMounts != nil && len(extraMounts) > 0 {
                err := cr.SetupArvMountPoint("keep")
@@ -951,8 +986,13 @@ func (s *TestSuite) testStopContainer(c *C, setup func(cr *ContainerRunner)) {
        s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
 
        api := &ArvTestClient{Container: rec}
-       cr := NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }
+       cr.MkArvClient = func(token string) (IArvadosClient, error) {
+               return &ArvTestClient{}, nil
+       }
        setup(cr)
 
        done := make(chan error)
@@ -1018,6 +1058,7 @@ func stubCert(temp string) string {
 func (s *TestSuite) TestSetupMounts(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
@@ -1211,6 +1252,35 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                checkEmpty()
        }
 
+       for _, test := range []struct {
+               in  interface{}
+               out string
+       }{
+               {in: "foo", out: `foo`},
+               {in: nil, out: "error"},
+               {in: map[string]int64{"foo": 123456789123456789}, out: "error"},
+       } {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/mnt/test.txt": {Kind: "text", Content: test.in},
+               }
+               err := cr.SetupMounts()
+               if test.out == "error" {
+                       c.Check(err.Error(), Equals, "content for mount \"/mnt/test.txt\" must be a string")
+               } else {
+                       c.Check(err, IsNil)
+                       sort.StringSlice(cr.Binds).Sort()
+                       c.Check(cr.Binds, DeepEquals, []string{realTemp + "/text2/mountdata.text:/mnt/test.txt:ro"})
+                       content, err := ioutil.ReadFile(realTemp + "/text2/mountdata.text")
+                       c.Check(err, IsNil)
+                       c.Check(content, DeepEquals, []byte(test.out))
+               }
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
        // Read-only mount points are allowed underneath output_dir mount point
        {
                i = 0
@@ -1277,13 +1347,13 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                cr.Container.Mounts = make(map[string]arvados.Mount)
                cr.Container.Mounts = map[string]arvados.Mount{
                        "/tmp":     {Kind: "tmp"},
-                       "/tmp/foo": {Kind: "json"},
+                       "/tmp/foo": {Kind: "tmp"},
                }
                cr.OutputPath = "/tmp"
 
                err := cr.SetupMounts()
                c.Check(err, NotNil)
-               c.Check(err, ErrorMatches, `Only mount points of kind 'collection' are supported underneath the output_path.*`)
+               c.Check(err, ErrorMatches, `Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1397,9 +1467,14 @@ func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDoc
        s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
 
        api = &ArvTestClient{Container: rec}
-       cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
+       cr.MkArvClient = func(token string) (IArvadosClient, error) {
+               return &ArvTestClient{}, nil
+       }
 
        err = cr.Run()
        return
@@ -1833,7 +1908,9 @@ func (s *TestSuite) TestStderrMount(c *C) {
 }
 
 func (s *TestSuite) TestNumberRoundTrip(c *C) {
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
        cr.fetchContainerRecord()
 
        jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content)
@@ -1843,7 +1920,9 @@ func (s *TestSuite) TestNumberRoundTrip(c *C) {
 }
 
 func (s *TestSuite) TestEvalSymlinks(c *C) {
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        realTemp, err := ioutil.TempDir("", "crunchrun_test-")
        c.Assert(err, IsNil)
@@ -1873,7 +1952,9 @@ func (s *TestSuite) TestEvalSymlinks(c *C) {
 }
 
 func (s *TestSuite) TestEvalSymlinkDir(c *C) {
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
 
        realTemp, err := ioutil.TempDir("", "crunchrun_test-")
        c.Assert(err, IsNil)
@@ -2035,3 +2116,61 @@ func (s *TestSuite) TestBadCommand3(c *C) {
        c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
        c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
 }
+
+func (s *TestSuite) TestSecretTextMountPoint(c *C) {
+       // under normal mounts, gets captured in output, oops
+       helperRecord := `{
+               "command": ["true"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "mounts": {
+                    "/tmp": {"kind": "tmp"},
+                    "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+                },
+                "secret_mounts": {
+                },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {}
+       }`
+
+       api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte("mypassword"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(api.CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), NotNil)
+       c.Check(api.CalledWith("collection.manifest_text", ""), IsNil)
+
+       // under secret mounts, not captured in output
+       helperRecord = `{
+               "command": ["true"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "mounts": {
+                    "/tmp": {"kind": "tmp"}
+                },
+                "secret_mounts": {
+                    "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+                },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {}
+       }`
+
+       api, _, _ = s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte("mypassword"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(api.CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil)
+       c.Check(api.CalledWith("collection.manifest_text", ""), NotNil)
+}
index 87593be9bfe01f6e7d3b057157d3e980b752211c..abac2bbecc6e997886283f11012d50a44a9beab9 100644 (file)
@@ -6,11 +6,12 @@ package main
 
 import (
        "fmt"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       . "gopkg.in/check.v1"
        "strings"
        "testing"
        "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       . "gopkg.in/check.v1"
 )
 
 type LoggingTestSuite struct{}
@@ -34,6 +35,7 @@ var _ = Suite(&LoggingTestSuite{})
 func (s *LoggingTestSuite) TestWriteLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
@@ -61,6 +63,7 @@ func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
        }
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
        cr.CrunchLog.Immediate = nil
@@ -82,6 +85,7 @@ func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
 func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
        ts := &TestTimestamper{}
        cr.CrunchLog.Timestamper = ts.Timestamp
@@ -135,6 +139,7 @@ func testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, t
 
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
+       defer kc.Close()
        cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
index b54e336c2d99c50d472cc71b668e1c169ad9c2e4..ddad8bf475c3b73bf4bd9ecb973919f3d21b6c36 100644 (file)
@@ -191,6 +191,7 @@ func (m *CollectionWriter) Finish() error {
                }
                if stream.Block != nil {
                        stream.uploader <- stream.Block
+                       stream.Block = nil
                }
                close(stream.uploader)
                stream.uploader = nil
index 86dab41a6461222f297d8c7f27dcd05213fd7a20..24333c34930f7320fae9ec603a0fcbb2135599dc 100644 (file)
@@ -5,13 +5,14 @@
 package main
 
 import (
-       . "gopkg.in/check.v1"
        "io/ioutil"
        "log"
        "os"
        "path/filepath"
        "sync"
        "syscall"
+
+       . "gopkg.in/check.v1"
 )
 
 type UploadTestSuite struct{}
@@ -46,7 +47,9 @@ func (s *TestSuite) TestSimpleUpload(c *C) {
 
        ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
        c.Check(err, IsNil)
        c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
@@ -67,7 +70,9 @@ func (s *TestSuite) TestUploadThreeFiles(c *C) {
                c.Assert(err, IsNil)
        }
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
 
        c.Check(err, IsNil)
@@ -85,7 +90,9 @@ func (s *TestSuite) TestSimpleUploadSubdir(c *C) {
        ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
        ioutil.WriteFile(tmpdir+"/subdir/file2.txt", []byte("bar"), 0600)
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
 
        c.Check(err, IsNil)
@@ -119,7 +126,9 @@ func (s *TestSuite) TestSimpleUploadLarge(c *C) {
 
        ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
 
        c.Check(err, IsNil)
@@ -136,7 +145,9 @@ func (s *TestSuite) TestUploadEmptySubdir(c *C) {
 
        ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
 
        c.Check(err, IsNil)
@@ -152,7 +163,9 @@ func (s *TestSuite) TestUploadEmptyFile(c *C) {
 
        ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte(""), 0600)
 
-       cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
 
        c.Check(err, IsNil)
index 0c4dd5ba4cd16c38de2ebdf729b16af7a3a5ec64..32f36e02980be3420ff04d0e2d4c91a0c591c676 100644 (file)
@@ -82,6 +82,12 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions R
        if err != nil {
                return
        }
+       for _, srv := range bal.KeepServices {
+               err = srv.discoverMounts(&config.Client)
+               if err != nil {
+                       return
+               }
+       }
 
        if err = bal.CheckSanityEarly(&config.Client); err != nil {
                return
@@ -242,20 +248,24 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                wg.Add(1)
                go func(srv *KeepService) {
                        defer wg.Done()
-                       bal.logf("%s: retrieve index", srv)
-                       idx, err := srv.Index(c, "")
-                       if err != nil {
-                               errs <- fmt.Errorf("%s: %v", srv, err)
-                               return
-                       }
-                       if len(errs) > 0 {
-                               // Some other goroutine encountered an
-                               // error -- any further effort here
-                               // will be wasted.
-                               return
+                       bal.logf("%s: retrieve indexes", srv)
+                       for _, mount := range srv.mounts {
+                               bal.logf("%s: retrieve index", mount)
+                               idx, err := srv.IndexMount(c, mount.UUID, "")
+                               if err != nil {
+                                       errs <- fmt.Errorf("%s: retrieve index: %v", mount, err)
+                                       return
+                               }
+                               if len(errs) > 0 {
+                                       // Some other goroutine encountered an
+                                       // error -- any further effort here
+                                       // will be wasted.
+                                       return
+                               }
+                               bal.logf("%s: add %d replicas to map", mount, len(idx))
+                               bal.BlockStateMap.AddReplicas(mount, idx)
+                               bal.logf("%s: done", mount)
                        }
-                       bal.logf("%s: add %d replicas to map", srv, len(idx))
-                       bal.BlockStateMap.AddReplicas(srv, idx)
                        bal.logf("%s: done", srv)
                }(srv)
        }
@@ -395,14 +405,60 @@ var changeName = map[int]string{
 // block, and makes the appropriate ChangeSet calls.
 func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
        debugf("balanceBlock: %v %+v", blkid, blk)
+
+       // A slot is somewhere a replica could potentially be trashed
+       // from, pulled from, or pulled to. Each KeepService gets
+       // either one empty slot, or one or more non-empty slots.
+       type slot struct {
+               srv  *KeepService // never nil
+               repl *Replica     // nil if none found
+       }
+
+       // First, we build an ordered list of all slots worth
+       // considering (including all slots where replicas have been
+       // found, as well as all of the optimal slots for this block).
+       // Then, when we consider each slot in that order, we will
+       // have all of the information we need to make a decision
+       // about that slot.
+
        uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
-       hasRepl := make(map[string]Replica, len(bal.serviceRoots))
-       for _, repl := range blk.Replicas {
-               hasRepl[repl.UUID] = repl
-               // TODO: when multiple copies are on one server, use
-               // the oldest one that doesn't have a timestamp
-               // collision with other replicas.
+       rendezvousOrder := make(map[*KeepService]int, len(uuids))
+       slots := make([]slot, len(uuids))
+       for i, uuid := range uuids {
+               srv := bal.KeepServices[uuid]
+               rendezvousOrder[srv] = i
+               slots[i].srv = srv
+       }
+
+       // Sort readonly replicas ahead of trashable ones. This way,
+       // if a single service has excessive replicas, the ones we
+       // encounter last (and therefore choose to delete) will be on
+       // the writable volumes, where possible.
+       //
+       // TODO: within the trashable set, prefer the oldest replica
+       // that doesn't have a timestamp collision with others.
+       sort.Slice(blk.Replicas, func(i, j int) bool {
+               mnt := blk.Replicas[i].KeepMount
+               return mnt.ReadOnly || mnt.KeepService.ReadOnly
+       })
+
+       // Assign existing replicas to slots.
+       for ri := range blk.Replicas {
+               repl := &blk.Replicas[ri]
+               srv := repl.KeepService
+               slotIdx := rendezvousOrder[srv]
+               if slots[slotIdx].repl != nil {
+                       // Additional replicas on a single server are
+                       // considered non-optimal. Within this
+                       // category, we don't try to optimize layout:
+                       // we just say the optimal order is the order
+                       // we encounter them.
+                       slotIdx = len(slots)
+                       slots = append(slots, slot{srv: srv})
+               }
+               slots[slotIdx].repl = repl
        }
+
        // number of replicas already found in positions better than
        // the position we're contemplating now.
        reportedBestRepl := 0
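
[note] The slot model above gives every service one slot in rendezvous order, and overflows additional replicas on the same server into appended, deliberately non-optimal slots. A toy sketch of that bookkeeping; the md5-based ordering below is illustrative, not keepclient's actual rendezvous function:

    package main

    import (
            "crypto/md5"
            "fmt"
            "sort"
    )

    type slot struct {
            srv  string
            repl *int64 // replica mtime; nil if no replica found in this slot
    }

    // rendezvous orders servers per block; illustrative only.
    func rendezvous(blkid string, srvs []string) []string {
            out := append([]string(nil), srvs...)
            sort.Slice(out, func(i, j int) bool {
                    hi := md5.Sum([]byte(out[i] + blkid))
                    hj := md5.Sum([]byte(out[j] + blkid))
                    return string(hi[:]) < string(hj[:])
            })
            return out
    }

    func main() {
            order := rendezvous("acbd18db4cc2f85cedef654fccc4a4d8", []string{"keep0", "keep1", "keep2", "keep3"})
            slotIdx := map[string]int{}
            slots := make([]slot, len(order))
            for i, srv := range order {
                    slotIdx[srv] = i
                    slots[i].srv = srv
            }
            // Two replicas of the block on keep2: the second overflows into
            // an appended slot, which balanceBlock treats as non-optimal.
            for _, mtime := range []int64{111, 222} {
                    m, i := mtime, slotIdx["keep2"]
                    if slots[i].repl != nil {
                            slots = append(slots, slot{srv: "keep2"})
                            i = len(slots) - 1
                    }
                    slots[i].repl = &m
            }
            fmt.Println(order, len(slots)) // 4 servers in rendezvous order, 5 slots
    }
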
@@ -418,12 +474,11 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
        // requested on rendezvous positions M<N will be successful.)
        pulls := 0
        var changes []string
-       for _, uuid := range uuids {
+       for _, slot := range slots {
                change := changeNone
-               srv := bal.KeepServices[uuid]
+               srv, repl := slot.srv, slot.repl
                // TODO: request a Touch if Mtime is duplicated.
-               repl, ok := hasRepl[srv.UUID]
-               if ok {
+               if repl != nil {
                        // This service has a replica. We should
                        // delete it if [1] we already have enough
                        // distinct replicas in better rendezvous
@@ -431,6 +486,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
                        // distinct from all of the better replicas'
                        // Mtimes.
                        if !srv.ReadOnly &&
+                               !repl.KeepMount.ReadOnly &&
                                repl.Mtime < bal.MinMtime &&
                                len(uniqueBestRepl) >= blk.Desired &&
                                !uniqueBestRepl[repl.Mtime] {
@@ -459,7 +515,11 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
                        change = changePull
                }
                if bal.Dumper != nil {
-                       changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], repl.Mtime))
+                       var mtime int64
+                       if repl != nil {
+                               mtime = repl.Mtime
+                       }
+                       changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], mtime))
                }
        }
        if bal.Dumper != nil {
index 2d6dd2b5a6bf38b71fa5006e753aa7a13ae0096d..08cfcce5849e4bb98440f40a8e241fefce74b352 100644 (file)
@@ -5,7 +5,7 @@
 package main
 
 import (
-       "encoding/json"
+       "encoding/json"
        "fmt"
        "io"
        "io/ioutil"
@@ -41,6 +41,63 @@ func (rt *reqTracker) Add(req *http.Request) int {
        return len(rt.reqs)
 }
 
+var stubServices = []arvados.KeepService{
+       {
+               UUID:           "zzzzz-bi6l4-000000000000000",
+               ServiceHost:    "keep0.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000001",
+               ServiceHost:    "keep1.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000002",
+               ServiceHost:    "keep2.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000003",
+               ServiceHost:    "keep3.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-h0a0xwut9qa6g3a",
+               ServiceHost:    "keep.zzzzz.arvadosapi.com",
+               ServicePort:    25333,
+               ServiceSSLFlag: true,
+               ServiceType:    "proxy",
+       },
+}
+
+var stubMounts = map[string][]arvados.KeepMount{
+       "keep0.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-000000000000000",
+               DeviceID: "keep0-vol0",
+       }},
+       "keep1.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-100000000000000",
+               DeviceID: "keep1-vol0",
+       }},
+       "keep2.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-200000000000000",
+               DeviceID: "keep2-vol0",
+       }},
+       "keep3.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-300000000000000",
+               DeviceID: "keep3-vol0",
+       }},
+}
+
 // stubServer is an HTTP transport that intercepts and processes all
 // requests using its own handlers.
 type stubServer struct {
@@ -156,17 +213,32 @@ func (s *stubServer) serveCollectionsButSkipOne() *reqTracker {
 }
 
 func (s *stubServer) serveZeroKeepServices() *reqTracker {
-       return s.serveStatic("/arvados/v1/keep_services",
-               `{"items":[],"items_available":0}`)
+       return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{})
 }
 
-func (s *stubServer) serveFourDiskKeepServices() *reqTracker {
-       return s.serveStatic("/arvados/v1/keep_services", `{"items_available":5,"items":[
-               {"uuid":"zzzzz-bi6l4-000000000000000","service_host":"keep0.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
-               {"uuid":"zzzzz-bi6l4-000000000000001","service_host":"keep1.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
-               {"uuid":"zzzzz-bi6l4-000000000000002","service_host":"keep2.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
-               {"uuid":"zzzzz-bi6l4-000000000000003","service_host":"keep3.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
-               {"uuid":"zzzzz-bi6l4-h0a0xwut9qa6g3a","service_host":"keep.zzzzz.arvadosapi.com","service_port":25333,"service_ssl_flag":true,"service_type":"proxy"}]}`)
+func (s *stubServer) serveKeepServices(svcs []arvados.KeepService) *reqTracker {
+       return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{
+               ItemsAvailable: len(svcs),
+               Items:          svcs,
+       })
+}
+
+func (s *stubServer) serveJSON(path string, resp interface{}) *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               json.NewEncoder(w).Encode(resp)
+       })
+       return rt
+}
+
+func (s *stubServer) serveKeepstoreMounts() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/mounts", func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               json.NewEncoder(w).Encode(stubMounts[r.Host])
+       })
+       return rt
 }
 
 func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
@@ -178,6 +250,21 @@ func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
                }
                fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n\n", 12345678+count)
        })
+       for _, mounts := range stubMounts {
+               for i, mnt := range mounts {
+                       i := i
+                       s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+                               count := rt.Add(r)
+                               if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
+                                       io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+                               }
+                               if i == 0 {
+                                       fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+count)
+                               }
+                               fmt.Fprintf(w, "\n")
+                       })
+               }
+       }
        return rt
 }
 
@@ -238,7 +325,8 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveZeroCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
@@ -257,7 +345,8 @@ func (s *runSuite) TestServiceTypes(c *check.C) {
        s.config.KeepServiceTypes = []string{"unlisted-type"}
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        indexReqs := s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        _, err := (&Balancer{}).Run(s.config, opts)
@@ -274,7 +363,8 @@ func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
        }
        s.stub.serveCurrentUserNotAdmin()
        s.stub.serveZeroCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
        _, err := (&Balancer{}).Run(s.config, opts)
@@ -291,7 +381,8 @@ func (s *runSuite) TestDetectSkippedCollections(c *check.C) {
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveCollectionsButSkipOne()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
@@ -309,7 +400,8 @@ func (s *runSuite) TestDryRun(c *check.C) {
        }
        s.stub.serveCurrentUserAdmin()
        collReqs := s.stub.serveFooBarFileCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
@@ -336,7 +428,8 @@ func (s *runSuite) TestCommit(c *check.C) {
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
@@ -362,7 +455,8 @@ func (s *runSuite) TestRunForever(c *check.C) {
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
-       s.stub.serveFourDiskKeepServices()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
index edc8cf391a9f7076397b8bc0144ecc46dc551575..167e8741dba3ed25d1f7ae8c51a89bebf277f3d9 100644 (file)
@@ -76,6 +76,7 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                                UUID: fmt.Sprintf("zzzzz-bi6l4-%015x", i),
                        },
                }
+               srv.mounts = []*KeepMount{{KeepMount: arvados.KeepMount{UUID: fmt.Sprintf("mount-%015x", i)}, KeepService: srv}}
                bal.srvs[i] = srv
                bal.KeepServices[srv.UUID] = srv
        }
@@ -149,6 +150,47 @@ func (bal *balancerSuite) TestFixUnbalanced(c *check.C) {
                shouldTrash: slots{7}})
 }
 
+func (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {
+       bal.try(c, tester{
+               desired:    2,
+               current:    slots{0, 0},
+               shouldPull: slots{1}})
+       bal.try(c, tester{
+               desired:    2,
+               current:    slots{2, 2},
+               shouldPull: slots{0, 1}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{0, 0, 1},
+               shouldTrash: slots{0}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{1, 1, 0},
+               shouldTrash: slots{1}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{1, 0, 1, 0, 2},
+               shouldTrash: slots{0, 1, 2}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{1, 1, 1, 0, 2},
+               shouldTrash: slots{1, 1, 2}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{1, 1, 2},
+               shouldPull:  slots{0},
+               shouldTrash: slots{1}})
+       bal.try(c, tester{
+               desired:     2,
+               current:     slots{1, 1, 0},
+               timestamps:  []int64{12345678, 12345678, 12345679},
+               shouldTrash: nil})
+       bal.try(c, tester{
+               desired:    2,
+               current:    slots{1, 1},
+               shouldPull: slots{0}})
+}
+
 func (bal *balancerSuite) TestIncreaseReplTimestampCollision(c *check.C) {
        // For purposes of increasing replication, we assume identical
        // replicas are distinct.
@@ -231,8 +273,8 @@ func (bal *balancerSuite) try(c *check.C, t tester) {
 }
 
 // srvList returns the KeepServices, sorted in rendezvous order and
-// then selected by idx. For example, srvList(3, 0, 1, 4) returns the
-// the first-, second-, and fifth-best servers for storing
+// then selected by idx. For example, srvList(3, slots{0, 1, 4})
+// returns the first-, second-, and fifth-best servers for storing
 // bal.knownBlkid(3).
 func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepService) {
        for _, i := range order {
@@ -246,7 +288,7 @@ func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepSe
 func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
        mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
        for _, srv := range bal.srvList(knownBlockID, order) {
-               repls = append(repls, Replica{srv, mtime})
+               repls = append(repls, Replica{srv.mounts[0], mtime})
                mtime++
        }
        return
index d6dd0f3e88589eb01c9915bc1b7b26667cfe5185..958cdb596b61155c7138aeba05782b4eeffec7a5 100644 (file)
@@ -14,7 +14,7 @@ import (
 // Azure storage container, etc.) as reported in a keepstore index
 // response.
 type Replica struct {
-       *KeepService
+       *KeepMount
        Mtime int64
 }
 
@@ -73,16 +73,16 @@ func (bsm *BlockStateMap) Apply(f func(arvados.SizedDigest, *BlockState)) {
        }
 }
 
-// AddReplicas updates the map to indicate srv has a replica of each
-// block in idx.
-func (bsm *BlockStateMap) AddReplicas(srv *KeepService, idx []arvados.KeepServiceIndexEntry) {
+// AddReplicas updates the map to indicate that mnt has a replica of
+// each block in idx.
+func (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceIndexEntry) {
        bsm.mutex.Lock()
        defer bsm.mutex.Unlock()
 
        for _, ent := range idx {
                bsm.get(ent.SizedDigest).addReplica(Replica{
-                       KeepService: srv,
-                       Mtime:       ent.Mtime,
+                       KeepMount: mnt,
+                       Mtime:     ent.Mtime,
                })
        }
 }
index 41f22ab7d6c5c2c95ab7f492cbe86ea1ca3bb8dd..27d0af8ebd4da0e3f7e850401cccb2baa2940b13 100644 (file)
@@ -17,6 +17,7 @@ import (
 // KeepService represents a keepstore server that is being rebalanced.
 type KeepService struct {
        arvados.KeepService
+       mounts []*KeepMount
        *ChangeSet
 }
 
@@ -78,3 +79,28 @@ func (srv *KeepService) put(c *arvados.Client, path string, data interface{}) er
 
        return err
 }
+
+func (srv *KeepService) discoverMounts(c *arvados.Client) error {
+       mounts, err := srv.Mounts(c)
+       if err != nil {
+               return fmt.Errorf("%s: error retrieving mounts: %v", srv, err)
+       }
+       srv.mounts = nil
+       for _, m := range mounts {
+               srv.mounts = append(srv.mounts, &KeepMount{
+                       KeepMount:   m,
+                       KeepService: srv,
+               })
+       }
+       return nil
+}
+
+type KeepMount struct {
+       arvados.KeepMount
+       KeepService *KeepService
+}
+
+// String implements fmt.Stringer.
+func (mnt *KeepMount) String() string {
+       return fmt.Sprintf("%s (%s) on %s", mnt.UUID, mnt.DeviceID, mnt.KeepService)
+}
index f18d82c06b29b7948a90431be686001b1bd9e572..828a1f1b7a485b4353f32ace30de5c8cf9a40192 100644 (file)
@@ -452,7 +452,7 @@ func (v *AzureBlobVolume) Touch(loc string) error {
                return os.ErrNotExist
        }
 
-       metadata["touch"] = fmt.Sprintf("%d", time.Now())
+       metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
        return v.container.SetBlobMetadata(loc, metadata, nil)
 }
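
[note] The one-line fix above matters because fmt's %d verb applied to a time.Time recurses into the struct's fields instead of producing a Unix timestamp, corrupting the stored metadata value. A sketch of the difference:

    package main

    import (
            "fmt"
            "time"
    )

    func main() {
            t := time.Unix(1521900000, 0).UTC()
            fmt.Println(fmt.Sprintf("%d", t))        // recurses into time.Time's fields: garbage
            fmt.Println(fmt.Sprintf("%d", t.Unix())) // "1521900000", a usable timestamp
    }
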
 
index 258604ce59c50176a4d4473fd7dbf87c8d66de9c..8b37b906eb7ee11f79813c9749c6de2d3af48623 100644 (file)
@@ -514,7 +514,7 @@ type PullRequest struct {
        Servers []string `json:"servers"`
 
        // Destination mount, or "" for "anywhere"
-       MountUUID string
+       MountUUID string `json:"mount_uuid"`
 }
 
 // PullHandler processes "PUT /pull" requests for the data manager.
@@ -553,7 +553,7 @@ type TrashRequest struct {
        BlockMtime int64  `json:"block_mtime"`
 
        // Target mount, or "" for "everywhere"
-       MountUUID string
+       MountUUID string `json:"mount_uuid"`
 }
 
 // TrashHandler processes /trash requests.
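
[note] The explicit json tags matter because encoding/json will not match the key "mount_uuid" against the field name MountUUID (the underscore defeats its case-insensitive fallback), so an untagged field silently stays empty. A sketch:

    package main

    import (
            "encoding/json"
            "fmt"
    )

    type untagged struct{ MountUUID string }

    type tagged struct {
            MountUUID string `json:"mount_uuid"`
    }

    func main() {
            payload := []byte(`{"mount_uuid":"zzzzz-ivpuk-000000000000000"}`)
            var u untagged
            var t tagged
            json.Unmarshal(payload, &u) // errors intentionally ignored in this sketch
            json.Unmarshal(payload, &t)
            fmt.Printf("untagged=%q tagged=%q\n", u.MountUUID, t.MountUUID)
            // untagged="" tagged="zzzzz-ivpuk-000000000000000"
    }
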
index 66a212456d51c78e09cc12dc41bdccb17bf953df..0f7b6e97351a1a6049cb1ef49db69dcb1f29a6d7 100644 (file)
@@ -46,11 +46,11 @@ func (s *MountsSuite) TestMounts(c *check.C) {
        resp := s.call("GET", "/mounts", "", nil)
        c.Check(resp.Code, check.Equals, http.StatusOK)
        var mntList []struct {
-               UUID           string
-               DeviceID       string
-               ReadOnly       bool
-               Replication    int
-               StorageClasses []string
+               UUID           string   `json:"uuid"`
+               DeviceID       string   `json:"device_id"`
+               ReadOnly       bool     `json:"read_only"`
+               Replication    int      `json:"replication"`
+               StorageClasses []string `json:"storage_classes"`
        }
        err := json.Unmarshal(resp.Body.Bytes(), &mntList)
        c.Assert(err, check.IsNil)
index 7a8297039b513d62b2f8a38ab81ab0f9bc89f187..7b5077c1a7f70dad794e14fc148ae1da7fc60d04 100644 (file)
@@ -127,7 +127,7 @@ func (s *PullWorkerTestSuite) TestSpecifyMountUUID(c *C) {
                        requestBody: []byte(`[{
                                "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
                                "servers":["server_1","server_2"],
-                               "mountuuid":"` + spec.sendUUID + `"}]`),
+                               "mount_uuid":"` + spec.sendUUID + `"}]`),
                })
                c.Assert(resp.Code, Equals, http.StatusOK)
                expectEqualWithin(c, time.Second, 0, func() interface{} {
index 1f8fba5d067c2a0731cb05eeebf81cc76bc315b7..6bce05bec033fbda6c759b6b4266bcbff0f3e051 100644 (file)
@@ -12,6 +12,8 @@ import (
        "math/big"
        "sync/atomic"
        "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
 )
 
 type BlockWriter interface {
@@ -287,12 +289,8 @@ type VolumeManager interface {
 
 // A VolumeMount is an attachment of a Volume to a VolumeManager.
 type VolumeMount struct {
-       UUID           string
-       DeviceID       string
-       ReadOnly       bool
-       Replication    int
-       StorageClasses []string
-       volume         Volume
+       arvados.KeepMount
+       volume Volume
 }
 
 // Generate a UUID the way API server would for a "KeepVolumeMount"
@@ -334,12 +332,14 @@ func MakeRRVolumeManager(volumes []Volume) *RRVolumeManager {
                        sc = []string{"default"}
                }
                mnt := &VolumeMount{
-                       UUID:           (*VolumeMount)(nil).generateUUID(),
-                       DeviceID:       v.DeviceID(),
-                       ReadOnly:       !v.Writable(),
-                       Replication:    v.Replication(),
-                       StorageClasses: sc,
-                       volume:         v,
+                       KeepMount: arvados.KeepMount{
+                               UUID:           (*VolumeMount)(nil).generateUUID(),
+                               DeviceID:       v.DeviceID(),
+                               ReadOnly:       !v.Writable(),
+                               Replication:    v.Replication(),
+                               StorageClasses: sc,
+                       },
+                       volume: v,
                }
                vm.iostats[v] = &ioStats{}
                vm.mounts = append(vm.mounts, mnt)
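
[note] Embedding arvados.KeepMount replaces the five duplicated fields: the promoted fields keep the same Go-side access (mnt.UUID and so on) and the shared json tags, while the unexported volume field stays out of /mounts responses. A sketch of the pattern with a trimmed-down KeepMount:

    package main

    import (
            "encoding/json"
            "fmt"
            "os"
    )

    // KeepMount is trimmed to three of the real struct's fields.
    type KeepMount struct {
            UUID     string `json:"uuid"`
            DeviceID string `json:"device_id"`
            ReadOnly bool   `json:"read_only"`
    }

    type VolumeMount struct {
            KeepMount             // embedded: fields and json tags are promoted
            volume    interface{} // unexported: invisible to encoding/json
    }

    func main() {
            m := VolumeMount{KeepMount: KeepMount{UUID: "zzzzz-ivpuk-000000000000000", DeviceID: "keep0-vol0"}}
            json.NewEncoder(os.Stdout).Encode(m) // {"uuid":...,"device_id":...,"read_only":false}
            fmt.Println(m.UUID)                  // promoted field access still works
    }
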
index 0360bfc5424913c2f85bafc52d2ceaf29bd41296..20849c917a92422b86d214b57e8115fdba4a7529 100644 (file)
@@ -153,7 +153,7 @@ class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
     def _send_request(self):
         queuelist = []
         if self.slurm_queue:
-            # cpus, memory, tempory disk space, reason, job name
+            # cpus, memory, temporary disk space, reason, job name, feature constraints
             squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j|%f"])
             for out in squeue_out.splitlines():
                 try:
@@ -163,7 +163,7 @@ class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
                     continue
                 if '-dz642-' not in jobname:
                     continue
-                if not re.search(r'ReqNodeNotAvail|Resources|Priority', reason):
+                if not re.search(r'BadConstraints|ReqNodeNotAvail|Resources|Priority', reason):
                     continue
 
                 for feature in features.split(','):
index 3429a1b65d0b13c500e3f109d017eaa6afcc60de..a5971502f65354b8786b676888970d2fecd27ce7 100755 (executable)
@@ -433,9 +433,9 @@ case "$subcmd" in
 
     log)
         if test -n "$1" ; then
-            exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM less --follow-name -R +GF "/etc/service/$1/log/main/current"
+            exec docker exec -ti -e LINES=$(tput lines) -e COLUMNS=$(tput cols) -e TERM=$TERM $ARVBOX_CONTAINER less --follow-name -R +GF "/etc/service/$1/log/main/current"
         else
-            exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
+            exec docker exec -ti $ARVBOX_CONTAINER tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
         fi
         ;;
 
index d90a2e2f8170d9d8d5960b69ba903822538a0ce0..5615884f75c25e6d9be859f6181d464d4bfefab2 100755 (executable)
@@ -22,6 +22,6 @@ else
 fi
 
 if test "$1" != "--only-deps" ; then
-    exec bundle exec passenger start --port 80 \
-         --user arvbox --runtime-dir=/var/lib/passenger
+    exec bundle exec passenger start --port=${services[workbench]} \
+         --user arvbox
 fi
index e5bd0147b87674bee40cc8a2e2546c95d0d65468..09d77e01d0f6a28548b32e44787a38a5b8a610ad 100755 (executable)
@@ -17,7 +17,8 @@ else
 fi
 
 run_bundler --without=development
-bundle exec passenger start --runtime-check-only --runtime-dir=/var/lib/passenger
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
 mkdir -p /usr/src/arvados/apps/workbench/tmp
 RAILS_GROUPS=assets bundle exec rake npm:install