Merge branch '15028-cwl-v1.1' refs #15028
author Peter Amstutz <pamstutz@veritasgenetics.com>
Tue, 4 Jun 2019 17:20:24 +0000 (13:20 -0400)
committer Peter Amstutz <pamstutz@veritasgenetics.com>
Tue, 4 Jun 2019 17:20:24 +0000 (13:20 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>

195 files changed:
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/application.js
apps/workbench/app/assets/stylesheets/application.css.scss
apps/workbench/app/controllers/actions_controller.rb
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/collections_controller.rb
apps/workbench/app/controllers/container_requests_controller.rb
apps/workbench/app/controllers/containers_controller.rb
apps/workbench/app/controllers/healthcheck_controller.rb
apps/workbench/app/controllers/jobs_controller.rb
apps/workbench/app/controllers/logs_controller.rb
apps/workbench/app/controllers/pipeline_instances_controller.rb
apps/workbench/app/controllers/pipeline_templates_controller.rb
apps/workbench/app/controllers/projects_controller.rb
apps/workbench/app/controllers/repositories_controller.rb
apps/workbench/app/controllers/search_controller.rb
apps/workbench/app/controllers/sessions_controller.rb
apps/workbench/app/controllers/status_controller.rb
apps/workbench/app/controllers/tests_controller.rb
apps/workbench/app/controllers/user_agreements_controller.rb
apps/workbench/app/controllers/users_controller.rb
apps/workbench/app/controllers/websocket_controller.rb
apps/workbench/app/controllers/work_units_controller.rb
apps/workbench/app/controllers/workflows_controller.rb
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/models/application_record.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_base.rb
apps/workbench/app/models/user.rb
apps/workbench/app/views/application/_content.html.erb
apps/workbench/app/views/jobs/show.html.erb
apps/workbench/app/views/pipeline_instances/show.html.erb
apps/workbench/bin/bundle [new file with mode: 0755]
apps/workbench/bin/rails [new file with mode: 0755]
apps/workbench/bin/rake [new file with mode: 0755]
apps/workbench/bin/setup [new file with mode: 0755]
apps/workbench/bin/update [new file with mode: 0755]
apps/workbench/config/application.default.yml
apps/workbench/config/application.rb
apps/workbench/config/cable.yml [new file with mode: 0644]
apps/workbench/config/environment.rb
apps/workbench/config/environments/production.rb.example
apps/workbench/config/environments/test.rb.example
apps/workbench/config/initializers/application_controller_renderer.rb [new file with mode: 0644]
apps/workbench/config/initializers/assets.rb [new file with mode: 0644]
apps/workbench/config/initializers/cookies_serializer.rb [new file with mode: 0644]
apps/workbench/config/initializers/filter_parameter_logging.rb [new file with mode: 0644]
apps/workbench/config/initializers/new_framework_defaults.rb [new file with mode: 0644]
apps/workbench/config/initializers/session_store.rb
apps/workbench/config/initializers/validate_wb2_url_config.rb
apps/workbench/config/puma.rb [new file with mode: 0644]
apps/workbench/config/secrets.yml [new file with mode: 0644]
apps/workbench/config/spring.rb [new file with mode: 0644]
apps/workbench/test/controllers/actions_controller_test.rb
apps/workbench/test/controllers/application_controller_test.rb
apps/workbench/test/controllers/collections_controller_test.rb
apps/workbench/test/controllers/container_requests_controller_test.rb
apps/workbench/test/controllers/containers_controller_test.rb
apps/workbench/test/controllers/disabled_api_test.rb
apps/workbench/test/controllers/jobs_controller_test.rb
apps/workbench/test/controllers/pipeline_instances_controller_test.rb
apps/workbench/test/controllers/pipeline_templates_controller_test.rb
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/controllers/repositories_controller_test.rb
apps/workbench/test/controllers/search_controller_test.rb
apps/workbench/test/controllers/trash_items_controller_test.rb
apps/workbench/test/controllers/user_agreements_controller_test.rb
apps/workbench/test/controllers/users_controller_test.rb
apps/workbench/test/controllers/work_units_controller_test.rb
apps/workbench/test/controllers/workflows_controller_test.rb
apps/workbench/test/functional/.gitkeep [new file with mode: 0644]
apps/workbench/test/helpers/share_object_helper.rb
apps/workbench/test/integration/application_layout_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/mailers/.gitkeep [new file with mode: 0644]
apps/workbench/test/models/.gitkeep [new file with mode: 0644]
apps/workbench/test/test_helper.rb
apps/workbench/test/unit/work_unit_test.rb
build/rails-package-scripts/arvados-api-server.sh
build/run-library.sh
build/run-tests.sh
cmd/arvados-server/cmd.go
doc/admin/logs-table-management.html.textile.liquid
doc/admin/merge-remote-account.html.textile.liquid
doc/admin/upgrading.html.textile.liquid
doc/install/install-controller.html.textile.liquid
doc/install/install-dispatch-cloud.html.textile.liquid
doc/sdk/python/arvados-fuse.html.textile.liquid
doc/sdk/python/sdk-python.html.textile.liquid
lib/cloud/azure/azure.go
lib/cloud/azure/azure_test.go
lib/cloud/ec2/ec2.go
lib/cloud/ec2/ec2_test.go
lib/cloud/interfaces.go
lib/config/cmd.go [new file with mode: 0644]
lib/config/cmd_test.go [new file with mode: 0644]
lib/config/config.default.yml
lib/config/deprecated.go [new file with mode: 0644]
lib/config/deprecated_test.go [new file with mode: 0644]
lib/config/generate.go [new file with mode: 0644]
lib/config/generated_config.go [new file with mode: 0644]
lib/config/load.go [new file with mode: 0644]
lib/config/load_test.go [new file with mode: 0644]
lib/config/uptodate.go [new file with mode: 0644]
lib/config/uptodate_test.go [new file with mode: 0644]
lib/controller/cmd.go
lib/controller/fed_collections.go
lib/controller/fed_generic.go
lib/controller/federation_test.go
lib/controller/handler.go
lib/controller/handler_test.go
lib/controller/semaphore.go [new file with mode: 0644]
lib/controller/server_test.go
lib/dispatchcloud/cmd.go
lib/dispatchcloud/dispatcher.go
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/driver.go
lib/dispatchcloud/test/stub_driver.go
lib/dispatchcloud/worker/pool.go
lib/dispatchcloud/worker/pool_test.go
lib/dispatchcloud/worker/verify.go
lib/dispatchcloud/worker/worker.go
lib/dispatchcloud/worker/worker_test.go
lib/service/cmd.go
lib/service/cmd_test.go
lib/service/error.go
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/tests/15241-writable-dir-job.json [new file with mode: 0644]
sdk/cwl/tests/15241-writable-dir.cwl [new file with mode: 0644]
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_submit.py
sdk/go/arvados/config.go
sdk/go/arvados/duration.go
sdk/go/arvados/duration_test.go [new file with mode: 0644]
sdk/go/arvados/fs_collection_test.go
sdk/go/arvados/fs_project_test.go
sdk/go/arvados/fs_site_test.go
sdk/go/arvados/postgresql.go
sdk/go/arvadostest/fixtures.go
sdk/go/arvadostest/stub.go
sdk/go/health/aggregator.go
sdk/go/health/aggregator_test.go
sdk/pam/arvados_pam/__init__.py
sdk/pam/setup.py
sdk/python/arvados/commands/federation_migrate.py [new file with mode: 0755]
sdk/python/bin/arv-federation-migrate [new file with mode: 0755]
sdk/python/setup.py
sdk/python/tests/run_test_server.py
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/assets/images/logo.png
services/api/app/assets/stylesheets/application.css
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/controllers/user_sessions_controller.rb
services/api/app/models/blob.rb
services/api/app/models/collection.rb
services/api/app/models/jsonb_type.rb
services/api/app/models/user.rb
services/api/app/views/layouts/application.html.erb
services/api/app/views/static/login_failure.html.erb
services/api/app/views/user_sessions/create.html.erb [new file with mode: 0644]
services/api/config/arvados_config.rb
services/api/db/migrate/20190322174136_add_file_info_to_collection.rb [changed mode: 0755->0644]
services/api/lib/audit_logs.rb
services/api/lib/tasks/delete_old_container_logs.rake
services/api/lib/tasks/delete_old_job_logs.rake
services/api/lib/trashable.rb
services/api/lib/update_priority.rb
services/api/script/populate-file-info-columns-in-collections.rb [new file with mode: 0755]
services/api/test/functional/arvados/v1/collections_controller_test.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/integration/collections_api_test.rb
services/api/test/unit/collection_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/update_priority_test.rb
services/crunch-run/copier_test.go
services/health/main.go
services/keep-web/cache_test.go
services/keep-web/cadaver_test.go
services/keep-web/handler_test.go
services/keep-web/server_test.go
services/keepstore/unix_volume.go
services/nodemanager/doc/ec2.example.cfg
services/nodemanager/setup.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/service/certificate/run
tools/arvbox/lib/arvbox/docker/service/controller/run
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service
tools/arvbox/lib/arvbox/docker/service/nginx/run
tools/arvbox/lib/arvbox/docker/service/workbench2/run-service
tools/keep-xref/keep-xref.py [new file with mode: 0755]
vendor/vendor.json

index 7150faa9e12767db52ae6d11416122038c8875cd..ce2a1377d72a3f9f6bbbc3cf92243921eff137cf 100644 (file)
@@ -4,10 +4,10 @@
 
 source 'https://rubygems.org'
 
-gem 'rails', '~> 4.2.0'
+gem 'rails', '~> 5.0.0'
 gem 'arvados', '>= 0.1.20150511150219'
 
-gem 'activerecord-nulldb-adapter'
+gem 'activerecord-nulldb-adapter', git: 'https://github.com/curoverse/nulldb'
 gem 'multi_json'
 gem 'oj'
 gem 'sass'
@@ -24,7 +24,7 @@ gem 'coffee-rails'
 # Gems used only for assets and not required
 # in production environments by default.
 group :assets do
-  gem 'sass-rails'
+  gem 'sassc-rails'
   gem 'uglifier', '~> 2.0'
 
   # See https://github.com/sstephenson/execjs#readme for more supported runtimes
@@ -48,6 +48,7 @@ group :test, :diagnostics, :performance do
 end
 
 group :test, :performance do
+  gem 'byebug'
   gem 'rails-perftest'
   gem 'ruby-prof'
   gem 'rvm-capistrano'
@@ -57,10 +58,11 @@ group :test, :performance do
   gem 'simplecov', '~> 0.7', require: false
   gem 'simplecov-rcov', require: false
   gem 'mocha', require: false
+  gem 'rails-controller-testing'
 end
 
 gem 'jquery-rails'
-gem 'bootstrap-sass', '~> 3.1.0'
+gem 'bootstrap-sass', '~> 3.4.1'
 gem 'bootstrap-x-editable-rails'
 gem 'bootstrap-tab-history-rails'
 
@@ -68,7 +70,12 @@ gem 'angularjs-rails', '~> 1.3.8'
 
 gem 'less'
 gem 'less-rails'
-gem 'wiselinks'
+
+# Wiselinks hasn't been updated for many years and it's using deprecated methods
+# Use our own Wiselinks fork until this PR is accepted:
+# https://github.com/igor-alexandrov/wiselinks/pull/116
+# gem 'wiselinks', git: 'https://github.com/curoverse/wiselinks.git', branch: 'rails-5.1-compatibility'
+
 gem 'sshkey'
 
 # To use ActiveModel has_secure_password
index cc45ca66f2cb9bb9f09efa4269f7533fe572bdb0..548da1dc049bf32a2abc886e2c451f2b5b1927ab 100644 (file)
@@ -1,6 +1,13 @@
+GIT
+  remote: https://github.com/curoverse/nulldb
+  revision: d8e0073b665acdd2537c5eb15178a60f02f4b413
+  specs:
+    activerecord-nulldb-adapter (0.3.9)
+      activerecord (>= 2.0.0)
+
 GIT
   remote: https://github.com/curoverse/themes_for_rails
-  revision: 61154877047d2346890bda0b7be5827cf51a6a76
+  revision: ddf6e592b3b6493ea0c2de7b5d3faa120ed35be0
   specs:
     themes_for_rails (0.5.1)
       rails (>= 3.0.0)
@@ -9,66 +16,81 @@ GEM
   remote: https://rubygems.org/
   specs:
     RedCloth (4.3.2)
-    actionmailer (4.2.11)
-      actionpack (= 4.2.11)
-      actionview (= 4.2.11)
-      activejob (= 4.2.11)
+    actioncable (5.0.7.2)
+      actionpack (= 5.0.7.2)
+      nio4r (>= 1.2, < 3.0)
+      websocket-driver (~> 0.6.1)
+    actionmailer (5.0.7.2)
+      actionpack (= 5.0.7.2)
+      actionview (= 5.0.7.2)
+      activejob (= 5.0.7.2)
       mail (~> 2.5, >= 2.5.4)
-      rails-dom-testing (~> 1.0, >= 1.0.5)
-    actionpack (4.2.11)
-      actionview (= 4.2.11)
-      activesupport (= 4.2.11)
-      rack (~> 1.6)
-      rack-test (~> 0.6.2)
-      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-dom-testing (~> 2.0)
+    actionpack (5.0.7.2)
+      actionview (= 5.0.7.2)
+      activesupport (= 5.0.7.2)
+      rack (~> 2.0)
+      rack-test (~> 0.6.3)
+      rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.2)
-    actionview (4.2.11)
-      activesupport (= 4.2.11)
+    actionview (5.0.7.2)
+      activesupport (= 5.0.7.2)
       builder (~> 3.1)
       erubis (~> 2.7.0)
-      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.3)
-    activejob (4.2.11)
-      activesupport (= 4.2.11)
-      globalid (>= 0.3.0)
-    activemodel (4.2.11)
-      activesupport (= 4.2.11)
-      builder (~> 3.1)
-    activerecord (4.2.11)
-      activemodel (= 4.2.11)
-      activesupport (= 4.2.11)
-      arel (~> 6.0)
-    activerecord-nulldb-adapter (0.3.8)
-      activerecord (>= 2.0.0)
-    activesupport (4.2.11)
-      i18n (~> 0.7)
+    activejob (5.0.7.2)
+      activesupport (= 5.0.7.2)
+      globalid (>= 0.3.6)
+    activemodel (5.0.7.2)
+      activesupport (= 5.0.7.2)
+    activerecord (5.0.7.2)
+      activemodel (= 5.0.7.2)
+      activesupport (= 5.0.7.2)
+      arel (~> 7.0)
+    activesupport (5.0.7.2)
+      concurrent-ruby (~> 1.0, >= 1.0.2)
+      i18n (>= 0.7, < 2)
       minitest (~> 5.1)
-      thread_safe (~> 0.3, >= 0.3.4)
       tzinfo (~> 1.1)
-    addressable (2.5.2)
+    addressable (2.6.0)
       public_suffix (>= 2.0.2, < 4.0)
     andand (1.3.3)
     angularjs-rails (1.3.15)
-    arel (6.0.4)
-    arvados (0.1.20180302192246)
+    arel (7.1.4)
+    arvados (1.3.1.20190320201707)
       activesupport (>= 3)
       andand (~> 1.3, >= 1.3.3)
-      google-api-client (>= 0.7, < 0.8.9)
+      arvados-google-api-client (>= 0.7, < 0.8.9)
       i18n (~> 0)
       json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
+    arvados-google-api-client (0.8.7.2)
+      activesupport (>= 3.2, < 5.1)
+      addressable (~> 2.3)
+      autoparse (~> 0.3)
+      extlib (~> 0.9)
+      faraday (~> 0.9)
+      googleauth (~> 0.3)
+      launchy (~> 2.4)
+      multi_json (~> 1.10)
+      retriable (~> 1.4)
+      signet (~> 0.6)
     autoparse (0.3.3)
       addressable (>= 2.3.1)
       extlib (>= 0.9.15)
       multi_json (>= 1.0.0)
-    bootstrap-sass (3.1.1.1)
-      sass (~> 3.2)
+    autoprefixer-rails (9.5.1.1)
+      execjs
+    bootstrap-sass (3.4.1)
+      autoprefixer-rails (>= 5.2.1)
+      sassc (>= 2.0.0)
     bootstrap-tab-history-rails (0.1.0)
       railties (>= 3.1)
     bootstrap-x-editable-rails (1.5.1.1)
       railties (>= 3.0)
     builder (3.2.3)
-    byebug (10.0.0)
+    byebug (11.0.1)
     capistrano (2.15.9)
       highline
       net-scp (>= 1.0.0)
@@ -92,65 +114,47 @@ GEM
       execjs
     coffee-script-source (1.12.2)
     commonjs (0.2.7)
-    concurrent-ruby (1.1.4)
+    concurrent-ruby (1.1.5)
     crass (1.0.4)
     deep_merge (1.2.1)
-    docile (1.1.5)
+    docile (1.3.1)
     erubis (2.7.0)
     execjs (2.7.0)
     extlib (0.9.16)
-    faraday (0.14.0)
+    faraday (0.15.4)
       multipart-post (>= 1.2, < 3)
-    ffi (1.9.25)
+    ffi (1.10.0)
     flamegraph (0.9.5)
-    globalid (0.4.1)
+    globalid (0.4.2)
       activesupport (>= 4.2.0)
-    google-api-client (0.8.7)
-      activesupport (>= 3.2, < 5.0)
-      addressable (~> 2.3)
-      autoparse (~> 0.3)
-      extlib (~> 0.9)
-      faraday (~> 0.9)
-      googleauth (~> 0.3)
-      launchy (~> 2.4)
-      multi_json (~> 1.10)
-      retriable (~> 1.4)
-      signet (~> 0.6)
-    googleauth (0.6.2)
+    googleauth (0.8.1)
       faraday (~> 0.12)
       jwt (>= 1.4, < 3.0)
-      logging (~> 2.0)
-      memoist (~> 0.12)
+      memoist (~> 0.16)
       multi_json (~> 1.11)
-      os (~> 0.9)
+      os (>= 0.9, < 2.0)
       signet (~> 0.7)
-    grease (0.3.1)
     headless (1.0.2)
-    highline (1.7.10)
+    highline (2.0.2)
     httpclient (2.8.3)
     i18n (0.9.5)
       concurrent-ruby (~> 1.0)
-    jquery-rails (3.1.4)
-      railties (>= 3.0, < 5.0)
+    jquery-rails (4.3.3)
+      rails-dom-testing (>= 1, < 3)
+      railties (>= 4.2.0)
       thor (>= 0.14, < 2.0)
-    json (2.1.0)
+    json (2.2.0)
     jwt (1.5.6)
     launchy (2.4.3)
       addressable (~> 2.3)
     less (2.6.0)
       commonjs (~> 0.2.7)
-    less-rails (3.0.0)
-      actionpack (>= 4.0)
-      grease
+    less-rails (4.0.0)
+      actionpack (>= 4)
       less (~> 2.6.0)
-      sprockets (> 2, < 4)
-      tilt
+      sprockets (>= 2)
     libv8 (3.16.14.19)
-    little-plugger (1.1.4)
-    logging (2.2.2)
-      little-plugger (~> 1.1)
-      multi_json (~> 1.10)
-    lograge (0.9.0)
+    lograge (0.10.0)
       actionpack (>= 4)
       activesupport (>= 4)
       railties (>= 4)
@@ -163,32 +167,34 @@ GEM
       mini_mime (>= 0.1.1)
     memoist (0.16.0)
     metaclass (0.0.4)
-    mime-types (3.1)
+    method_source (0.9.2)
+    mime-types (3.2.2)
       mime-types-data (~> 3.2015)
-    mime-types-data (3.2016.0521)
+    mime-types-data (3.2019.0331)
     mini_mime (1.0.1)
     mini_portile2 (2.4.0)
     minitest (5.10.3)
-    mocha (1.3.0)
+    mocha (1.8.0)
       metaclass (~> 0.0.1)
     morrisjs-rails (0.5.1.2)
       railties (> 3.1, < 6)
     multi_json (1.13.1)
     multipart-post (2.0.0)
-    net-scp (1.2.1)
-      net-ssh (>= 2.6.5)
+    net-scp (2.0.0)
+      net-ssh (>= 2.6.5, < 6.0.0)
     net-sftp (2.1.2)
       net-ssh (>= 2.6.5)
-    net-ssh (4.2.0)
+    net-ssh (5.2.0)
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
-    nokogiri (1.9.1)
+    nio4r (2.3.1)
+    nokogiri (1.10.2)
       mini_portile2 (~> 2.4.0)
     npm-rails (0.2.1)
       rails (>= 3.2)
-    oj (3.6.4)
-    os (0.9.6)
-    passenger (5.2.1)
+    oj (3.7.11)
+    os (1.0.0)
+    passenger (6.0.2)
       rack
       rake (>= 0.8.1)
     piwik_analytics (1.0.2)
@@ -200,76 +206,82 @@ GEM
       cliver (~> 0.3.1)
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
-    public_suffix (3.0.2)
-    rack (1.6.11)
-    rack-mini-profiler (0.10.7)
+    public_suffix (3.0.3)
+    rack (2.0.7)
+    rack-mini-profiler (1.0.2)
       rack (>= 1.2.0)
     rack-test (0.6.3)
       rack (>= 1.0)
-    rails (4.2.11)
-      actionmailer (= 4.2.11)
-      actionpack (= 4.2.11)
-      actionview (= 4.2.11)
-      activejob (= 4.2.11)
-      activemodel (= 4.2.11)
-      activerecord (= 4.2.11)
-      activesupport (= 4.2.11)
-      bundler (>= 1.3.0, < 2.0)
-      railties (= 4.2.11)
-      sprockets-rails
-    rails-deprecated_sanitizer (1.0.3)
-      activesupport (>= 4.2.0.alpha)
-    rails-dom-testing (1.0.9)
-      activesupport (>= 4.2.0, < 5.0)
-      nokogiri (~> 1.6)
-      rails-deprecated_sanitizer (>= 1.0.1)
+    rails (5.0.7.2)
+      actioncable (= 5.0.7.2)
+      actionmailer (= 5.0.7.2)
+      actionpack (= 5.0.7.2)
+      actionview (= 5.0.7.2)
+      activejob (= 5.0.7.2)
+      activemodel (= 5.0.7.2)
+      activerecord (= 5.0.7.2)
+      activesupport (= 5.0.7.2)
+      bundler (>= 1.3.0)
+      railties (= 5.0.7.2)
+      sprockets-rails (>= 2.0.0)
+    rails-controller-testing (1.0.4)
+      actionpack (>= 5.0.1.x)
+      actionview (>= 5.0.1.x)
+      activesupport (>= 5.0.1.x)
+    rails-dom-testing (2.0.3)
+      activesupport (>= 4.2.0)
+      nokogiri (>= 1.6)
     rails-html-sanitizer (1.0.4)
       loofah (~> 2.2, >= 2.2.2)
     rails-perftest (0.0.7)
-    railties (4.2.11)
-      actionpack (= 4.2.11)
-      activesupport (= 4.2.11)
+    railties (5.0.7.2)
+      actionpack (= 5.0.7.2)
+      activesupport (= 5.0.7.2)
+      method_source
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
     rake (12.3.2)
     raphael-rails (2.1.2)
     rb-fsevent (0.10.3)
-    rb-inotify (0.9.10)
-      ffi (>= 0.5.0, < 2)
+    rb-inotify (0.10.0)
+      ffi (~> 1.0)
     ref (2.0.0)
-    request_store (1.4.0)
+    request_store (1.4.1)
       rack (>= 1.4)
-    responders (2.4.0)
-      actionpack (>= 4.2.0, < 5.3)
-      railties (>= 4.2.0, < 5.3)
+    responders (2.4.1)
+      actionpack (>= 4.2.0, < 6.0)
+      railties (>= 4.2.0, < 6.0)
     retriable (1.4.1)
     ruby-debug-passenger (0.2.0)
     ruby-prof (0.17.0)
     rubyzip (1.2.2)
     rvm-capistrano (1.5.6)
       capistrano (~> 2.15.4)
-    safe_yaml (1.0.4)
-    sass (3.5.5)
+    safe_yaml (1.0.5)
+    sass (3.7.4)
       sass-listen (~> 4.0.0)
     sass-listen (4.0.0)
       rb-fsevent (~> 0.9, >= 0.9.4)
       rb-inotify (~> 0.9, >= 0.9.7)
-    sass-rails (5.0.7)
-      railties (>= 4.0.0, < 6)
-      sass (~> 3.1)
-      sprockets (>= 2.8, < 4.0)
-      sprockets-rails (>= 2.0, < 4.0)
-      tilt (>= 1.1, < 3)
-    selenium-webdriver (3.14.1)
+    sassc (2.0.1)
+      ffi (~> 1.9)
+      rake
+    sassc-rails (2.1.0)
+      railties (>= 4.0.0)
+      sassc (>= 2.0)
+      sprockets (> 3.0)
+      sprockets-rails
+      tilt
+    selenium-webdriver (3.141.0)
       childprocess (~> 0.5)
       rubyzip (~> 1.2, >= 1.2.2)
-    signet (0.8.1)
+    signet (0.11.0)
       addressable (~> 2.3)
       faraday (~> 0.9)
       jwt (>= 1.5, < 3.0)
       multi_json (~> 1.10)
-    simplecov (0.15.1)
-      docile (~> 1.1.0)
+    simplecov (0.16.1)
+      docile (~> 1.1)
       json (>= 1.8, < 3)
       simplecov-html (~> 0.10.0)
     simplecov-html (0.10.2)
@@ -282,22 +294,21 @@ GEM
       actionpack (>= 4.0)
       activesupport (>= 4.0)
       sprockets (>= 3.0.0)
-    sshkey (1.9.0)
+    sshkey (2.0.0)
     therubyracer (0.12.3)
       libv8 (~> 3.16.14.15)
       ref
     thor (0.20.3)
     thread_safe (0.3.6)
-    tilt (2.0.8)
+    tilt (2.0.9)
     tzinfo (1.2.5)
       thread_safe (~> 0.1)
     uglifier (2.7.2)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
-    websocket-driver (0.7.0)
+    websocket-driver (0.6.5)
       websocket-extensions (>= 0.1.0)
     websocket-extensions (0.1.3)
-    wiselinks (1.2.1)
     xpath (2.1.0)
       nokogiri (~> 1.3)
 
@@ -306,11 +317,11 @@ PLATFORMS
 
 DEPENDENCIES
   RedCloth
-  activerecord-nulldb-adapter
+  activerecord-nulldb-adapter!
   andand
   angularjs-rails (~> 1.3.8)
   arvados (>= 0.1.20150511150219)
-  bootstrap-sass (~> 3.1.0)
+  bootstrap-sass (~> 3.4.1)
   bootstrap-tab-history-rails
   bootstrap-x-editable-rails
   byebug
@@ -336,7 +347,8 @@ DEPENDENCIES
   piwik_analytics
   poltergeist (~> 1.5.1)
   rack-mini-profiler
-  rails (~> 4.2.0)
+  rails (~> 5.0.0)
+  rails-controller-testing
   rails-perftest
   raphael-rails
   responders (~> 2.0)
@@ -345,7 +357,7 @@ DEPENDENCIES
   rvm-capistrano
   safe_yaml
   sass
-  sass-rails
+  sassc-rails
   selenium-webdriver (~> 3)
   simplecov (~> 0.7)
   simplecov-rcov
@@ -353,7 +365,6 @@ DEPENDENCIES
   themes_for_rails!
   therubyracer
   uglifier (~> 2.0)
-  wiselinks
 
 BUNDLED WITH
-   1.17.2
+   1.17.3
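
Note: the trailing "!" on activerecord-nulldb-adapter in the DEPENDENCIES list above is Bundler's marker that the gem resolves from a non-default source, here the GIT block added at the top of the lockfile. A minimal sketch of the matching Gemfile declaration (the URL comes from the Gemfile diff above):

    # Fetch the fork from git instead of rubygems.org; Bundler records
    # such gems with a "!" suffix in Gemfile.lock's DEPENDENCIES section.
    gem 'activerecord-nulldb-adapter', git: 'https://github.com/curoverse/nulldb'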
index 270a4c766d3152f3edd487561cc40ae4e2bdb256..1898128133535014f795bafaee942e10c6ff058d 100644 (file)
@@ -26,7 +26,6 @@
 //= require bootstrap/button
 //= require bootstrap3-editable/bootstrap-editable
 //= require bootstrap-tab-history
-//= require wiselinks
 //= require angular
 //= require raphael
 //= require morris
index 8822d5c0a07c606370f6c056d8c6b429f1d9f39b..1f21c397293927ca1eb4e0fcf6dde2fbedc78cd5 100644 (file)
  * compiled file, but it's generally better to create a new file per style scope.
  *
  *= require_self
- *= require bootstrap
  *= require bootstrap3-editable/bootstrap-editable
  *= require morris
  *= require awesomplete
  *= require_tree .
  */
 
+@import "bootstrap-sprockets";
+@import "bootstrap";
+
 .contain-align-left {
     text-align: left;
 }
index beeae0760c21dfe2558483176000347c25167ad9..b1bbb122670dcb6b11aac21d915f26174851aed9 100644 (file)
@@ -3,19 +3,20 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require "arvados/collection"
+require "app_version"
 
 class ActionsController < ApplicationController
 
   # Skip require_thread_api_token if this is a show action
   # for an object uuid that supports anonymous access.
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name and
     params['uuid'] and
     model_class.in?([Collection, Group, Job, PipelineInstance, PipelineTemplate])
   }
-  skip_filter :require_thread_api_token, only: [:report_issue_popup, :report_issue]
-  skip_filter :check_user_agreements, only: [:report_issue_popup, :report_issue]
+  skip_around_action :require_thread_api_token, only: [:report_issue_popup, :report_issue]
+  skip_before_action :check_user_agreements, only: [:report_issue_popup, :report_issue]
 
   @@exposed_actions = {}
   def self.expose_action method, &block
@@ -203,7 +204,7 @@ You can try recreating the collection to get a copy with full provenance data."
 
     respond_to do |format|
       IssueReporter.send_report(current_user, params).deliver
-      format.js {render nothing: true}
+      format.js {render body: nil}
     end
   end
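
Note: two Rails 5.0 API changes recur throughout this commit: the *_filter callback macros (before_filter, around_filter, skip_before_filter, ...) are deprecated in favor of the *_action forms, and "render nothing: true" is gone in favor of "render body: nil". A minimal sketch, using a hypothetical controller rather than one from this commit:

    class PingController < ActionController::Base
      # Rails 5 spelling; before_filter is the deprecated Rails 4 alias.
      before_action :check_token

      def ping
        # Empty 200 response; replaces the removed `render nothing: true`.
        render body: nil
      end

      private

      def check_token
        head :forbidden unless params[:token] == 'secret'
      end
    end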
 
index 8d9e8578c529a618678f9a8136d8472ae81021ff..21e9b49fd800fdce05d34e3358eafc9692111e12 100644 (file)
@@ -11,19 +11,19 @@ class ApplicationController < ActionController::Base
 
   ERROR_ACTIONS = [:render_error, :render_not_found]
 
-  around_filter :thread_clear
-  around_filter :set_current_request_id
-  around_filter :set_thread_api_token
+  around_action :thread_clear
+  around_action :set_current_request_id
+  around_action :set_thread_api_token
   # Methods that don't require login should
-  #   skip_around_filter :require_thread_api_token
-  around_filter :require_thread_api_token, except: ERROR_ACTIONS
-  before_filter :ensure_arvados_api_exists, only: [:index, :show]
-  before_filter :set_cache_buster
-  before_filter :accept_uuid_as_id_param, except: ERROR_ACTIONS
-  before_filter :check_user_agreements, except: ERROR_ACTIONS
-  before_filter :check_user_profile, except: ERROR_ACTIONS
-  before_filter :load_filters_and_paging_params, except: ERROR_ACTIONS
-  before_filter :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
+  #   skip_around_action :require_thread_api_token
+  around_action :require_thread_api_token, except: ERROR_ACTIONS
+  before_action :ensure_arvados_api_exists, only: [:index, :show]
+  before_action :set_cache_buster
+  before_action :accept_uuid_as_id_param, except: ERROR_ACTIONS
+  before_action :check_user_agreements, except: ERROR_ACTIONS
+  before_action :check_user_profile, except: ERROR_ACTIONS
+  before_action :load_filters_and_paging_params, except: ERROR_ACTIONS
+  before_action :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
   theme :select_theme
 
   begin
@@ -353,6 +353,9 @@ class ApplicationController < ActionController::Base
 
   def update
     @updates ||= params[@object.resource_param_name.to_sym]
+    if @updates.is_a? ActionController::Parameters
+      @updates = @updates.to_unsafe_hash
+    end
     @updates.keys.each do |attr|
       if @object.send(attr).is_a? Hash
         if @updates[attr].is_a? String
@@ -361,6 +364,9 @@ class ApplicationController < ActionController::Base
         if params[:merge] || params["merge_#{attr}".to_sym]
           # Merge provided Hash with current Hash, instead of
           # replacing.
+          if @updates[attr].is_a? ActionController::Parameters
+            @updates[attr] = @updates[attr].to_unsafe_hash
+          end
           @updates[attr] = @object.send(attr).with_indifferent_access.
             deep_merge(@updates[attr].with_indifferent_access)
         end
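
Note: the to_unsafe_hash conversions in this hunk (and in several controllers below) exist because ActionController::Parameters stopped inheriting from Hash in Rails 5.0, so Hash operations such as deep_merge need an explicit conversion first. A minimal sketch, assuming a controller context:

    # Rails 5: params is ActionController::Parameters, not a Hash.
    updates = params[:collection]
    if updates.is_a?(ActionController::Parameters)
      # Bypasses strong-parameter filtering, like the hunk above;
      # prefer permit(...).to_h when a whitelist is available.
      updates = updates.to_unsafe_hash
    end
    updates.each { |k, v| puts "#{k}=#{v}" }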
index 0a7f22b95789edc163198fbf32ab55045317f298..8d7e6ee332af5e3cf53dac674e6185626f43d413 100644 (file)
@@ -9,17 +9,17 @@ require "uri"
 class CollectionsController < ApplicationController
   include ActionController::Live
 
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
-  skip_around_filter(:require_thread_api_token,
+  skip_around_action(:require_thread_api_token,
                      only: [:show_file, :show_file_links])
-  skip_before_filter(:find_object_by_uuid,
+  skip_before_action(:find_object_by_uuid,
                      only: [:provenance, :show_file, :show_file_links])
   # We depend on show_file to display the user agreement:
-  skip_before_filter :check_user_agreements, only: :show_file
-  skip_before_filter :check_user_profile, only: :show_file
+  skip_before_action :check_user_agreements, only: :show_file
+  skip_before_action :check_user_profile, only: :show_file
 
   RELATION_LIMIT = 5
 
@@ -265,7 +265,7 @@ class CollectionsController < ApplicationController
   end
 
   def update
-    updated_attr = params[:collection].each.select {|a| a[0].andand.start_with? 'rename-file-path:'}
+    updated_attr = params[:collection].to_unsafe_hash.each.select {|a| a[0].andand.start_with? 'rename-file-path:'}
 
     if updated_attr.size > 0
       # Is it file rename?
index 454be448d9d1e7afad061ac983cd38780abd1365..d5627076f5e1d0b0dee23531fbe884dacb16aa64 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class ContainerRequestsController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
index f0e31644f1ba0b5ba8db27ef007be63382c16455..a8549cd5b82e8b9e82420d7f7478b2c3a9660247 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class ContainersController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
index 60043d9024c223558cabc9cfc51a1d2522e6e1f4..7afe4032a2c19f726465e8aae7b1f6cdf55dafc0 100644 (file)
@@ -3,17 +3,17 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class HealthcheckController < ApplicationController
-  skip_around_filter :thread_clear
-  skip_around_filter :set_thread_api_token
-  skip_around_filter :require_thread_api_token
-  skip_before_filter :ensure_arvados_api_exists
-  skip_before_filter :accept_uuid_as_id_param
-  skip_before_filter :check_user_agreements
-  skip_before_filter :check_user_profile
-  skip_before_filter :load_filters_and_paging_params
-  skip_before_filter :find_object_by_uuid
+  skip_around_action :thread_clear
+  skip_around_action :set_thread_api_token
+  skip_around_action :require_thread_api_token
+  skip_before_action :ensure_arvados_api_exists
+  skip_before_action :accept_uuid_as_id_param
+  skip_before_action :check_user_agreements
+  skip_before_action :check_user_profile
+  skip_before_action :load_filters_and_paging_params
+  skip_before_action :find_object_by_uuid
 
-  before_filter :check_auth_header
+  before_action :check_auth_header
 
   def check_auth_header
     mgmt_token = Rails.configuration.ManagementToken
index 204dbb76dfa545a19a3c3124b91acd58b6b77607..e38d3ba87b3e40e6df08e4d6150a2a3c392220a7 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class JobsController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
index 512f0a32e710e9037c167c05676733d5aa373cd3..7e413284bbc3ebb421434fc7e0748d0c897a7593 100644 (file)
@@ -3,5 +3,5 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class LogsController < ApplicationController
-  before_filter :ensure_current_user_is_admin
+  before_action :ensure_current_user_is_admin
 end
index 93bb86961386c1e9a299ec93057b2c046b1080bc..26a9f85d4e4e890b85ab722151737afe12d2dd12 100644 (file)
@@ -3,9 +3,9 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class PipelineInstancesController < ApplicationController
-  skip_before_filter :find_object_by_uuid, only: :compare
-  before_filter :find_objects_by_uuid, only: :compare
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_before_action :find_object_by_uuid, only: :compare
+  before_action :find_objects_by_uuid, only: :compare
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
@@ -67,7 +67,7 @@ class PipelineInstancesController < ApplicationController
   end
 
   def update
-    @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+    @updates ||= params.to_unsafe_hash[@object.class.to_s.underscore.singularize.to_sym]
     if (components = @updates[:components])
       components.each do |cname, component|
         if component[:script_parameters]
index 7d94e3469a10792f148a32e5c3a84e67c564dbc4..c497c70d434c4b67edb1dab42c2558bdd8242b16 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class PipelineTemplatesController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
index 4a7563a959d88cf70157c0b686c734ad7a66c6ff..cc657cbad92406d9887eb5f0a2173415aa8ff51e 100644 (file)
@@ -3,8 +3,8 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class ProjectsController < ApplicationController
-  before_filter :set_share_links, if: -> { defined? @object and @object}
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  before_action :set_share_links, if: -> { defined? @object and @object}
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     %w(show tab_counts public).include? ctrl.action_name
   }
index 5ca6f22b02f54bf2ea3b213b7106717cd10c845b..6ef541ebab23a23159d834ef117692812daa5f9e 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class RepositoriesController < ApplicationController
-  before_filter :set_share_links, if: -> { defined? @object }
+  before_action :set_share_links, if: -> { defined? @object }
 
   def index_pane_list
     %w(repositories help)
index 3775abd1ae9f1117926d7bde8c847fc32ad0cd60..80f3ff117a583c3e6b96df76b08f6d75e5dc4cd9 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class SearchController < ApplicationController
-  skip_before_filter :ensure_arvados_api_exists
+  skip_before_action :ensure_arvados_api_exists
 
   def find_objects_for_index
     search_what = Group
index 48fbc6bd04a708326d2dfe0f01afd5ae5383c12f..bff0f9f1c96ed8267470c350e24f92d268f5baf5 100644 (file)
@@ -3,11 +3,11 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class SessionsController < ApplicationController
-  skip_around_filter :require_thread_api_token, :only => [:destroy, :logged_out]
-  skip_around_filter :set_thread_api_token, :only => [:destroy, :logged_out]
-  skip_before_filter :find_object_by_uuid
-  skip_before_filter :find_objects_for_index
-  skip_before_filter :ensure_arvados_api_exists
+  skip_around_action :require_thread_api_token, :only => [:destroy, :logged_out]
+  skip_around_action :set_thread_api_token, :only => [:destroy, :logged_out]
+  skip_before_action :find_object_by_uuid
+  skip_before_action :find_objects_for_index, raise: false
+  skip_before_action :ensure_arvados_api_exists
 
   def destroy
     session.clear
index 90b7be564d4d12c5d8c593ea0ec0040a16a1c9a4..0e45daa1df095d1ec7829f79886a206ea28dd9e6 100644 (file)
@@ -2,9 +2,11 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+require "app_version"
+
 class StatusController < ApplicationController
-  skip_around_filter :require_thread_api_token
-  skip_before_filter :find_object_by_uuid
+  skip_around_action :require_thread_api_token
+  skip_before_action :find_object_by_uuid
   def status
     # Allow non-credentialed cross-origin requests
     headers['Access-Control-Allow-Origin'] = '*'
index 5d2de4e5a974698455a91925c591c1391e5fccb4..73c1f4f34a6b2e8a760d5dad03bfc074e699b769 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class TestsController < ApplicationController
-  skip_before_filter :find_object_by_uuid
+  skip_before_action :find_object_by_uuid
   def mithril
   end
 end
index 2797c4c682fae377d92891a5072bea656148a900..bdfaa240335922e3b616bf4c10d791f34ded70ac 100644 (file)
@@ -3,9 +3,9 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class UserAgreementsController < ApplicationController
-  skip_before_filter :check_user_agreements
-  skip_before_filter :find_object_by_uuid
-  skip_before_filter :check_user_profile
+  skip_before_action :check_user_agreements
+  skip_before_action :find_object_by_uuid
+  skip_before_action :check_user_profile
 
   def index
     if unsigned_user_agreements.empty?
index c954944e0b4b8ad75ff75a805caf2927893a8c50..d934af796509e2c172dcda8d2941daf889acd716 100644 (file)
@@ -3,11 +3,11 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class UsersController < ApplicationController
-  skip_around_filter :require_thread_api_token, only: :welcome
-  skip_before_filter :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
-  skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
-  skip_before_filter :find_object_by_uuid, only: [:welcome, :activity, :storage]
-  before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
+  skip_around_action :require_thread_api_token, only: :welcome
+  skip_before_action :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
+  skip_before_action :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
+  skip_before_action :find_object_by_uuid, only: [:welcome, :activity, :storage]
+  before_action :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
 
   def show
     if params[:uuid] == current_user.uuid
index e6fa5afade6180d1c2d65757b7518a84e06dd6d3..35993dc20edfcbb3e0397979a790c68177aa7cea 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class WebsocketController < ApplicationController
-  skip_before_filter :find_objects_for_index
+  skip_before_action :find_objects_for_index, raise: false
 
   def index
   end
index d3ded867c198f5c265fafb7b49a89d50e1515fc9..0f0033ce4965663ef76a7f2e479d8e38d7642dfb 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class WorkUnitsController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show_child_component' == ctrl.action_name
   }
index 3b98413e23a2224ef8da829f3ba8c6255d1adced..b7f99e855e69dab591e633d9a07a54baa6f03282 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class WorkflowsController < ApplicationController
-  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+  skip_around_action :require_thread_api_token, if: proc { |ctrl|
     Rails.configuration.anonymous_user_token and
     'show' == ctrl.action_name
   }
index 4c4b5ff34df52c471fa2ceaf566e8f9a5b606d02..3f72d5a2aae7015f00f6a4526aed33d65c811455 100644 (file)
@@ -25,7 +25,7 @@ module ApplicationHelper
   end
 
   def human_readable_bytes_html(n)
-    return h(n) unless n.is_a? Fixnum
+    return h(n) unless n.is_a? Integer
     return "0 bytes" if (n == 0)
 
     orders = {
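
Note: the Fixnum -> Integer change above tracks Ruby 2.4, which unified Fixnum and Bignum into a single Integer class and deprecated the old constants. A quick sketch:

    # Ruby >= 2.4: every whole number is an Integer, regardless of size.
    n = 2**100
    puts n.is_a?(Integer)   # => true
    puts 42.class           # => Integer (Fixnum warns on 2.4-3.1, removed in 3.2)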
diff --git a/apps/workbench/app/models/application_record.rb b/apps/workbench/app/models/application_record.rb
new file mode 100644 (file)
index 0000000..759034d
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApplicationRecord < ActiveRecord::Base
+  self.abstract_class = true
+end
\ No newline at end of file
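
Note: app/models/application_record.rb follows the Rails 5.0 convention of a shared abstract base class; database-backed models inherit from it instead of ActiveRecord::Base directly. A sketch with a hypothetical model:

    # Hypothetical model, shown only to illustrate the inheritance convention.
    class Widget < ApplicationRecord
      # Gets app-wide model behavior from the abstract base class.
    end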
index d7a65bdcee182d61aa47fe56bd4f648811de012d..9e3ea46b10b6504ee16bdc261afa32a62757e65a 100644 (file)
@@ -2,11 +2,53 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-class ArvadosBase < ActiveRecord::Base
-  self.abstract_class = true
+class ArvadosBase
+  include ActiveModel::Validations
+  include ActiveModel::Conversion
+  include ActiveModel::Serialization
+  include ActiveModel::Dirty
+  include ActiveModel::AttributeAssignment
+  extend ActiveModel::Naming
+
+  Column = Struct.new("Column", :name)
+
   attr_accessor :attribute_sortkey
   attr_accessor :create_params
 
+  class Error < StandardError; end
+
+  module Type
+    class Hash < ActiveModel::Type::Value
+      def type
+        :hash
+      end
+
+      def default_value
+        {}
+      end
+
+      private
+      def cast_value(value)
+        (value.class == String) ? ::JSON.parse(value) : value
+      end
+    end
+
+    class Array < ActiveModel::Type::Value
+      def type
+        :array
+      end
+
+      def default_value
+        []
+      end
+
+      private
+      def cast_value(value)
+        (value.class == String) ? ::JSON.parse(value) : value
+      end
+    end
+  end
+
   def self.arvados_api_client
     ArvadosApiClient.new_or_current
   end
@@ -35,7 +77,7 @@ class ArvadosBase < ActiveRecord::Base
   end
 
   def initialize raw_params={}, create_params={}
-    super self.class.permit_attribute_params(raw_params)
+    self.class.permit_attribute_params(raw_params)
     @create_params = create_params
     @attribute_sortkey ||= {
       'id' => nil,
@@ -58,6 +100,10 @@ class ArvadosBase < ActiveRecord::Base
       'uuid' => '999',
     }
     @loaded_attributes = {}
+    attributes = self.class.columns.map { |c| [c.name.to_sym, nil] }.to_h.merge(raw_params)
+    attributes.symbolize_keys.each do |name, value|
+      send("#{name}=", value)
+    end
   end
 
   def self.columns
@@ -77,29 +123,70 @@ class ArvadosBase < ActiveRecord::Base
         else
           # Hash, Array
           @discovered_columns << column(k, coldef[:type], coldef[:type].constantize.new)
-          serialize k, coldef[:type].constantize
-        end
-        define_method k do
-          unless new_record? or @loaded_attributes.include? k.to_s
-            Rails.logger.debug "BUG: access non-loaded attribute #{k}"
-            # We should...
-            # raise ActiveModel::MissingAttributeError, "missing attribute: #{k}"
-          end
-          super()
         end
+        attr_reader k
         @attribute_info[k] = coldef
       end
     end
     @discovered_columns
   end
 
+  def new_record?
+    # dup method doesn't reset the uuid attr
+    @uuid.nil? || @new_record || false
+  end
+
+  def initialize_dup(other)
+    super
+    @new_record = true
+    @created_at = nil
+  end
+
   def self.column(name, sql_type = nil, default = nil, null = true)
-    if sql_type == 'datetime'
-      cast_type = "ActiveRecord::Type::DateTime".constantize.new
-    else
-      cast_type = ActiveRecord::Base.connection.lookup_cast_type(sql_type)
+    caster = case sql_type
+              when 'integer'
+                ActiveModel::Type::Integer
+              when 'string', 'text'
+                ActiveModel::Type::String
+              when 'float'
+                ActiveModel::Type::Float
+              when 'datetime'
+                ActiveModel::Type::DateTime
+              when 'boolean'
+                ActiveModel::Type::Boolean
+              when 'Hash'
+                ArvadosBase::Type::Hash
+              when 'Array'
+                ArvadosBase::Type::Array
+              when 'jsonb'
+                ArvadosBase::Type::Hash
+              else
+                raise ArvadosBase::Error.new("Type unknown: #{sql_type}")
+            end
+    define_method "#{name}=" do |val|
+      val = default if val.nil?
+      casted_value = caster.new.cast(val)
+      attribute_will_change!(name) if send(name) != casted_value
+      set_attribute_after_cast(name, casted_value)
     end
-    ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, cast_type, sql_type.to_s, null)
+    Column.new(name.to_s)
+  end
+
+  def set_attribute_after_cast(name, casted_value)
+    instance_variable_set("@#{name}", casted_value)
+  end
+
+  def [](attr_name)
+    begin
+      send(attr_name)
+    rescue
+      Rails.logger.debug "BUG: access non-loaded attribute #{attr_name}"
+      nil
+    end
+  end
+
+  def []=(attr_name, attr_val)
+    send("#{attr_name}=", attr_val)
   end
 
   def self.attribute_info
@@ -185,17 +272,36 @@ class ArvadosBase < ActiveRecord::Base
     # The following permit! is necessary even with
     # "ActionController::Parameters.permit_all_parameters = true",
     # because permit_all does not permit nested attributes.
-    ActionController::Parameters.new(raw_params).permit!
+    if !raw_params.is_a? ActionController::Parameters
+      raw_params = ActionController::Parameters.new(raw_params)
+    end
+    raw_params.permit!
   end
 
   def self.create raw_params={}, create_params={}
-    x = super(permit_attribute_params(raw_params))
-    x.create_params = create_params
+    x = new(permit_attribute_params(raw_params), create_params)
+    x.save
     x
   end
 
+  def self.create! raw_params={}, create_params={}
+    x = new(permit_attribute_params(raw_params), create_params)
+    x.save!
+    x
+  end
+
+  def self.table_name
+    self.name.underscore.pluralize.downcase
+  end
+
   def update_attributes raw_params={}
-    super(self.class.permit_attribute_params(raw_params))
+    assign_attributes(self.class.permit_attribute_params(raw_params))
+    save
+  end
+
+  def update_attributes! raw_params={}
+    assign_attributes(self.class.permit_attribute_params(raw_params))
+    save!
   end
 
   def save
@@ -219,7 +325,10 @@ class ArvadosBase < ActiveRecord::Base
       obdata.delete :uuid
       resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
     else
-      postdata.merge!(@create_params) if @create_params
+      if @create_params
+        @create_params = @create_params.to_unsafe_hash if @create_params.is_a? ActionController::Parameters
+        postdata.merge!(@create_params)
+      end
       resp = arvados_api_client.api(self.class, '', postdata)
     end
     return false if !resp[:etag] || !resp[:uuid]
@@ -245,6 +354,14 @@ class ArvadosBase < ActiveRecord::Base
     self.save or raise Exception.new("Save failed")
   end
 
+  def persisted?
+    (!new_record? && !destroyed?) ? true : false
+  end
+
+  def destroyed?
+    !(new_record? || etag || uuid)
+  end
+
   def destroy
     if etag || uuid
       postdata = { '_method' => 'DELETE' }
@@ -333,6 +450,11 @@ class ArvadosBase < ActiveRecord::Base
     forget_uuid!
   end
 
+  def attributes
+    kv = self.class.columns.collect {|c| c.name}.map {|key| [key, send(key)]}
+    kv.to_h
+  end
+
   def attributes_for_display
     self.attributes.reject { |k,v|
       attribute_sortkey.has_key?(k) and !attribute_sortkey[k]
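
Note: the largest change in this file swaps ArvadosBase's ActiveRecord::Base parent for a plain Ruby class assembled from ActiveModel modules, since Workbench models are backed by the Arvados API rather than a local database table. A minimal sketch of the same pattern, using a hypothetical Item class rather than code from this commit:

    require 'active_model'

    class Item
      include ActiveModel::Model
      include ActiveModel::Dirty

      define_attribute_methods :name
      attr_reader :name

      # Hand-rolled writer with casting and dirty tracking, mirroring the
      # define_method("#{name}=") approach in the diff above.
      def name=(val)
        casted = ActiveModel::Type::String.new.cast(val)
        name_will_change! if casted != @name
        @name = casted
      end
    end

    item = Item.new(name: :foo)
    puts item.name      # => "foo"
    puts item.changed?  # => true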
index 865ff6e9519cacf613b248df446fd4a1e0b24636..34e8181515c887fbe9e09659d09ebee4ab40f24f 100644 (file)
@@ -109,7 +109,7 @@ class User < ArvadosBase
     false
   end
 
-   def self.creatable?
+  def self.creatable?
     current_user and current_user.is_admin
-   end
+  end
 end
index 7f3542083e91180f9c943e3a93c16bf416c2930b..c4656e659d51cd08b2d75f8ad1859ad5aa7e1ba7 100644 (file)
@@ -56,7 +56,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
            <% else %>
              data-object-uuid="<%= @object.uuid %>"
            <% end %>
-           data-pane-content-url="<%= url_for(params.merge(tab_pane: pane_name)) %>"
+           data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: pane_name)) %>"
            style="margin-top:0.5em;"
            >
         <div class="pane-content">
index 1bf8065c31c804148b49bcd90ed80bd1ce91c497..4ac7601c8e57d91a3e460b559d7c0db7e1274b5a 100644 (file)
@@ -4,7 +4,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
 
 <% content_for :tab_line_buttons do %>
   <div class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
-       data-pane-content-url="<%= url_for(params.merge(tab_pane: "job_buttons")) %>"
+       data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: "job_buttons")) %>"
        data-object-uuid="<%= @object.uuid %>"
        style="display: inline">
   <%= render partial: 'show_job_buttons', locals: {object: @object}%>
index 881d77102c656e1c7408d72c3d970e32dd5c89ad..e573bf52a23554fc35bb0ef143b6464375f0bbb7 100644 (file)
@@ -23,7 +23,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
 
   <div id="pipeline-instance-tab-buttons"
        class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
-       data-pane-content-url="<%= url_for(params.merge(tab_pane: "tab_buttons")) %>"
+       data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: "tab_buttons")) %>"
        data-object-uuid="<%= @object.uuid %>"
        >
     <%= render partial: 'show_tab_buttons', locals: {object: @object}%>
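
Note: the params.permit! calls added to these three views are needed because Rails 5.0, with action_controller.raise_on_unfiltered_parameters enabled (see the new_framework_defaults.rb initializer later in this commit), refuses to build URLs from unfiltered parameters. A minimal sketch, assuming a view context:

    # Rails 4: url_for(params.merge(tab_pane: 'x')) worked directly.
    # Rails 5: permit the parameters (or convert to a plain hash) first.
    url_for(params.permit!.merge(tab_pane: 'x'))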
diff --git a/apps/workbench/bin/bundle b/apps/workbench/bin/bundle
new file mode 100755 (executable)
index 0000000..9447ba8
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+load Gem.bin_path('bundler', 'bundle')
diff --git a/apps/workbench/bin/rails b/apps/workbench/bin/rails
new file mode 100755 (executable)
index 0000000..4ab9539
--- /dev/null
@@ -0,0 +1,8 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+APP_PATH = File.expand_path('../config/application', __dir__)
+require_relative '../config/boot'
+require 'rails/commands'
diff --git a/apps/workbench/bin/rake b/apps/workbench/bin/rake
new file mode 100755 (executable)
index 0000000..c69c1c4
--- /dev/null
@@ -0,0 +1,8 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require_relative '../config/boot'
+require 'rake'
+Rake.application.run
diff --git a/apps/workbench/bin/setup b/apps/workbench/bin/setup
new file mode 100755 (executable)
index 0000000..50c3fa0
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'pathname'
+require 'fileutils'
+include FileUtils
+
+# path to your application root.
+APP_ROOT = Pathname.new File.expand_path('../../', __FILE__)
+
+def system!(*args)
+  system(*args) || abort("\n== Command #{args} failed ==")
+end
+
+chdir APP_ROOT do
+  # This script is a starting point to setup your application.
+  # Add necessary setup steps to this file.
+
+  puts '== Installing dependencies =='
+  system! 'gem install bundler --conservative'
+  system('bundle check') || system!('bundle install')
+
+  # puts "\n== Copying sample files =="
+  # unless File.exist?('config/database.yml')
+  #   cp 'config/database.yml.sample', 'config/database.yml'
+  # end
+
+  puts "\n== Preparing database =="
+  system! 'bin/rails db:setup'
+
+  puts "\n== Removing old logs and tempfiles =="
+  system! 'bin/rails log:clear tmp:clear'
+
+  puts "\n== Restarting application server =="
+  system! 'bin/rails restart'
+end
diff --git a/apps/workbench/bin/update b/apps/workbench/bin/update
new file mode 100755 (executable)
index 0000000..b56771e
--- /dev/null
@@ -0,0 +1,33 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'pathname'
+require 'fileutils'
+include FileUtils
+
+# path to your application root.
+APP_ROOT = Pathname.new File.expand_path('../../', __FILE__)
+
+def system!(*args)
+  system(*args) || abort("\n== Command #{args} failed ==")
+end
+
+chdir APP_ROOT do
+  # This script is a way to update your development environment automatically.
+  # Add necessary update steps to this file.
+
+  puts '== Installing dependencies =='
+  system! 'gem install bundler --conservative'
+  system('bundle check') || system!('bundle install')
+
+  puts "\n== Updating database =="
+  system! 'bin/rails db:migrate'
+
+  puts "\n== Removing old logs and tempfiles =="
+  system! 'bin/rails log:clear tmp:clear'
+
+  puts "\n== Restarting application server =="
+  system! 'bin/rails restart'
+end
index ccc7e4bbddaaf8c6396fe33c863b96f1bbf54235..d38742248b52fac025d72bf95f89c86e91c29380 100644 (file)
@@ -66,7 +66,6 @@ production:
   eager_load: true
   consider_all_requests_local: false
   action_controller.perform_caching: true
-  serve_static_files: false
   assets.compile: false
   assets.digest: true
   i18n.fallbacks: true
@@ -89,8 +88,6 @@ production:
 test:
   cache_classes: true
   eager_load: false
-  serve_static_files: true
-  static_cache_control: public, max-age=3600
   consider_all_requests_local: true
   action_controller.perform_caching: false
   action_dispatch.show_exceptions: false
@@ -218,7 +215,7 @@ common:
   # would be enabled in a collection's show page.
   # It is sufficient to list only applications here.
   # No need to list text and image types.
-  application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
+  application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, x-python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
 
   # the maximum number of bytes to load in the log viewer
   log_viewer_max_bytes: 1000000
index 891dd432c0dccfedf4f773cd41748c0222124e88..1c7a9d0dac8866511f795e1fea048d7f4989a300 100644 (file)
@@ -4,7 +4,18 @@
 
 require File.expand_path('../boot', __FILE__)
 
-require 'rails/all'
+require "rails"
+# Pick only the frameworks we need:
+require "active_model/railtie"
+require "active_job/railtie"
+require "active_record/railtie"
+require "action_controller/railtie"
+require "action_mailer/railtie"
+require "action_view/railtie"
+# Skip ActionCable (new in Rails 5.0) as it adds '/cable' routes that we're not using
+# require "action_cable/engine"
+require "sprockets/railtie"
+require "rails/test_unit/railtie"
 
 Bundler.require(:default, Rails.env)
 
@@ -15,8 +26,9 @@ module ArvadosWorkbench
     # -- all .rb files in that directory are automatically loaded.
 
     # Custom directories with classes and modules you want to be autoloadable.
+    # Autoload paths shouldn't be used anymore since Rails 5.0
+    # See #15258 and https://github.com/rails/rails/issues/13142#issuecomment-74586224
     # config.autoload_paths += %W(#{config.root}/extras)
-    config.autoload_paths += %W(#{config.root}/lib)
 
     # Only load the plugins named here, in the order given (default is alphabetical).
     # :all can be used as a placeholder for all plugins not explicitly named.
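
Note: replacing require 'rails/all' with per-framework railtie requires keeps ActionCable (new in Rails 5.0) and its /cable routes out of the app, as the diff comment explains. A quick sanity-check sketch, runnable from the app console under that assumption:

    # With action_cable/engine left unrequired, the constant stays undefined.
    puts defined?(ActionCable) ? 'ActionCable loaded' : 'ActionCable not loaded'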
diff --git a/apps/workbench/config/cable.yml b/apps/workbench/config/cable.yml
new file mode 100644 (file)
index 0000000..c906069
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+development:
+  adapter: async
+
+test:
+  adapter: async
+
+production:
+  adapter: redis
+  url: redis://localhost:6379/1
index d6b6a00e8dd158918394fea1f1c2d99aa8805834..cd706940a389752fd6263bb32fc82a057fc3c583 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 # Load the rails application
-require File.expand_path('../application', __FILE__)
+require_relative 'application'
 
 # Initialize the rails application
-ArvadosWorkbench::Application.initialize!
+Rails.application.initialize!
index 8b656c5a7746519eaffe4fdf13a8a50a9145efd2..ea2cf34e3dbe5d17b438ce6ab4a88e376b3b8be9 100644 (file)
@@ -13,7 +13,7 @@ ArvadosWorkbench::Application.configure do
   config.action_controller.perform_caching = true
 
   # Disable Rails's static asset server (Apache or nginx will already do this)
-  config.serve_static_files = false
+  config.public_file_server.enabled = false
 
   # Compress JavaScripts and CSS
   config.assets.js_compressor = :uglifier
index 7ce5082701274c0564dd3b22a73375a656fa08a5..373618c1d66a081c82fdf340649f6f23d887d050 100644 (file)
@@ -12,8 +12,8 @@ ArvadosWorkbench::Application.configure do
   config.cache_classes = true
 
   # Configure static asset server for tests with Cache-Control for performance
-  config.serve_static_files = true
-  config.static_cache_control = "public, max-age=3600"
+  config.public_file_server.enabled = true
+  config.public_file_server.headers = { 'Cache-Control' => 'public, max-age=3600' }
 
   # Show full error reports and disable caching
   config.consider_all_requests_local       = true
diff --git a/apps/workbench/config/initializers/application_controller_renderer.rb b/apps/workbench/config/initializers/application_controller_renderer.rb
new file mode 100644 (file)
index 0000000..525d6ad
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# ActiveSupport::Reloader.to_prepare do
+#   ApplicationController.renderer.defaults.merge!(
+#     http_host: 'example.org',
+#     https: false
+#   )
+# end
diff --git a/apps/workbench/config/initializers/assets.rb b/apps/workbench/config/initializers/assets.rb
new file mode 100644 (file)
index 0000000..f02c87b
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Version of your assets, change this if you want to expire all your assets.
+Rails.application.config.assets.version = '1.0'
+
+# Add additional assets to the asset load path
+# Rails.application.config.assets.paths << Emoji.images_path
+
+# Precompile additional assets.
+# application.js, application.css, and all non-JS/CSS in app/assets folder are already added.
+# Rails.application.config.assets.precompile += %w( search.js )
diff --git a/apps/workbench/config/initializers/cookies_serializer.rb b/apps/workbench/config/initializers/cookies_serializer.rb
new file mode 100644 (file)
index 0000000..5409f55
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Specify a serializer for the signed and encrypted cookie jars.
+# Valid options are :json, :marshal, and :hybrid.
+Rails.application.config.action_dispatch.cookies_serializer = :marshal
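
Keeping :marshal preserves session cookies issued before the upgrade. The documented migration path to the new :json default is the :hybrid serializer, which reads existing Marshal cookies and rewrites them as JSON on the next request:

    Rails.application.config.action_dispatch.cookies_serializer = :hybrid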
diff --git a/apps/workbench/config/initializers/filter_parameter_logging.rb b/apps/workbench/config/initializers/filter_parameter_logging.rb
new file mode 100644 (file)
index 0000000..f26d0ad
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Configure sensitive parameters which will be filtered from the log file.
+Rails.application.config.filter_parameters += [:password]
diff --git a/apps/workbench/config/initializers/new_framework_defaults.rb b/apps/workbench/config/initializers/new_framework_defaults.rb
new file mode 100644 (file)
index 0000000..b8dca33
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+#
+# This file contains migration options to ease your Rails 5.0 upgrade.
+#
+# Once upgraded, flip these defaults one by one to migrate to the new behavior.
+#
+# Read the Guide for Upgrading Ruby on Rails for more info on each option.
+
+Rails.application.config.action_controller.raise_on_unfiltered_parameters = true
+
+# Per-form CSRF tokens (new Rails 5 default: true). Kept at the previous default, false.
+Rails.application.config.action_controller.per_form_csrf_tokens = false
+
+# Origin-checking CSRF mitigation (new Rails 5 default: true). Kept at the previous default, false.
+Rails.application.config.action_controller.forgery_protection_origin_check = false
+
+# Make Ruby 2.4 preserve the timezone of the receiver when calling `to_time`
+# (new Rails 5 default: true). Kept at the previous default, false.
+ActiveSupport.to_time_preserves_timezone = false
+
+# Require `belongs_to` associations by default (new Rails 5 default: true). Kept at the previous default, false.
+Rails.application.config.active_record.belongs_to_required_by_default = false
+
+# Halt callback chains when a callback returns false, the pre-5.0 behavior.
+# The new Rails 5 default is false (do not halt); kept at true for now.
+ActiveSupport.halt_callback_chains_on_return_false = true
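
Each assignment above pins the pre-5.0 behavior. Finishing the migration would mean flipping them to the new defaults one at a time, verifying the app after each; the end state would be roughly:

    Rails.application.config.action_controller.per_form_csrf_tokens = true
    Rails.application.config.action_controller.forgery_protection_origin_check = true
    ActiveSupport.to_time_preserves_timezone = true
    Rails.application.config.active_record.belongs_to_required_by_default = true
    ActiveSupport.halt_callback_chains_on_return_false = false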
index b53e9ef9155cc28f0b902c985493447fc15ff570..7a2f297207691f37df4d4ac75543bfa0b969d00c 100644 (file)
@@ -4,7 +4,7 @@
 
 # Be sure to restart your server when you modify this file.
 
-ArvadosWorkbench::Application.config.session_store :cookie_store, key: '_arvados_workbench_session'
+Rails.application.config.session_store :cookie_store, key: '_arvados_workbench_session'
 
 # Use the database for sessions instead of the cookie-based default,
 # which shouldn't be used to store highly confidential information
index f9096486c1b273c0350ade315555942b5ef36949..0a8f07c4bed0c1bedf3e565ff63b49f4318f847f 100644 (file)
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+require 'config_validators'
+
 include ConfigValidators
 
 ConfigValidators::validate_wb2_url_config()
\ No newline at end of file
diff --git a/apps/workbench/config/puma.rb b/apps/workbench/config/puma.rb
new file mode 100644 (file)
index 0000000..e087396
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Puma can serve each request in a thread from an internal thread pool.
+# The `threads` method setting takes two numbers: a minimum and a maximum.
+# Any libraries that use thread pools should be configured to match
+# the maximum value specified for Puma. The default is 5 threads for both
+# minimum and maximum; this matches the default thread pool size of Active Record.
+#
+threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 }.to_i
+threads threads_count, threads_count
+
+# Specifies the `port` that Puma will listen on to receive requests; the default is 3000.
+#
+port        ENV.fetch("PORT") { 3000 }
+
+# Specifies the `environment` that Puma will run in.
+#
+environment ENV.fetch("RAILS_ENV") { "development" }
+
+# Specifies the number of `workers` to boot in clustered mode.
+# Workers are forked webserver processes. If using threads and workers together,
+# the maximum concurrency of the application is `threads` * `workers`.
+# Workers do not work on JRuby or Windows (neither of which supports
+# forked processes).
+#
+# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
+
+# Use the `preload_app!` method when specifying a `workers` number.
+# This directive tells Puma to first boot the application and load code
+# before forking the application. This takes advantage of Copy On Write
+# process behavior so workers use less memory. If you use this option,
+# make sure to reconnect any threads or connections in the
+# `on_worker_boot` block.
+#
+# preload_app!
+
+# The code in the `on_worker_boot` block is called only in clustered mode,
+# i.e. when a number of `workers` is specified, and runs after each worker
+# process is booted. If you are using the `preload_app!` option, use this
+# block to reconnect any threads or connections that may have been created
+# at application boot, since Ruby cannot share connections between
+# processes.
+#
+# on_worker_boot do
+#   ActiveRecord::Base.establish_connection if defined?(ActiveRecord)
+# end
+
+# Allow Puma to be restarted by the `rails restart` command.
+plugin :tmp_restart
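
Uncommenting the clustered-mode options described above combines processes and threads: with the suggested 2 workers and the default 5 threads each, up to 2 * 5 = 10 requests can be served concurrently. A sketch of that configuration, under the file's own assumptions:

    workers ENV.fetch("WEB_CONCURRENCY") { 2 }
    preload_app!

    on_worker_boot do
      # Each forked worker needs its own database connections; Ruby cannot
      # share them between processes.
      ActiveRecord::Base.establish_connection if defined?(ActiveRecord)
    end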
diff --git a/apps/workbench/config/secrets.yml b/apps/workbench/config/secrets.yml
new file mode 100644 (file)
index 0000000..bc8a0d0
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Your secret key is used for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+
+# Make sure the secret is at least 30 characters long and fully random;
+# if it contains regular words, you'll be exposed to dictionary attacks.
+# You can use `rails secret` to generate a secure secret key.
+
+# Make sure the secrets in this file are kept private
+# if you're sharing your code publicly.
+
+development:
+  secret_key_base: 33e2d171ec6c67cf8e9a9fbfadc1071328bdab761297e2fe28b9db7613dd542c1ba3bdb3bd3e636d1d6f74ab73a2d90c4e9c0ecc14fde8ccd153045f94e9cc41
+
+test:
+  secret_key_base: d4c07cab3530fccf5d86565ecdc359eb2a853b8ede3b06edb2885e4423d7a726f50a3e415bb940fd4861e8fec16459665fd377acc8cdd98ea63294d2e0d12bb2
+
+# Do not keep production secrets in the repository,
+# instead read values from the environment.
+production:
+  secret_key_base: <%= ENV["SECRET_KEY_BASE"] %>
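
rails secret prints a long random hex string suitable for secret_key_base; an equivalent stdlib one-liner, for illustration:

    require 'securerandom'
    puts SecureRandom.hex(64)  # 128 hex characters, comfortably over the 30 minimum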
diff --git a/apps/workbench/config/spring.rb b/apps/workbench/config/spring.rb
new file mode 100644 (file)
index 0000000..101e684
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+%w(
+  .ruby-version
+  .rbenv-vars
+  tmp/restart.txt
+  tmp/caching-dev.txt
+).each { |path| Spring.watch(path) }
index e768c6c7426d4db4bd7cd151d33ab4ebd3241878..86aa304e472324f992b7e72b5b1e7d27580f38e3 100644 (file)
@@ -7,7 +7,7 @@ require 'test_helper'
 class ActionsControllerTest < ActionController::TestCase
 
   test "send report" do
-    post :report_issue, {format: 'js'}, session_for(:admin)
+    post :report_issue, params: {format: 'js'}, session: session_for(:admin)
     assert_response :success
 
     found_email = false
@@ -21,13 +21,13 @@ class ActionsControllerTest < ActionController::TestCase
   end
 
   test "combine files into new collection" do
-    post(:combine_selected_files_into_collection, {
+    post(:combine_selected_files_into_collection, params: {
            selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
                        'zzzzz-4zz18-ehbhgtheo8909or/bar',
                        'zzzzz-4zz18-y9vne9npefyxh8g/baz',
                        '7a6ef4c162a5c6413070a8bd0bffc818+150'],
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
 
     assert_response 302   # collection created and redirected to new collection page
 
@@ -46,7 +46,7 @@ class ActionsControllerTest < ActionController::TestCase
   end
 
   test "combine files  with repeated names into new collection" do
-    post(:combine_selected_files_into_collection, {
+    post(:combine_selected_files_into_collection, params: {
            selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
                        'zzzzz-4zz18-00000nonamecoll/foo',
                        'zzzzz-4zz18-abcd6fx123409f7/foo',
@@ -54,7 +54,7 @@ class ActionsControllerTest < ActionController::TestCase
                        'zzzzz-4zz18-y9vne9npefyxh8g/baz',
                        '7a6ef4c162a5c6413070a8bd0bffc818+150'],
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
 
     assert_response 302   # collection created and redirected to new collection page
 
@@ -74,13 +74,13 @@ class ActionsControllerTest < ActionController::TestCase
   end
 
   test "combine collections with repeated filenames in almost similar directories and expect files with proper suffixes" do
-    post(:combine_selected_files_into_collection, {
+    post(:combine_selected_files_into_collection, params: {
            selection: ['zzzzz-4zz18-duplicatenames1',
                        'zzzzz-4zz18-duplicatenames2',
                        'zzzzz-4zz18-znfnqtbbv4spc3w/foo',
                        'zzzzz-4zz18-00000nonamecoll/foo',],
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
 
     assert_response 302   # collection created and redirected to new collection page
 
@@ -116,11 +116,11 @@ class ActionsControllerTest < ActionController::TestCase
   end
 
   test "combine collections with same filename in two different streams and expect no suffixes for filenames" do
-    post(:combine_selected_files_into_collection, {
+    post(:combine_selected_files_into_collection, params: {
            selection: ['zzzzz-4zz18-znfnqtbbv4spc3w',
                        'zzzzz-4zz18-foonbarfilesdir'],
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
 
     assert_response 302   # collection created and redirected to new collection page
 
@@ -144,11 +144,11 @@ class ActionsControllerTest < ActionController::TestCase
   end
 
   test "combine foo files from two different collection streams and expect proper filename suffixes" do
-    post(:combine_selected_files_into_collection, {
+    post(:combine_selected_files_into_collection, params: {
            selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
                        'zzzzz-4zz18-foonbarfilesdir/dir1/foo'],
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
 
     assert_response 302   # collection created and redirected to new collection page
 
@@ -174,7 +174,7 @@ class ActionsControllerTest < ActionController::TestCase
   ].each do |dm, fixture|
     test "access show method for public #{dm} and expect to see page" do
       Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
-      get(:show, {uuid: api_fixture(dm)[fixture]['uuid']})
+      get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
       assert_response :redirect
       if dm == 'groups'
         assert_includes @response.redirect_url, "projects/#{fixture['uuid']}"
@@ -194,7 +194,7 @@ class ActionsControllerTest < ActionController::TestCase
   ].each do |dm, fixture, expected|
     test "access show method for non-public #{dm} and expect #{expected}" do
       Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
-      get(:show, {uuid: api_fixture(dm)[fixture]['uuid']})
+      get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
       assert_response expected
       if expected == 404
         assert_includes @response.inspect, 'Log in'
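
The pattern running through this and the remaining test diffs is the Rails 5 controller-test API: request helpers now take params: and session: keyword arguments, replacing the positional hashes that were deprecated in Rails 5.0 and removed in 5.1. Schematically:

    # Rails 4.x, positional (deprecated, then removed):
    #   get :show, {id: uuid}, session_for(:active)
    # Rails 5.x, keyword arguments:
    get :show, params: {id: uuid}, session: session_for(:active)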
index 45952ceba3ef46fa14378c8e3351fc79d7031df0..1b13d8f328def28af9064afff27191a6052195fe 100644 (file)
@@ -334,7 +334,7 @@ class ApplicationControllerTest < ActionController::TestCase
     # We're really testing ApplicationController's find_object_by_uuid.
     # It's easiest to do that by instantiating a concrete controller.
     @controller = NodesController.new
-    get(:show, {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session_for(:admin))
+    get(:show, params: {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session: session_for(:admin))
     assert_response 404
   end
 
@@ -350,7 +350,7 @@ class ApplicationControllerTest < ActionController::TestCase
       api_fixture("api_client_authorizations", "anonymous", "api_token")
     @controller = ProjectsController.new
     test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
-    get(:show, {id: test_uuid})
+    get(:show, params: {id: test_uuid})
 
     assert_not_nil got_header
     assert_includes got_header, 'X-Request-Id'
@@ -359,13 +359,13 @@ class ApplicationControllerTest < ActionController::TestCase
 
   test "current request_id is nil after a request" do
     @controller = NodesController.new
-    get(:index, {}, session_for(:active))
+    get(:index, params: {}, session: session_for(:active))
     assert_nil Thread.current[:request_id]
   end
 
   test "X-Request-Id header" do
     @controller = NodesController.new
-    get(:index, {}, session_for(:active))
+    get(:index, params: {}, session: session_for(:active))
     assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']
   end
 
@@ -378,7 +378,7 @@ class ApplicationControllerTest < ActionController::TestCase
         api_fixture("api_client_authorizations", "anonymous", "api_token")
       @controller = ProjectsController.new
       test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
-      get(:show, {id: test_uuid})
+      get(:show, params: {id: test_uuid})
       login_link = css_select(css_selector).first
       assert_not_nil(login_link, "failed to select login link")
       login_href = URI.unescape(login_link.attributes["href"].value)
@@ -399,7 +399,7 @@ class ApplicationControllerTest < ActionController::TestCase
       # network.  100::/64 is the IPv6 discard prefix, so it's perfect.
       Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
       @controller = NodesController.new
-      get(:index, {}, session_for(:active))
+      get(:index, params: {}, session: session_for(:active))
       assert_includes(405..422, @response.code.to_i,
                       "bad response code when API server is unreachable")
     ensure
@@ -428,7 +428,7 @@ class ApplicationControllerTest < ActionController::TestCase
 
       @controller = controller
 
-      get(:show, {id: fixture['uuid']})
+      get(:show, params: {id: fixture['uuid']})
 
       if anon_config
         assert_response 200
@@ -452,7 +452,7 @@ class ApplicationControllerTest < ActionController::TestCase
       Rails.configuration.include_accept_encoding_header_in_api_requests = config
 
       @controller = CollectionsController.new
-      get(:show, {id: api_fixture('collections')['foo_file']['uuid']}, session_for(:admin))
+      get(:show, params: {id: api_fixture('collections')['foo_file']['uuid']}, session: session_for(:admin))
 
       assert_equal([['.', 'foo', 3]], assigns(:object).files)
     end
@@ -461,13 +461,13 @@ class ApplicationControllerTest < ActionController::TestCase
   test 'Edit name and verify that a duplicate is not created' do
     @controller = ProjectsController.new
     project = api_fixture("groups")["aproject"]
-    post :update, {
+    post :update, params: {
       id: project["uuid"],
       project: {
         name: 'test name'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_includes @response.body, 'test name'
     updated = assigns(:object)
     assert_equal updated.uuid, project["uuid"]
@@ -481,7 +481,7 @@ class ApplicationControllerTest < ActionController::TestCase
     test "access #{controller.controller_name} index as admin and verify Home link is#{' not' if !expect_home_link} shown" do
       @controller = controller
 
-      get :index, {}, session_for(:admin)
+      get :index, params: {}, session: session_for(:admin)
 
       assert_response 200
       assert_includes @response.body, expect_str
@@ -503,7 +503,7 @@ class ApplicationControllerTest < ActionController::TestCase
     test "access #{controller.controller_name} index as admin and verify Delete option is#{' not' if !expect_delete_link} shown" do
       @controller = controller
 
-      get :index, {}, session_for(:admin)
+      get :index, params: {}, session: session_for(:admin)
 
       assert_response 200
       assert_includes @response.body, expect_str
index 3ff02a82a2711e983ce507421b2e290317effca8..88287cd3f3d1cd0b5e65ed980b4c58b6ce4b13dd 100644 (file)
@@ -32,7 +32,11 @@ class CollectionsControllerTest < ActionController::TestCase
 
   def assert_hash_includes(actual_hash, expected_hash, msg=nil)
     expected_hash.each do |key, value|
-      assert_equal(value, actual_hash[key], msg)
+      if value.nil?
+        assert_nil(actual_hash[key], msg)
+      else
+        assert_equal(value, actual_hash[key], msg)
+      end
     end
   end
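
The nil branch added above avoids a Minitest deprecation: assert_equal with an expected value of nil warns, and is slated to become an error in Minitest 6. A self-contained illustration:

    require 'minitest/autorun'

    class NilAssertionTest < Minitest::Test
      def test_nil_value
        h = {'a' => nil}
        # assert_equal(nil, h['a'])  # deprecated; warns under current Minitest
        assert_nil h['a']            # the supported form
      end
    end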
 
@@ -51,7 +55,7 @@ class CollectionsControllerTest < ActionController::TestCase
   def show_collection(params, session={}, response=:success)
     params = collection_params(params) if not params.is_a? Hash
     session = session_for(session) if not session.is_a? Hash
-    get(:show, params, session)
+    get(:show, params: params, session: session)
     assert_response response
   end
 
@@ -68,10 +72,10 @@ class CollectionsControllerTest < ActionController::TestCase
   test "download a file with spaces in filename" do
     setup_for_keep_web
     collection = api_fixture('collections')['w_a_z_file']
-    get :show_file, {
+    get :show_file, params: {
       uuid: collection['uuid'],
       file: 'w a z'
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :redirect
     assert_match /w%20a%20z/, response.redirect_url
   end
@@ -123,7 +127,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test "viewing collection files with a reader token" do
     params = collection_params(:foo_file)
     params[:reader_token] = api_token("active_all_collections")
-    get(:show_file_links, params)
+    get(:show_file_links, params: params)
     assert_response :redirect
     assert_no_session
   end
@@ -132,7 +136,7 @@ class CollectionsControllerTest < ActionController::TestCase
     setup_for_keep_web
     params = collection_params(:foo_file, "foo")
     params[:reader_token] = api_token("active_all_collections")
-    get(:show_file, params)
+    get(:show_file, params: params)
     assert_response :redirect
     assert_match /foo/, response.redirect_url
     assert_no_session
@@ -141,7 +145,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test "reader token Collection links end with trailing slash" do
     # Testing the fix for #2937.
     session = session_for(:active_trustedclient)
-    post(:share, collection_params(:foo_file), session)
+    post(:share, params: collection_params(:foo_file), session: session)
     assert(@controller.download_link.ends_with? '/',
            "Collection share link does not end with slash for wget")
   end
@@ -150,7 +154,7 @@ class CollectionsControllerTest < ActionController::TestCase
     setup_for_keep_web
     params = collection_params(:foo_file, 'foo')
     sess = session_for(:active)
-    get(:show_file, params, sess)
+    get(:show_file, params: params, session: sess)
     assert_response :redirect
     assert_match /foo/, response.redirect_url
   end
@@ -158,7 +162,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test 'anonymous download' do
     setup_for_keep_web
     config_anonymous true
-    get :show_file, {
+    get :show_file, params: {
       uuid: api_fixture('collections')['user_agreement_in_anonymously_accessible_project']['uuid'],
       file: 'GNU_General_Public_License,_version_3.pdf',
     }
@@ -169,7 +173,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test "can't get a file from Keep without permission" do
     params = collection_params(:foo_file, 'foo')
     sess = session_for(:spectator)
-    get(:show_file, params, sess)
+    get(:show_file, params: params, session: sess)
     assert_response 404
   end
 
@@ -178,7 +182,7 @@ class CollectionsControllerTest < ActionController::TestCase
     params = collection_params(:foo_file, 'foo')
     read_token = api_token('active')
     params[:reader_token] = read_token
-    get(:show_file, params)
+    get(:show_file, params: params)
     assert_response :redirect
     assert_match /foo/, response.redirect_url
     assert_not_equal(read_token, session[:arvados_api_token],
@@ -191,7 +195,7 @@ class CollectionsControllerTest < ActionController::TestCase
       params = collection_params(:foo_file, 'foo')
       params[:reader_token] =
         api_token('active_noscope')
-      get(:show_file, params)
+      get(:show_file, params: params)
       if anon
         # Some files can be shown without a valid token, but not this one.
         assert_response 404
@@ -209,7 +213,7 @@ class CollectionsControllerTest < ActionController::TestCase
     sess = session_for(:expired)
     read_token = api_token('active')
     params[:reader_token] = read_token
-    get(:show_file, params, sess)
+    get(:show_file, params: params, session: sess)
     assert_response :redirect
     assert_not_equal(read_token, session[:arvados_api_token],
                      "using a reader token set the session's API token")
@@ -220,10 +224,10 @@ class CollectionsControllerTest < ActionController::TestCase
     ua_collection = api_fixture('collections')['user_agreement']
     # Here we don't test whether the agreement can be retrieved from
     # Keep. We only test that show_file decides to send file content.
-    get :show_file, {
+    get :show_file, params: {
       uuid: ua_collection['uuid'],
       file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
-    }, session_for(:inactive)
+    }, session: session_for(:inactive)
     assert_nil(assigns(:unsigned_user_agreements),
                "Did not skip check_user_agreements filter " +
                "when showing the user agreement.")
@@ -238,7 +242,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test "show file in a subdirectory of a collection" do
     setup_for_keep_web
     params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
-    get(:show_file, params, session_for(:user1_with_load))
+    get(:show_file, params: params, session: session_for(:user1_with_load))
     assert_response :redirect
     assert_match /subdir2\/subdir3\/subdir4\/file1_in_subdir4\.txt/, response.redirect_url
   end
@@ -320,11 +324,11 @@ class CollectionsControllerTest < ActionController::TestCase
     show_collection(fixture_name, :active)
     fixture = api_fixture('collections')[fixture_name.to_s]
     assert_equal(fixture['name'], assigns(:object).name)
-    assert_equal(fixture['properties'][0], assigns(:object).properties[0])
+    assert_equal(fixture['properties'].values[0], assigns(:object).properties.values[0])
   end
 
   test "create collection with properties" do
-    post :create, {
+    post :create, params: {
       collection: {
         name: 'collection created with properties',
         manifest_text: '',
@@ -333,7 +337,7 @@ class CollectionsControllerTest < ActionController::TestCase
         },
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
     assert_not_nil assigns(:object).uuid
     assert_equal 'collection created with properties', assigns(:object).name
@@ -342,13 +346,13 @@ class CollectionsControllerTest < ActionController::TestCase
 
   test "update description and check manifest_text is not lost" do
     collection = api_fixture("collections")["multilevel_collection_1"]
-    post :update, {
+    post :update, params: {
       id: collection["uuid"],
       collection: {
         description: 'test description update'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
     assert_not_nil assigns(:object)
     # Ensure the Workbench response still has the original manifest_text
@@ -416,7 +420,7 @@ class CollectionsControllerTest < ActionController::TestCase
   test "anonymous user accesses collection in shared project" do
     config_anonymous true
     collection = api_fixture('collections')['public_text_file']
-    get(:show, {id: collection['uuid']})
+    get(:show, params: {id: collection['uuid']})
 
     response_object = assigns(:object)
     assert_equal collection['name'], response_object['name']
@@ -427,19 +431,19 @@ class CollectionsControllerTest < ActionController::TestCase
   end
 
   test "can view empty collection" do
-    get :show, {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session_for(:active)
+    get :show, params: {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session: session_for(:active)
     assert_includes @response.body, 'The following collections have this content'
   end
 
   test "collection portable data hash redirect" do
     di = api_fixture('collections')['docker_image']
-    get :show, {id: di['portable_data_hash']}, session_for(:active)
+    get :show, params: {id: di['portable_data_hash']}, session: session_for(:active)
     assert_match /\/collections\/#{di['uuid']}/, @response.redirect_url
   end
 
   test "collection portable data hash with multiple matches" do
     pdh = api_fixture('collections')['foo_file']['portable_data_hash']
-    get :show, {id: pdh}, session_for(:admin)
+    get :show, params: {id: pdh}, session: session_for(:admin)
     matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
     assert matches.size > 1
 
@@ -455,13 +459,15 @@ class CollectionsControllerTest < ActionController::TestCase
 
   test "collection page renders name" do
     collection = api_fixture('collections')['foo_file']
-    get :show, {id: collection['uuid']}, session_for(:active)
+    get :show, params: {id: collection['uuid']}, session: session_for(:active)
     assert_includes @response.body, collection['name']
     assert_match /not authorized to manage collection sharing links/, @response.body
   end
 
   test "No Upload tab on non-writable collection" do
-    get :show, {id: api_fixture('collections')['user_agreement']['uuid']}, session_for(:active)
+    get :show,
+        params: {id: api_fixture('collections')['user_agreement']['uuid']},
+        session: session_for(:active)
     assert_not_includes @response.body, '<a href="#Upload"'
   end
 
@@ -475,7 +481,9 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web
       tok = api_token('active')
       id = api_fixture('collections')['w_a_z_file'][id_type]
-      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      get :show_file,
+          params: {uuid: id, file: "w a z"},
+          session: session_for(:active)
       assert_response :redirect
       assert_equal "https://#{id.sub '+', '-'}.example/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
     end
@@ -484,7 +492,9 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web
       tok = api_token('active')
       id = api_fixture('collections')['w_a_z_file'][id_type]
-      get :show_file, {uuid: id, file: "w a z", reader_token: tok}, session_for(:expired)
+      get :show_file,
+          params: {uuid: id, file: "w a z", reader_token: tok},
+          session: session_for(:expired)
       assert_response :redirect
       assert_equal "https://#{id.sub '+', '-'}.example/t=#{URI.escape tok}/_/w%20a%20z", @response.redirect_url
     end
@@ -493,7 +503,7 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web
       config_anonymous true
       id = api_fixture('collections')['public_text_file'][id_type]
-      get :show_file, {uuid: id, file: "Hello World.txt"}
+      get :show_file, params: {uuid: id, file: "Hello World.txt"}
       assert_response :redirect
       assert_equal "https://#{id.sub '+', '-'}.example/_/Hello%20World.txt", @response.redirect_url
     end
@@ -502,7 +512,7 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web
       config_anonymous true
       id = api_fixture('collections')['public_text_file'][id_type]
-      get :show_file, {
+      get :show_file, params: {
         uuid: id,
         file: "Hello World.txt",
         disposition: 'attachment',
@@ -516,7 +526,7 @@ class CollectionsControllerTest < ActionController::TestCase
                          'https://download.example/c=%{uuid_or_pdh}')
       tok = api_token('active')
       id = api_fixture('collections')['w_a_z_file'][id_type]
-      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
       assert_response :redirect
       assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
     end
@@ -527,7 +537,7 @@ class CollectionsControllerTest < ActionController::TestCase
                          'https://download.example/c=%{uuid_or_pdh}')
       tok = api_token('active')
       id = api_fixture('collections')['w_a_z_file'][id_type]
-      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
       assert_response :redirect
       assert_equal "https://collections.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
     end
@@ -538,7 +548,7 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web
       config_anonymous anon
       id = api_fixture('collections')['w_a_z_file']['uuid']
-      get :show_file, {uuid: id, file: "w a z"}, session_for(:spectator)
+      get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:spectator)
       assert_response 404
     end
 
@@ -548,11 +558,11 @@ class CollectionsControllerTest < ActionController::TestCase
                          'https://download.example/c=%{uuid_or_pdh}')
       tok = api_token('active')
       id = api_fixture('collections')['public_text_file']['uuid']
-      get :show_file, {
+      get :show_file, params: {
         uuid: id,
         file: 'Hello world.txt',
         disposition: 'attachment',
-      }, session_for(:active)
+      }, session: session_for(:active)
       assert_response :redirect
       expect_url = "https://download.example/c=#{id.sub '+', '-'}/_/Hello%20world.txt"
       if not anon
@@ -567,7 +577,7 @@ class CollectionsControllerTest < ActionController::TestCase
     # cannot read this collection without a session token.
     setup_for_keep_web 'https://collections.example/c=%{uuid_or_pdh}', false
     id = api_fixture('collections')['w_a_z_file']['uuid']
-    get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+    get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
     assert_response 422
   end
 
@@ -577,7 +587,7 @@ class CollectionsControllerTest < ActionController::TestCase
       setup_for_keep_web false, 'https://download.example/c=%{uuid_or_pdh}'
       tok = api_token('active')
       id = api_fixture('collections')['w_a_z_file']['uuid']
-      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
       assert_response :redirect
       assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
     end
@@ -594,14 +604,15 @@ class CollectionsControllerTest < ActionController::TestCase
     assert_includes(collection['manifest_text'], "0:0:file1")
 
     # now remove all files named 'file1' from the collection
-    post :remove_selected_files, {
+    post :remove_selected_files, params: {
       id: collection['uuid'],
       selection: ["#{collection['uuid']}/file1",
                   "#{collection['uuid']}/dir1/file1"],
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
+    use_token :active
     # verify no 'file1' in the updated collection
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_not_includes(collection['manifest_text'], "0:0:file1")
@@ -618,15 +629,16 @@ class CollectionsControllerTest < ActionController::TestCase
     assert_includes(collection['manifest_text'], "0:0:file1")
 
     # now remove all files from "dir1" subdir of the collection
-    post :remove_selected_files, {
+    post :remove_selected_files, params: {
       id: collection['uuid'],
       selection: ["#{collection['uuid']}/dir1/file1",
                   "#{collection['uuid']}/dir1/file2"],
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
     # verify that "./dir1" no longer exists in this collection's manifest text
+    use_token :active
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1 0:0:file2\n$/, collection['manifest_text']
     assert_not_includes(collection['manifest_text'], 'dir1')
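
The use_token :active lines added in these hunks restore the fixture API token before each direct Collection model read: the thread-local token the model layer relies on is no longer set once the controller request completes (compare the "current request_id is nil after a request" test earlier). The recurring shape, sketched from the hunks above:

    post :update, params: {
      id: collection['uuid'],
      collection: {'rename-file-path:file1' => 'file1renamed'},
      format: :json,
    }, session: session_for(:active)
    assert_response :success

    use_token :active  # re-establish the API token for model-layer reads
    collection = Collection.select([:uuid, :manifest_text]).
                 where(uuid: collection['uuid']).first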
@@ -642,57 +654,61 @@ class CollectionsControllerTest < ActionController::TestCase
     assert_includes(collection['manifest_text'], "0:0:file1")
 
     # rename 'file1' as 'file1renamed' and verify
-    post :update, {
+    post :update, params: {
       id: collection['uuid'],
       collection: {
         'rename-file-path:file1' => 'file1renamed'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
+    use_token :active
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed 0:0:file2\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png\n$/, collection['manifest_text']
 
     # now rename 'file2' such that it is moved into 'dir1'
     @test_counter = 0
-    post :update, {
+    post :update, params: {
       id: collection['uuid'],
       collection: {
         'rename-file-path:file2' => 'dir1/file2'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
+    use_token :active
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n$/, collection['manifest_text']
 
     # now rename 'dir1/dir1file1' such that it is moved into a new subdir
     @test_counter = 0
-    post :update, {
+    post :update, params: {
       id: collection['uuid'],
       collection: {
         'rename-file-path:dir1/dir1file1' => 'dir2/dir3/dir1file1moved'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
+    use_token :active
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
 
     # now rename the image file 'dir1/dir1imagefile.png'
     @test_counter = 0
-    post :update, {
+    post :update, params: {
       id: collection['uuid'],
       collection: {
         'rename-file-path:dir1/dir1imagefile.png' => 'dir1/dir1imagefilerenamed.png'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
 
+    use_token :active
     collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
     assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefilerenamed.png 0:0:file2\n.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
   end
@@ -701,13 +717,13 @@ class CollectionsControllerTest < ActionController::TestCase
     use_token :active
 
     # rename 'file2' as 'file1' and expect error
-    post :update, {
+    post :update, params: {
       id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
       collection: {
         'rename-file-path:file2' => 'file1'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response 422
     assert_includes json_response['errors'], 'Duplicate file path'
   end
@@ -716,13 +732,13 @@ class CollectionsControllerTest < ActionController::TestCase
     use_token :active
 
     # rename 'file1' as 'dir1/file1' and expect error
-    post :update, {
+    post :update, params: {
       id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
       collection: {
         'rename-file-path:file1' => 'dir1/file1'
       },
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response 422
     assert_includes json_response['errors'], 'Duplicate file path'
   end
index 6e96839e25f617f6ffca29ca1a44e81065c0cce2..93686aa6b14668d762185a5f19a5431d8398a60f 100644 (file)
@@ -12,7 +12,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
     container_uuid = cr['container_uuid']
     container = Container.find(container_uuid)
 
-    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    get :show, params: {id: cr['uuid'], tab_pane: 'Log'}, session: session_for(:active)
     assert_response :success
 
     assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
@@ -27,7 +27,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
     container_uuid = cr['container_uuid']
     container = Container.find(container_uuid)
 
-    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    get :show, params: {id: cr['uuid'], tab_pane: 'Log'}, session: session_for(:active)
     assert_response :success
 
     assert_includes @response.body, '<pre id="event_log_div"'
@@ -39,7 +39,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
 
     uuid = api_fixture('container_requests')['completed']['uuid']
 
-    get :show, {id: uuid}, session_for(:active)
+    get :show, params: {id: uuid}, session: session_for(:active)
     assert_response :success
 
     assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
@@ -47,7 +47,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
 
   test "cancel request for queued container" do
     cr_fixture = api_fixture('container_requests')['queued']
-    post :cancel, {id: cr_fixture['uuid']}, session_for(:active)
+    post :cancel, params: {id: cr_fixture['uuid']}, session: session_for(:active)
     assert_response 302
 
     use_token 'active'
@@ -72,13 +72,12 @@ class ContainerRequestsControllerTest < ActionController::TestCase
       if reuse_enabled
         copy_params.merge!({use_existing: true})
       end
-      post(:copy, copy_params, session_for(:active))
+      post(:copy, params: copy_params, session: session_for(:active))
       assert_response 302
       copied_cr = assigns(:object)
       assert_not_nil copied_cr
       assert_equal 'Uncommitted', copied_cr[:state]
       assert_equal "Copy of #{completed_cr['name']}", copied_cr['name']
-      assert_equal completed_cr['cmd'], copied_cr['cmd']
       assert_equal completed_cr['runtime_constraints']['ram'], copied_cr['runtime_constraints'][:ram]
       if reuse_enabled
         assert copied_cr[:use_existing]
@@ -114,8 +113,8 @@ class ContainerRequestsControllerTest < ActionController::TestCase
       cr = api_fixture('container_requests')[cr_fixture]
       assert_not_nil cr
       get(:show,
-          {id: cr['uuid']},
-          session_for(:active))
+          params: {id: cr['uuid']},
+          session: session_for(:active))
       assert_response :success
       if should_show
         assert_includes @response.body, "href=\"#Provenance\""
@@ -130,7 +129,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
 
     cr = api_fixture('container_requests')['completed_with_input_mounts']
 
-    get :show, {id: cr['uuid']}, session_for(:active)
+    get :show, params: {id: cr['uuid']}, session: session_for(:active)
     assert_response :success
 
     assert_match /hello/, @response.body
index a6a299932cddf74f9c43169d4871179c8d12cebb..ff7584e20b0317373ca53f642361d337894a3213 100644 (file)
@@ -10,7 +10,9 @@ class ContainersControllerTest < ActionController::TestCase
 
     container = api_fixture('containers')['completed']
 
-    get :show, {id: container['uuid'], tab_pane: 'Log'}, session_for(:active)
+    get :show,
+        params: {id: container['uuid'], tab_pane: 'Log'},
+        session: session_for(:active)
     assert_response :success
 
     assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
index 913f2b972834fce585bab576ddab67b576344221..556b958d00a8f0fe50c1a9af9d81b62c2eeb1771 100644 (file)
@@ -16,7 +16,7 @@ class DisabledApiTest < ActionController::TestCase
     dd[:resources][:pipeline_instances][:methods].delete(:index)
     ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
 
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_includes @response.body, "zzzzz-xvhdp-cr4runningcntnr" # expect crs
     assert_not_includes @response.body, "zzzzz-d1hrv-"   # expect no pipelines
     assert_includes @response.body, "Run a process"
@@ -29,7 +29,7 @@ class DisabledApiTest < ActionController::TestCase
     dd[:resources][:pipeline_instances][:methods].delete(:index)
     ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
 
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_not_includes @response.body, "compute-node-summary-pane"
   end
 
@@ -46,7 +46,7 @@ class DisabledApiTest < ActionController::TestCase
       dd[:resources][ctrl_name][:methods].delete(:index)
       ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
 
-      get :index, {}, session_for(:active)
+      get :index, params: {}, session: session_for(:active)
       assert_response 404
     end
   end
@@ -68,9 +68,9 @@ class DisabledApiTest < ActionController::TestCase
       proj_uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
 
       if user
-        get(:show, {id: proj_uuid}, session_for(user))
+        get(:show, params: {id: proj_uuid}, session: session_for(user))
       else
-        get(:show, {id: proj_uuid})
+        get(:show, params: {id: proj_uuid})
       end
 
       resp = @response.body
index f854eaa77fae4723e00bda563441c2c1a2f59e64..29a8efc24f8630188e653deabfead02541ddfe91 100644 (file)
@@ -6,22 +6,22 @@ require 'test_helper'
 
 class JobsControllerTest < ActionController::TestCase
   test "visit jobs index page" do
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_response :success
   end
 
   test "job page lists pipelines and jobs in which it is used" do
     get(:show,
-        {id: api_fixture('jobs')['completed_job_in_publicly_accessible_project']['uuid']},
-        session_for(:active))
+        params: {id: api_fixture('jobs')['completed_job_in_publicly_accessible_project']['uuid']},
+        session: session_for(:active))
     assert_response :success
 
     assert_select "div.used-in-pipelines" do
-      assert_select "a[href=/pipeline_instances/zzzzz-d1hrv-n68vc490mloy4fi]"
+      assert_select "a[href=\"/pipeline_instances/zzzzz-d1hrv-n68vc490mloy4fi\"]"
     end
 
     assert_select "div.used-in-jobs" do
-      assert_select "a[href=/jobs/zzzzz-8i9sb-with2components]"
+      assert_select "a[href=\"/jobs/zzzzz-8i9sb-with2components\"]"
     end
   end
 end
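
assert_select in Rails 5 parses selectors with Nokogiri according to the CSS specification, so attribute values containing characters such as '/' must be quoted; the bare Rails 4 form no longer parses. In general:

    # Rails 4-era, no longer accepted:
    #   assert_select "a[href=/jobs/zzzzz-8i9sb-with2components]"
    # Rails 5, quoted attribute value:
    assert_select "a[href=\"/jobs/zzzzz-8i9sb-with2components\"]"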
index 6887cac8924e182ae3b9e881f1ec2356dcf9fbf2..c76244d11cd1538e1584701ca4894a7318ef87c4 100644 (file)
@@ -10,12 +10,12 @@ class PipelineInstancesControllerTest < ActionController::TestCase
   def create_instance_long_enough_to(instance_attrs={})
     # create 'two_part' pipeline with the given instance attributes
     pt_fixture = api_fixture('pipeline_templates')['two_part']
-    post :create, {
+    post :create, params: {
       pipeline_instance: instance_attrs.merge({
         pipeline_template_uuid: pt_fixture['uuid']
       }),
       format: :json
-    }, session_for(:active)
+    }, session: session_for(:active)
     assert_response :success
     pi_uuid = assigns(:object).uuid
     assert_not_nil assigns(:object)
@@ -38,14 +38,14 @@ class PipelineInstancesControllerTest < ActionController::TestCase
   test "can render pipeline instance with tagged collections" do
     # Make sure to pass in a tagged collection to test that part of the rendering behavior.
     get(:show,
-        {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
-        session_for(:active))
+        params: {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
+        session: session_for(:active))
     assert_response :success
   end
 
   test "update script_parameters one at a time using merge param" do
       template_fixture = api_fixture('pipeline_templates')['two_part']
-      post :update, {
+      post :update, params: {
         id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
         pipeline_instance: {
           components: {
@@ -63,7 +63,7 @@ class PipelineInstancesControllerTest < ActionController::TestCase
         },
         merge: true,
         format: :json
-      }, session_for(:active)
+      }, session: session_for(:active)
       assert_response :success
       assert_not_nil assigns(:object)
       orig_params = template_fixture['components']['part-two']['script_parameters']
@@ -77,15 +77,15 @@ class PipelineInstancesControllerTest < ActionController::TestCase
 
   test "component rendering copes with unexpected components format" do
     get(:show,
-        {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
-        session_for(:active))
+        params: {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
+        session: session_for(:active))
     assert_response :success
   end
 
   test "dates in JSON components are parsed" do
     get(:show,
-        {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
-        session_for(:active))
+        params: {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
+        session: session_for(:active))
     assert_response :success
     assert_not_nil assigns(:object)
     assert_not_nil assigns(:object).components[:foo][:job]
@@ -103,7 +103,7 @@ class PipelineInstancesControllerTest < ActionController::TestCase
 
   test "copy pipeline instance with components=use_latest" do
     post(:copy,
-         {
+         params: {
            id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
            components: 'use_latest',
            script: 'use_latest',
@@ -111,7 +111,7 @@ class PipelineInstancesControllerTest < ActionController::TestCase
              state: 'RunningOnServer'
            }
          },
-         session_for(:active))
+         session: session_for(:active))
     assert_response 302
     assert_not_nil assigns(:object)
 
@@ -136,7 +136,7 @@ class PipelineInstancesControllerTest < ActionController::TestCase
 
   test "copy pipeline instance on newer template works with script=use_same" do
     post(:copy,
-         {
+         params: {
            id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
            components: 'use_latest',
            script: 'use_same',
@@ -144,7 +144,7 @@ class PipelineInstancesControllerTest < ActionController::TestCase
              state: 'RunningOnServer'
            }
          },
-         session_for(:active))
+         session: session_for(:active))
     assert_response 302
     assert_not_nil assigns(:object)
 
index 1f733c48eb0284897cf7ae1060461cc9eb9da961..4752f328a98056d5de770fa2ed344b012a4f0acd 100644 (file)
@@ -7,8 +7,8 @@ require 'test_helper'
 class PipelineTemplatesControllerTest < ActionController::TestCase
   test "component rendering copes with unexpeceted components format" do
     get(:show,
-        {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
-        session_for(:active))
+        params: {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
+        session: session_for(:active))
     assert_response :success
   end
 end
index 21b3361c1612d8df920217b0a43775b9f372a9de..09a6950cead6fad12ce20262c3eda4a6d978d91c 100644 (file)
@@ -9,7 +9,7 @@ class ProjectsControllerTest < ActionController::TestCase
   include ShareObjectHelper
 
   test "invited user is asked to sign user agreements on front page" do
-    get :index, {}, session_for(:inactive)
+    get :index, params: {}, session: session_for(:inactive)
     assert_response :redirect
     assert_match(/^#{Regexp.escape(user_agreements_url)}\b/,
                  @response.redirect_url,
@@ -17,7 +17,7 @@ class ProjectsControllerTest < ActionController::TestCase
   end
 
   test "uninvited user is asked to wait for activation" do
-    get :index, {}, session_for(:inactive_uninvited)
+    get :index, params: {}, session: session_for(:inactive_uninvited)
     assert_response :redirect
     assert_match(/^#{Regexp.escape(inactive_users_url)}\b/,
                  @response.redirect_url,
@@ -28,9 +28,9 @@ class ProjectsControllerTest < ActionController::TestCase
    [:project_viewer, false]].each do |which_user, should_show|
     test "create subproject button #{'not ' unless should_show} shown to #{which_user}" do
       readonly_project_uuid = api_fixture('groups')['aproject']['uuid']
-      get :show, {
+      get :show, params: {
         id: readonly_project_uuid
-      }, session_for(which_user)
+      }, session: session_for(which_user)
       buttons = css_select('[data-method=post]').select do |el|
         el.attributes['data-remote-href'].value.match /project.*owner_uuid.*#{readonly_project_uuid}/
       end
@@ -46,22 +46,22 @@ class ProjectsControllerTest < ActionController::TestCase
   test "sharing a project with a user and group" do
     uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
                  api_fixture("users")["future_project_user"]["uuid"]]
-    post(:share_with, {
+    post(:share_with, params: {
            id: api_fixture("groups")["asubproject"]["uuid"],
            uuids: uuid_list,
            format: "json"},
-         session_for(:active))
+         session: session_for(:active))
     assert_response :success
     assert_equal(uuid_list, json_response["success"])
   end
 
   test "user with project read permission can't add permissions" do
     share_uuid = api_fixture("users")["spectator"]["uuid"]
-    post(:share_with, {
+    post(:share_with, params: {
            id: api_fixture("groups")["aproject"]["uuid"],
            uuids: [share_uuid],
            format: "json"},
-         session_for(:project_viewer))
+         session: session_for(:project_viewer))
     assert_response 422
     assert(json_response["errors"].andand.
              any? { |msg| msg.start_with?("#{share_uuid}: ") },
@@ -98,8 +98,8 @@ class ProjectsControllerTest < ActionController::TestCase
     # detected. The test passes quickly, but fails slowly.
     Timeout::timeout 10 do
       get(:show,
-          { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
-          session_for(:admin))
+          params: { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
+          session: session_for(:admin))
     end
     assert_response :success
   end
@@ -111,10 +111,10 @@ class ProjectsControllerTest < ActionController::TestCase
     coll_key = "collection_to_remove_from_subproject"
     coll_uuid = api_fixture("collections")[coll_key]["uuid"]
     delete(:remove_item,
-           { id: api_fixture("groups")["asubproject"]["uuid"],
+           params: { id: api_fixture("groups")["asubproject"]["uuid"],
              item_uuid: coll_uuid,
              format: "js" },
-           session_for(:subproject_admin))
+           session: session_for(:subproject_admin))
     assert_response :success
     assert_match(/\b#{coll_uuid}\b/, @response.body,
                  "removed object not named in response")
@@ -130,10 +130,10 @@ class ProjectsControllerTest < ActionController::TestCase
     # should be implicitly moved to the user's Home project when removed.
     specimen_uuid = api_fixture('specimens', 'in_asubproject')['uuid']
     delete(:remove_item,
-           { id: api_fixture('groups', 'asubproject')['uuid'],
+           params: { id: api_fixture('groups', 'asubproject')['uuid'],
              item_uuid: specimen_uuid,
              format: 'js' },
-           session_for(:subproject_admin))
+           session: session_for(:subproject_admin))
     assert_response :success
     assert_match(/\b#{specimen_uuid}\b/, @response.body,
                  "removed object not named in response")
@@ -151,10 +151,10 @@ class ProjectsControllerTest < ActionController::TestCase
     test "removing #{dm} from a subproject results in renaming it when there is another such object with same name in home project" do
       object = api_fixture(dm, fixture)
       delete(:remove_item,
-             { id: api_fixture('groups', 'asubproject')['uuid'],
+             params: { id: api_fixture('groups', 'asubproject')['uuid'],
                item_uuid: object['uuid'],
                format: 'js' },
-             session_for(:active))
+             session: session_for(:active))
       assert_response :success
       assert_match(/\b#{object['uuid']}\b/, @response.body,
                    "removed object not named in response")
@@ -236,12 +236,12 @@ class ProjectsControllerTest < ActionController::TestCase
     encoded_params = Hash[params.map { |k,v|
                             [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
                           }]
-    get :show, encoded_params, session_for(:active)
+    get :show, params: encoded_params, session: session_for(:active)
   end
 
   test "visit non-public project as anonymous when anonymous browsing is enabled and expect page not found" do
     Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
-    get(:show, {id: api_fixture('groups')['aproject']['uuid']})
+    get(:show, params: {id: api_fixture('groups')['aproject']['uuid']})
     assert_response 404
     assert_match(/log ?in/i, @response.body)
   end
@@ -261,7 +261,7 @@ class ProjectsControllerTest < ActionController::TestCase
       Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
 
       if user
-        get :public, {}, session_for(user)
+        get :public, params: {}, session: session_for(user)
       else
         get :public
       end
@@ -276,18 +276,22 @@ class ProjectsControllerTest < ActionController::TestCase
   end
 
   test "visit public projects page when anon config is not enabled as active user and expect 404" do
-    get :public, {}, session_for(:active)
+    Rails.configuration.anonymous_user_token = nil
+    Rails.configuration.enable_public_projects_page = false
+    get :public, params: {}, session: session_for(:active)
     assert_response 404
   end
 
   test "visit public projects page when anon config is enabled but public projects page is disabled as active user and expect 404" do
     Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
     Rails.configuration.enable_public_projects_page = false
-    get :public, {}, session_for(:active)
+    get :public, params: {}, session: session_for(:active)
     assert_response 404
   end
 
   test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
+    Rails.configuration.anonymous_user_token = nil
+    Rails.configuration.enable_public_projects_page = false
     get :public
     assert_response :redirect
     assert_match /\/users\/welcome/, @response.redirect_url
@@ -317,7 +321,7 @@ class ProjectsControllerTest < ActionController::TestCase
     found = Group.find(project['uuid'])
     found.description = 'test description update'
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
     assert_includes @response.body, 'test description update'
   end
 
@@ -327,7 +331,7 @@ class ProjectsControllerTest < ActionController::TestCase
     found = Group.find(project['uuid'])
     found.description = '*test bold description for textile formatting*'
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
     assert_includes @response.body, '<strong>test bold description for textile formatting</strong>'
   end
 
@@ -337,7 +341,7 @@ class ProjectsControllerTest < ActionController::TestCase
     found = Group.find(project['uuid'])
     found.description = '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
     assert_includes @response.body, '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
   end
 
@@ -347,7 +351,7 @@ class ProjectsControllerTest < ActionController::TestCase
     found = Group.find(project['uuid'])
     found.description = 'Textile description with unsafe script tag <script language="javascript">alert("Hello there")</script>.'
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
     assert_includes @response.body, 'Textile description with unsafe script tag alert("Hello there").'
   end
 
@@ -364,7 +368,7 @@ EOT
     found = Group.find(project['uuid'])
     found.description = textile_table
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
     assert_includes @response.body, '<th>First Header'
     assert_includes @response.body, '<td>Content Cell'
   end
@@ -377,7 +381,7 @@ EOT
     # uses 'Link to object' as a hyperlink for the object
     found.description = '"Link to object":' + api_fixture('groups')['asubproject']['uuid']
     found.save!
-    get(:show, {id: project['uuid']}, session_for(:active))
+    get(:show, params: {id: project['uuid']}, session: session_for(:active))
 
     # check that input was converted to textile, not staying as inputted
     refute_includes  @response.body,'"Link to object"'
@@ -386,7 +390,7 @@ EOT
 
   test "project viewer can't see project sharing tab" do
     project = api_fixture('groups')['aproject']
-    get(:show, {id: project['uuid']}, session_for(:project_viewer))
+    get(:show, params: {id: project['uuid']}, session: session_for(:project_viewer))
     refute_includes @response.body, '<div id="Sharing"'
     assert_includes @response.body, '<div id="Data_collections"'
   end
@@ -397,7 +401,7 @@ EOT
   ].each do |username|
     test "#{username} can see project sharing tab" do
      project = api_fixture('groups')['aproject']
-     get(:show, {id: project['uuid']}, session_for(username))
+     get(:show, params: {id: project['uuid']}, session: session_for(username))
      assert_includes @response.body, '<div id="Sharing"'
      assert_includes @response.body, '<div id="Data_collections"'
     end
@@ -409,7 +413,7 @@ EOT
     ['project_viewer',false],
   ].each do |user, can_move|
     test "#{user} can move subproject from project #{can_move}" do
-      get(:show, {id: api_fixture('groups')['aproject']['uuid']}, session_for(user))
+      get(:show, params: {id: api_fixture('groups')['aproject']['uuid']}, session: session_for(user))
       if can_move
         assert_includes @response.body, 'Move project...'
       else
@@ -423,7 +427,7 @@ EOT
     [:active, false],
   ].each do |user, expect_all_nodes|
     test "in dashboard other index page links as #{user}" do
-      get :index, {}, session_for(user)
+      get :index, params: {}, session: session_for(user)
 
       [["processes", "/all_processes"],
        ["collections", "/collections"],
@@ -443,7 +447,7 @@ EOT
   end
 
   test "dashboard should show the correct status for processes" do
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_select 'div.panel-body.recent-processes' do
       [
         {
@@ -499,7 +503,7 @@ EOT
   test "visit a public project and verify the public projects page link exists" do
     Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
     uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
-    get :show, {id: uuid}
+    get :show, params: {id: uuid}
     project = assigns(:object)
     assert_equal uuid, project['uuid']
     refute_empty css_select("[href=\"/projects/#{project['uuid']}\"]")
@@ -509,12 +513,12 @@ EOT
   test 'all_projects unaffected by params after use by ProjectsController (#6640)' do
     @controller = ProjectsController.new
     project_uuid = api_fixture('groups')['aproject']['uuid']
-    get :index, {
+    get :index, params: {
       filters: [['uuid', '<', project_uuid]].to_json,
       limit: 0,
       offset: 1000,
-    }, session_for(:active)
-    assert_select "#projects-menu + ul li.divider ~ li a[href=/projects/#{project_uuid}]"
+    }, session: session_for(:active)
+    assert_select "#projects-menu + ul li.divider ~ li a[href=\"/projects/#{project_uuid}\"]"
   end
 
   [
@@ -580,7 +584,7 @@ EOT
 
     # share it again
     @controller = LinksController.new
-    post :create, {
+    post :create, params: {
       link: {
         link_class: 'permission',
         name: 'can_read',
@@ -588,7 +592,7 @@ EOT
         tail_uuid: api_fixture('users')['project_viewer']['uuid'],
       },
       format: :json
-    }, session_for(:system_user)
+    }, session: session_for(:system_user)
 
     # verify that the project is again included in starred projects
     use_token :project_viewer
index b81e2384c98db7975349e0ea4a4c387f95223a2a..a5d72097649c0302cffa6bd18970505ff8803ff4 100644 (file)
@@ -17,11 +17,11 @@ class RepositoriesControllerTest < ActionController::TestCase
     test "#{user} shares repository with a user and group" do
       uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
                    api_fixture("users")["future_project_user"]["uuid"]]
-      post(:share_with, {
+      post(:share_with, params: {
              id: api_fixture("repositories")["foo"]["uuid"],
              uuids: uuid_list,
              format: "json"},
-           session_for(user))
+           session: session_for(user))
       assert_response :success
       assert_equal(uuid_list, json_response["success"])
     end
@@ -29,11 +29,11 @@ class RepositoriesControllerTest < ActionController::TestCase
 
   test "user with repository read permission cannot add permissions" do
     share_uuid = api_fixture("users")["project_viewer"]["uuid"]
-    post(:share_with, {
+    post(:share_with, params: {
            id: api_fixture("repositories")["arvados"]["uuid"],
            uuids: [share_uuid],
            format: "json"},
-         session_for(:spectator))
+         session: session_for(:spectator))
     assert_response 422
     assert(json_response["errors"].andand.
              any? { |msg| msg.start_with?("#{share_uuid}: ") },
@@ -57,9 +57,9 @@ class RepositoriesControllerTest < ActionController::TestCase
     [:admin,  ['#Attributes', '#Sharing', '#Advanced']],
   ].each do |user, expected_panes|
     test "#{user} sees panes #{expected_panes}" do
-      get :show, {
+      get :show, params: {
         id: api_fixture('repositories')['foo']['uuid']
-      }, session_for(user)
+      }, session: session_for(user)
       assert_response :success
 
       panes = css_select('[data-toggle=tab]').each do |pane|
@@ -75,10 +75,10 @@ class RepositoriesControllerTest < ActionController::TestCase
     test "show tree to #{user}" do
       reset_api_fixtures_after_test false
       sha1, _, _ = stub_repo_content
-      get :show_tree, {
+      get :show_tree, params: {
         id: api_fixture('repositories')['foo']['uuid'],
         commit: sha1,
-      }, session_for(user)
+      }, session: session_for(user)
       assert_response :success
       assert_select 'tr td a', 'COPYING'
       assert_select 'tr td', '625 bytes'
@@ -91,10 +91,10 @@ class RepositoriesControllerTest < ActionController::TestCase
     test "show commit to #{user}" do
       reset_api_fixtures_after_test false
       sha1, commit, _ = stub_repo_content
-      get :show_commit, {
+      get :show_commit, params: {
         id: api_fixture('repositories')['foo']['uuid'],
         commit: sha1,
-      }, session_for(user)
+      }, session: session_for(user)
       assert_response :success
       assert_select 'pre', commit
     end
@@ -102,11 +102,11 @@ class RepositoriesControllerTest < ActionController::TestCase
     test "show blob to #{user}" do
       reset_api_fixtures_after_test false
       sha1, _, filedata = stub_repo_content filename: 'COPYING'
-      get :show_blob, {
+      get :show_blob, params: {
         id: api_fixture('repositories')['foo']['uuid'],
         commit: sha1,
         path: 'COPYING',
-      }, session_for(user)
+      }, session: session_for(user)
       assert_response :success
       assert_select 'pre', filedata
     end
@@ -116,11 +116,11 @@ class RepositoriesControllerTest < ActionController::TestCase
     test "show tree with path '#{path}'" do
       reset_api_fixtures_after_test false
       sha1, _, _ = stub_repo_content filename: 'COPYING'
-      get :show_tree, {
+      get :show_tree, params: {
         id: api_fixture('repositories')['foo']['uuid'],
         commit: sha1,
         path: path,
-      }, session_for(:active)
+      }, session: session_for(:active)
       assert_response :success
       assert_select 'tr td', 'COPYING'
     end
@@ -131,7 +131,7 @@ class RepositoriesControllerTest < ActionController::TestCase
       partial: :repositories_rows,
       format: :json,
     }
-    get :index, params, session_for(:active)
+    get :index, params: params, session: session_for(:active)
     assert_response :success
     repos = assigns(:objects)
     assert repos
index c57d70533efa23e90a07dd64982c4c1cbcd2f655..e620fbd8617c8fd135a1d19d08706e90e2f4406d 100644 (file)
@@ -13,43 +13,43 @@ class SearchControllerTest < ActionController::TestCase
   include Rails.application.routes.url_helpers
 
   test 'Get search dialog' do
-    xhr :get, :choose, {
+    get :choose, params: {
       format: :js,
       title: 'Search',
       action_name: 'Show',
       action_href: url_for(host: 'localhost', controller: :actions, action: :show),
       action_data: {}.to_json,
-    }, session_for(:active)
+    }, session: session_for(:active), xhr: true
     assert_response :success
   end
 
   test 'Get search results for all projects' do
-    xhr :get, :choose, {
+    get :choose, params: {
       format: :json,
       partial: true,
-    }, session_for(:active)
+    }, session: session_for(:active), xhr: true
     assert_response :success
     assert_not_empty(json_response['content'],
                      'search results for all projects should not be empty')
   end
 
   test 'Get search results for empty project' do
-    xhr :get, :choose, {
+    get :choose, params: {
       format: :json,
       partial: true,
       project_uuid: api_fixture('groups')['empty_project']['uuid'],
-    }, session_for(:active)
+    }, session: session_for(:active), xhr: true
     assert_response :success
     assert_empty(json_response['content'],
                  'search results for empty project should be empty')
   end
 
   test 'search results for aproject and verify recursive contents' do
-    xhr :get, :choose, {
+    get :choose, params: {
       format: :json,
       partial: true,
       project_uuid: api_fixture('groups')['aproject']['uuid'],
-    }, session_for(:active)
+    }, session: session_for(:active), xhr: true
     assert_response :success
     assert_not_empty(json_response['content'],
                  'search results for aproject should not be empty')
index 40a017b3ede18e8de81eee17a631f54dfab40888..c4090f0cef7dde572f3faec563b006c2bf3883fe 100644 (file)
@@ -8,10 +8,10 @@ class TrashItemsControllerTest < ActionController::TestCase
   test "untrash collection with same name as another collection" do
     collection = api_fixture('collections')['trashed_collection_to_test_name_conflict_on_untrash']
     items = [collection['uuid']]
-    post :untrash_items, {
+    post :untrash_items, params: {
       selection: items,
       format: :js
-    }, session_for(:active)
+    }, session: session_for(:active)
 
     assert_response :success
   end
index 1733058a4794af160b161e24a1e7cb6d77a663f9..4c6e41dcee16af57f0fa349487770fcca96e66af 100644 (file)
@@ -6,12 +6,12 @@ require 'test_helper'
 
 class UserAgreementsControllerTest < ActionController::TestCase
   test 'User agreements page shows form if some user agreements are not signed' do
-    get :index, {}, session_for(:inactive)
+    get :index, params: {}, session: session_for(:inactive)
     assert_response 200
   end
 
   test 'User agreements page redirects if all user agreements signed' do
-    get :index, {return_to: root_path}, session_for(:active)
+    get :index, params: {return_to: root_path}, session: session_for(:active)
     assert_response :redirect
     assert_equal(root_url,
                  @response.redirect_url,
index 393b864dc53a61f3a81a91af6abd49e836a5e831..57b8705963d752e41123fcb094f1f4505a8a3862 100644 (file)
@@ -7,12 +7,12 @@ require 'test_helper'
 class UsersControllerTest < ActionController::TestCase
 
   test "valid token works in controller test" do
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_response :success
   end
 
   test "ignore previously valid token (for deleted user), don't crash" do
-    get :activity, {}, session_for(:valid_token_deleted_user)
+    get :activity, params: {}, session: session_for(:valid_token_deleted_user)
     assert_response :redirect
     assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
     assert_nil assigns(:my_jobs)
@@ -20,9 +20,9 @@ class UsersControllerTest < ActionController::TestCase
   end
 
   test "expired token redirects to api server login" do
-    get :show, {
+    get :show, params: {
       id: api_fixture('users')['active']['uuid']
-    }, session_for(:expired_trustedclient)
+    }, session: session_for(:expired_trustedclient)
     assert_response :redirect
     assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
     assert_nil assigns(:my_jobs)
@@ -30,15 +30,15 @@ class UsersControllerTest < ActionController::TestCase
   end
 
   test "show welcome page if no token provided" do
-    get :index, {}
+    get :index, params: {}
     assert_response :redirect
     assert_match /\/users\/welcome/, @response.redirect_url
   end
 
   test "'log in as user' feature uses a v2 token" do
-    post :sudo, {
+    post :sudo, params: {
       id: api_fixture('users')['active']['uuid']
-    }, session_for('admin_trustedclient')
+    }, session: session_for('admin_trustedclient')
     assert_response :redirect
     assert_match /api_token=v2%2F/, @response.redirect_url
   end
@@ -48,10 +48,10 @@ class UsersControllerTest < ActionController::TestCase
 
     ActionMailer::Base.deliveries = []
 
-    post :request_shell_access, {
+    post :request_shell_access, params: {
       id: user['uuid'],
       format: 'js'
-    }, session_for(:spectator)
+    }, session: session_for(:spectator)
     assert_response :success
 
     full_name = "#{user['first_name']} #{user['last_name']}"
@@ -73,7 +73,7 @@ class UsersControllerTest < ActionController::TestCase
     test "access users page as #{username} and verify show button is available" do
       admin_user = api_fixture('users','admin')
       active_user = api_fixture('users','active')
-      get :index, {}, session_for(username)
+      get :index, params: {}, session: session_for(username)
       if username == 'admin'
         assert_match /<a href="\/projects\/#{admin_user['uuid']}">Home<\/a>/, @response.body
         assert_match /<a href="\/projects\/#{active_user['uuid']}">Home<\/a>/, @response.body
@@ -97,9 +97,9 @@ class UsersControllerTest < ActionController::TestCase
     test "access settings drop down menu as #{username}" do
       admin_user = api_fixture('users','admin')
       active_user = api_fixture('users','active')
-      get :show, {
+      get :show, params: {
         id: api_fixture('users')[username]['uuid']
-      }, session_for(username)
+      }, session: session_for(username)
       if username == 'admin'
         assert_includes @response.body, admin_user['email']
         refute_empty css_select('[id="system-menu"]')
index a698b8df3c773c4b0f66e5b4625b3f255d42780d..6f74955cd1c8d0940c979b70867a6cbbfda5aacb 100644 (file)
@@ -67,6 +67,6 @@ class WorkUnitsControllerTest < ActionController::TestCase
     encoded_params = Hash[params.map { |k,v|
                             [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
                           }]
-    get :index, encoded_params, session_for(:active)
+    get :index, params: encoded_params, session: session_for(:active)
   end
 end
index d73809a26688df7f09ef0e1793aecef431512c30..0877e59328dbe6454460b7bbf09f5cd1d739b1d9 100644 (file)
@@ -6,7 +6,7 @@ require 'test_helper'
 
 class WorkflowsControllerTest < ActionController::TestCase
   test "index" do
-    get :index, {}, session_for(:active)
+    get :index, params: {}, session: session_for(:active)
     assert_response :success
     assert_includes @response.body, 'Valid workflow with no definition yaml'
   end
@@ -16,7 +16,7 @@ class WorkflowsControllerTest < ActionController::TestCase
 
     wf = api_fixture('workflows')['workflow_with_input_specifications']
 
-    get :show, {id: wf['uuid']}, session_for(:active)
+    get :show, params: {id: wf['uuid']}, session: session_for(:active)
     assert_response :success
 
     assert_includes @response.body, "a short label for this parameter (optional)"
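Every hunk in these controller tests makes the same mechanical change: Rails 5 replaces the positional params/session hashes (and the separate @xhr :verb@ helper) of @ActionController::TestCase@ with keyword arguments. A minimal before/after sketch of the pattern, using the @session_for@ helper from these tests (@uuid@ and @uuid_list@ stand in for fixture values):

<pre>
# Rails 4 style (removed):
#   get :show, {id: uuid}, session_for(:active)
#   xhr :get, :choose, {format: :json}, session_for(:active)

# Rails 5 style (added):
get :show, params: {id: uuid}, session: session_for(:active)
get :choose, params: {format: :json}, session: session_for(:active), xhr: true
post :share_with,
     params: {id: uuid, uuids: uuid_list, format: "json"},
     session: session_for(:active)
</pre>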
diff --git a/apps/workbench/test/functional/.gitkeep b/apps/workbench/test/functional/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
index 454cb2c3689bc33373dc14a4955f2f39ba3c3ab4..e31f196498d47a3963d108d01873ba1799c323d7 100644 (file)
@@ -71,7 +71,7 @@ module ShareObjectHelper
   end
 
   def user_can_manage(user_sym, fixture)
-    get(:show, {id: fixture["uuid"]}, session_for(user_sym))
+    get(:show, params: {id: fixture["uuid"]}, session: session_for(user_sym))
     is_manager = assigns(:user_is_manager)
     assert_not_nil(is_manager, "user_is_manager flag not set")
     if not is_manager
index b3f704cdd98feb3be31326b2cbaf6451c5aa7925..51c3720985a85e0fbfd3ebd60ff1473134af78ab 100644 (file)
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'integration_helper'
+require 'config_validators'
 
 class ApplicationLayoutTest < ActionDispatch::IntegrationTest
   # These tests don't do state-changing API calls. Save some time by
index 47e385a4e27830b4ab5293c9996a933f1496c3f6..801609fbb6747f74ee52d0e0bc38b9d7a57ae12b 100644 (file)
@@ -140,9 +140,9 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
     click_link 'API response'
     api_response = JSON.parse(find('div#advanced_api_response pre').text)
     input_params = api_response['components']['part-one']['script_parameters']['input']
-    assert_equal input_params['value'], collection['portable_data_hash']
-    assert_equal input_params['selection_name'], collection['name']
-    assert_equal input_params['selection_uuid'], collection['uuid']
+    assert_equal collection['portable_data_hash'], input_params['value']
+    assert_equal collection['name'], input_params['selection_name']
+    assert_equal collection['uuid'], input_params['selection_uuid']
 
     # "Run" button is now enabled
     page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
@@ -445,13 +445,13 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
 
     api_response = JSON.parse(find('div#advanced_api_response pre').text)
     input_params = api_response['components']['part-one']['script_parameters']['input']
-    assert_equal(input_params['selection_uuid'], collection['uuid'], "Not found expected input param uuid")
+    assert_equal(collection['uuid'], input_params['selection_uuid'], "Not found expected input param uuid")
     if choose_file
-      assert_equal(input_params['value'], collection['portable_data_hash']+'/foo', "Not found expected input file param value")
-      assert_equal(input_params['selection_name'], collection['name']+'/foo', "Not found expected input file param name")
+      assert_equal(collection['portable_data_hash']+'/foo', input_params['value'], "Not found expected input file param value")
+      assert_equal(collection['name']+'/foo', input_params['selection_name'], "Not found expected input file param name")
     else
-      assert_equal(input_params['value'], collection['portable_data_hash'], "Not found expected input param value")
-      assert_equal(input_params['selection_name'], collection['name'], "Not found expected input selection name")
+      assert_equal(collection['portable_data_hash'], input_params['value'], "Not found expected input param value")
+      assert_equal(collection['name'], input_params['selection_name'], "Not found expected input selection name")
     end
 
     # "Run" button present and enabled
diff --git a/apps/workbench/test/mailers/.gitkeep b/apps/workbench/test/mailers/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/models/.gitkeep b/apps/workbench/test/models/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
index 8435eb4b7c6331f54483a3b363d1af67f1c7521b..bbd733bb47d98d345381763476c9dd48c5f6f883 100644 (file)
@@ -26,7 +26,7 @@ end
 
 require File.expand_path('../../config/environment', __FILE__)
 require 'rails/test_help'
-require 'mocha/mini_test'
+require 'mocha/minitest'
 
 class ActiveSupport::TestCase
   # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
index 1daf582553bf5ce0283e2cd4dfc0550753ca846e..eaf65c578dbfb3afbfbdbe3d39081ee25cdee778 100644 (file)
@@ -32,12 +32,18 @@ class WorkUnitTest < ActiveSupport::TestCase
 
       if label != nil
         assert_equal(label, wu.label)
+      elsif obj.name.nil?
+        assert_nil(wu.label)
       else
         assert_equal(obj.name, wu.label)
       end
       assert_equal(obj['uuid'], wu.uuid)
       assert_equal(state, wu.state_label)
-      assert_equal(success, wu.success?)
+      if success.nil?
+        assert_nil(wu.success?)
+      else
+        assert_equal(success, wu.success?)
+      end
       assert_equal(progress, wu.progress)
 
       assert_equal(num_children, wu.children.size)
@@ -74,7 +80,11 @@ class WorkUnitTest < ActiveSupport::TestCase
       if walltime
         assert_equal true, (wu.walltime >= walltime)
       else
-        assert_equal walltime, wu.walltime
+        if walltime.nil?
+          assert_nil wu.walltime
+        else
+          assert_equal walltime, wu.walltime
+        end
       end
 
       if cputime
@@ -85,6 +95,8 @@ class WorkUnitTest < ActiveSupport::TestCase
 
       if queuedtime
         assert_equal true, (wu.queuedtime >= queuedtime)
+      elsif queuedtime.nil?
+        assert_nil wu.queuedtime
       else
         assert_equal queuedtime, wu.queuedtime
       end
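The WorkUnitTest changes above all guard against passing an expected @nil@ to @assert_equal@, which newer Minitest versions deprecate in favor of @assert_nil@. The guard pattern, sketched with generic @expected@/@actual@ placeholders:

<pre>
if expected.nil?
  assert_nil actual
else
  assert_equal expected, actual
end
</pre>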
index 6d11ea864ccf30496123cae696e09a3246d1c5be..82bc9898aa87c350b38774db6db349294330bc9f 100644 (file)
@@ -19,7 +19,7 @@ setup_before_nginx_restart() {
   # initialize git_internal_dir
   # usually /var/lib/arvados/internal.git (set in application.default.yml )
   if [ "$APPLICATION_READY" = "1" ]; then
-      GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:check 2>&1 | grep git_internal_dir | awk '{ print $2 }')
+      GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:dump 2>&1 | grep GitInternalDir | awk '{ print $2 }' |tr -d '"')
       if [ ! -e "$GIT_INTERNAL_DIR" ]; then
         run_and_report "Creating git_internal_dir '$GIT_INTERNAL_DIR'" \
           mkdir -p "$GIT_INTERNAL_DIR"
index 01a6a06c14afffa2806673a3c7ac0f98d8009ab5..1aa3e3cfd1147ebe15b0c041637a055b015f3c93 100755 (executable)
@@ -11,7 +11,7 @@
 LICENSE_PACKAGE_TS=20151208015500
 
 if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then
-    RAILS_PACKAGE_ITERATION=8
+    RAILS_PACKAGE_ITERATION=1
 else
     RAILS_PACKAGE_ITERATION="$ARVADOS_BUILDING_ITERATION"
 fi
@@ -638,10 +638,13 @@ fpm_build_virtualenv () {
     done
   fi
 
-  # the libpam module should place this file in the historically correct place
-  # so as not to break backwards compatibility
-  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/python2.7/dist/libpam-arvados/lib/security/libpam_arvados.py" ]]; then
-    COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/data/lib/security/libpam_arvados.py=/usr/data/lib/security/")
+  # the libpam module should install a few files in the locations expected
+  # by the PAM subsystem
+  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/$PYTHON_PKG/lib/security/libpam_arvados.py" ]]; then
+    COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/lib/security/libpam_arvados.py=/usr/lib/security/")
+  fi
+  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/$PYTHON_PKG/share/pam-configs/arvados" ]]; then
+    COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/share/pam-configs/arvados=/usr/share/pam-configs/")
   fi
 
   # the python-arvados-cwl-runner package comes with cwltool, expose that version
index d9dd1bbdd3d093e411a479712495ecd0d9849018..1dcb2d990620154ce366ef8739e3f0947372da86 100755 (executable)
@@ -105,6 +105,7 @@ services/crunch-dispatch-slurm
 services/ws
 sdk/cli
 sdk/pam
+sdk/pam:py3
 sdk/python
 sdk/python:py3
 sdk/ruby
@@ -380,6 +381,20 @@ checkpidfile() {
     echo "${svc} pid ${pid} ok"
 }
 
+checkhealth() {
+    svc="$1"
+    port="$(cat "$WORKSPACE/tmp/${svc}.port")"
+    scheme=http
+    if [[ ${svc} =~ -ssl$ || ${svc} = wss ]]; then
+        scheme=https
+    fi
+    url="$scheme://localhost:${port}/_health/ping"
+    if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
+        echo "${url} failed"
+        return 1
+    fi
+}
+
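The new @checkhealth@ helper probes a service's standard @/_health/ping@ endpoint with a bearer token and expects a JSON body containing @"OK"@. A rough Ruby sketch of the same probe (host, port, and token are placeholder assumptions; the real script reads the port from @$WORKSPACE/tmp/<svc>.port@ and uses the test cluster's management token):

<pre>
require 'net/http'
require 'uri'

# Placeholder values; substitute the service port and management token.
uri = URI('http://localhost:9004/_health/ping')
req = Net::HTTP::Get.new(uri)
req['Authorization'] = "Bearer #{ENV.fetch('MANAGEMENT_TOKEN')}"
res = Net::HTTP.start(uri.hostname, uri.port) { |http| http.request(req) }
unless res.is_a?(Net::HTTPSuccess) && res.body.include?('"OK"')
  raise "#{uri} failed"
end
</pre>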
 checkdiscoverydoc() {
     dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
     if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
@@ -413,12 +428,15 @@ start_services() {
         && checkdiscoverydoc $ARVADOS_API_HOST \
         && python sdk/python/tests/run_test_server.py start_controller \
         && checkpidfile controller \
+        && checkhealth controller \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
         && checkpidfile keepproxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
         && checkpidfile keep-web \
+        && checkhealth keep-web \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
         && checkpidfile arv-git-httpd \
+        && checkhealth arv-git-httpd \
         && python sdk/python/tests/run_test_server.py start_ws \
         && checkpidfile ws \
         && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
@@ -989,6 +1007,7 @@ gostuff=(
     lib/cloud
     lib/cloud/azure
     lib/cloud/ec2
+    lib/config
     lib/dispatchcloud
     lib/dispatchcloud/container
     lib/dispatchcloud/scheduler
@@ -1088,17 +1107,17 @@ test_services/nodemanager_integration() {
 
 test_apps/workbench_units() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
 }
 
 test_apps/workbench_functionals() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
 }
 
 test_apps/workbench_integration() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
 }
 
 test_apps/workbench_benchmark() {
index cd15d25dda760a41c427b8bfd4b621fb43e2130a..983159382297dab0a5d95fbf1f35f440fc015720 100644 (file)
@@ -8,6 +8,7 @@ import (
        "os"
 
        "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/lib/controller"
        "git.curoverse.com/arvados.git/lib/dispatchcloud"
 )
@@ -19,6 +20,8 @@ var (
                "-version":  cmd.Version(version),
                "--version": cmd.Version(version),
 
+               "config-check":   config.CheckCommand,
+               "config-dump":    config.DumpCommand,
                "controller":     controller.Command,
                "dispatch-cloud": dispatchcloud.Command,
        })
index dedd960f883f5c768017a771f17dc54403342053..a1cdb18dcf49f154a2177012b26982ba3754af7b 100644 (file)
@@ -25,11 +25,24 @@ h3. API Server configuration
 
 To solve the problem mentioned above, the API server offers the possibility to limit the amount of log information stored on the table:
 
+<pre>
+# Attributes to suppress in events and audit logs.  Notably,
+# specifying ["manifest_text"] here typically makes the database
+# smaller and faster.
+#
+# Warning: Using any non-empty value here can have undesirable side
+# effects for any client or component that relies on event logs.
+# Use at your own risk.
+unlogged_attributes: []
+</pre>
+
+The above setting affects all logged events, regardless of how long they are kept in the database.
+
 <pre>
 # Time to keep audit logs (a row in the log table added each time an
 # Arvados object is created, modified, or deleted) in the PostgreSQL
 # database. Currently, websocket event notifications rely on audit
-# logs, so this should not be set lower than 600 (10 minutes).
+# logs, so this should not be set lower than 300 (5 minutes).
 max_audit_log_age: 1209600
 </pre>
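
For example, a configuration that suppresses manifests from the logs (as the comment above suggests) while keeping the default two-week audit window might look like this; the values are illustrative, not recommendations:

<pre>
unlogged_attributes: ["manifest_text"]
max_audit_log_age: 1209600
</pre>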
 
index b69730c930e0d5ab50ecf57a3e5d285c3dde8fdb..c6188095ea90f63ce5eb35e15737c1a60583f73d 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: admin
-title: "Migrating a user to a federated account"
+title: "Migrating users to federated accounts"
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -9,39 +9,72 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-When you use federation capabilities to connect two or more clusters that were already operating, some users might already have accounts on multiple clusters. Typically, they will want to choose a single account on one of the clusters and abandon the rest, transferring all data or permissions from their old “remote” accounts to a single “home” account.
+Before the federation capabilities described here, a user working with multiple Arvados clusters had to create a separate account on each cluster.  Because each account represents a separate "identity", permissions granted to a user on one cluster did not transfer to another cluster, even when the accounts belonged to the same person.
 
-This effect can be achieved by changing the UUIDs of the user records on the remote clusters. This should be done before the user has ever used federation features to access cluster B with cluster A credentials. Otherwise, see "managing conflicting accounts" below.
+To address this, Arvados supports "federated user accounts".  A federated user account is associated with a specific "home" cluster and can be used to access other clusters in the federation that trust the home cluster.  When a user arrives at another cluster's Workbench, they select and log in to their home cluster, and are then returned to the starting cluster, logged in with their federated user account.
 
-For example, a user might have:
-* an account A on cluster A with uuid @aaaaa-tpzed-abcdefghijklmno@, and
-* an account B on cluster B with uuid @bbbbb-tpzed-lmnopqrstuvwxyz@
+When setting up federation capabilities on existing clusters, some users might already have accounts on multiple clusters.  To give each such user a single federated identity, the user should be assigned a "home" cluster, and the accounts associated with that user on the other (non-home) clusters should be migrated to the new federated user account.  The @arv-federation-migrate@ tool assists with this.
 
-An administrator at cluster B can merge the two accounts by renaming account B to account A.
+h2. arv-federation-migrate
 
-<notextile>
-<pre><code>#!/usr/bin/env python
-import arvados
-arvados.api('v1').users().update_uuid(
-    uuid="<span class="userinput">bbbbb-tpzed-lmnopqrstuvwxyz</span>",
-    new_uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>").execute()
-</code></pre></notextile>
+The tool @arv-federation-migrate@ is part of the @arvados-python-client@ package.
 
-This should be done when the user is idle, i.e., not logged in and not running any jobs or containers.
+This tool helps an administrator with access to all clusters in a federation migrate users who have multiple accounts to a single federated account.
 
-h2. Managing conflicting accounts
+As part of migrating a user, any data or permissions associated with old user accounts will be reassigned to the federated account.
 
-If the user has already used federation capabilities to access cluster B using account A before the above migration has been done, this will have already created a database entry for account A on cluster B, and the above program will error out. To fix this, the same "update_uuid API call":../api/methods/users.html#update_uuid can be used to move the conflicting account out of the way first.
+h2. Get user report
 
-<notextile>
-<pre><code>#!/usr/bin/env python
-import arvados
-import random
-import string
-random_chars = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(15))
-arvados.api('v1').users().update_uuid(
-    uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
-    new_uuid="bbbbb-tpzed-"+random_chars).execute()
-</code></pre></notextile>
+The first step is to create @tokens.csv@, listing each cluster and an API token that can access it.  API tokens must be trusted tokens with administrator access.  This is a simple comma-separated values file and can be created in a text editor.  Example:
 
-After this is done and the migration is complete, the affected user should wait 5 minutes for the authorization cache to expire before using the remote cluster.
+_tokens.csv_
+
+<pre>
+x3982.arvadosapi.com,v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870
+x6b1s.arvadosapi.com,v2/x6b1s-gj3su-dxc87btfv5kg91z/5575d980d3ff6231bb0c692281c42a7541c59417
+</pre>
+
+Next, run @arv-federation-migrate@ with the @--tokens@ and @--report@ flags:
+
+<pre>
+$ arv-federation-migrate --tokens tokens.csv --report users.csv
+Reading tokens.csv
+Getting user list from x6b1s
+Getting user list from x3982
+Wrote users.csv
+</pre>
+
+This will produce a report of users across all clusters listed in @tokens.csv@, sorted by email address.  This file can be loaded into a text editor or spreadsheet program for ease of viewing and editing.
+
+_users.csv_
+
+<pre>
+email,user uuid,primary cluster/user
+person_a@example.com,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s
+person_b@example.com,x3982-tpzed-1vl3k7knf7qihbe,
+person_b@example.com,x6b1s-tpzed-w4nhkx2rmrhlr54,
+</pre>
+
+The third column identifies the user's home cluster.  If a user has only one account (identified by email address), the column is already filled in and there is nothing to do.  If the column is blank, more than one Arvados account is associated with the user.  Edit the file and provide the desired home cluster for each such user.  In this example, <code>person_b@example.com</code> is assigned the home cluster @x3982@.
+
+_users.csv_
+
+<pre>
+email,user uuid,primary cluster/user
+person_a@example.com,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s
+person_b@example.com,x3982-tpzed-1vl3k7knf7qihbe,x3982
+person_b@example.com,x6b1s-tpzed-w4nhkx2rmrhlr54,x3982
+</pre>
+
+h2. Migrate users
+
+To avoid disruption, advise users to log out and avoid running workflows while performing the migration.
+
+After updating @users.csv@, use the @--migrate@ option:
+
+<pre>
+$ arv-federation-migrate --tokens tokens.csv --migrate users.csv
+(person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe
+</pre>
+
+After migration, users should select their home cluster when logging into Arvados Workbench.  If a user attempts to log into a migrated user account, they will be redirected to log in with their home cluster.
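
Because @users.csv@ is a plain CSV file with the header shown above, it is easy to double-check for unassigned home clusters before running @--migrate@. A small illustrative Ruby sketch (not part of the tool):

<pre>
require 'csv'

rows = CSV.read('users.csv', headers: true)
rows.each do |row|
  if row['primary cluster/user'].to_s.strip.empty?
    puts "needs a home cluster: #{row['email']} (#{row['user uuid']})"
  end
end
</pre>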
index 09bef2a62acd18c5f2d0b02ef022248e50033956..053acb220e0b764609592b974481360eec148cd9 100644 (file)
@@ -30,7 +30,13 @@ Note to developers: Add new items at the top. Include the date, issue number, co
 TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
 {% endcomment %}
 
-h3. current master branch
+h3. v1.4.0 (2019-05-31)
+
+h4. Populating the new file_count and file_size_total columns on the collections table
+
+As part of story "#14484":https://dev.arvados.org/issues/14484, two new columns were added to the collections table in a database migration. If your installation has a large collections table, this migration may take some time. We've seen it take ~5 minutes on an installation with 250k collections, but your mileage may vary.
+
+The new columns are initialized with a zero value. To populate them, run the script <code class="userinput">populate-file-info-columns-in-collections.rb</code> from the scripts directory of the API server. This can be done out of band, ideally directly after the API server has been upgraded to v1.4.0.
 
 h4. Stricter collection manifest validation on the API server
 
index 3e94b290d54076e77a12a44097061f6ed935f79f..394aa0fdf7801c074874cbbd500c07b6f5870f5b 100644 (file)
@@ -92,12 +92,13 @@ Create the cluster configuration file @/etc/arvados/config.yml@ using the follow
 <notextile>
 <pre><code>Clusters:
   <span class="userinput">uuid_prefix</span>:
-    NodeProfiles:
-      apiserver:
-        arvados-controller:
-          Listen: ":<span class="userinput">9004</span>" # must match the "upstream controller" section of your Nginx config
+    Services:
+      Controller:
+        InternalURLs:
+          "http://localhost:<span class="userinput">9004</span>": {} # must match the "upstream controller" section of your Nginx config
+      RailsAPI:
         arvados-api-server:
-          Listen: ":<span class="userinput">8000</span>" # must match the "upstream api" section of your Nginx config
+          "http://localhost:<span class="userinput">8000</span>": {} # must match the "upstream api" section of your Nginx config
     PostgreSQL:
       ConnectionPool: 128
       Connection:
index 42c814b8791f018d51ff66622f2d6f8669655578..bc3be8f1d7e88f463d1e954245bec978a3ab967b 100644 (file)
@@ -66,22 +66,20 @@ Add or update the following portions of your cluster configuration file, @/etc/a
   <span class="userinput">uuid_prefix</span>:
     ManagementToken: xyzzy
     SystemRootToken: <span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>
-    NodeProfiles:
-      # The key "apiserver" corresponds to ARVADOS_NODE_PROFILE in environment file (see below).
-      apiserver:
-        arvados-dispatch-cloud:
-          Listen: ":9006"
     Services:
       Controller:
         ExternalURL: "https://<span class="userinput">uuid_prefix.arvadosapi.com</span>"
-    CloudVMs:
-      # BootProbeCommand is a shell command that succeeds when an instance is ready for service
-      BootProbeCommand: "sudo systemctl status docker"
+      DispatchCloud:
+        InternalURLs:
+          "http://localhost:9006": {}
+    Containers:
+      CloudVMs:
+        # BootProbeCommand is a shell command that succeeds when an instance is ready for service
+        BootProbeCommand: "sudo systemctl status docker"
 
-      <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>
+        <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>
 
-    Dispatch:
-      PrivateKey: |
+      DispatchPrivateKey: |
         -----BEGIN RSA PRIVATE KEY-----
         MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
         ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
@@ -113,18 +111,19 @@ Minimal configuration example for Amazon EC2:
 <notextile>
 <pre><code>Clusters:
   <span class="userinput">uuid_prefix</span>:
-    CloudVMs:
-      ImageID: ami-01234567890abcdef
-      Driver: ec2
-      DriverParameters:
-        AccessKeyID: EALMF21BJC7MKNF9FVVR
-        SecretAccessKey: yKJAPmoCQOMtYWzEUQ1tKTyrocTcbH60CRvGP3pM
-        SecurityGroupIDs:
-        - sg-0123abcd
-        SubnetID: subnet-0123abcd
-        Region: us-east-1
-        EBSVolumeType: gp2
-        AdminUsername: debian
+    Containers:
+      CloudVMs:
+        ImageID: ami-01234567890abcdef
+        Driver: ec2
+        DriverParameters:
+          AccessKeyID: EALMF21BJC7MKNF9FVVR
+          SecretAccessKey: yKJAPmoCQOMtYWzEUQ1tKTyrocTcbH60CRvGP3pM
+          SecurityGroupIDs:
+          - sg-0123abcd
+          SubnetID: subnet-0123abcd
+          Region: us-east-1
+          EBSVolumeType: gp2
+          AdminUsername: debian
 </code></pre>
 </notextile>
 
@@ -133,30 +132,24 @@ Minimal configuration example for Azure:
 <notextile>
 <pre><code>Clusters:
   <span class="userinput">uuid_prefix</span>:
-    CloudVMs:
-      ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
-      Driver: azure
-      DriverParameters:
-        SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-        ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-        ClientSecret: 2WyXt0XFbEtutnf2hp528t6Wk9S5bOHWkRaaWwavKQo=
-        TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-        CloudEnvironment: AzurePublicCloud
-        ResourceGroup: zzzzz
-        Location: centralus
-        Network: zzzzz
-        Subnet: zzzzz-subnet-private
-        StorageAccount: example
-        BlobContainer: vhds
-        DeleteDanglingResourcesAfter: 20s
-        AdminUsername: arvados
-</code></pre>
-</notextile>
-
-Create the host configuration file @/etc/arvados/environment@.
-
-<notextile>
-<pre><code>ARVADOS_NODE_PROFILE=apiserver
+    Containers:
+      CloudVMs:
+        ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
+        Driver: azure
+        DriverParameters:
+          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientSecret: 2WyXt0XFbEtutnf2hp528t6Wk9S5bOHWkRaaWwavKQo=
+          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          CloudEnvironment: AzurePublicCloud
+          ResourceGroup: zzzzz
+          Location: centralus
+          Network: zzzzz
+          Subnet: zzzzz-subnet-private
+          StorageAccount: example
+          BlobContainer: vhds
+          DeleteDanglingResourcesAfter: 20s
+          AdminUsername: arvados
 </code></pre>
 </notextile>
 
index 6169734768e47c538ffd7b7cc4e4b3ad36b0dffa..b900dc932ffcb105a217686dbeac67aecee9e23b 100644 (file)
@@ -19,7 +19,7 @@ If you are logged in to an Arvados VM, the @arv-mount@ utility should already be
 To use the FUSE driver elsewhere, you can install from a distribution package, PyPI, or source.
 
 {% include 'notebox_begin' %}
-The Python SDK requires Python 2.7.
+The Arvados FUSE driver requires Python 2.7.
 {% include 'notebox_end' %}
 
 h4. Option 1: Install from distribution packages
@@ -46,7 +46,7 @@ On Debian-based systems:
 
 h4. Option 2: Install with pip
 
-Run @pip-2.7 install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+Run @pip install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
 
 h4. Option 3: Install from source
 
@@ -55,7 +55,7 @@ Install the @python-setuptools@ package from your distribution.  Then run the fo
 <notextile>
 <pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
 ~$ <span class="userinput">cd arvados/services/fuse</span>
-~/arvados/services/fuse$ <span class="userinput">python2.7 setup.py install</span>
+~/arvados/services/fuse$ <span class="userinput">python setup.py install</span>
 </code></pre>
 </notextile>
 
index be824399a4c94def5b64af218fa0b93d154d739b..9c0ec475b4328eda8875b161bbb77795ddbb2d53 100644 (file)
@@ -18,17 +18,15 @@ If you are logged in to an Arvados VM, the Python SDK should be installed.
 
 To use the Python SDK elsewhere, you can install from PyPI or a distribution package.
 
-{% include 'notebox_begin' %}
-The Python SDK requires Python 2.7.
-{% include 'notebox_end' %}
+The Python SDK supports Python 2.7 and 3.4+.
 
 h3. Option 1: Install with pip
 
 This installation method is recommended to make the SDK available for use in your own Python programs. It can coexist with the system-wide installation method from a distribution package (option 2, below).
 
-Run @pip-2.7 install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
+Run @pip install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
 
-If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip-2.7 install --pre arvados-python-client@.
+If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip install --pre arvados-python-client@.
 
 h3. Option 2: Install from a distribution package
 
index ac7ff14cc2539ff7c1305fc7df393c7e36d0a795..ab14d6681eb34b30f1845dd47f4a37f3afc7aced 100644 (file)
@@ -50,8 +50,6 @@ type azureInstanceSetConfig struct {
        AdminUsername                string
 }
 
-const tagKeyInstanceSecret = "InstanceSecret"
-
 type containerWrapper interface {
        GetBlobReference(name string) *storage.Blob
        ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error)
@@ -213,7 +211,7 @@ type azureInstanceSet struct {
        logger       logrus.FieldLogger
 }
 
-func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
        azcfg := azureInstanceSetConfig{}
        err = json.Unmarshal(config, &azcfg)
        if err != nil {
@@ -352,14 +350,11 @@ func (az *azureInstanceSet) Create(
 
        name = az.namePrefix + name
 
-       timestamp := time.Now().Format(time.RFC3339Nano)
-
-       tags := make(map[string]*string)
-       tags["created-at"] = &timestamp
+       tags := map[string]*string{}
        for k, v := range newTags {
-               newstr := v
-               tags["dispatch-"+k] = &newstr
+               tags[k] = to.StringPtr(v)
        }
+       tags["created-at"] = to.StringPtr(time.Now().Format(time.RFC3339Nano))
 
        nicParameters := network.Interface{
                Location: &az.azconfig.Location,
@@ -482,26 +477,24 @@ func (az *azureInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, err
                return nil, wrapAzureError(err)
        }
 
-       instances := make([]cloud.Instance, 0)
-
+       var instances []cloud.Instance
        for ; result.NotDone(); err = result.Next() {
                if err != nil {
                        return nil, wrapAzureError(err)
                }
-               if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
-                       instances = append(instances, &azureInstance{
-                               provider: az,
-                               vm:       result.Value(),
-                               nic:      interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID]})
-               }
+               instances = append(instances, &azureInstance{
+                       provider: az,
+                       vm:       result.Value(),
+                       nic:      interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID],
+               })
        }
        return instances, nil
 }
 
 // ManageNics returns a list of Azure network interface resources.
-// Also performs garbage collection of NICs which have "namePrefix", are
-// not associated with a virtual machine and have a "create-at" time
-// more than DeleteDanglingResourcesAfter (to prevent racing and
+// Also performs garbage collection: NICs which have the "namePrefix",
+// are not associated with a virtual machine, and have a "created-at"
+// time more than DeleteDanglingResourcesAfter (to prevent racing and
 // deleting newly created NICs) in the past are deleted.
 func (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {
        az.stopWg.Add(1)
@@ -603,16 +596,12 @@ func (ai *azureInstance) SetTags(newTags cloud.InstanceTags) error {
        ai.provider.stopWg.Add(1)
        defer ai.provider.stopWg.Done()
 
-       tags := make(map[string]*string)
-
+       tags := map[string]*string{}
        for k, v := range ai.vm.Tags {
-               if !strings.HasPrefix(k, "dispatch-") {
-                       tags[k] = v
-               }
+               tags[k] = v
        }
        for k, v := range newTags {
-               newstr := v
-               tags["dispatch-"+k] = &newstr
+               tags[k] = to.StringPtr(v)
        }
 
        vmParameters := compute.VirtualMachine{
@@ -629,14 +618,10 @@ func (ai *azureInstance) SetTags(newTags cloud.InstanceTags) error {
 }
 
 func (ai *azureInstance) Tags() cloud.InstanceTags {
-       tags := make(map[string]string)
-
+       tags := cloud.InstanceTags{}
        for k, v := range ai.vm.Tags {
-               if strings.HasPrefix(k, "dispatch-") {
-                       tags[k[9:]] = *v
-               }
+               tags[k] = *v
        }
-
        return tags
 }
 
index 96bfb4fefbfd8c8c13c199a5621977776f762505..152b7e73b7a32f2af80862fcde1d4e68b43df9e1 100644 (file)
@@ -39,6 +39,7 @@ import (
        "net"
        "net/http"
        "os"
+       "strings"
        "testing"
        "time"
 
@@ -66,6 +67,8 @@ type AzureInstanceSetSuite struct{}
 
 var _ = check.Suite(&AzureInstanceSetSuite{})
 
+const testNamePrefix = "compute-test123-"
+
 type VirtualMachinesClientStub struct{}
 
 func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
@@ -141,7 +144,7 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
                        return nil, cloud.ImageID(""), cluster, err
                }
 
-               ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+               ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
                return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
        }
        ap := azureInstanceSet{
@@ -149,7 +152,7 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
                        BlobContainer: "vhds",
                },
                dispatcherID: "test123",
-               namePrefix:   "compute-test123-",
+               namePrefix:   testNamePrefix,
                logger:       logrus.StandardLogger(),
                deleteNIC:    make(chan string),
                deleteBlob:   make(chan storage.Blob),
@@ -228,7 +231,7 @@ func (*AzureInstanceSetSuite) TestDestroyInstances(c *check.C) {
        l, err := ap.Instances(nil)
        c.Assert(err, check.IsNil)
 
-       for _, i := range l {
+       for _, i := range filterInstances(c, l) {
                c.Check(i.Destroy(), check.IsNil)
        }
 }
@@ -287,17 +290,20 @@ func (*AzureInstanceSetSuite) TestSetTags(c *check.C) {
        if err != nil {
                c.Fatal("Error making provider", err)
        }
+
        l, err := ap.Instances(nil)
        c.Assert(err, check.IsNil)
-
+       l = filterInstances(c, l)
        if len(l) > 0 {
                err = l[0].SetTags(map[string]string{"foo": "bar"})
                if err != nil {
                        c.Fatal("Error setting tags", err)
                }
        }
+
        l, err = ap.Instances(nil)
        c.Assert(err, check.IsNil)
+       l = filterInstances(c, l)
 
        if len(l) > 0 {
                tg := l[0].Tags()
@@ -312,6 +318,7 @@ func (*AzureInstanceSetSuite) TestSSH(c *check.C) {
        }
        l, err := ap.Instances(nil)
        c.Assert(err, check.IsNil)
+       l = filterInstances(c, l)
 
        if len(l) > 0 {
                sshclient, err := SetupSSHClient(c, l[0])
@@ -372,3 +379,15 @@ func SetupSSHClient(c *check.C, inst cloud.Instance) (*ssh.Client, error) {
 
        return client, nil
 }
+
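+// filterInstances returns only the instances whose names start with
+// testNamePrefix, so runs against a real cloud account ignore VMs that
+// don't belong to this test suite.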
+func filterInstances(c *check.C, instances []cloud.Instance) []cloud.Instance {
+       var r []cloud.Instance
+       for _, i := range instances {
+               if !strings.HasPrefix(i.String(), testNamePrefix) {
+                       c.Logf("ignoring instance %s", i)
+                       continue
+               }
+               r = append(r, i)
+       }
+       return r
+}
index c5565d424559f0bba2841dd46df62d3af883cc19..079c32802ca4d3a038b1a395b5d56188b99a7cce 100644 (file)
@@ -13,7 +13,6 @@ import (
        "encoding/json"
        "fmt"
        "math/big"
-       "strings"
        "sync"
 
        "git.curoverse.com/arvados.git/lib/cloud"
@@ -26,9 +25,6 @@ import (
        "golang.org/x/crypto/ssh"
 )
 
-const arvadosDispatchID = "arvados-dispatch-id"
-const tagPrefix = "arvados-dispatch-tag-"
-
 // Driver is the ec2 implementation of the cloud.Driver interface.
 var Driver = cloud.DriverFunc(newEC2InstanceSet)
 
@@ -52,18 +48,18 @@ type ec2Interface interface {
 }
 
 type ec2InstanceSet struct {
-       ec2config    ec2InstanceSetConfig
-       dispatcherID cloud.InstanceSetID
-       logger       logrus.FieldLogger
-       client       ec2Interface
-       keysMtx      sync.Mutex
-       keys         map[string]string
+       ec2config     ec2InstanceSetConfig
+       instanceSetID cloud.InstanceSetID
+       logger        logrus.FieldLogger
+       client        ec2Interface
+       keysMtx       sync.Mutex
+       keys          map[string]string
 }
 
-func newEC2InstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
        instanceSet := &ec2InstanceSet{
-               dispatcherID: dispatcherID,
-               logger:       logger,
+               instanceSetID: instanceSetID,
+               logger:        logger,
        }
        err = json.Unmarshal(config, &instanceSet.ec2config)
        if err != nil {
@@ -157,19 +153,10 @@ func (instanceSet *ec2InstanceSet) Create(
        }
        instanceSet.keysMtx.Unlock()
 
-       ec2tags := []*ec2.Tag{
-               &ec2.Tag{
-                       Key:   aws.String(arvadosDispatchID),
-                       Value: aws.String(string(instanceSet.dispatcherID)),
-               },
-               &ec2.Tag{
-                       Key:   aws.String("arvados-class"),
-                       Value: aws.String("dynamic-compute"),
-               },
-       }
+       ec2tags := []*ec2.Tag{}
        for k, v := range newTags {
                ec2tags = append(ec2tags, &ec2.Tag{
-                       Key:   aws.String(tagPrefix + k),
+                       Key:   aws.String(k),
                        Value: aws.String(v),
                })
        }
@@ -191,12 +178,12 @@ func (instanceSet *ec2InstanceSet) Create(
                        }},
                DisableApiTermination:             aws.Bool(false),
                InstanceInitiatedShutdownBehavior: aws.String("terminate"),
-               UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
                TagSpecifications: []*ec2.TagSpecification{
                        &ec2.TagSpecification{
                                ResourceType: aws.String("instance"),
                                Tags:         ec2tags,
                        }},
+               UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
        }
 
        if instanceType.AddedScratch > 0 {
@@ -230,13 +217,15 @@ func (instanceSet *ec2InstanceSet) Create(
        }, nil
 }
 
-func (instanceSet *ec2InstanceSet) Instances(cloud.InstanceTags) (instances []cloud.Instance, err error) {
-       dii := &ec2.DescribeInstancesInput{
-               Filters: []*ec2.Filter{&ec2.Filter{
-                       Name:   aws.String("tag:" + arvadosDispatchID),
-                       Values: []*string{aws.String(string(instanceSet.dispatcherID))},
-               }}}
-
+func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances []cloud.Instance, err error) {
+       var filters []*ec2.Filter
+       for k, v := range tags {
+               filters = append(filters, &ec2.Filter{
+                       Name:   aws.String("tag:" + k),
+                       Values: []*string{aws.String(v)},
+               })
+       }
+       dii := &ec2.DescribeInstancesInput{Filters: filters}
        for {
                dio, err := instanceSet.client.DescribeInstances(dii)
                if err != nil {
@@ -278,15 +267,10 @@ func (inst *ec2Instance) ProviderType() string {
 }
 
 func (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {
-       ec2tags := []*ec2.Tag{
-               &ec2.Tag{
-                       Key:   aws.String(arvadosDispatchID),
-                       Value: aws.String(string(inst.provider.dispatcherID)),
-               },
-       }
+       var ec2tags []*ec2.Tag
        for k, v := range newTags {
                ec2tags = append(ec2tags, &ec2.Tag{
-                       Key:   aws.String(tagPrefix + k),
+                       Key:   aws.String(k),
                        Value: aws.String(v),
                })
        }
@@ -303,9 +287,7 @@ func (inst *ec2Instance) Tags() cloud.InstanceTags {
        tags := make(map[string]string)
 
        for _, t := range inst.instance.Tags {
-               if strings.HasPrefix(*t.Key, tagPrefix) {
-                       tags[(*t.Key)[len(tagPrefix):]] = *t.Value
-               }
+               tags[*t.Key] = *t.Value
        }
 
        return tags
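
With the hard-coded arvados-dispatch-id filter gone, callers select
instances by passing tags to Instances(), and the driver turns each tag
into a DescribeInstances filter. A hypothetical call site; the tag key
assumes the default TagKeyPrefix "Arvados" described in the config
changes below:

        package dispatchexample

        import (
                "fmt"

                "git.curoverse.com/arvados.git/lib/cloud"
        )

        func listDispatcherInstances(is cloud.InstanceSet) error {
                // Only VMs carrying this dispatcher's InstanceSetID tag are returned.
                instances, err := is.Instances(cloud.InstanceTags{
                        "ArvadosInstanceSetID": "zzzzz-zzzzz-zzzzzzzzzzzzzzz",
                })
                if err != nil {
                        return err
                }
                for _, inst := range instances {
                        fmt.Println(inst.String(), inst.Tags())
                }
                return nil
        }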
index 50ba01174e6045d3967f5e7cb7f8f668847f59bd..8b754eacac454b0993e5961b01754538c150980e 100644 (file)
@@ -121,15 +121,15 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
                        return nil, cloud.ImageID(""), cluster, err
                }
 
-               ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+               ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
                return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
        }
        ap := ec2InstanceSet{
-               ec2config:    ec2InstanceSetConfig{},
-               dispatcherID: "test123",
-               logger:       logrus.StandardLogger(),
-               client:       &ec2stub{},
-               keys:         make(map[string]string),
+               ec2config:     ec2InstanceSetConfig{},
+               instanceSetID: "test123",
+               logger:        logrus.StandardLogger(),
+               client:        &ec2stub{},
+               keys:          make(map[string]string),
        }
        return &ap, cloud.ImageID("blob"), cluster, nil
 }
index 792e737a914a1ce7d39d98c05c1a9428e77fb1ff..7410f9d0e0ea2fe61e5ac6dfedecd3ac740f7ebe 100644 (file)
@@ -36,6 +36,7 @@ type QuotaError interface {
        error
 }
 
+type SharedResourceTags map[string]string
 type InstanceSetID string
 type InstanceTags map[string]string
 type InstanceID string
@@ -145,6 +146,10 @@ type InitCommand string
 // A Driver returns an InstanceSet that uses the given InstanceSetID
 // and driver-dependent configuration parameters.
 //
+// If the driver creates cloud resources that aren't attached to a
+// single VM instance (like SSH key pairs on AWS) and that support
+// tagging, it should tag them with the provided SharedResourceTags.
+//
 // The supplied id will be of the form "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
 // where each z can be any alphanum. The returned InstanceSet must use
 // this id to tag long-lived cloud resources that it creates, and must
@@ -154,13 +159,17 @@ type InitCommand string
 // other mechanism. The tags must be visible to another instance of
 // the same driver running on a different host.
 //
-// The returned InstanceSet must ignore existing resources that are
-// visible but not tagged with the given id, except that it should log
-// a summary of such resources -- only once -- when it starts
-// up. Thus, two identically configured InstanceSets running on
-// different hosts with different ids should log about the existence
-// of each other's resources at startup, but will not interfere with
-// each other.
+// The returned InstanceSet must not modify or delete cloud resources
+// unless they are tagged with the given InstanceSetID or the caller
+// (dispatcher) calls Destroy() on them. It may log a summary of
+// untagged resources once at startup, though. Thus, two identically
+// configured InstanceSets running on different hosts with different
+// ids should log about the existence of each other's resources at
+// startup, but will not interfere with each other.
+//
+// The dispatcher always passes the InstanceSetID as a tag when
+// calling Create() and Instances(), so the driver does not need to
+// tag/filter VMs by InstanceSetID itself.
 //
 // Example:
 //
@@ -171,7 +180,7 @@ type InitCommand string
 //
 //     type exampleDriver struct {}
 //
-//     func (*exampleDriver) InstanceSet(config json.RawMessage, id InstanceSetID) (InstanceSet, error) {
+//     func (*exampleDriver) InstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
 //             var is exampleInstanceSet
 //             if err := json.Unmarshal(config, &is); err != nil {
 //                     return nil, err
@@ -182,17 +191,17 @@ type InitCommand string
 //
 //     var _ = registerCloudDriver("example", &exampleDriver{})
 type Driver interface {
-       InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+       InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
 }
 
 // DriverFunc makes a Driver using the provided function as its
 // InstanceSet method. This is similar to http.HandlerFunc.
-func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
+func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
        return driverFunc(fn)
 }
 
-type driverFunc func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+type driverFunc func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
 
-func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
-       return df(config, id, logger)
+func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error) {
+       return df(config, id, tags, logger)
 }
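
For an out-of-tree driver, adapting to the new signature means
accepting (and, where the cloud supports it, applying) the extra
SharedResourceTags argument. A sketch in the same style as the doc
comment above, where exampleInstanceSet is a hypothetical type
implementing cloud.InstanceSet:

        package example

        import (
                "encoding/json"

                "git.curoverse.com/arvados.git/lib/cloud"
                "github.com/sirupsen/logrus"
        )

        func newExampleInstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
                var is exampleInstanceSet
                if err := json.Unmarshal(config, &is); err != nil {
                        return nil, err
                }
                // Fields assumed on the hypothetical exampleInstanceSet type:
                is.instanceSetID, is.sharedTags, is.logger = id, tags, logger
                return &is, nil
        }

        var Driver = cloud.DriverFunc(newExampleInstanceSet)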
diff --git a/lib/config/cmd.go b/lib/config/cmd.go
new file mode 100644 (file)
index 0000000..41a1d7d
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "os/exec"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/ghodss/yaml"
+)
+
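+// DumpCommand implements the config-dump subcommand: it reads config
+// YAML from stdin, applies defaults, and writes the fully-expanded
+// result to stdout as YAML.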
+var DumpCommand cmd.Handler = dumpCommand{}
+
+type dumpCommand struct{}
+
+func (dumpCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var err error
+       defer func() {
+               if err != nil {
+                       fmt.Fprintf(stderr, "%s\n", err)
+               }
+       }()
+       if len(args) != 0 {
+               err = fmt.Errorf("usage: %s <config-src.yaml >config-min.yaml", prog)
+               return 2
+       }
+       log := ctxlog.New(stderr, "text", "info")
+       cfg, err := Load(stdin, log)
+       if err != nil {
+               return 1
+       }
+       out, err := yaml.Marshal(cfg)
+       if err != nil {
+               return 1
+       }
+       _, err = stdout.Write(out)
+       if err != nil {
+               return 1
+       }
+       return 0
+}
+
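+// CheckCommand implements the config-check subcommand: it loads the
+// input twice -- once ignoring deprecated entries, once applying them
+// -- and exits nonzero if the two results differ (printing the diff)
+// or if any deprecated/unknown keys produced a warning.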
+var CheckCommand cmd.Handler = checkCommand{}
+
+type checkCommand struct{}
+
+func (checkCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var err error
+       defer func() {
+               if err != nil {
+                       fmt.Fprintf(stderr, "%s\n", err)
+               }
+       }()
+       if len(args) != 0 {
+               err = fmt.Errorf("usage: %s <config-src.yaml && echo 'no changes needed'", prog)
+               return 2
+       }
+       log := &plainLogger{w: stderr}
+       buf, err := ioutil.ReadAll(stdin)
+       if err != nil {
+               return 1
+       }
+       withoutDepr, err := load(bytes.NewBuffer(buf), log, false)
+       if err != nil {
+               return 1
+       }
+       withDepr, err := load(bytes.NewBuffer(buf), nil, true)
+       if err != nil {
+               return 1
+       }
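+       // Render both configs as YAML and hand them to diff(1) as
+       // /dev/fd/3 and /dev/fd/4: each ExtraFiles[i] becomes file
+       // descriptor 3+i in the child process.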
+       cmd := exec.Command("diff", "-u", "--label", "without-deprecated-configs", "--label", "relying-on-deprecated-configs", "/dev/fd/3", "/dev/fd/4")
+       for _, obj := range []interface{}{withoutDepr, withDepr} {
+               y, _ := yaml.Marshal(obj)
+               pr, pw, err := os.Pipe()
+               if err != nil {
+                       return 1
+               }
+               defer pr.Close()
+               go func() {
+                       io.Copy(pw, bytes.NewBuffer(y))
+                       pw.Close()
+               }()
+               cmd.ExtraFiles = append(cmd.ExtraFiles, pr)
+       }
+       diff, err := cmd.CombinedOutput()
+       if bytes.HasPrefix(diff, []byte("--- ")) {
+               fmt.Fprintln(stdout, "Your configuration is relying on deprecated entries. Suggest making the following changes.")
+               stdout.Write(diff)
+               return 1
+       } else if len(diff) > 0 {
+               fmt.Fprintf(stderr, "Unexpected diff output:\n%s", diff)
+               return 1
+       } else if err != nil {
+               return 1
+       }
+       if log.used {
+               return 1
+       }
+       return 0
+}
+
+type plainLogger struct {
+       w    io.Writer
+       used bool
+}
+
+func (pl *plainLogger) Warnf(format string, args ...interface{}) {
+       pl.used = true
+       fmt.Fprintf(pl.w, format+"\n", args...)
+}
diff --git a/lib/config/cmd_test.go b/lib/config/cmd_test.go
new file mode 100644 (file)
index 0000000..e4d838f
--- /dev/null
@@ -0,0 +1,115 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "bytes"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CommandSuite{})
+
+type CommandSuite struct{}
+
+func (s *CommandSuite) TestBadArg(c *check.C) {
+       var stderr bytes.Buffer
+       code := DumpCommand.RunCommand("arvados config-dump", []string{"-badarg"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)
+       c.Check(code, check.Equals, 2)
+       c.Check(stderr.String(), check.Matches, `(?ms)usage: .*`)
+}
+
+func (s *CommandSuite) TestEmptyInput(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       code := DumpCommand.RunCommand("arvados config-dump", nil, &bytes.Buffer{}, &stdout, &stderr)
+       c.Check(code, check.Equals, 1)
+       c.Check(stderr.String(), check.Matches, `config does not define any clusters\n`)
+}
+
+func (s *CommandSuite) TestCheckNoDeprecatedKeys(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       in := `
+Clusters:
+ z1234:
+  API:
+    MaxItemsPerResponse: 1234
+`
+       code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+       c.Check(code, check.Equals, 0)
+       c.Check(stdout.String(), check.Equals, "")
+       c.Check(stderr.String(), check.Equals, "")
+}
+
+func (s *CommandSuite) TestCheckDeprecatedKeys(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       in := `
+Clusters:
+ z1234:
+  RequestLimits:
+    MaxItemsPerResponse: 1234
+`
+       code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+       c.Check(code, check.Equals, 1)
+       c.Check(stdout.String(), check.Matches, `(?ms).*API:\n\- +.*MaxItemsPerResponse: 1000\n\+ +MaxItemsPerResponse: 1234\n.*`)
+}
+
+func (s *CommandSuite) TestCheckUnknownKey(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       in := `
+Clusters:
+ z1234:
+  Bogus1: foo
+  BogusSection:
+    Bogus2: foo
+  API:
+    Bogus3:
+     Bogus4: true
+  PostgreSQL:
+    ConnectionPool:
+      {Bogus5: true}
+`
+       code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+       c.Log(stderr.String())
+       c.Check(code, check.Equals, 1)
+       c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.Bogus1\n.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.BogusSection\n.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.API.Bogus3\n.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*unexpected object in config entry: Clusters.z1234.PostgreSQL.ConnectionPool\n.*`)
+}
+
+func (s *CommandSuite) TestDumpFormatting(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       in := `
+Clusters:
+ z1234:
+  Containers:
+   CloudVMs:
+    TimeoutBooting: 600s
+  Services:
+   Controller:
+    InternalURLs:
+     http://localhost:12345: {}
+`
+       code := DumpCommand.RunCommand("arvados config-dump", nil, bytes.NewBufferString(in), &stdout, &stderr)
+       c.Check(code, check.Equals, 0)
+       c.Check(stdout.String(), check.Matches, `(?ms).*TimeoutBooting: 10m\n.*`)
+       c.Check(stdout.String(), check.Matches, `(?ms).*http://localhost:12345: {}\n.*`)
+}
+
+func (s *CommandSuite) TestDumpUnknownKey(c *check.C) {
+       var stdout, stderr bytes.Buffer
+       in := `
+Clusters:
+ z1234:
+  UnknownKey: foobar
+  ManagementToken: secret
+`
+       code := DumpCommand.RunCommand("arvados config-dump", nil, bytes.NewBufferString(in), &stdout, &stderr)
+       c.Check(code, check.Equals, 0)
+       c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.UnknownKey.*`)
+       c.Check(stdout.String(), check.Matches, `(?ms)Clusters:\n  z1234:\n.*`)
+       c.Check(stdout.String(), check.Matches, `(?ms).*\n *ManagementToken: secret\n.*`)
+       c.Check(stdout.String(), check.Not(check.Matches), `(?ms).*UnknownKey.*`)
+}
index bea6387532f0d5ac476b82858ac46171162a0e06..94cd8fcbf65d2181c918818f7ba4779408b281a0 100644 (file)
@@ -21,11 +21,7 @@ Clusters:
     Services:
       RailsAPI:
         InternalURLs: {}
-      GitHTTP:
-        InternalURLs: {}
-        ExternalURL: ""
-      Keepstore:
-        InternalURLs: {}
+        ExternalURL: "-"
       Controller:
         InternalURLs: {}
         ExternalURL: ""
@@ -34,6 +30,7 @@ Clusters:
         ExternalURL: ""
       Keepbalance:
         InternalURLs: {}
+        ExternalURL: "-"
       GitHTTP:
         InternalURLs: {}
         ExternalURL: ""
@@ -41,6 +38,7 @@ Clusters:
         ExternalURL: ""
       DispatchCloud:
         InternalURLs: {}
+        ExternalURL: "-"
       SSO:
         ExternalURL: ""
       Keepproxy:
@@ -54,6 +52,7 @@ Clusters:
         ExternalURL: ""
       Keepstore:
         InternalURLs: {}
+        ExternalURL: "-"
       Composer:
         ExternalURL: ""
       WebShell:
@@ -63,17 +62,24 @@ Clusters:
         ExternalURL: ""
       Workbench2:
         ExternalURL: ""
+      Nodemanager:
+        InternalURLs: {}
+        ExternalURL: "-"
+      Health:
+        InternalURLs: {}
+        ExternalURL: "-"
+
     PostgreSQL:
       # max concurrent connections per arvados server daemon
       ConnectionPool: 32
       Connection:
         # All parameters here are passed to the PG client library in a connection string;
         # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
-        Host: ""
-        Port: 0
-        User: ""
-        Password: ""
-        DBName: ""
+        host: ""
+        port: ""
+        user: ""
+        password: ""
+        dbname: ""
     API:
       # Maximum size (in bytes) allowed for a single API request.  This
       # limit is published in the discovery document for use by clients.
@@ -109,11 +115,18 @@ Clusters:
       # update on the permission view in the future, if not already scheduled.
       AsyncPermissionsUpdateInterval: 20
 
+      # Maximum number of concurrent outgoing requests to make while
+      # serving a single incoming multi-cluster (federated) request.
+      MaxRequestAmplification: 4
+
       # RailsSessionSecretToken is a string of alphanumeric characters
       # used by Rails to sign session tokens. IMPORTANT: This is a
       # site secret. It should be at least 50 characters.
       RailsSessionSecretToken: ""
 
+      # Maximum wall clock time to spend handling an incoming request.
+      RequestTimeout: 5m
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
      # these users will be able to self-activate.  Enable this if you want
@@ -158,8 +171,8 @@ Clusters:
       # Arvados object is created, modified, or deleted.)
       #
       # Currently, websocket event notifications rely on audit logs, so
-      # this should not be set lower than 600 (5 minutes).
-      MaxAge: 1209600
+      # this should not be set lower than 300 (5 minutes).
+      MaxAge: 336h
 
       # Maximum number of log rows to delete in a single SQL transaction.
       #
@@ -181,6 +194,14 @@ Clusters:
       UnloggedAttributes: []
 
     SystemLogs:
+
+      # Logging threshold: panic, fatal, error, warn, info, debug, or
+      # trace
+      LogLevel: info
+
+      # Logging format: json or text
+      Format: json
+
       # Maximum characters of (JSON-encoded) query parameters to include
       # in each request log entry. When params exceed this size, they will
       # be JSON-encoded, truncated to this size, and logged as
@@ -230,11 +251,11 @@ Clusters:
       # blob_signing_key note above.
       #
       # The default is 2 weeks.
-      BlobSigningTTL: 1209600
+      BlobSigningTTL: 336h
 
       # Default lifetime for ephemeral collections: 2 weeks. This must not
       # be less than blob_signature_ttl.
-      DefaultTrashLifetime: 1209600
+      DefaultTrashLifetime: 336h
 
       # Interval (seconds) between trash sweeps. During a trash sweep,
       # collections are marked as trash if their trash_at time has
@@ -267,6 +288,8 @@ Clusters:
       Repositories: /var/lib/arvados/git/repositories
 
     TLS:
+      Certificate: ""
+      Key: ""
       Insecure: false
 
     Containers:
@@ -319,6 +342,16 @@ Clusters:
       # troubleshooting purposes.
       LogReuseDecisions: false
 
+      # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
+      # (experimental) cloud dispatcher for executing containers on
+      # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+      # and ends with "\n-----END RSA PRIVATE KEY-----\n".
+      DispatchPrivateKey: none
+
+      # Maximum time to wait for workers to come up before abandoning
+      # stale locks from a previous dispatch process.
+      StaleLockTimeout: 1m
+
       Logging:
         # When you run the db:delete_old_container_logs task, it will find
         # containers that have been finished for at least this many seconds,
@@ -441,6 +474,129 @@ Clusters:
         # original job reuse behavior, and is still the default).
         ReuseJobIfOutputsDiffer: false
 
+      CloudVMs:
+        # Enable the cloud scheduler (experimental).
+        Enable: false
+
+        # Name/number of port where workers' SSH services listen.
+        SSHPort: "22"
+
+        # Interval between queue polls.
+        PollInterval: 10s
+
+        # Shell command to execute on each worker to determine whether
+        # the worker is booted and ready to run containers. It should
+        # exit zero if the worker is ready.
+        BootProbeCommand: "docker ps"
+
+        # Minimum interval between consecutive probes to a single
+        # worker.
+        ProbeInterval: 10s
+
+        # Maximum probes per second, across all workers in a pool.
+        MaxProbesPerSecond: 10
+
+        # Time before repeating SIGTERM when killing a container.
+        TimeoutSignal: 5s
+
+        # Time to give up on SIGTERM and write off the worker.
+        TimeoutTERM: 2m
+
+        # Maximum create/destroy-instance operations per second (0 =
+        # unlimited).
+        MaxCloudOpsPerSecond: 0
+
+        # Interval between cloud provider syncs/updates ("list all
+        # instances").
+        SyncInterval: 1m
+
+        # Time to leave an idle worker running (in case new containers
+        # appear in the queue that it can run) before shutting it
+        # down.
+        TimeoutIdle: 1m
+
+        # Time to wait for a new worker to boot (i.e., pass
+        # BootProbeCommand) before giving up and shutting it down.
+        TimeoutBooting: 10m
+
+        # Maximum time a worker can stay alive with no successful
+        # probes before being automatically shut down.
+        TimeoutProbe: 10m
+
+        # Time after shutting down a worker to retry the
+        # shutdown/destroy operation.
+        TimeoutShutdown: 10s
+
+        # Worker VM image ID.
+        ImageID: ami-01234567890abcdef
+
+        # Tags to add on all resources (VMs, NICs, disks) created by
+        # the container dispatcher. (Arvados's own tags --
+        # InstanceType, IdleBehavior, and InstanceSecret -- will also
+        # be added.)
+        ResourceTags:
+          SAMPLE: "tag value"
+
+        # Prefix for predefined tags used by Arvados (InstanceSetID,
+        # InstanceType, InstanceSecret, IdleBehavior). With the
+        # default value "Arvados", tags are "ArvadosInstanceSetID",
+        # "ArvadosInstanceSecret", etc.
+        #
+        # This should only be changed while no cloud resources are in
+        # use and the cloud dispatcher is not running. Otherwise,
+        # VMs/resources that were added using the old tag prefix will
+        # need to be detected and cleaned up manually.
+        TagKeyPrefix: Arvados
+
+        # Cloud driver: "azure" (Microsoft Azure) or "ec2" (Amazon AWS).
+        Driver: ec2
+
+        # Cloud-specific driver parameters.
+        DriverParameters:
+
+          # (ec2) Credentials.
+          AccessKeyID: ""
+          SecretAccessKey: ""
+
+          # (ec2) Instance configuration.
+          SecurityGroupIDs:
+            - ""
+          SubnetID: ""
+          Region: ""
+          EBSVolumeType: gp2
+          AdminUsername: debian
+
+          # (azure) Credentials.
+          SubscriptionID: ""
+          ClientID: ""
+          ClientSecret: ""
+          TenantID: ""
+
+          # (azure) Instance configuration.
+          CloudEnvironment: AzurePublicCloud
+          ResourceGroup: ""
+          Location: centralus
+          Network: ""
+          Subnet: ""
+          StorageAccount: ""
+          BlobContainer: ""
+          DeleteDanglingResourcesAfter: 20s
+          AdminUsername: arvados
+
+    InstanceTypes:
+
+      # Use the instance type name as the key (in place of "SAMPLE" in
+      # this sample entry).
+      SAMPLE:
+        # Cloud provider's instance type. Defaults to the configured type name.
+        ProviderType: ""
+        VCPUs: 1
+        RAM: 128MiB
+        IncludedScratch: 16GB
+        AddedScratch: 0
+        Price: 0.1
+        Preemptible: false
+
     Mail:
       MailchimpAPIKey: ""
       MailchimpListID: ""
@@ -451,5 +607,14 @@ Clusters:
       EmailFrom: ""
     RemoteClusters:
       "*":
+        Host: ""
+        Proxy: false
+        Scheme: https
+        Insecure: false
+        ActivateUsers: false
+      SAMPLE:
+        Host: sample.arvadosapi.com
         Proxy: false
+        Scheme: https
+        Insecure: false
         ActivateUsers: false
diff --git a/lib/config/deprecated.go b/lib/config/deprecated.go
new file mode 100644 (file)
index 0000000..8ffa2a5
--- /dev/null
@@ -0,0 +1,102 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "fmt"
+       "os"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/ghodss/yaml"
+)
+
+type deprRequestLimits struct {
+       MaxItemsPerResponse            *int
+       MultiClusterRequestConcurrency *int
+}
+
+type deprCluster struct {
+       RequestLimits deprRequestLimits
+       NodeProfiles  map[string]nodeProfile
+}
+
+type deprecatedConfig struct {
+       Clusters map[string]deprCluster
+}
+
+type nodeProfile struct {
+       Controller    systemServiceInstance `json:"arvados-controller"`
+       Health        systemServiceInstance `json:"arvados-health"`
+       Keepbalance   systemServiceInstance `json:"keep-balance"`
+       Keepproxy     systemServiceInstance `json:"keepproxy"`
+       Keepstore     systemServiceInstance `json:"keepstore"`
+       Keepweb       systemServiceInstance `json:"keep-web"`
+       Nodemanager   systemServiceInstance `json:"arvados-node-manager"`
+       DispatchCloud systemServiceInstance `json:"arvados-dispatch-cloud"`
+       RailsAPI      systemServiceInstance `json:"arvados-api-server"`
+       Websocket     systemServiceInstance `json:"arvados-ws"`
+       Workbench1    systemServiceInstance `json:"arvados-workbench"`
+}
+
+type systemServiceInstance struct {
+       Listen   string
+       TLS      bool
+       Insecure bool
+}
+
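+// applyDeprecatedConfig merges overrides from legacy config sections
+// (RequestLimits, NodeProfiles) into the corresponding entries in cfg.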
+func applyDeprecatedConfig(cfg *arvados.Config, configdata []byte, log logger) error {
+       var dc deprecatedConfig
+       err := yaml.Unmarshal(configdata, &dc)
+       if err != nil {
+               return err
+       }
+       hostname, err := os.Hostname()
+       if err != nil {
+               return err
+       }
+       for id, dcluster := range dc.Clusters {
+               cluster, ok := cfg.Clusters[id]
+               if !ok {
+                       return fmt.Errorf("can't load legacy config %q that is not present in current config", id)
+               }
+               for name, np := range dcluster.NodeProfiles {
+                       if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
+                               name = "localhost"
+                       } else if log != nil {
+                               log.Warnf("overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)", id, id, name, name)
+                       }
+                       applyDeprecatedNodeProfile(name, np.RailsAPI, &cluster.Services.RailsAPI)
+                       applyDeprecatedNodeProfile(name, np.Controller, &cluster.Services.Controller)
+                       applyDeprecatedNodeProfile(name, np.DispatchCloud, &cluster.Services.DispatchCloud)
+               }
+               if dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {
+                       *dst = *n
+               }
+               if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
+                       *dst = *n
+               }
+               cfg.Clusters[id] = cluster
+       }
+       return nil
+}
+
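+// applyDeprecatedNodeProfile converts one legacy NodeProfiles service
+// entry into an InternalURLs entry: e.g., with hostname "api0",
+// {Listen: ":8000", TLS: false} becomes "http://api0:8000".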
+func applyDeprecatedNodeProfile(hostname string, ssi systemServiceInstance, svc *arvados.Service) {
+       scheme := "https"
+       if !ssi.TLS {
+               scheme = "http"
+       }
+       if svc.InternalURLs == nil {
+               svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+       }
+       host := ssi.Listen
+       if host == "" {
+               return
+       }
+       if strings.HasPrefix(host, ":") {
+               host = hostname + host
+       }
+       svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
+}
diff --git a/lib/config/deprecated_test.go b/lib/config/deprecated_test.go
new file mode 100644 (file)
index 0000000..308b0cc
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "os"
+
+       check "gopkg.in/check.v1"
+)
+
+func (s *LoadSuite) TestDeprecatedNodeProfilesToServices(c *check.C) {
+       hostname, err := os.Hostname()
+       c.Assert(err, check.IsNil)
+       s.checkEquivalent(c, `
+Clusters:
+ z1111:
+  NodeProfiles:
+   "*":
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+   dispatch-host:
+    arvados-dispatch-cloud:
+     listen: ":9006"
+`, `
+Clusters:
+ z1111:
+  Services:
+   RailsAPI:
+    InternalURLs:
+     "http://localhost:8000": {}
+   Controller:
+    InternalURLs:
+     "http://localhost:9004": {}
+   DispatchCloud:
+    InternalURLs:
+     "http://dispatch-host:9006": {}
+  NodeProfiles:
+   "*":
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+   dispatch-host:
+    arvados-dispatch-cloud:
+     listen: ":9006"
+`)
+}
diff --git a/lib/config/generate.go b/lib/config/generate.go
new file mode 100644 (file)
index 0000000..c192d7b
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// +build ignore
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+)
+
+func main() {
+       err := generate()
+       if err != nil {
+               log.Fatal(err)
+       }
+}
+
+func generate() error {
+       outfn := "generated_config.go"
+       tmpfile, err := ioutil.TempFile(".", "."+outfn+".")
+       if err != nil {
+               return err
+       }
+       defer os.Remove(tmpfile.Name())
+
+       gofmt := exec.Command("gofmt", "-s")
+       gofmt.Stdout = tmpfile
+       gofmt.Stderr = os.Stderr
+       w, err := gofmt.StdinPipe()
+       if err != nil {
+               return err
+       }
+       gofmt.Start()
+
+       // copyright header: same as this file
+       cmd := exec.Command("head", "-n", "4", "generate.go")
+       cmd.Stdout = w
+       cmd.Stderr = os.Stderr
+       err = cmd.Run()
+       if err != nil {
+               return err
+       }
+
+       data, err := ioutil.ReadFile("config.default.yml")
+       if err != nil {
+               return err
+       }
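+       // A Go raw string literal cannot contain a backtick, so each
+       // backtick in the YAML is spliced in as a quoted "`" string.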
+       _, err = fmt.Fprintf(w, "package config\nvar DefaultYAML = []byte(`%s`)", bytes.Replace(data, []byte{'`'}, []byte("`+\"`\"+`"), -1))
+       if err != nil {
+               return err
+       }
+       err = w.Close()
+       if err != nil {
+               return err
+       }
+       err = gofmt.Wait()
+       if err != nil {
+               return err
+       }
+       err = tmpfile.Close()
+       if err != nil {
+               return err
+       }
+       return os.Rename(tmpfile.Name(), outfn)
+}
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
new file mode 100644 (file)
index 0000000..3492615
--- /dev/null
@@ -0,0 +1,627 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+var DefaultYAML = []byte(`# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
+#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.default.yml
+
+Clusters:
+  xxxxx:
+    SystemRootToken: ""
+
+    # Token to be included in all healthcheck requests. Disabled by default.
+    # Server expects request header of the format "Authorization: Bearer xxx"
+    ManagementToken: ""
+
+    Services:
+      RailsAPI:
+        InternalURLs: {}
+        ExternalURL: "-"
+      Controller:
+        InternalURLs: {}
+        ExternalURL: ""
+      Websocket:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepbalance:
+        InternalURLs: {}
+        ExternalURL: "-"
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      GitSSH:
+        ExternalURL: ""
+      DispatchCloud:
+        InternalURLs: {}
+        ExternalURL: "-"
+      SSO:
+        ExternalURL: ""
+      Keepproxy:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAV:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAVDownload:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+        ExternalURL: "-"
+      Composer:
+        ExternalURL: ""
+      WebShell:
+        ExternalURL: ""
+      Workbench1:
+        InternalURLs: {}
+        ExternalURL: ""
+      Workbench2:
+        ExternalURL: ""
+      Nodemanager:
+        InternalURLs: {}
+        ExternalURL: "-"
+      Health:
+        InternalURLs: {}
+        ExternalURL: "-"
+
+    PostgreSQL:
+      # max concurrent connections per arvados server daemon
+      ConnectionPool: 32
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        host: ""
+        port: ""
+        user: ""
+        password: ""
+        dbname: ""
+    API:
+      # Maximum size (in bytes) allowed for a single API request.  This
+      # limit is published in the discovery document for use by clients.
+      # Note: You must separately configure the upstream web server or
+      # proxy to actually enforce the desired maximum request size on the
+      # server side.
+      MaxRequestSize: 134217728
+
+      # Limit the number of bytes read from the database during an index
+      # request (by retrieving and returning fewer rows than would
+      # normally be returned in a single response).
+      # Note 1: This setting never reduces the number of returned rows to
+      # zero, no matter how big the first data row is.
+      # Note 2: Currently, this is only checked against a specific set of
+      # columns that tend to get large (collections.manifest_text,
+      # containers.mounts, workflows.definition). Other fields (e.g.,
+      # "properties" hashes) are not counted against this limit.
+      MaxIndexDatabaseRead: 134217728
+
+      # Maximum number of items to return when responding to APIs that
+      # can return partial result sets using limit and offset parameters
+      # (e.g., *.index, groups.contents). If a request specifies a "limit"
+      # parameter higher than this value, this value is used instead.
+      MaxItemsPerResponse: 1000
+
+      # API methods to disable. Disabled methods are not listed in the
+      # discovery document, and respond 404 to all requests.
+      # Example: ["jobs.create", "pipeline_instances.create"]
+      DisabledAPIs: []
+
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
+      # Maximum number of concurrent outgoing requests to make while
+      # serving a single incoming multi-cluster (federated) request.
+      MaxRequestAmplification: 4
+
+      # RailsSessionSecretToken is a string of alphanumeric characters
+      # used by Rails to sign session tokens. IMPORTANT: This is a
+      # site secret. It should be at least 50 characters.
+      RailsSessionSecretToken: ""
+
+      # Maximum wall clock time to spend handling an incoming request.
+      RequestTimeout: 5m
+
+    Users:
+      # Config parameters to automatically setup new users.  If enabled,
+      # these users will be able to self-activate.  Enable this if you want
+      # to run an open instance where anyone can create an account and use
+      # the system without requiring manual approval.
+      #
+      # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+      # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+      AutoSetupNewUsers: false
+      AutoSetupNewUsersWithVmUUID: ""
+      AutoSetupNewUsersWithRepository: false
+      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+      # When new_users_are_active is set to true, new users will be active
+      # immediately.  This skips the "self-activate" step which enforces
+      # user agreements.  Should only be enabled for development.
+      NewUsersAreActive: false
+
+      # The e-mail address of the user you would like to become marked as an admin
+      # user on their first login.
+      # In the default configuration, authentication happens through the Arvados SSO
+      # server, which uses OAuth2 against Google's servers, so in that case this
+      # should be an address associated with a Google account.
+      AutoAdminUserWithEmail: ""
+
+      # If auto_admin_first_user is set to true, the first user to log in when no
+      # other admin users exist will automatically become an admin user.
+      AutoAdminFirstUser: false
+
+      # Email address to notify whenever a user creates a profile for the
+      # first time
+      UserProfileNotificationAddress: ""
+      AdminNotifierEmailFrom: arvados@example.com
+      EmailSubjectPrefix: "[ARVADOS] "
+      UserNotifierEmailFrom: arvados@example.com
+      NewUserNotificationRecipients: []
+      NewInactiveUserNotificationRecipients: []
+
+    AuditLogs:
+      # Time to keep audit logs, in seconds. (An audit log is a row added
+      # to the "logs" table in the PostgreSQL database each time an
+      # Arvados object is created, modified, or deleted.)
+      #
+      # Currently, websocket event notifications rely on audit logs, so
+      # this should not be set lower than 300 (5 minutes).
+      MaxAge: 336h
+
+      # Maximum number of log rows to delete in a single SQL transaction.
+      #
+      # If max_audit_log_delete_batch is 0, log entries will never be
+      # deleted by Arvados. Cleanup can be done by an external process
+      # without affecting any Arvados system processes, as long as very
+      # recent (<5 minutes old) logs are not deleted.
+      #
+      # 100000 is a reasonable batch size for most sites.
+      MaxDeleteBatch: 0
+
+      # Attributes to suppress in events and audit logs.  Notably,
+      # specifying ["manifest_text"] here typically makes the database
+      # smaller and faster.
+      #
+      # Warning: Using any non-empty value here can have undesirable side
+      # effects for any client or component that relies on event logs.
+      # Use at your own risk.
+      UnloggedAttributes: []
+
+    SystemLogs:
+
+      # Logging threshold: panic, fatal, error, warn, info, debug, or
+      # trace
+      LogLevel: info
+
+      # Logging format: json or text
+      Format: json
+
+      # Maximum characters of (JSON-encoded) query parameters to include
+      # in each request log entry. When params exceed this size, they will
+      # be JSON-encoded, truncated to this size, and logged as
+      # params_truncated.
+      MaxRequestLogParamsSize: 2000
+
+    Collections:
+      # Allow clients to create collections by providing a manifest with
+      # unsigned data blob locators. IMPORTANT: This effectively disables
+      # access controls for data stored in Keep: a client who knows a hash
+      # can write a manifest that references the hash, pass it to
+      # collections.create (which will create a permission link), use
+      # collections.get to obtain a signature for that data locator, and
+      # use that signed locator to retrieve the data from Keep. Therefore,
+      # do not turn this on if your users expect to keep data private from
+      # one another!
+      BlobSigning: true
+
+      # blob_signing_key is a string of alphanumeric characters used to
+      # generate permission signatures for Keep locators. It must be
+      # identical to the permission key given to Keep. IMPORTANT: This is
+      # a site secret. It should be at least 50 characters.
+      #
+      # Modifying blob_signing_key will invalidate all existing
+      # signatures, which can cause programs to fail (e.g., arv-put,
+      # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
+      # no such processes are running.
+      BlobSigningKey: ""
+
+      # Default replication level for collections. This is used when a
+      # collection's replication_desired attribute is nil.
+      DefaultReplication: 2
+
+      # Lifetime (in seconds) of blob permission signatures generated by
+      # the API server. This determines how long a client can take (after
+      # retrieving a collection record) to retrieve the collection data
+      # from Keep. If the client needs more time than that (assuming the
+      # collection still has the same content and the relevant user/token
+      # still has permission) the client can retrieve the collection again
+      # to get fresh signatures.
+      #
+      # This must be exactly equal to the -blob-signature-ttl flag used by
+      # keepstore servers.  Otherwise, reading data blocks and saving
+      # collections will fail with HTTP 403 permission errors.
+      #
+      # Modifying blob_signature_ttl invalidates existing signatures; see
+      # blob_signing_key note above.
+      #
+      # The default is 2 weeks.
+      BlobSigningTTL: 336h
+
+      # Default lifetime for ephemeral collections: 2 weeks. This must not
+      # be less than blob_signature_ttl.
+      DefaultTrashLifetime: 336h
+
+      # Interval (seconds) between trash sweeps. During a trash sweep,
+      # collections are marked as trash if their trash_at time has
+      # arrived, and deleted if their delete_at time has arrived.
+      TrashSweepInterval: 60
+
+      # If true, enable collection versioning.
+      # When a collection's preserve_version field is true or the current version
+      # is older than the number of seconds defined in preserve_version_if_idle,
+      # a snapshot of the collection's previous state is created and linked to
+      # the current collection.
+      CollectionVersioning: false
+
+      #   0 = auto-create a new version on every update.
+      #  -1 = never auto-create new versions.
+      # > 0 = auto-create a new version when older than the specified number of seconds.
+      PreserveVersionIfIdle: -1
+
+    Login:
+      # These settings are provided by your OAuth2 provider (e.g.,
+      # sso-provider).
+      ProviderAppSecret: ""
+      ProviderAppID: ""
+
+    Git:
+      # Git repositories must be readable by api server, or you won't be
+      # able to submit crunch jobs. To pass the test suites, put a clone
+      # of the arvados tree in {git_repositories_dir}/arvados.git or
+      # {git_repositories_dir}/arvados/.git
+      Repositories: /var/lib/arvados/git/repositories
+
+    TLS:
+      Certificate: ""
+      Key: ""
+      Insecure: false
+
+    Containers:
+      # List of supported Docker Registry image formats that compute nodes
+      # are able to use. ` + "`" + `arv keep docker` + "`" + ` will error out if a user tries
+      # to store an image with an unsupported format. Use an empty array
+      # to skip the compatibility check (and display a warning message to
+      # that effect).
+      #
+      # Example for sites running docker < 1.10: ["v1"]
+      # Example for sites running docker >= 1.10: ["v2"]
+      # Example for disabling check: []
+      SupportedDockerImageFormats: ["v2"]
+
+      # Include details about job reuse decisions in the server log. This
+      # causes additional database queries to run, so it should not be
+      # enabled unless you expect to examine the resulting logs for
+      # troubleshooting purposes.
+      LogReuseDecisions: false
+
+      # Default value for keep_cache_ram of a container's runtime_constraints.
+      DefaultKeepCacheRAM: 268435456
+
+      # Number of times a container can be unlocked before being
+      # automatically cancelled.
+      MaxDispatchAttempts: 5
+
+      # Default value for container_count_max for container requests.  This is the
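+// plainLogger satisfies the logger interface expected by load() while
+// recording whether any warning was emitted, so checkCommand can treat
+// deprecation warnings as a failure.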
+      # number of times Arvados will create a new container to satisfy a container
+      # request.  If a container is cancelled it will retry a new container if
+      # container_count < container_count_max on any container requests associated
+      # with the cancelled container.
+      MaxRetryAttempts: 3
+
+      # The maximum number of compute nodes that can be in use simultaneously
+      # If this limit is reduced, any existing nodes with slot number >= new limit
+      # will not be counted against the new limit. In other words, the new limit
+      # won't be strictly enforced until those nodes with higher slot numbers
+      # go down.
+      MaxComputeVMs: 64
+
+      # Preemptible instance support (e.g. AWS Spot Instances)
+      # When true, child containers will get created with the preemptible
+      # scheduling parameter parameter set.
+      UsePreemptibleInstances: false
+
+      # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
+      # (experimental) cloud dispatcher for executing containers on
+      # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+      # and ends with "\n-----END RSA PRIVATE KEY-----\n".
+      DispatchPrivateKey: none
+
+      # Maximum time to wait for workers to come up before abandoning
+      # stale locks from a previous dispatch process.
+      StaleLockTimeout: 1m
+
+      Logging:
+        # When you run the db:delete_old_container_logs task, it will find
+        # containers that have been finished for at least this many seconds,
+        # and delete their stdout, stderr, arv-mount, crunch-run, and
+        # crunchstat logs from the logs table.
+        MaxAge: 720h
+
+        # These two settings control how frequently log events are flushed to the
+        # database.  Log lines are buffered until either crunch_log_bytes_per_event
+        # has been reached or crunch_log_seconds_between_events has elapsed since
+        # the last flush.
+        LogBytesPerEvent: 4096
+        LogSecondsBetweenEvents: 1
+
+        # The sample period for throttling logs, in seconds.
+        LogThrottlePeriod: 60
+
+        # Maximum number of bytes that job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleBytes: 65536
+
+        # Maximum number of lines that job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleLines: 1024
+
+        # Maximum bytes that may be logged by a single job.  Log bytes that are
+        # silenced by throttling are not counted against this total.
+        LimitLogBytesPerJob: 67108864
+
+        LogPartialLineThrottlePeriod: 5
+
+        # Container logs are written to Keep and saved in a collection,
+        # which is updated periodically while the container runs.  This
+        # value sets the interval (given in seconds) between collection
+        # updates.
+        LogUpdatePeriod: 1800
+
+        # The log collection is also updated when the specified amount of
+        # log data (given in bytes) is produced in less than one update
+        # period.
+        LogUpdateSize: 33554432
+
+      SLURM:
+        Managed:
+          # Path to dns server configuration directory
+          # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+          # files or touch restart.txt (see below).
+          DNSServerConfDir: ""
+
+          # Template file for the dns server host snippets. See
+          # unbound.template in this directory for an example. If false, do
+          # not write any config files.
+          DNSServerConfTemplate: ""
+
+          # String to write to {dns_server_conf_dir}/restart.txt (with a
+          # trailing newline) after updating local data. If false, do not
+          # open or write the restart.txt file.
+          DNSServerReloadCommand: ""
+
+          # Command to run after each DNS update. Template variables will be
+          # substituted; see the "unbound" example below. If false, do not run
+          # a command.
+          DNSServerUpdateCommand: ""
+
+          ComputeNodeDomain: ""
+          ComputeNodeNameservers:
+            - 192.168.1.1
+
+          # Hostname to assign to a compute node when it sends a "ping" and the
+          # hostname in its Node record is nil.
+          # During bootstrapping, the "ping" script is expected to notice the
+          # hostname given in the ping response, and update its unix hostname
+          # accordingly.
+          # If false, leave the hostname alone (this is appropriate if your compute
+          # nodes' hostnames are already assigned by some other mechanism).
+          #
+          # One way or another, the hostnames of your node records should agree
+          # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+          #
+          # Example for compute0000, compute0001, ....:
+          # assign_node_hostname: compute%<slot_number>04d
+          # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+          AssignNodeHostname: "compute%<slot_number>d"
+
+      JobsAPI:
+        # Enable the legacy Jobs API.  This value must be a string.
+        # 'auto' -- (default) enable the Jobs API only if it has been used before
+        #         (i.e., there are job records in the database)
+        # 'true' -- enable the Jobs API despite lack of existing records.
+        # 'false' -- disable the Jobs API despite presence of existing records.
+        Enable: 'auto'
+
+        # Git repositories must be readable by api server, or you won't be
+        # able to submit crunch jobs. To pass the test suites, put a clone
+        # of the arvados tree in {git_repositories_dir}/arvados.git or
+        # {git_repositories_dir}/arvados/.git
+        GitInternalDir: /var/lib/arvados/internal.git
+
+        # Docker image to be used when none found in runtime_constraints of a job
+        DefaultDockerImage: ""
+
+        # none or slurm_immediate
+        CrunchJobWrapper: none
+
+        # username, or false = do not set uid when running jobs.
+        CrunchJobUser: crunch
+
+        # The web service must be able to create/write this file, and
+        # crunch-job must be able to stat() it.
+        CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+        # Control job reuse behavior when two completed jobs match the
+        # search criteria and have different outputs.
+        #
+        # If true, in case of a conflict, reuse the earliest job (this is
+        # similar to container reuse behavior).
+        #
+        # If false, in case of a conflict, do not reuse any completed job,
+        # but do reuse an already-running job if available (this is the
+        # original job reuse behavior, and is still the default).
+        ReuseJobIfOutputsDiffer: false
+
+      CloudVMs:
+        # Enable the cloud scheduler (experimental).
+        Enable: false
+
+        # Name/number of port where workers' SSH services listen.
+        SSHPort: "22"
+
+        # Interval between queue polls.
+        PollInterval: 10s
+
+        # Shell command to execute on each worker to determine whether
+        # the worker is booted and ready to run containers. It should
+        # exit zero if the worker is ready.
+        BootProbeCommand: "docker ps"
+
+        # Minimum interval between consecutive probes to a single
+        # worker.
+        ProbeInterval: 10s
+
+        # Maximum probes per second, across all workers in a pool.
+        MaxProbesPerSecond: 10
+
+        # Time before repeating SIGTERM when killing a container.
+        TimeoutSignal: 5s
+
+        # Time to give up on SIGTERM and write off the worker.
+        TimeoutTERM: 2m
+
+        # Maximum create/destroy-instance operations per second (0 =
+        # unlimited).
+        MaxCloudOpsPerSecond: 0
+
+        # Interval between cloud provider syncs/updates ("list all
+        # instances").
+        SyncInterval: 1m
+
+        # Time to leave an idle worker running (in case new containers
+        # appear in the queue that it can run) before shutting it
+        # down.
+        TimeoutIdle: 1m
+
+        # Time to wait for a new worker to boot (i.e., pass
+        # BootProbeCommand) before giving up and shutting it down.
+        TimeoutBooting: 10m
+
+        # Maximum time a worker can stay alive with no successful
+        # probes before being automatically shut down.
+        TimeoutProbe: 10m
+
+        # Time after shutting down a worker to retry the
+        # shutdown/destroy operation.
+        TimeoutShutdown: 10s
+
+        # Worker VM image ID.
+        ImageID: ami-01234567890abcdef
+
+        # Tags to add on all resources (VMs, NICs, disks) created by
+        # the container dispatcher. (Arvados's own tags --
+        # InstanceSetID, InstanceType, IdleBehavior, and InstanceSecret --
+        # will also be added.)
+        ResourceTags:
+          SAMPLE: "tag value"
+
+        # Prefix for predefined tags used by Arvados (InstanceSetID,
+        # InstanceType, InstanceSecret, IdleBehavior). With the
+        # default value "Arvados", tags are "ArvadosInstanceSetID",
+        # "ArvadosInstanceSecret", etc.
+        #
+        # This should only be changed while no cloud resources are in
+        # use and the cloud dispatcher is not running. Otherwise,
+        # VMs/resources that were added using the old tag prefix will
+        # need to be detected and cleaned up manually.
+        TagKeyPrefix: Arvados
+
+        # Cloud driver: "azure" (Microsoft Azure) or "ec2" (Amazon AWS).
+        Driver: ec2
+
+        # Cloud-specific driver parameters.
+        DriverParameters:
+
+          # (ec2) Credentials.
+          AccessKeyID: ""
+          SecretAccessKey: ""
+
+          # (ec2) Instance configuration.
+          SecurityGroupIDs:
+            - ""
+          SubnetID: ""
+          Region: ""
+          EBSVolumeType: gp2
+          AdminUsername: debian
+
+          # (azure) Credentials.
+          SubscriptionID: ""
+          ClientID: ""
+          ClientSecret: ""
+          TenantID: ""
+
+          # (azure) Instance configuration.
+          CloudEnvironment: AzurePublicCloud
+          ResourceGroup: ""
+          Location: centralus
+          Network: ""
+          Subnet: ""
+          StorageAccount: ""
+          BlobContainer: ""
+          DeleteDanglingResourcesAfter: 20s
+          AdminUsername: arvados
+
+    InstanceTypes:
+
+      # Use the instance type name as the key (in place of "SAMPLE" in
+      # this sample entry).
+      SAMPLE:
+        # Cloud provider's instance type. Defaults to the configured type name.
+        ProviderType: ""
+        VCPUs: 1
+        RAM: 128MiB
+        IncludedScratch: 16GB
+        AddedScratch: 0
+        Price: 0.1
+        Preemptible: false
+
+    Mail:
+      MailchimpAPIKey: ""
+      MailchimpListID: ""
+      SendUserSetupNotificationEmail: ""
+      IssueReporterEmailFrom: ""
+      IssueReporterEmailTo: ""
+      SupportEmailAddress: ""
+      EmailFrom: ""
+    RemoteClusters:
+      "*":
+        Host: ""
+        Proxy: false
+        Scheme: https
+        Insecure: false
+        ActivateUsers: false
+      SAMPLE:
+        Host: sample.arvadosapi.com
+        Proxy: false
+        Scheme: https
+        Insecure: false
+        ActivateUsers: false
+`)
diff --git a/lib/config/load.go b/lib/config/load.go
new file mode 100644 (file)
index 0000000..3ed2b99
--- /dev/null
@@ -0,0 +1,168 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "bytes"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/ghodss/yaml"
+       "github.com/imdario/mergo"
+)
+
+type logger interface {
+       Warnf(string, ...interface{})
+}
+
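+// LoadFile loads an Arvados config from the YAML file at path,
+// applying built-in defaults for every cluster ID it defines.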
+func LoadFile(path string, log logger) (*arvados.Config, error) {
+       f, err := os.Open(path)
+       if err != nil {
+               return nil, err
+       }
+       defer f.Close()
+       return Load(f, log)
+}
+
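+// Load reads a YAML config from rdr and merges it over the built-in
+// defaults, translating deprecated keys along the way.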
+func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
+       return load(rdr, log, true)
+}
+
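+// load does the work for Load; useDeprecated controls whether legacy
+// (pre-cluster-config) keys in the input are translated into their
+// current equivalents.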
+func load(rdr io.Reader, log logger, useDeprecated bool) (*arvados.Config, error) {
+       buf, err := ioutil.ReadAll(rdr)
+       if err != nil {
+               return nil, err
+       }
+
+       // Load the config into a dummy map to get the cluster ID
+       // keys, discarding the values; then set up defaults for each
+       // cluster ID; then load the real config on top of the
+       // defaults.
+       var dummy struct {
+               Clusters map[string]struct{}
+       }
+       err = yaml.Unmarshal(buf, &dummy)
+       if err != nil {
+               return nil, err
+       }
+       if len(dummy.Clusters) == 0 {
+               return nil, errors.New("config does not define any clusters")
+       }
+
+       // We can't merge deep structs here; instead, we unmarshal the
+       // default & loaded config files into generic maps, merge
+       // those, and then json-encode+decode the result into the
+       // config struct type.
+       var merged map[string]interface{}
+       for id := range dummy.Clusters {
+               var src map[string]interface{}
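+               // DefaultYAML uses the placeholder cluster ID
+               // "xxxxx"; substitute the real cluster ID so the
+               // defaults land under the right key.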
+               err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte(" xxxxx:"), []byte(" "+id+":"), -1), &src)
+               if err != nil {
+                       return nil, fmt.Errorf("loading defaults for %s: %s", id, err)
+               }
+               err = mergo.Merge(&merged, src, mergo.WithOverride)
+               if err != nil {
+                       return nil, fmt.Errorf("merging defaults for %s: %s", id, err)
+               }
+       }
+       var src map[string]interface{}
+       err = yaml.Unmarshal(buf, &src)
+       if err != nil {
+               return nil, fmt.Errorf("loading config data: %s", err)
+       }
+       logExtraKeys(log, merged, src, "")
+       removeSampleKeys(merged)
+       err = mergo.Merge(&merged, src, mergo.WithOverride)
+       if err != nil {
+               return nil, fmt.Errorf("merging config data: %s", err)
+       }
+
+       // map[string]interface{} => json => arvados.Config
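+       // Use a pipe so the encoder and decoder run concurrently,
+       // streaming the intermediate JSON instead of buffering it all
+       // in memory.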
+       var cfg arvados.Config
+       var errEnc error
+       pr, pw := io.Pipe()
+       go func() {
+               errEnc = json.NewEncoder(pw).Encode(merged)
+               pw.Close()
+       }()
+       err = json.NewDecoder(pr).Decode(&cfg)
+       if errEnc != nil {
+               err = errEnc
+       }
+       if err != nil {
+               return nil, fmt.Errorf("transcoding config data: %s", err)
+       }
+
+       if useDeprecated {
+               err = applyDeprecatedConfig(&cfg, buf, log)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Check for known mistakes
+       for id, cc := range cfg.Clusters {
+               err = checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection)
+               if err != nil {
+                       return nil, err
+               }
+       }
+       return &cfg, nil
+}
+
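+// checkKeyConflict returns an error if m contains multiple keys that
+// differ only in capitalization (e.g., "dbname" and "DBName"), which
+// almost always indicates a config mistake.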
+func checkKeyConflict(label string, m map[string]string) error {
+       saw := map[string]bool{}
+       for k := range m {
+               k = strings.ToLower(k)
+               if saw[k] {
+                       return fmt.Errorf("%s: multiple entries for %q (fix by using same capitalization as default/example file)", label, k)
+               }
+               saw[k] = true
+       }
+       return nil
+}
+
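+// removeSampleKeys recursively deletes "SAMPLE" placeholder entries
+// from the merged defaults so they don't appear in the final config.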
+func removeSampleKeys(m map[string]interface{}) {
+       delete(m, "SAMPLE")
+       for _, v := range m {
+               if v, _ := v.(map[string]interface{}); v != nil {
+                       removeSampleKeys(v)
+               }
+       }
+}
+
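+// logExtraKeys warns (case-insensitively) about each supplied config
+// key that has no counterpart in the expected defaults, recursing
+// into nested maps. Keys whose parent map has a "SAMPLE" entry are
+// checked against that entry's defaults instead.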
+func logExtraKeys(log logger, expected, supplied map[string]interface{}, prefix string) {
+       if log == nil {
+               return
+       }
+       allowed := map[string]interface{}{}
+       for k, v := range expected {
+               allowed[strings.ToLower(k)] = v
+       }
+       for k, vsupp := range supplied {
+               vexp, ok := allowed[strings.ToLower(k)]
+               if !ok && expected["SAMPLE"] != nil {
+                       vexp = expected["SAMPLE"]
+               } else if !ok {
+                       log.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
+                       continue
+               }
+               if vsupp, ok := vsupp.(map[string]interface{}); !ok {
+                       // vsupp isn't a map; if vexp is a map, the
+                       // type mismatch will be caught elsewhere when
+                       // the merged config is transcoded; see TestBadType.
+                       continue
+               } else if vexp, ok := vexp.(map[string]interface{}); !ok {
+                       log.Warnf("unexpected object in config entry: %s%s", prefix, k)
+               } else {
+                       logExtraKeys(log, vexp, vsupp, prefix+k+".")
+               }
+       }
+}
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
new file mode 100644 (file)
index 0000000..6ce81bb
--- /dev/null
@@ -0,0 +1,216 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "bytes"
+       "io"
+       "os"
+       "os/exec"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/ghodss/yaml"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&LoadSuite{})
+
+type LoadSuite struct{}
+
+func (s *LoadSuite) TestEmpty(c *check.C) {
+       cfg, err := Load(&bytes.Buffer{}, ctxlog.TestLogger(c))
+       c.Check(cfg, check.IsNil)
+       c.Assert(err, check.ErrorMatches, `config does not define any clusters`)
+}
+
+func (s *LoadSuite) TestNoConfigs(c *check.C) {
+       cfg, err := Load(bytes.NewBufferString(`Clusters: {"z1111": {}}`), ctxlog.TestLogger(c))
+       c.Assert(err, check.IsNil)
+       c.Assert(cfg.Clusters, check.HasLen, 1)
+       cc, err := cfg.GetCluster("z1111")
+       c.Assert(err, check.IsNil)
+       c.Check(cc.ClusterID, check.Equals, "z1111")
+       c.Check(cc.API.MaxRequestAmplification, check.Equals, 4)
+       c.Check(cc.API.MaxItemsPerResponse, check.Equals, 1000)
+}
+
+func (s *LoadSuite) TestSampleKeys(c *check.C) {
+       for _, yaml := range []string{
+               `{"Clusters":{"z1111":{}}}`,
+               `{"Clusters":{"z1111":{"InstanceTypes":{"Foo":{"RAM": "12345M"}}}}}`,
+       } {
+               cfg, err := Load(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
+               c.Assert(err, check.IsNil)
+               cc, err := cfg.GetCluster("z1111")
+               c.Assert(err, check.IsNil)
+               _, hasSample := cc.InstanceTypes["SAMPLE"]
+               c.Check(hasSample, check.Equals, false)
+               if strings.Contains(yaml, "Foo") {
+                       c.Check(cc.InstanceTypes["Foo"].RAM, check.Equals, arvados.ByteSize(12345000000))
+                       c.Check(cc.InstanceTypes["Foo"].Price, check.Equals, 0.0)
+               }
+       }
+}
+
+func (s *LoadSuite) TestMultipleClusters(c *check.C) {
+       cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"z1111":{},"z2222":{}}}`), ctxlog.TestLogger(c))
+       c.Assert(err, check.IsNil)
+       c1, err := cfg.GetCluster("z1111")
+       c.Assert(err, check.IsNil)
+       c.Check(c1.ClusterID, check.Equals, "z1111")
+       c2, err := cfg.GetCluster("z2222")
+       c.Assert(err, check.IsNil)
+       c.Check(c2.ClusterID, check.Equals, "z2222")
+}
+
+func (s *LoadSuite) TestDeprecatedOrUnknownWarning(c *check.C) {
+       var logbuf bytes.Buffer
+       logger := logrus.New()
+       logger.Out = &logbuf
+       _, err := Load(bytes.NewBufferString(`
+Clusters:
+  zzzzz:
+    postgresql: {}
+    BadKey: {}
+    Containers: {}
+    RemoteClusters:
+      z2222:
+        Host: z2222.arvadosapi.com
+        Proxy: true
+        BadKey: badValue
+`), logger)
+       c.Assert(err, check.IsNil)
+       logs := strings.Split(strings.TrimSuffix(logbuf.String(), "\n"), "\n")
+       for _, log := range logs {
+               c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*BadKey.*`)
+       }
+       c.Check(logs, check.HasLen, 2)
+}
+
+func (s *LoadSuite) TestNoWarningsForDumpedConfig(c *check.C) {
+       var logbuf bytes.Buffer
+       logger := logrus.New()
+       logger.Out = &logbuf
+       cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"zzzzz":{}}}`), logger)
+       c.Assert(err, check.IsNil)
+       yaml, err := yaml.Marshal(cfg)
+       c.Assert(err, check.IsNil)
+       cfgDumped, err := Load(bytes.NewBuffer(yaml), logger)
+       c.Assert(err, check.IsNil)
+       c.Check(cfg, check.DeepEquals, cfgDumped)
+       c.Check(logbuf.String(), check.Equals, "")
+}
+
+func (s *LoadSuite) TestPostgreSQLKeyConflict(c *check.C) {
+       _, err := Load(bytes.NewBufferString(`
+Clusters:
+ zzzzz:
+  postgresql:
+   connection:
+     DBName: dbname
+     Host: host
+`), ctxlog.TestLogger(c))
+       c.Check(err, check.ErrorMatches, `Clusters.zzzzz.PostgreSQL.Connection: multiple entries for "(dbname|host)".*`)
+}
+
+func (s *LoadSuite) TestBadType(c *check.C) {
+       for _, data := range []string{`
+Clusters:
+ zzzzz:
+  PostgreSQL: true
+`, `
+Clusters:
+ zzzzz:
+  PostgreSQL:
+   ConnectionPool: true
+`, `
+Clusters:
+ zzzzz:
+  PostgreSQL:
+   ConnectionPool: "foo"
+`, `
+Clusters:
+ zzzzz:
+  PostgreSQL:
+   ConnectionPool: []
+`, `
+Clusters:
+ zzzzz:
+  PostgreSQL:
+   ConnectionPool: [] # {foo: bar} isn't caught here; we rely on config-check
+`,
+       } {
+               c.Log(data)
+               v, err := Load(bytes.NewBufferString(data), ctxlog.TestLogger(c))
+               if v != nil {
+                       c.Logf("%#v", v.Clusters["zzzzz"].PostgreSQL.ConnectionPool)
+               }
+               c.Check(err, check.ErrorMatches, `.*cannot unmarshal .*PostgreSQL.*`)
+       }
+}
+
+func (s *LoadSuite) TestMovedKeys(c *check.C) {
+       s.checkEquivalent(c, `# config has old keys only
+Clusters:
+ zzzzz:
+  RequestLimits:
+   MultiClusterRequestConcurrency: 3
+   MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+  API:
+   MaxRequestAmplification: 3
+   MaxItemsPerResponse: 999
+`)
+       s.checkEquivalent(c, `# config has both old and new keys; old values win
+Clusters:
+ zzzzz:
+  RequestLimits:
+   MultiClusterRequestConcurrency: 0
+   MaxItemsPerResponse: 555
+  API:
+   MaxRequestAmplification: 3
+   MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+  API:
+   MaxRequestAmplification: 0
+   MaxItemsPerResponse: 555
+`)
+}
+
+func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
+       got, err := Load(bytes.NewBufferString(goty), ctxlog.TestLogger(c))
+       c.Assert(err, check.IsNil)
+       expected, err := Load(bytes.NewBufferString(expectedy), ctxlog.TestLogger(c))
+       c.Assert(err, check.IsNil)
+       if !c.Check(got, check.DeepEquals, expected) {
+               cmd := exec.Command("diff", "-u", "--label", "expected", "--label", "got", "/dev/fd/3", "/dev/fd/4")
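+               // Feed both YAML dumps to diff via cmd.ExtraFiles,
+               // which show up as fds 3 and 4 in the child process.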
+               for _, obj := range []interface{}{expected, got} {
+                       y, _ := yaml.Marshal(obj)
+                       pr, pw, err := os.Pipe()
+                       c.Assert(err, check.IsNil)
+                       defer pr.Close()
+                       go func() {
+                               io.Copy(pw, bytes.NewBuffer(y))
+                               pw.Close()
+                       }()
+                       cmd.ExtraFiles = append(cmd.ExtraFiles, pr)
+               }
+               diff, err := cmd.CombinedOutput()
+               c.Log(string(diff))
+               c.Check(err, check.IsNil)
+       }
+}
diff --git a/lib/config/uptodate.go b/lib/config/uptodate.go
new file mode 100644 (file)
index 0000000..71bdba7
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+//go:generate go run generate.go
diff --git a/lib/config/uptodate_test.go b/lib/config/uptodate_test.go
new file mode 100644 (file)
index 0000000..10551f8
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+       "bytes"
+       "io/ioutil"
+       "testing"
+)
+
+func TestUpToDate(t *testing.T) {
+       src := "config.default.yml"
+       srcdata, err := ioutil.ReadFile(src)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if !bytes.Equal(srcdata, DefaultYAML) {
+               t.Fatalf("content of %s differs from DefaultYAML -- you need to run 'go generate' and commit", src)
+       }
+}
index f0268091bedb58f412d4e93ba675481d99f5e3ef..4345370469d07f3d5be685b9dd2e4a0efbe1ab7b 100644 (file)
@@ -14,6 +14,6 @@ import (
 
 var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
 
-func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, _ string) service.Handler {
-       return &Handler{Cluster: cluster, NodeProfile: np}
+func newHandler(_ context.Context, cluster *arvados.Cluster, _ string) service.Handler {
+       return &Handler{Cluster: cluster}
 }
index ab49e39d12656c3f960e840f82c9f4974e59d32d..07daf2f90ef28b3199e856c93134aa5b6975fab3 100644 (file)
@@ -217,17 +217,15 @@ func fetchRemoteCollectionByPDH(
        // returned to the client.  When that happens, all
        // other outstanding requests are cancelled
        sharedContext, cancelFunc := context.WithCancel(req.Context())
+       defer cancelFunc()
+
        req = req.WithContext(sharedContext)
        wg := sync.WaitGroup{}
        pdh := m[1]
        success := make(chan *http.Response)
        errorChan := make(chan error, len(h.handler.Cluster.RemoteClusters))
 
-       // use channel as a semaphore to limit the number of concurrent
-       // requests at a time
-       sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
-
-       defer cancelFunc()
+       acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
 
        for remoteID := range h.handler.Cluster.RemoteClusters {
                if remoteID == h.handler.Cluster.ClusterID {
@@ -238,9 +236,8 @@ func fetchRemoteCollectionByPDH(
                wg.Add(1)
                go func(remote string) {
                        defer wg.Done()
-                       // blocks until it can put a value into the
-                       // channel (which has a max queue capacity)
-                       sem <- true
+                       acquire()
+                       defer release()
                        select {
                        case <-sharedContext.Done():
                                return
@@ -278,7 +275,6 @@ func fetchRemoteCollectionByPDH(
                        case success <- newResponse:
                                wasSuccess = true
                        }
-                       <-sem
                }(remoteID)
        }
        go func() {
index 9c8b1614bcdcceaa4be70bcba15fa694e26940dc..fd2fbc226e4860f7ddeb591c555f1759f3fcb7ef 100644 (file)
@@ -175,9 +175,9 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
                httpserver.Error(w, "Federated multi-object may not provide 'limit', 'offset' or 'order'.", http.StatusBadRequest)
                return true
        }
-       if expectCount > h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse() {
+       if max := h.handler.Cluster.API.MaxItemsPerResponse; expectCount > max {
                httpserver.Error(w, fmt.Sprintf("Federated multi-object request for %v objects which is more than max page size %v.",
-                       expectCount, h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse()), http.StatusBadRequest)
+                       expectCount, max), http.StatusBadRequest)
                return true
        }
        if req.Form.Get("select") != "" {
@@ -203,10 +203,7 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
 
        // Perform concurrent requests to each cluster
 
-       // use channel as a semaphore to limit the number of concurrent
-       // requests at a time
-       sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
-       defer close(sem)
+       acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
        wg := sync.WaitGroup{}
 
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
@@ -220,23 +217,20 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
                        // Nothing to query
                        continue
                }
-
-               // blocks until it can put a value into the
-               // channel (which has a max queue capacity)
-               sem <- true
+               acquire()
                wg.Add(1)
                go func(k string, v []string) {
+                       defer release()
+                       defer wg.Done()
                        rp, kn, err := h.remoteQueryUUIDs(w, req, k, v)
                        mtx.Lock()
+                       defer mtx.Unlock()
                        if err == nil {
                                completeResponses = append(completeResponses, rp...)
                                kind = kn
                        } else {
                                errors = append(errors, err)
                        }
-                       mtx.Unlock()
-                       wg.Done()
-                       <-sem
                }(k, v)
        }
        wg.Wait()
index 62916acd2ac10be14d90d4e02e2703e77949e32b..1c859cfc515d142a0289610e402e725e07bfebb1 100644 (file)
@@ -54,25 +54,22 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
        s.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)
        c.Assert(s.remoteMock.Start(), check.IsNil)
 
-       nodeProfile := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI:   arvados.SystemServiceInstance{Listen: ":1"}, // local reqs will error "connection refused"
-       }
-       s.testHandler = &Handler{Cluster: &arvados.Cluster{
+       cluster := &arvados.Cluster{
                ClusterID:  "zhome",
                PostgreSQL: integrationTestCluster().PostgreSQL,
-               NodeProfiles: map[string]arvados.NodeProfile{
-                       "*": nodeProfile,
-               },
-               RequestLimits: arvados.RequestLimits{
-                       MaxItemsPerResponse:            1000,
-                       MultiClusterRequestConcurrency: 4,
+               TLS:        arvados.TLS{Insecure: true},
+               API: arvados.API{
+                       MaxItemsPerResponse:     1000,
+                       MaxRequestAmplification: 4,
                },
-       }, NodeProfile: &nodeProfile}
+       }
+       arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
+       arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
+       s.testHandler = &Handler{Cluster: cluster}
        s.testServer = newServerFromIntegrationTestEnv(c)
        s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.log, s.testHandler))
 
-       s.testHandler.Cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+       cluster.RemoteClusters = map[string]arvados.RemoteCluster{
                "zzzzz": {
                        Host:   s.remoteServer.Addr,
                        Proxy:  true,
@@ -318,16 +315,8 @@ func (s *FederationSuite) localServiceHandler(c *check.C, h http.Handler) *https
                        Handler: h,
                },
        }
-
        c.Assert(srv.Start(), check.IsNil)
-
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: srv.Addr,
-                       TLS: false, Insecure: true}}
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
-
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "http://"+srv.Addr)
        return srv
 }
 
@@ -338,13 +327,8 @@ func (s *FederationSuite) localServiceReturns404(c *check.C) *httpserver.Server
 }
 
 func (s *FederationSuite) TestGetLocalCollection(c *check.C) {
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
-                       TLS: true, Insecure: true}}
        s.testHandler.Cluster.ClusterID = "zzzzz"
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
 
        // HTTP GET
 
@@ -416,12 +400,7 @@ func (s *FederationSuite) TestSignedLocatorPattern(c *check.C) {
 }
 
 func (s *FederationSuite) TestGetLocalCollectionByPDH(c *check.C) {
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
-                       TLS: true, Insecure: true}}
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
 
        req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
        req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
@@ -505,12 +484,7 @@ func (s *FederationSuite) TestGetCollectionByPDHErrorBadHash(c *check.C) {
 }
 
 func (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
-                       TLS: true, Insecure: true}}
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
 
        req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
        req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
@@ -526,12 +500,7 @@ func (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {
 }
 
 func (s *FederationSuite) TestSaltedTokenGetCollectionByPDHError(c *check.C) {
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
-                       TLS: true, Insecure: true}}
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
 
        req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
        req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
@@ -616,13 +585,8 @@ func (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *c
        req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
        req.Header.Set("Content-type", "application/json")
 
-       np := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
-                       TLS: true, Insecure: true}}
+       arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
        s.testHandler.Cluster.ClusterID = "zzzzz"
-       s.testHandler.Cluster.NodeProfiles["*"] = np
-       s.testHandler.NodeProfile = &np
 
        resp := s.testRequest(req)
        c.Check(resp.StatusCode, check.Equals, http.StatusOK)
@@ -850,7 +814,7 @@ func (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {
 }
 
 func (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {
-       s.testHandler.Cluster.RequestLimits.MaxItemsPerResponse = 1
+       s.testHandler.Cluster.API.MaxItemsPerResponse = 1
        req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
                url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
                        arvadostest.QueuedContainerUUID))),
index 53125ae5543b51287e5de80a8b442f2002972a86..2c3ce1d4f28d189e956cd3e120b8433214861619 100644 (file)
@@ -8,7 +8,7 @@ import (
        "context"
        "database/sql"
        "errors"
-       "net"
+       "fmt"
        "net/http"
        "net/url"
        "strings"
@@ -22,8 +22,7 @@ import (
 )
 
 type Handler struct {
-       Cluster     *arvados.Cluster
-       NodeProfile *arvados.NodeProfile
+       Cluster *arvados.Cluster
 
        setupOnce      sync.Once
        handlerStack   http.Handler
@@ -50,8 +49,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
                        req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
                }
        }
-       if h.Cluster.HTTPRequestTimeout > 0 {
-               ctx, cancel := context.WithDeadline(req.Context(), time.Now().Add(time.Duration(h.Cluster.HTTPRequestTimeout)))
+       if h.Cluster.API.RequestTimeout > 0 {
+               ctx, cancel := context.WithDeadline(req.Context(), time.Now().Add(time.Duration(h.Cluster.API.RequestTimeout)))
                req = req.WithContext(ctx)
                defer cancel()
        }
@@ -61,7 +60,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 
 func (h *Handler) CheckHealth() error {
        h.setupOnce.Do(h.setup)
-       _, _, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       _, _, err := findRailsAPI(h.Cluster)
        return err
 }
 
@@ -72,6 +71,7 @@ func (h *Handler) setup() {
        mux.Handle("/_health/", &health.Handler{
                Token:  h.Cluster.ManagementToken,
                Prefix: "/_health/",
+               Routes: health.Routes{"ping": func() error { _, err := h.db(&http.Request{}); return err }},
        })
        hs := http.NotFoundHandler()
        hs = prepend(hs, h.proxyRailsAPI)
@@ -126,7 +126,7 @@ func prepend(next http.Handler, middleware middlewareFunc) http.Handler {
 }
 
 func (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error) {
-       urlOut, insecure, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       urlOut, insecure, err := findRailsAPI(h.Cluster)
        if err != nil {
                return nil, err
        }
@@ -152,22 +152,19 @@ func (h *Handler) proxyRailsAPI(w http.ResponseWriter, req *http.Request, next h
        }
 }
 
-// For now, findRailsAPI always uses the rails API running on this
-// node.
-func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, bool, error) {
-       hostport := np.RailsAPI.Listen
-       if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
-               // ":12345" => connect to indicated port on localhost
-               hostport = "localhost" + hostport
-       } else if _, _, err := net.SplitHostPort(hostport); err == nil {
-               // "[::1]:12345" => connect to indicated address & port
-       } else {
-               return nil, false, err
+// Use a localhost entry from Services.RailsAPI.InternalURLs if one is
+// present, otherwise choose an arbitrary entry.
+func findRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
+       var best *url.URL
+       for target := range cluster.Services.RailsAPI.InternalURLs {
+               target := url.URL(target)
+               best = &target
+               if strings.HasPrefix(target.Host, "localhost:") || strings.HasPrefix(target.Host, "127.0.0.1:") || strings.HasPrefix(target.Host, "[::1]:") {
+                       break
+               }
        }
-       proto := "http"
-       if np.RailsAPI.TLS {
-               proto = "https"
+       if best == nil {
+               return nil, false, fmt.Errorf("Services.RailsAPI.InternalURLs is empty")
        }
-       url, err := url.Parse(proto + "://" + hostport)
-       return url, np.RailsAPI.Insecure, err
+       return best, cluster.TLS.Insecure, nil
 }
index 96110ea85859b05b362f849475a9d77c91919752..a1efaacddff5b2b7c52ad8fd78eb79c0500b2be8 100644 (file)
@@ -42,15 +42,11 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
        s.cluster = &arvados.Cluster{
                ClusterID:  "zzzzz",
                PostgreSQL: integrationTestCluster().PostgreSQL,
-               NodeProfiles: map[string]arvados.NodeProfile{
-                       "*": {
-                               Controller: arvados.SystemServiceInstance{Listen: ":"},
-                               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
-                       },
-               },
+               TLS:        arvados.TLS{Insecure: true},
        }
-       node := s.cluster.NodeProfiles["*"]
-       s.handler = newHandler(s.ctx, s.cluster, &node, "")
+       arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+       arvadostest.SetServiceURL(&s.cluster.Services.Controller, "http://localhost:/")
+       s.handler = newHandler(s.ctx, s.cluster, "")
 }
 
 func (s *HandlerSuite) TearDownTest(c *check.C) {
@@ -72,7 +68,7 @@ func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
 }
 
 func (s *HandlerSuite) TestRequestTimeout(c *check.C) {
-       s.cluster.HTTPRequestTimeout = arvados.Duration(time.Nanosecond)
+       s.cluster.API.RequestTimeout = arvados.Duration(time.Nanosecond)
        req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
        resp := httptest.NewRecorder()
        s.handler.ServeHTTP(resp, req)
diff --git a/lib/controller/semaphore.go b/lib/controller/semaphore.go
new file mode 100644 (file)
index 0000000..ff607bb
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
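+// semaphore returns acquire/release functions that allow at most max
+// concurrent holders. If max is zero or negative, both functions are
+// no-ops and concurrency is unlimited.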
+func semaphore(max int) (acquire, release func()) {
+       if max > 0 {
+               ch := make(chan bool, max)
+               return func() { ch <- true }, func() { <-ch }
+       } else {
+               return func() {}, func() {}
+       }
+}
index ae89c3d7ea4d073fa44885f193af138f81b85508..a398af97b21884ae896f675b1c2ab00a59ae55d4 100644 (file)
@@ -10,6 +10,7 @@ import (
        "path/filepath"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
        check "gopkg.in/check.v1"
@@ -32,23 +33,19 @@ func integrationTestCluster() *arvados.Cluster {
 func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
        log := ctxlog.TestLogger(c)
 
-       nodeProfile := arvados.NodeProfile{
-               Controller: arvados.SystemServiceInstance{Listen: ":"},
-               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
-       }
        handler := &Handler{Cluster: &arvados.Cluster{
                ClusterID:  "zzzzz",
                PostgreSQL: integrationTestCluster().PostgreSQL,
-               NodeProfiles: map[string]arvados.NodeProfile{
-                       "*": nodeProfile,
-               },
-       }, NodeProfile: &nodeProfile}
+               TLS:        arvados.TLS{Insecure: true},
+       }}
+       arvadostest.SetServiceURL(&handler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+       arvadostest.SetServiceURL(&handler.Cluster.Services.Controller, "http://localhost:/")
 
        srv := &httpserver.Server{
                Server: http.Server{
                        Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
                },
-               Addr: nodeProfile.Controller.Listen,
+               Addr: ":",
        }
        return srv
 }
index 22ceb8aebe787ae79c1274cc0c714bc39df04640..ae6ac70e9665f777069232d49eaf0dd76a66d1a2 100644 (file)
@@ -15,10 +15,10 @@ import (
 
 var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)
 
-func newHandler(ctx context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, token string) service.Handler {
+func newHandler(ctx context.Context, cluster *arvados.Cluster, token string) service.Handler {
        ac, err := arvados.NewClientFromConfig(cluster)
        if err != nil {
-               return service.ErrorHandler(ctx, cluster, np, fmt.Errorf("error initializing client from cluster config: %s", err))
+               return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
        }
        d := &dispatcher{
                Cluster:   cluster,
index 71ff9c784e958fa7927cb3ca57214593d74eecd7..bc699d92804092d8dbbc37bdcd3d8180b67e70c1 100644 (file)
@@ -95,7 +95,7 @@ func (disp *dispatcher) Close() {
 // Make a worker.Executor for the given instance.
 func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
        exr := ssh_executor.New(inst)
-       exr.SetTargetPort(disp.Cluster.CloudVMs.SSHPort)
+       exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
        exr.SetSigners(disp.sshKey)
        return exr
 }
@@ -126,8 +126,8 @@ func (disp *dispatcher) initialize() {
        disp.stop = make(chan struct{}, 1)
        disp.stopped = make(chan struct{})
 
-       if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
-               disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
+       if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Containers.DispatchPrivateKey)); err != nil {
+               disp.logger.Fatalf("error parsing configured Containers.DispatchPrivateKey: %s", err)
        } else {
                disp.sshKey = key
        }
@@ -138,7 +138,7 @@ func (disp *dispatcher) initialize() {
        }
        disp.instanceSet = instanceSet
        disp.reg = prometheus.NewRegistry()
-       disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+       disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
        disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
 
        if disp.Cluster.ManagementToken == "" {
@@ -167,11 +167,11 @@ func (disp *dispatcher) run() {
        defer disp.instanceSet.Stop()
        defer disp.pool.Stop()
 
-       staleLockTimeout := time.Duration(disp.Cluster.Dispatch.StaleLockTimeout)
+       staleLockTimeout := time.Duration(disp.Cluster.Containers.StaleLockTimeout)
        if staleLockTimeout == 0 {
                staleLockTimeout = defaultStaleLockTimeout
        }
-       pollInterval := time.Duration(disp.Cluster.Dispatch.PollInterval)
+       pollInterval := time.Duration(disp.Cluster.Containers.CloudVMs.PollInterval)
        if pollInterval <= 0 {
                pollInterval = defaultPollInterval
        }
index 00157b75c649226880898c802973e9cd03a82173..012621f12f633fe9c352e2f6bb847dadb965a59d 100644 (file)
@@ -49,23 +49,25 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
        }
 
        s.cluster = &arvados.Cluster{
-               CloudVMs: arvados.CloudVMs{
-                       Driver:               "test",
-                       SyncInterval:         arvados.Duration(10 * time.Millisecond),
-                       TimeoutIdle:          arvados.Duration(150 * time.Millisecond),
-                       TimeoutBooting:       arvados.Duration(150 * time.Millisecond),
-                       TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
-                       TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
-                       MaxCloudOpsPerSecond: 500,
-               },
-               Dispatch: arvados.Dispatch{
-                       PrivateKey:         string(dispatchprivraw),
-                       PollInterval:       arvados.Duration(5 * time.Millisecond),
-                       ProbeInterval:      arvados.Duration(5 * time.Millisecond),
+               Containers: arvados.ContainersConfig{
+                       DispatchPrivateKey: string(dispatchprivraw),
                        StaleLockTimeout:   arvados.Duration(5 * time.Millisecond),
-                       MaxProbesPerSecond: 1000,
-                       TimeoutSignal:      arvados.Duration(3 * time.Millisecond),
-                       TimeoutTERM:        arvados.Duration(20 * time.Millisecond),
+                       CloudVMs: arvados.CloudVMsConfig{
+                               Driver:               "test",
+                               SyncInterval:         arvados.Duration(10 * time.Millisecond),
+                               TimeoutIdle:          arvados.Duration(150 * time.Millisecond),
+                               TimeoutBooting:       arvados.Duration(150 * time.Millisecond),
+                               TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
+                               TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
+                               MaxCloudOpsPerSecond: 500,
+                               PollInterval:         arvados.Duration(5 * time.Millisecond),
+                               ProbeInterval:        arvados.Duration(5 * time.Millisecond),
+                               MaxProbesPerSecond:   1000,
+                               TimeoutSignal:        arvados.Duration(3 * time.Millisecond),
+                               TimeoutTERM:          arvados.Duration(20 * time.Millisecond),
+                               ResourceTags:         map[string]string{"testtag": "test value"},
+                               TagKeyPrefix:         "test:",
+                       },
                },
                InstanceTypes: arvados.InstanceTypeMap{
                        test.InstanceType(1).Name:  test.InstanceType(1),
@@ -76,16 +78,9 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
                        test.InstanceType(8).Name:  test.InstanceType(8),
                        test.InstanceType(16).Name: test.InstanceType(16),
                },
-               NodeProfiles: map[string]arvados.NodeProfile{
-                       "*": {
-                               Controller:    arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_API_HOST")},
-                               DispatchCloud: arvados.SystemServiceInstance{Listen: ":"},
-                       },
-               },
-               Services: arvados.Services{
-                       Controller: arvados.Service{ExternalURL: arvados.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}},
-               },
        }
+       arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
+       arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")
 
        arvClient, err := arvados.NewClientFromConfig(s.cluster)
        c.Check(err, check.IsNil)
@@ -242,7 +237,7 @@ func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
 
 func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
        s.cluster.ManagementToken = "abcdefgh"
-       s.cluster.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
+       s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
        drivers["test"] = s.stubDriver
        s.disp.setupOnce.Do(s.disp.initialize)
        s.disp.queue = &test.Queue{}
index eb1e48737c8b131cbb919ca71e8f6bbc377c553a..b67b5d054b57d172b940255a8318b76dd21af3b8 100644 (file)
@@ -22,17 +22,26 @@ var drivers = map[string]cloud.Driver{
 }
 
 func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
-       driver, ok := drivers[cluster.CloudVMs.Driver]
+       driver, ok := drivers[cluster.Containers.CloudVMs.Driver]
        if !ok {
-               return nil, fmt.Errorf("unsupported cloud driver %q", cluster.CloudVMs.Driver)
+               return nil, fmt.Errorf("unsupported cloud driver %q", cluster.Containers.CloudVMs.Driver)
        }
-       is, err := driver.InstanceSet(cluster.CloudVMs.DriverParameters, setID, logger)
-       if maxops := cluster.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {
-               is = &rateLimitedInstanceSet{
+       sharedResourceTags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)
+       is, err := driver.InstanceSet(cluster.Containers.CloudVMs.DriverParameters, setID, sharedResourceTags, logger)
+       if maxops := cluster.Containers.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {
+               is = rateLimitedInstanceSet{
                        InstanceSet: is,
                        ticker:      time.NewTicker(time.Second / time.Duration(maxops)),
                }
        }
+       is = defaultTaggingInstanceSet{
+               InstanceSet: is,
+               defaultTags: cloud.InstanceTags(cluster.Containers.CloudVMs.ResourceTags),
+       }
+       is = filteringInstanceSet{
+               InstanceSet: is,
+               logger:      logger,
+       }
        return is, err
 }
 
@@ -56,3 +65,51 @@ func (inst *rateLimitedInstance) Destroy() error {
        <-inst.ticker.C
        return inst.Instance.Destroy()
 }
+
+// Adds the specified defaultTags to every Create() call.
+type defaultTaggingInstanceSet struct {
+       cloud.InstanceSet
+       defaultTags cloud.InstanceTags
+}
+
+func (is defaultTaggingInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {
+       allTags := cloud.InstanceTags{}
+       for k, v := range is.defaultTags {
+               allTags[k] = v
+       }
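+       // Caller-supplied tags take precedence over the defaults.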
+       for k, v := range tags {
+               allTags[k] = v
+       }
+       return is.InstanceSet.Create(it, image, allTags, init, pk)
+}
+
+// Filters the instances returned by the wrapped InstanceSet's
+// Instances() method (in case the wrapped InstanceSet didn't do this
+// itself).
+type filteringInstanceSet struct {
+       cloud.InstanceSet
+       logger logrus.FieldLogger
+}
+
+func (is filteringInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {
+       instances, err := is.InstanceSet.Instances(tags)
+
+       skipped := 0
+       var returning []cloud.Instance
+nextInstance:
+       for _, inst := range instances {
+               instTags := inst.Tags()
+               for k, v := range tags {
+                       if instTags[k] != v {
+                               skipped++
+                               continue nextInstance
+                       }
+               }
+               returning = append(returning, inst)
+       }
+       is.logger.WithFields(logrus.Fields{
+               "returning": len(returning),
+               "skipped":   skipped,
+       }).WithError(err).Debugf("filteringInstanceSet returning instances")
+       return returning, err
+}
index 873d987327eafed2a53f6d63f0dcc17230dbeb0d..a9a5a429f3e941a0973ee394d6693f952e4e780c 100644 (file)
@@ -56,7 +56,7 @@ type StubDriver struct {
 }
 
 // InstanceSet returns a new *StubInstanceSet.
-func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
        if sd.holdCloudOps == nil {
                sd.holdCloudOps = make(chan bool)
        }
index 014ab93bfe9c7289bcd99286379a3a26bbc38b18..0ee36a96ff1d23d3c27e48679dba4b31007299f4 100644 (file)
@@ -25,6 +25,7 @@ const (
        tagKeyInstanceType   = "InstanceType"
        tagKeyIdleBehavior   = "IdleBehavior"
        tagKeyInstanceSecret = "InstanceSecret"
+       tagKeyInstanceSetID  = "InstanceSetID"
 )
 
 // An InstanceView shows a worker's current state and recent activity.
@@ -91,25 +92,27 @@ func duration(conf arvados.Duration, def time.Duration) time.Duration {
 //
 // New instances are configured and set up according to the given
 // cluster configuration.
-func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {
+func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSetID cloud.InstanceSetID, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {
        wp := &Pool{
                logger:             logger,
                arvClient:          arvClient,
+               instanceSetID:      instanceSetID,
                instanceSet:        &throttledInstanceSet{InstanceSet: instanceSet},
                newExecutor:        newExecutor,
-               bootProbeCommand:   cluster.CloudVMs.BootProbeCommand,
-               imageID:            cloud.ImageID(cluster.CloudVMs.ImageID),
+               bootProbeCommand:   cluster.Containers.CloudVMs.BootProbeCommand,
+               imageID:            cloud.ImageID(cluster.Containers.CloudVMs.ImageID),
                instanceTypes:      cluster.InstanceTypes,
-               maxProbesPerSecond: cluster.Dispatch.MaxProbesPerSecond,
-               probeInterval:      duration(cluster.Dispatch.ProbeInterval, defaultProbeInterval),
-               syncInterval:       duration(cluster.CloudVMs.SyncInterval, defaultSyncInterval),
-               timeoutIdle:        duration(cluster.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
-               timeoutBooting:     duration(cluster.CloudVMs.TimeoutBooting, defaultTimeoutBooting),
-               timeoutProbe:       duration(cluster.CloudVMs.TimeoutProbe, defaultTimeoutProbe),
-               timeoutShutdown:    duration(cluster.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
-               timeoutTERM:        duration(cluster.Dispatch.TimeoutTERM, defaultTimeoutTERM),
-               timeoutSignal:      duration(cluster.Dispatch.TimeoutSignal, defaultTimeoutSignal),
+               maxProbesPerSecond: cluster.Containers.CloudVMs.MaxProbesPerSecond,
+               probeInterval:      duration(cluster.Containers.CloudVMs.ProbeInterval, defaultProbeInterval),
+               syncInterval:       duration(cluster.Containers.CloudVMs.SyncInterval, defaultSyncInterval),
+               timeoutIdle:        duration(cluster.Containers.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
+               timeoutBooting:     duration(cluster.Containers.CloudVMs.TimeoutBooting, defaultTimeoutBooting),
+               timeoutProbe:       duration(cluster.Containers.CloudVMs.TimeoutProbe, defaultTimeoutProbe),
+               timeoutShutdown:    duration(cluster.Containers.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
+               timeoutTERM:        duration(cluster.Containers.CloudVMs.TimeoutTERM, defaultTimeoutTERM),
+               timeoutSignal:      duration(cluster.Containers.CloudVMs.TimeoutSignal, defaultTimeoutSignal),
                installPublicKey:   installPublicKey,
+               tagKeyPrefix:       cluster.Containers.CloudVMs.TagKeyPrefix,
                stop:               make(chan bool),
        }
        wp.registerMetrics(reg)
@@ -128,6 +131,7 @@ type Pool struct {
        // configuration
        logger             logrus.FieldLogger
        arvClient          *arvados.Client
+       instanceSetID      cloud.InstanceSetID
        instanceSet        *throttledInstanceSet
        newExecutor        func(cloud.Instance) Executor
        bootProbeCommand   string
@@ -143,6 +147,7 @@ type Pool struct {
        timeoutTERM        time.Duration
        timeoutSignal      time.Duration
        installPublicKey   ssh.PublicKey
+       tagKeyPrefix       string
 
        // private state
        subscribers  map[<-chan struct{}]chan<- struct{}
@@ -281,9 +286,10 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
        go func() {
                defer wp.notify()
                tags := cloud.InstanceTags{
-                       tagKeyInstanceType:   it.Name,
-                       tagKeyIdleBehavior:   string(IdleBehaviorRun),
-                       tagKeyInstanceSecret: secret,
+                       wp.tagKeyPrefix + tagKeyInstanceSetID:  string(wp.instanceSetID),
+                       wp.tagKeyPrefix + tagKeyInstanceType:   it.Name,
+                       wp.tagKeyPrefix + tagKeyIdleBehavior:   string(IdleBehaviorRun),
+                       wp.tagKeyPrefix + tagKeyInstanceSecret: secret,
                }
                initCmd := cloud.InitCommand(fmt.Sprintf("umask 0177 && echo -n %q >%s", secret, instanceSecretFilename))
                inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
@@ -338,7 +344,8 @@ func (wp *Pool) SetIdleBehavior(id cloud.InstanceID, idleBehavior IdleBehavior)
 //
 // Caller must have lock.
 func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {
-       inst = tagVerifier{inst}
+       secret := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceSecret]
+       inst = tagVerifier{inst, secret}
        id := inst.ID()
        if wkr := wp.workers[id]; wkr != nil {
                wkr.executor.SetTarget(inst)
@@ -349,7 +356,7 @@ func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*wor
        }
 
        state := StateUnknown
-       if _, ok := wp.creating[inst.Tags()[tagKeyInstanceSecret]]; ok {
+       if _, ok := wp.creating[secret]; ok {
                state = StateBooting
        }
 
@@ -359,7 +366,7 @@ func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*wor
        // process); otherwise, default to "run". After this,
        // wkr.idleBehavior is the source of truth, and will only be
        // changed via SetIdleBehavior().
-       idleBehavior := IdleBehavior(inst.Tags()[tagKeyIdleBehavior])
+       idleBehavior := IdleBehavior(inst.Tags()[wp.tagKeyPrefix+tagKeyIdleBehavior])
        if !validIdleBehavior[idleBehavior] {
                idleBehavior = IdleBehaviorRun
        }
@@ -728,7 +735,7 @@ func (wp *Pool) getInstancesAndSync() error {
        }
        wp.logger.Debug("getting instance list")
        threshold := time.Now()
-       instances, err := wp.instanceSet.Instances(cloud.InstanceTags{})
+       instances, err := wp.instanceSet.Instances(cloud.InstanceTags{wp.tagKeyPrefix + tagKeyInstanceSetID: string(wp.instanceSetID)})
        if err != nil {
                wp.instanceSet.throttleInstances.CheckRateLimitError(err, wp.logger, "list instances", wp.notify)
                return err
@@ -748,7 +755,7 @@ func (wp *Pool) sync(threshold time.Time, instances []cloud.Instance) {
        notify := false
 
        for _, inst := range instances {
-               itTag := inst.Tags()[tagKeyInstanceType]
+               itTag := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceType]
                it, ok := wp.instanceTypes[itTag]
                if !ok {
                        wp.logger.WithField("Instance", inst).Errorf("unknown InstanceType tag %q --- ignoring", itTag)
index fc33a7ab235d7a733903903219302a81c8fc44d0..4b87ce503157a6d873e5d7d13eed018fc0ad35af 100644 (file)
@@ -65,7 +65,8 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
 
        logger := ctxlog.TestLogger(c)
        driver := &test.StubDriver{}
-       is, err := driver.InstanceSet(nil, "", logger)
+       instanceSetID := cloud.InstanceSetID("test-instance-set-id")
+       is, err := driver.InstanceSet(nil, instanceSetID, nil, logger)
        c.Assert(err, check.IsNil)
 
        newExecutor := func(cloud.Instance) Executor {
@@ -76,13 +77,14 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
        }
 
        cluster := &arvados.Cluster{
-               Dispatch: arvados.Dispatch{
-                       MaxProbesPerSecond: 1000,
-                       ProbeInterval:      arvados.Duration(time.Millisecond * 10),
-               },
-               CloudVMs: arvados.CloudVMs{
-                       BootProbeCommand: "true",
-                       SyncInterval:     arvados.Duration(time.Millisecond * 10),
+               Containers: arvados.ContainersConfig{
+                       CloudVMs: arvados.CloudVMsConfig{
+                               BootProbeCommand:   "true",
+                               MaxProbesPerSecond: 1000,
+                               ProbeInterval:      arvados.Duration(time.Millisecond * 10),
+                               SyncInterval:       arvados.Duration(time.Millisecond * 10),
+                               TagKeyPrefix:       "testprefix:",
+                       },
                },
                InstanceTypes: arvados.InstanceTypeMap{
                        type1.Name: type1,
@@ -91,7 +93,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
                },
        }
 
-       pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+       pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
        notify := pool.Subscribe()
        defer pool.Unsubscribe(notify)
        pool.Create(type1)
@@ -106,13 +108,14 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
                }
        }
        // Wait for the tags to be saved to the cloud provider
+       tagKey := cluster.Containers.CloudVMs.TagKeyPrefix + tagKeyIdleBehavior
        deadline := time.Now().Add(time.Second)
        for !func() bool {
                pool.mtx.RLock()
                defer pool.mtx.RUnlock()
                for _, wkr := range pool.workers {
                        if wkr.instType == type2 {
-                               return wkr.instance.Tags()[tagKeyIdleBehavior] == string(IdleBehaviorHold)
+                               return wkr.instance.Tags()[tagKey] == string(IdleBehaviorHold)
                        }
                }
                return false
@@ -126,7 +129,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
 
        c.Log("------- starting new pool, waiting to recover state")
 
-       pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+       pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
        notify2 := pool2.Subscribe()
        defer pool2.Unsubscribe(notify2)
        waitForIdle(pool2, notify2)
@@ -144,7 +147,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
 func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
        logger := ctxlog.TestLogger(c)
        driver := test.StubDriver{HoldCloudOps: true}
-       instanceSet, err := driver.InstanceSet(nil, "", logger)
+       instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, logger)
        c.Assert(err, check.IsNil)
 
        type1 := arvados.InstanceType{Name: "a1s", ProviderType: "a1.small", VCPUs: 1, RAM: 1 * GiB, Price: .01}
index e22c85d00906fba303f7d636e41a84a3cce3c523..330071951425c1c382f8be4e53f436d758d032f6 100644 (file)
@@ -23,11 +23,11 @@ var (
 
 type tagVerifier struct {
        cloud.Instance
+       secret string
 }
 
 func (tv tagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {
-       expectSecret := tv.Instance.Tags()[tagKeyInstanceSecret]
-       if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || expectSecret == "" {
+       if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || tv.secret == "" {
                // If the wrapped instance indicates it has a way to
                // verify the key, return that decision.
                return err
@@ -49,7 +49,7 @@ func (tv tagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) er
        if err != nil {
                return err
        }
-       if stdout.String() != expectSecret {
+       if stdout.String() != tv.secret {
                return errBadInstanceSecret
        }
        return nil
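
Review sketch: the caller now resolves the (possibly prefixed) tag key and hands the secret to the verifier, keeping tagVerifier itself prefix-agnostic; the wiring in updateWorker above is simply:

    secret := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceSecret]
    inst = tagVerifier{inst, secret}
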
index 49c5057b3842e49da945d40c3950f7c2185dfcc5..03ab15176f5297b85182d3689b71f5a3f0195004 100644 (file)
@@ -455,8 +455,8 @@ func (wkr *worker) saveTags() {
        instance := wkr.instance
        tags := instance.Tags()
        update := cloud.InstanceTags{
-               tagKeyInstanceType: wkr.instType.Name,
-               tagKeyIdleBehavior: string(wkr.idleBehavior),
+               wkr.wp.tagKeyPrefix + tagKeyInstanceType: wkr.instType.Name,
+               wkr.wp.tagKeyPrefix + tagKeyIdleBehavior: string(wkr.idleBehavior),
        }
        save := false
        for k, v := range update {
index 15a2a894c5bceb89bdae4f6c5e4146d317dca083..4f9ba911cd3c112463f749738dcb70d1dfb47619 100644 (file)
@@ -25,7 +25,7 @@ func (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {
        bootTimeout := time.Minute
        probeTimeout := time.Second
 
-       is, err := (&test.StubDriver{}).InstanceSet(nil, "", logger)
+       is, err := (&test.StubDriver{}).InstanceSet(nil, "test-instance-set-id", nil, logger)
        c.Assert(err, check.IsNil)
        inst, err := is.Create(arvados.InstanceType{}, "", nil, "echo InitCommand", nil)
        c.Assert(err, check.IsNil)
index e853da943222aa2182b01f41d12ebb3cbec5193a..94021163e469fd87c6eb58dc29041ba00b95b65a 100644 (file)
@@ -10,11 +10,15 @@ import (
        "flag"
        "fmt"
        "io"
+       "io/ioutil"
+       "net"
        "net/http"
        "net/url"
        "os"
+       "strings"
 
        "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
@@ -27,7 +31,7 @@ type Handler interface {
        CheckHealth() error
 }
 
-type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler
+type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, token string) Handler
 
 type command struct {
        newHandler NewHandlerFunc
@@ -61,7 +65,6 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        flags := flag.NewFlagSet("", flag.ContinueOnError)
        flags.SetOutput(stderr)
        configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
-       nodeProfile := flags.String("node-profile", "", "`Name` of NodeProfiles config entry to use (if blank, use $ARVADOS_NODE_PROFILE or hostname reported by OS)")
        err = flags.Parse(args)
        if err == flag.ErrHelp {
                err = nil
@@ -69,7 +72,10 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        } else if err != nil {
                return 2
        }
-       cfg, err := arvados.GetConfig(*configFile)
+       // Logged warnings are discarded for now: the config template
+       // is incomplete, which causes spurious warnings about keys
+       // that are actually valid.
+       cfg, err := config.LoadFile(*configFile, ctxlog.New(ioutil.Discard, "json", "error"))
        if err != nil {
                return 1
        }
@@ -77,24 +83,15 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        if err != nil {
                return 1
        }
-       log = ctxlog.New(stderr, cluster.Logging.Format, cluster.Logging.Level).WithFields(logrus.Fields{
+       log = ctxlog.New(stderr, cluster.SystemLogs.Format, cluster.SystemLogs.LogLevel).WithFields(logrus.Fields{
                "PID": os.Getpid(),
        })
        ctx := ctxlog.Context(c.ctx, log)
 
-       profileName := *nodeProfile
-       if profileName == "" {
-               profileName = os.Getenv("ARVADOS_NODE_PROFILE")
-       }
-       profile, err := cluster.GetNodeProfile(profileName)
+       listen, err := getListenAddr(cluster.Services, c.svcName)
        if err != nil {
                return 1
        }
-       listen := profile.ServicePorts()[c.svcName]
-       if listen == "" {
-               err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
-               return 1
-       }
 
        if cluster.SystemRootToken == "" {
                log.Warn("SystemRootToken missing from cluster config, falling back to ARVADOS_API_TOKEN environment variable")
@@ -113,7 +110,7 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
                }
        }
 
-       handler := c.newHandler(ctx, cluster, profile, cluster.SystemRootToken)
+       handler := c.newHandler(ctx, cluster, cluster.SystemRootToken)
        if err = handler.CheckHealth(); err != nil {
                return 1
        }
@@ -146,3 +143,21 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
 }
 
 const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+func getListenAddr(svcs arvados.Services, prog arvados.ServiceName) (string, error) {
+       svc, ok := svcs.Map()[prog]
+       if !ok {
+               return "", fmt.Errorf("unknown service name %q", prog)
+       }
+       for url := range svc.InternalURLs {
+               if strings.HasPrefix(url.Host, "localhost:") {
+                       return url.Host, nil
+               }
+               listener, err := net.Listen("tcp", url.Host)
+               if err == nil {
+                       listener.Close()
+                       return url.Host, nil
+               }
+       }
+       return "", fmt.Errorf("configuration does not enable the %s service on this host", prog)
+}
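
A usage sketch for getListenAddr, with a hypothetical single localhost InternalURL configured for the websocket service:

    u, _ := url.Parse("http://localhost:8005")
    svcs := arvados.Services{Websocket: arvados.Service{
        InternalURLs: map[arvados.URL]arvados.ServiceInstance{
            arvados.URL(*u): {},
        },
    }}
    addr, err := getListenAddr(svcs, arvados.ServiceNameWebsocket)
    // addr == "localhost:8005", err == nil
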
index 62960dc31cb2b71a2b2aea85db0300f00a44995d..bb7c5c51da01a4074da5b1b80506b2e25fc9a25d 100644 (file)
@@ -38,7 +38,7 @@ func (*Suite) TestCommand(c *check.C) {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
 
-       cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler {
+       cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string) Handler {
                c.Check(ctx.Value("foo"), check.Equals, "bar")
                c.Check(token, check.Equals, "abcde")
                return &testHandler{ctx: ctx, healthCheck: healthCheck}
index 8955210913c291341fd27e132b86aad086a2f3d7..1ca5c5f4463b00f4afffbff0507134619f14e57c 100644 (file)
@@ -17,7 +17,7 @@ import (
 // responds 500 to all requests.  ErrorHandler itself logs the given
 // error once, and the handler logs it again for each incoming
 // request.
-func ErrorHandler(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, err error) Handler {
+func ErrorHandler(ctx context.Context, _ *arvados.Cluster, err error) Handler {
        logger := ctxlog.FromContext(ctx)
        logger.WithError(err).Error("unhealthy service")
        return errorHandler{err, logger}
index e20eb18ddacae7d0ae9f6facfad897d084dd8be4..aa3388d00bc9964eb8eb845f2210ac6eee6510de 100644 (file)
@@ -155,7 +155,7 @@ class ArvadosContainer(JobBase):
                                 vwd.mkdirs(p.target)
                             else:
                                 source, path = self.arvrunner.fs_access.get_collection(p.resolved)
-                                vwd.copy(path, p.target, source_collection=source)
+                                vwd.copy(path or ".", p.target, source_collection=source)
                         elif p.type == "CreateFile":
                             if self.arvrunner.secret_store.has_secret(p.resolved):
                                 secret_mounts["%s/%s" % (self.outdir, p.target)] = {
diff --git a/sdk/cwl/tests/15241-writable-dir-job.json b/sdk/cwl/tests/15241-writable-dir-job.json
new file mode 100644 (file)
index 0000000..f30578e
--- /dev/null
@@ -0,0 +1,6 @@
+{
+    "filesDir": {
+        "location": "keep:d7514270f356df848477718d58308cc4+94",
+        "class": "Directory"
+    }
+}
diff --git a/sdk/cwl/tests/15241-writable-dir.cwl b/sdk/cwl/tests/15241-writable-dir.cwl
new file mode 100644 (file)
index 0000000..bfd17f7
--- /dev/null
@@ -0,0 +1,20 @@
+cwlVersion: v1.0
+class: CommandLineTool
+
+requirements:
+  - class: InitialWorkDirRequirement
+    listing:
+      - entry: $(inputs.filesDir)
+        writable: true
+
+inputs:
+  filesDir:
+    type: Directory
+
+outputs:
+  results:
+    type: Directory
+    outputBinding:
+      glob: .
+
+arguments: [touch, $(inputs.filesDir.path)/blurg.txt]
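
Review note: the tool stages the input directory with writable: true and touches blurg.txt inside it, so the expected output in the integration test below lists blurg.txt alongside the files already present in the input collection.
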
index 8b8ff28c789d771ba0c2e54855beb82381ee6a83..d649c3bf67706669ee93076a2640958f3194d734 100644 (file)
     }
   tool: 13931-size.cwl
   doc: Test that size is set for files in Keep
+
+- job: 15241-writable-dir-job.json
+  output: {
+    "results": {
+        "basename": "keep:6dd5fa20622d5a7a23c9147d0927da2a+180",
+        "class": "Directory",
+        "listing": [
+            {
+                "basename": "d7514270f356df848477718d58308cc4+94",
+                "class": "Directory",
+                "listing": [
+                    {
+                        "basename": "a",
+                        "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                        "class": "File",
+                        "location": "a",
+                        "size": 0
+                    },
+                    {
+                        "basename": "blurg.txt",
+                        "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                        "class": "File",
+                        "location": "blurg.txt",
+                        "size": 0
+                    },
+                    {
+                        "basename": "c",
+                        "class": "Directory",
+                        "listing": [
+                            {
+                                "basename": "d",
+                                "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                                "class": "File",
+                                "location": "d",
+                                "size": 0
+                            }
+                        ],
+                        "location": "c"
+                    },
+                    {
+                        "basename": "b",
+                        "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                        "class": "File",
+                        "location": "b",
+                        "size": 0
+                    }
+                ],
+                "location": "d7514270f356df848477718d58308cc4+94"
+            }
+        ],
+        "location": "keep:6dd5fa20622d5a7a23c9147d0927da2a+180"
+    }
+  }
+  tool: 15241-writable-dir.cwl
+  doc: Test for writable collections
index 0750693f5f08e7d31922f700ff79968656aa3f49..b62229fbcafa517cbfb30ebf6bb71e39988753cb 100644 (file)
@@ -324,7 +324,7 @@ class TestContainer(unittest.TestCase):
         call_args, call_kwargs = runner.api.container_requests().create.call_args
 
         vwdmock.copy.assert_has_calls([mock.call('bar', 'foo', source_collection=sourcemock)])
-        vwdmock.copy.assert_has_calls([mock.call('', 'foo2', source_collection=sourcemock)])
+        vwdmock.copy.assert_has_calls([mock.call('.', 'foo2', source_collection=sourcemock)])
         vwdmock.copy.assert_has_calls([mock.call('baz/filename', 'filename', source_collection=sourcemock)])
         vwdmock.copy.assert_has_calls([mock.call('subdir', 'subdir', source_collection=sourcemock)])
 
index ad33e6e9af614b7046a52efe519b103321800f06..d2c4d9c2eaff4e0255e365ae062c478f5f0fcc3f 100644 (file)
@@ -1401,6 +1401,7 @@ class TestSubmit(unittest.TestCase):
 
     @stubs
     def test_submit_request_uuid(self, stubs):
+        stubs.api._rootDesc["remoteHosts"]["zzzzz"] = "123"
         stubs.expect_container_request_uuid = "zzzzz-xvhdp-yyyyyyyyyyyyyyy"
 
         stubs.api.container_requests().update().execute.return_value = {
index 2965d5ecb0dc8aa89da2354eea231464d9fa202f..d96bf25173a949dc0d95cb49f9ba639295c019b4 100644 (file)
@@ -9,7 +9,6 @@ import (
        "errors"
        "fmt"
        "net/url"
-       "os"
 
        "git.curoverse.com/arvados.git/sdk/go/config"
 )
@@ -51,26 +50,24 @@ func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
        }
 }
 
-type RequestLimits struct {
-       MaxItemsPerResponse            int
-       MultiClusterRequestConcurrency int
+type API struct {
+       MaxItemsPerResponse     int
+       MaxRequestAmplification int
+       RequestTimeout          Duration
 }
 
 type Cluster struct {
-       ClusterID          string `json:"-"`
-       ManagementToken    string
-       SystemRootToken    string
-       Services           Services
-       NodeProfiles       map[string]NodeProfile
-       InstanceTypes      InstanceTypeMap
-       CloudVMs           CloudVMs
-       Dispatch           Dispatch
-       HTTPRequestTimeout Duration
-       RemoteClusters     map[string]RemoteCluster
-       PostgreSQL         PostgreSQL
-       RequestLimits      RequestLimits
-       Logging            Logging
-       TLS                TLS
+       ClusterID       string `json:"-"`
+       ManagementToken string
+       SystemRootToken string
+       Services        Services
+       InstanceTypes   InstanceTypeMap
+       Containers      ContainersConfig
+       RemoteClusters  map[string]RemoteCluster
+       PostgreSQL      PostgreSQL
+       API             API
+       SystemLogs      SystemLogs
+       TLS             TLS
 }
 
 type Services struct {
@@ -80,15 +77,16 @@ type Services struct {
        Keepbalance   Service
        Keepproxy     Service
        Keepstore     Service
-       Keepweb       Service
        Nodemanager   Service
        RailsAPI      Service
+       WebDAV        Service
        Websocket     Service
-       Workbench     Service
+       Workbench1    Service
+       Workbench2    Service
 }
 
 type Service struct {
-       InternalURLs map[URL]ServiceInstance
+       InternalURLs map[URL]ServiceInstance `json:",omitempty"`
        ExternalURL  URL
 }
 
@@ -105,11 +103,16 @@ func (su *URL) UnmarshalText(text []byte) error {
        return err
 }
 
+func (su URL) MarshalText() ([]byte, error) {
+       return []byte((*url.URL)(&su).String()), nil
+}
+
 type ServiceInstance struct{}
 
-type Logging struct {
-       Level  string
-       Format string
+type SystemLogs struct {
+       LogLevel                string
+       Format                  string
+       MaxRequestLogParamsSize int
 }
 
 type PostgreSQL struct {
@@ -143,59 +146,31 @@ type InstanceType struct {
        Preemptible     bool
 }
 
-type Dispatch struct {
-       // PEM encoded SSH key (RSA, DSA, or ECDSA) able to log in to
-       // cloud VMs.
-       PrivateKey string
-
-       // Max time for workers to come up before abandoning stale
-       // locks from previous run
-       StaleLockTimeout Duration
-
-       // Interval between queue polls
-       PollInterval Duration
-
-       // Interval between probes to each worker
-       ProbeInterval Duration
-
-       // Maximum total worker probes per second
-       MaxProbesPerSecond int
-
-       // Time before repeating SIGTERM when killing a container
-       TimeoutSignal Duration
-
-       // Time to give up on SIGTERM and write off the worker
-       TimeoutTERM Duration
+type ContainersConfig struct {
+       CloudVMs           CloudVMsConfig
+       DispatchPrivateKey string
+       StaleLockTimeout   Duration
 }
 
-type CloudVMs struct {
-       // Shell command that exits zero IFF the VM is fully booted
-       // and ready to run containers, e.g., "mount | grep
-       // /encrypted-tmp"
-       BootProbeCommand string
-
-       // Listening port (name or number) of SSH servers on worker
-       // VMs
-       SSHPort string
+type CloudVMsConfig struct {
+       Enable bool
 
-       SyncInterval Duration
-
-       // Maximum idle time before automatic shutdown
-       TimeoutIdle Duration
-
-       // Maximum booting time before automatic shutdown
-       TimeoutBooting Duration
-
-       // Maximum time with no successful probes before automatic shutdown
-       TimeoutProbe Duration
-
-       // Time after shutdown to retry shutdown
-       TimeoutShutdown Duration
-
-       // Maximum create/destroy-instance operations per second
+       BootProbeCommand     string
+       ImageID              string
        MaxCloudOpsPerSecond int
-
-       ImageID string
+       MaxProbesPerSecond   int
+       PollInterval         Duration
+       ProbeInterval        Duration
+       SSHPort              string
+       SyncInterval         Duration
+       TimeoutBooting       Duration
+       TimeoutIdle          Duration
+       TimeoutProbe         Duration
+       TimeoutShutdown      Duration
+       TimeoutSignal        Duration
+       TimeoutTERM          Duration
+       ResourceTags         map[string]string
+       TagKeyPrefix         string
 
        Driver           string
        DriverParameters json.RawMessage
@@ -259,51 +234,16 @@ func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
        return nil
 }
 
-// GetNodeProfile returns a NodeProfile for the given hostname. An
-// error is returned if the appropriate configuration can't be
-// determined (e.g., this does not appear to be a system node). If
-// node is empty, use the OS-reported hostname.
-func (cc *Cluster) GetNodeProfile(node string) (*NodeProfile, error) {
-       if node == "" {
-               hostname, err := os.Hostname()
-               if err != nil {
-                       return nil, err
-               }
-               node = hostname
-       }
-       if cfg, ok := cc.NodeProfiles[node]; ok {
-               return &cfg, nil
-       }
-       // If node is not listed, but "*" gives a default system node
-       // config, use the default config.
-       if cfg, ok := cc.NodeProfiles["*"]; ok {
-               return &cfg, nil
-       }
-       return nil, fmt.Errorf("config does not provision host %q as a system node", node)
-}
-
-type NodeProfile struct {
-       Controller    SystemServiceInstance `json:"arvados-controller"`
-       Health        SystemServiceInstance `json:"arvados-health"`
-       Keepbalance   SystemServiceInstance `json:"keep-balance"`
-       Keepproxy     SystemServiceInstance `json:"keepproxy"`
-       Keepstore     SystemServiceInstance `json:"keepstore"`
-       Keepweb       SystemServiceInstance `json:"keep-web"`
-       Nodemanager   SystemServiceInstance `json:"arvados-node-manager"`
-       DispatchCloud SystemServiceInstance `json:"arvados-dispatch-cloud"`
-       RailsAPI      SystemServiceInstance `json:"arvados-api-server"`
-       Websocket     SystemServiceInstance `json:"arvados-ws"`
-       Workbench     SystemServiceInstance `json:"arvados-workbench"`
-}
-
 type ServiceName string
 
 const (
        ServiceNameRailsAPI      ServiceName = "arvados-api-server"
        ServiceNameController    ServiceName = "arvados-controller"
        ServiceNameDispatchCloud ServiceName = "arvados-dispatch-cloud"
+       ServiceNameHealth        ServiceName = "arvados-health"
        ServiceNameNodemanager   ServiceName = "arvados-node-manager"
-       ServiceNameWorkbench     ServiceName = "arvados-workbench"
+       ServiceNameWorkbench1    ServiceName = "arvados-workbench1"
+       ServiceNameWorkbench2    ServiceName = "arvados-workbench2"
        ServiceNameWebsocket     ServiceName = "arvados-ws"
        ServiceNameKeepbalance   ServiceName = "keep-balance"
        ServiceNameKeepweb       ServiceName = "keep-web"
@@ -311,41 +251,23 @@ const (
        ServiceNameKeepstore     ServiceName = "keepstore"
 )
 
-// ServicePorts returns the configured listening address (or "" if
-// disabled) for each service on the node.
-func (np *NodeProfile) ServicePorts() map[ServiceName]string {
-       return map[ServiceName]string{
-               ServiceNameRailsAPI:      np.RailsAPI.Listen,
-               ServiceNameController:    np.Controller.Listen,
-               ServiceNameDispatchCloud: np.DispatchCloud.Listen,
-               ServiceNameNodemanager:   np.Nodemanager.Listen,
-               ServiceNameWorkbench:     np.Workbench.Listen,
-               ServiceNameWebsocket:     np.Websocket.Listen,
-               ServiceNameKeepbalance:   np.Keepbalance.Listen,
-               ServiceNameKeepweb:       np.Keepweb.Listen,
-               ServiceNameKeepproxy:     np.Keepproxy.Listen,
-               ServiceNameKeepstore:     np.Keepstore.Listen,
-       }
-}
-
-func (h RequestLimits) GetMultiClusterRequestConcurrency() int {
-       if h.MultiClusterRequestConcurrency == 0 {
-               return 4
-       }
-       return h.MultiClusterRequestConcurrency
-}
-
-func (h RequestLimits) GetMaxItemsPerResponse() int {
-       if h.MaxItemsPerResponse == 0 {
-               return 1000
+// Map returns all services as a map, suitable for iterating over all
+// services or looking up a service by name.
+func (svcs Services) Map() map[ServiceName]Service {
+       return map[ServiceName]Service{
+               ServiceNameRailsAPI:      svcs.RailsAPI,
+               ServiceNameController:    svcs.Controller,
+               ServiceNameDispatchCloud: svcs.DispatchCloud,
+               ServiceNameHealth:        svcs.Health,
+               ServiceNameNodemanager:   svcs.Nodemanager,
+               ServiceNameWorkbench1:    svcs.Workbench1,
+               ServiceNameWorkbench2:    svcs.Workbench2,
+               ServiceNameWebsocket:     svcs.Websocket,
+               ServiceNameKeepbalance:   svcs.Keepbalance,
+               ServiceNameKeepweb:       svcs.WebDAV,
+               ServiceNameKeepproxy:     svcs.Keepproxy,
+               ServiceNameKeepstore:     svcs.Keepstore,
        }
-       return h.MaxItemsPerResponse
-}
-
-type SystemServiceInstance struct {
-       Listen   string
-       TLS      bool
-       Insecure bool
 }
 
 type TLS struct {
index 25eed010f26c534ef8e36dfa119065731d1e2ac4..2696fdb051146ca34bd311e7e29e1092b0a3723e 100644 (file)
@@ -7,6 +7,7 @@ package arvados
 import (
        "encoding/json"
        "fmt"
+       "strings"
        "time"
 )
 
@@ -23,13 +24,17 @@ func (d *Duration) UnmarshalJSON(data []byte) error {
 }
 
 // MarshalJSON implements json.Marshaler.
-func (d *Duration) MarshalJSON() ([]byte, error) {
+func (d Duration) MarshalJSON() ([]byte, error) {
        return json.Marshal(d.String())
 }
 
-// String implements fmt.Stringer.
+// String returns a format similar to (time.Duration).String(), but with
+// redundant "0m" and "0s" parts removed: e.g., "1h" instead of "1h0m0s".
 func (d Duration) String() string {
-       return time.Duration(d).String()
+       s := time.Duration(d).String()
+       s = strings.Replace(s, "m0s", "m", 1)
+       s = strings.Replace(s, "h0m", "h", 1)
+       return s
 }
 
 // Duration returns a time.Duration.
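
A short trace of the two replacements, using time.Duration's standard formatting:

    // "1h0m0s" -> "1h0m" (m0s->m) -> "1h"   (h0m->h)
    // "2h0m1s" -> (unchanged)     -> "2h1s" (h0m->h)
    // "2m30s"  -> (unchanged)     -> "2m30s"
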
diff --git a/sdk/go/arvados/duration_test.go b/sdk/go/arvados/duration_test.go
new file mode 100644 (file)
index 0000000..ee787a6
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "time"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&DurationSuite{})
+
+type DurationSuite struct{}
+
+func (s *DurationSuite) TestMarshalJSON(c *check.C) {
+       var d struct {
+               D Duration
+       }
+       err := json.Unmarshal([]byte(`{"D":"1.234s"}`), &d)
+       c.Check(err, check.IsNil)
+       c.Check(d.D, check.Equals, Duration(time.Second+234*time.Millisecond))
+       buf, err := json.Marshal(d)
+       c.Check(string(buf), check.Equals, `{"D":"1.234s"}`)
+
+       for _, trial := range []struct {
+               seconds int
+               out     string
+       }{
+               {30, "30s"},
+               {60, "1m"},
+               {120, "2m"},
+               {150, "2m30s"},
+               {3600, "1h"},
+               {7201, "2h1s"},
+               {360600, "100h10m"},
+               {360610, "100h10m10s"},
+       } {
+               buf, err := json.Marshal(Duration(time.Duration(trial.seconds) * time.Second))
+               c.Check(err, check.IsNil)
+               c.Check(string(buf), check.Equals, `"`+trial.out+`"`)
+       }
+}
index 2ae2bd8924e23b583a267091cc6b9985e52d3422..7fd03b120a7f34240393f884f88992b885499e1f 100644 (file)
@@ -23,7 +23,6 @@ import (
        "testing"
        "time"
 
-       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
 )
 
@@ -87,7 +86,7 @@ type CollectionFSSuite struct {
 
 func (s *CollectionFSSuite) SetUpTest(c *check.C) {
        s.client = NewClientFromEnv()
-       err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+arvadostest.FooAndBarFilesInDirUUID, nil, nil)
+       err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
        c.Assert(err, check.IsNil)
        s.kc = &keepClientStub{
                blocks: map[string][]byte{
index 1a06ce14632af5e1dc7f219208307291009d8da9..49e7d675f8b5c6729b61d94d0f271f615c68e924 100644 (file)
@@ -12,7 +12,6 @@ import (
        "path/filepath"
        "strings"
 
-       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
 )
 
@@ -121,7 +120,7 @@ func (s *SiteFSSuite) TestProjectReaddirAfterLoadOne(c *check.C) {
 func (s *SiteFSSuite) TestSlashInName(c *check.C) {
        badCollection := Collection{
                Name:      "bad/collection",
-               OwnerUUID: arvadostest.AProjectUUID,
+               OwnerUUID: fixtureAProjectUUID,
        }
        err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", s.client.UpdateBody(&badCollection), nil)
        c.Assert(err, check.IsNil)
@@ -130,7 +129,7 @@ func (s *SiteFSSuite) TestSlashInName(c *check.C) {
        badProject := Group{
                Name:       "bad/project",
                GroupClass: "project",
-               OwnerUUID:  arvadostest.AProjectUUID,
+               OwnerUUID:  fixtureAProjectUUID,
        }
        err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", s.client.UpdateBody(&badProject), nil)
        c.Assert(err, check.IsNil)
@@ -157,7 +156,7 @@ func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
 
        oob := Collection{
                Name:      "oob",
-               OwnerUUID: arvadostest.AProjectUUID,
+               OwnerUUID: fixtureAProjectUUID,
        }
        err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", s.client.UpdateBody(&oob), nil)
        c.Assert(err, check.IsNil)
index 80028dc5985bd46e510c45f102eaf7d2ac518287..fff0b7e010b22b1811991ce3b6249093c50b616b 100644 (file)
@@ -8,10 +8,22 @@ import (
        "net/http"
        "os"
 
-       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
 )
 
+const (
+       // Importing arvadostest would be an import cycle, so these
+       // fixtures are duplicated here [until fs moves to a separate
+       // package].
+       fixtureActiveToken             = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+       fixtureAProjectUUID            = "zzzzz-j7d0g-v955i6s2oi1cbso"
+       fixtureFooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
+       fixtureFooCollectionName       = "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+       fixtureFooCollectionPDH        = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
+       fixtureFooCollection           = "zzzzz-4zz18-fy296fx3hot09f7"
+       fixtureNonexistentCollection   = "zzzzz-4zz18-totallynotexist"
+)
+
 var _ = check.Suite(&SiteFSSuite{})
 
 type SiteFSSuite struct {
@@ -23,7 +35,7 @@ type SiteFSSuite struct {
 func (s *SiteFSSuite) SetUpTest(c *check.C) {
        s.client = &Client{
                APIHost:   os.Getenv("ARVADOS_API_HOST"),
-               AuthToken: arvadostest.ActiveToken,
+               AuthToken: fixtureActiveToken,
                Insecure:  true,
        }
        s.kc = &keepClientStub{
@@ -53,16 +65,16 @@ func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
        c.Check(err, check.IsNil)
        c.Check(len(fis), check.Equals, 0)
 
-       err = s.fs.Mkdir("/by_id/"+arvadostest.FooCollection, 0755)
+       err = s.fs.Mkdir("/by_id/"+fixtureFooCollection, 0755)
        c.Check(err, check.Equals, os.ErrExist)
 
-       f, err = s.fs.Open("/by_id/" + arvadostest.NonexistentCollection)
+       f, err = s.fs.Open("/by_id/" + fixtureNonexistentCollection)
        c.Assert(err, check.Equals, os.ErrNotExist)
 
        for _, path := range []string{
-               arvadostest.FooCollection,
-               arvadostest.FooPdh,
-               arvadostest.AProjectUUID + "/" + arvadostest.FooCollectionName,
+               fixtureFooCollection,
+               fixtureFooCollectionPDH,
+               fixtureAProjectUUID + "/" + fixtureFooCollectionName,
        } {
                f, err = s.fs.Open("/by_id/" + path)
                c.Assert(err, check.IsNil)
@@ -74,7 +86,7 @@ func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
                c.Check(names, check.DeepEquals, []string{"foo"})
        }
 
-       f, err = s.fs.Open("/by_id/" + arvadostest.AProjectUUID + "/A Subproject/baz_file")
+       f, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file")
        c.Assert(err, check.IsNil)
        fis, err = f.Readdir(-1)
        var names []string
@@ -83,15 +95,15 @@ func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
        }
        c.Check(names, check.DeepEquals, []string{"baz"})
 
-       _, err = s.fs.OpenFile("/by_id/"+arvadostest.NonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
+       _, err = s.fs.OpenFile("/by_id/"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
        c.Check(err, check.Equals, ErrInvalidOperation)
-       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection, "/by_id/beep")
+       err = s.fs.Rename("/by_id/"+fixtureFooCollection, "/by_id/beep")
        c.Check(err, check.Equals, ErrInvalidArgument)
-       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/beep")
+       err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/beep")
        c.Check(err, check.Equals, ErrInvalidArgument)
        _, err = s.fs.Stat("/by_id/beep")
        c.Check(err, check.Equals, os.ErrNotExist)
-       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/"+arvadostest.FooCollection+"/bar")
+       err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/"+fixtureFooCollection+"/bar")
        c.Check(err, check.IsNil)
 
        err = s.fs.Rename("/by_id", "/beep")
index 47953ce9da7a10b795b980adbc04ae964d532926..1969441da1d0dc8767c1ca9acb5145a75b3613d0 100644 (file)
@@ -9,6 +9,9 @@ import "strings"
 func (c PostgreSQLConnection) String() string {
        s := ""
        for k, v := range c {
+               if v == "" {
+                       continue
+               }
                s += strings.ToLower(k)
                s += "='"
                s += strings.Replace(
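
A review sketch of the effect, assuming PostgreSQLConnection is a map[string]string rendered as key='value' pairs as shown above:

    c := PostgreSQLConnection{"dbname": "arvados", "password": ""}
    _ = c.String() // renders only dbname='arvados'; the empty password entry is now skipped
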
index 4f648e9b437e7b5eead7abf4b0db302011725cb7..95b83265a05a4835363975cf8720157c0e9171da 100644 (file)
@@ -26,7 +26,6 @@ const (
        FooBarDirCollection     = "zzzzz-4zz18-foonbarfilesdir"
        WazVersion1Collection   = "zzzzz-4zz18-25k12570yk1ver1"
        UserAgreementPDH        = "b519d9cb706a29fc7ea24dbea2f05851+93"
-       FooPdh                  = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
        HelloWorldPdh           = "55713e6a34081eb03609e7ad5fcad129+62"
 
        AProjectUUID    = "zzzzz-j7d0g-v955i6s2oi1cbso"
index 89925a957d381b322f51c528cebea7a2d45fb222..80735f86eb613d86d3b57f0a41af3813de6d6aa1 100644 (file)
@@ -6,6 +6,9 @@ package arvadostest
 
 import (
        "net/http"
+       "net/url"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
 )
 
 // StubResponse struct with response status and body
@@ -37,3 +40,22 @@ func (stub *ServerStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
                resp.Write([]byte(``))
        }
 }
+
+// SetServiceURL overrides the given service config/discovery with the
+// given internalURLs.
+//
+// ExternalURL is set to the last internalURL; this is only meant to be
+// useful in the common case where exactly one URL is given.
+//
+// SetServiceURL panics on errors.
+func SetServiceURL(service *arvados.Service, internalURLs ...string) {
+       service.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+       for _, u := range internalURLs {
+               u, err := url.Parse(u)
+               if err != nil {
+                       panic(err)
+               }
+               service.InternalURLs[arvados.URL(*u)] = arvados.ServiceInstance{}
+               service.ExternalURL = arvados.URL(*u)
+       }
+}
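
A usage sketch, mirroring the aggregator tests below:

    var svc arvados.Service
    arvadostest.SetServiceURL(&svc, "http://localhost:12345/")
    // svc.InternalURLs now has one entry, and svc.ExternalURL is that same URL.
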
index 564331327a8d53ad250b044112f25e1b07730444..acfdbb7f8fc713517628314367a65234dc7fbd3d 100644 (file)
@@ -9,8 +9,8 @@ import (
        "encoding/json"
        "errors"
        "fmt"
-       "net"
        "net/http"
+       "net/url"
        "sync"
        "time"
 
@@ -28,7 +28,7 @@ type Aggregator struct {
        httpClient *http.Client
        timeout    arvados.Duration
 
-       Config *arvados.Config
+       Cluster *arvados.Cluster
 
        // If non-nil, Log is called after handling each request.
        Log func(*http.Request, error)
@@ -42,6 +42,10 @@ func (agg *Aggregator) setup() {
        }
 }
 
+func (agg *Aggregator) CheckHealth() error {
+       return nil
+}
+
 func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        agg.setupOnce.Do(agg.setup)
        sendErr := func(statusCode int, err error) {
@@ -54,13 +58,7 @@ func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 
        resp.Header().Set("Content-Type", "application/json")
 
-       cluster, err := agg.Config.GetCluster("")
-       if err != nil {
-               err = fmt.Errorf("arvados.GetCluster(): %s", err)
-               sendErr(http.StatusInternalServerError, err)
-               return
-       }
-       if !agg.checkAuth(req, cluster) {
+       if !agg.checkAuth(req) {
                sendErr(http.StatusUnauthorized, errUnauthorized)
                return
        }
@@ -68,7 +66,7 @@ func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
                sendErr(http.StatusNotFound, errNotFound)
                return
        }
-       json.NewEncoder(resp).Encode(agg.ClusterHealth(cluster))
+       json.NewEncoder(resp).Encode(agg.ClusterHealth())
        if agg.Log != nil {
                agg.Log(req, nil)
        }
@@ -104,7 +102,7 @@ type ServiceHealth struct {
        N      int    `json:"n"`
 }
 
-func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResponse {
+func (agg *Aggregator) ClusterHealth() ClusterHealthResponse {
        resp := ClusterHealthResponse{
                Health:   "OK",
                Checks:   make(map[string]CheckResult),
@@ -113,46 +111,41 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
 
        mtx := sync.Mutex{}
        wg := sync.WaitGroup{}
-       for profileName, profile := range cluster.NodeProfiles {
-               for svc, addr := range profile.ServicePorts() {
-                       // Ensure svc is listed in resp.Services.
-                       mtx.Lock()
-                       if _, ok := resp.Services[svc]; !ok {
-                               resp.Services[svc] = ServiceHealth{Health: "ERROR"}
-                       }
-                       mtx.Unlock()
-
-                       if addr == "" {
-                               // svc is not expected on this node.
-                               continue
-                       }
+       for svcName, svc := range agg.Cluster.Services.Map() {
+               // Ensure svc is listed in resp.Services.
+               mtx.Lock()
+               if _, ok := resp.Services[svcName]; !ok {
+                       resp.Services[svcName] = ServiceHealth{Health: "ERROR"}
+               }
+               mtx.Unlock()
 
+               for addr := range svc.InternalURLs {
                        wg.Add(1)
-                       go func(profileName string, svc arvados.ServiceName, addr string) {
+                       go func(svcName arvados.ServiceName, addr arvados.URL) {
                                defer wg.Done()
                                var result CheckResult
-                               url, err := agg.pingURL(profileName, addr)
+                               pingURL, err := agg.pingURL(addr)
                                if err != nil {
                                        result = CheckResult{
                                                Health: "ERROR",
                                                Error:  err.Error(),
                                        }
                                } else {
-                                       result = agg.ping(url, cluster)
+                                       result = agg.ping(pingURL)
                                }
 
                                mtx.Lock()
                                defer mtx.Unlock()
-                               resp.Checks[fmt.Sprintf("%s+%s", svc, url)] = result
+                               resp.Checks[fmt.Sprintf("%s+%s", svcName, pingURL)] = result
                                if result.Health == "OK" {
-                                       h := resp.Services[svc]
+                                       h := resp.Services[svcName]
                                        h.N++
                                        h.Health = "OK"
-                                       resp.Services[svc] = h
+                                       resp.Services[svcName] = h
                                } else {
                                        resp.Health = "ERROR"
                                }
-                       }(profileName, svc, addr)
+                       }(svcName, addr)
                }
        }
        wg.Wait()
@@ -168,12 +161,12 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
        return resp
 }
 
-func (agg *Aggregator) pingURL(node, addr string) (string, error) {
-       _, port, err := net.SplitHostPort(addr)
-       return "http://" + node + ":" + port + "/_health/ping", err
+func (agg *Aggregator) pingURL(svcURL arvados.URL) (*url.URL, error) {
+       base := url.URL(svcURL)
+       return base.Parse("/_health/ping")
 }
 
-func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckResult) {
+func (agg *Aggregator) ping(target *url.URL) (result CheckResult) {
        t0 := time.Now()
 
        var err error
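
Review sketch: (*url.URL).Parse resolves the reference against the service URL, so the scheme and host are preserved and only the path is replaced:

    base, _ := url.Parse("https://keep0.zzzzz.example:25107/")
    ping, _ := base.Parse("/_health/ping")
    // ping.String() == "https://keep0.zzzzz.example:25107/_health/ping"
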
@@ -186,11 +179,11 @@ func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckR
                }
        }()
 
-       req, err := http.NewRequest("GET", url, nil)
+       req, err := http.NewRequest("GET", target.String(), nil)
        if err != nil {
                return
        }
-       req.Header.Set("Authorization", "Bearer "+cluster.ManagementToken)
+       req.Header.Set("Authorization", "Bearer "+agg.Cluster.ManagementToken)
 
        ctx, cancel := context.WithTimeout(req.Context(), time.Duration(agg.timeout))
        defer cancel()
@@ -216,10 +209,10 @@ func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckR
        return
 }
 
-func (agg *Aggregator) checkAuth(req *http.Request, cluster *arvados.Cluster) bool {
+func (agg *Aggregator) checkAuth(req *http.Request) bool {
        creds := auth.CredentialsFromRequest(req)
        for _, token := range creds.Tokens {
-               if token != "" && token == cluster.ManagementToken {
+               if token != "" && token == agg.Cluster.ManagementToken {
                        return true
                }
        }
index 122355be987755b161d38a2e46e0bc2cc4f52208..3ede3b983a5a97e69164f5704e5d91c4cf49b02f 100644 (file)
@@ -30,13 +30,8 @@ func (s *AggregatorSuite) TestInterface(c *check.C) {
 }
 
 func (s *AggregatorSuite) SetUpTest(c *check.C) {
-       s.handler = &Aggregator{Config: &arvados.Config{
-               Clusters: map[string]arvados.Cluster{
-                       "zzzzz": {
-                               ManagementToken: arvadostest.ManagementToken,
-                               NodeProfiles:    map[string]arvados.NodeProfile{},
-                       },
-               },
+       s.handler = &Aggregator{Cluster: &arvados.Cluster{
+               ManagementToken: arvadostest.ManagementToken,
        }}
        s.req = httptest.NewRequest("GET", "/_health/all", nil)
        s.req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
@@ -57,9 +52,9 @@ func (s *AggregatorSuite) TestBadAuth(c *check.C) {
        c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
 }
 
-func (s *AggregatorSuite) TestEmptyConfig(c *check.C) {
+func (s *AggregatorSuite) TestNoServicesConfigured(c *check.C) {
        s.handler.ServeHTTP(s.resp, s.req)
-       s.checkOK(c)
+       s.checkUnhealthy(c)
 }
 
 func (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, string) {
@@ -73,51 +68,18 @@ func (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, st
        return srv, ":" + port
 }
 
-type unhealthyHandler struct{}
-
-func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-       if req.URL.Path == "/_health/ping" {
-               resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
-       } else {
-               http.Error(resp, "not found", http.StatusNotFound)
-       }
-}
-
 func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
        srv, listen := s.stubServer(&unhealthyHandler{})
        defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
-               Keepstore: arvados.SystemServiceInstance{Listen: listen},
-       }
+       arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listen+"/")
        s.handler.ServeHTTP(s.resp, s.req)
        s.checkUnhealthy(c)
 }
 
-type healthyHandler struct{}
-
-func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-       if req.URL.Path == "/_health/ping" {
-               resp.Write([]byte(`{"health":"OK"}`))
-       } else {
-               http.Error(resp, "not found", http.StatusNotFound)
-       }
-}
-
 func (s *AggregatorSuite) TestHealthy(c *check.C) {
        srv, listen := s.stubServer(&healthyHandler{})
        defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
-               Controller:    arvados.SystemServiceInstance{Listen: listen},
-               DispatchCloud: arvados.SystemServiceInstance{Listen: listen},
-               Keepbalance:   arvados.SystemServiceInstance{Listen: listen},
-               Keepproxy:     arvados.SystemServiceInstance{Listen: listen},
-               Keepstore:     arvados.SystemServiceInstance{Listen: listen},
-               Keepweb:       arvados.SystemServiceInstance{Listen: listen},
-               Nodemanager:   arvados.SystemServiceInstance{Listen: listen},
-               RailsAPI:      arvados.SystemServiceInstance{Listen: listen},
-               Websocket:     arvados.SystemServiceInstance{Listen: listen},
-               Workbench:     arvados.SystemServiceInstance{Listen: listen},
-       }
+       s.setAllServiceURLs(listen)
        s.handler.ServeHTTP(s.resp, s.req)
        resp := s.checkOK(c)
        svc := "keepstore+http://localhost" + listen + "/_health/ping"
@@ -132,21 +94,8 @@ func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
        defer srvH.Close()
        srvU, listenU := s.stubServer(&unhealthyHandler{})
        defer srvU.Close()
-       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
-               Controller:    arvados.SystemServiceInstance{Listen: listenH},
-               DispatchCloud: arvados.SystemServiceInstance{Listen: listenH},
-               Keepbalance:   arvados.SystemServiceInstance{Listen: listenH},
-               Keepproxy:     arvados.SystemServiceInstance{Listen: listenH},
-               Keepstore:     arvados.SystemServiceInstance{Listen: listenH},
-               Keepweb:       arvados.SystemServiceInstance{Listen: listenH},
-               Nodemanager:   arvados.SystemServiceInstance{Listen: listenH},
-               RailsAPI:      arvados.SystemServiceInstance{Listen: listenH},
-               Websocket:     arvados.SystemServiceInstance{Listen: listenH},
-               Workbench:     arvados.SystemServiceInstance{Listen: listenH},
-       }
-       s.handler.Config.Clusters["zzzzz"].NodeProfiles["127.0.0.1"] = arvados.NodeProfile{
-               Keepstore: arvados.SystemServiceInstance{Listen: listenU},
-       }
+       s.setAllServiceURLs(listenH)
+       arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listenH+"/", "http://127.0.0.1"+listenU+"/")
        s.handler.ServeHTTP(s.resp, s.req)
        resp := s.checkUnhealthy(c)
        ep := resp.Checks["keepstore+http://localhost"+listenH+"/_health/ping"]
@@ -158,10 +107,25 @@ func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
        c.Logf("%#v", ep)
 }
 
+func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
+       s.handler.timeout = arvados.Duration(100 * time.Millisecond)
+       srv, listen := s.stubServer(&slowHandler{})
+       defer srv.Close()
+       arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listen+"/")
+       s.handler.ServeHTTP(s.resp, s.req)
+       resp := s.checkUnhealthy(c)
+       ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
+       c.Check(ep.Health, check.Equals, "ERROR")
+       c.Check(ep.HTTPStatusCode, check.Equals, 0)
+       rt, err := ep.ResponseTime.Float64()
+       c.Check(err, check.IsNil)
+       c.Check(rt > 0.005, check.Equals, true)
+}
+
 func (s *AggregatorSuite) checkError(c *check.C) {
        c.Check(s.resp.Code, check.Not(check.Equals), http.StatusOK)
        var resp ClusterHealthResponse
-       err := json.NewDecoder(s.resp.Body).Decode(&resp)
+       err := json.Unmarshal(s.resp.Body.Bytes(), &resp)
        c.Check(err, check.IsNil)
        c.Check(resp.Health, check.Not(check.Equals), "OK")
 }
@@ -177,36 +141,60 @@ func (s *AggregatorSuite) checkOK(c *check.C) ClusterHealthResponse {
 func (s *AggregatorSuite) checkResult(c *check.C, health string) ClusterHealthResponse {
        c.Check(s.resp.Code, check.Equals, http.StatusOK)
        var resp ClusterHealthResponse
-       err := json.NewDecoder(s.resp.Body).Decode(&resp)
+       c.Log(s.resp.Body.String())
+       err := json.Unmarshal(s.resp.Body.Bytes(), &resp)
        c.Check(err, check.IsNil)
        c.Check(resp.Health, check.Equals, health)
        return resp
 }
 
-type slowHandler struct{}
+func (s *AggregatorSuite) setAllServiceURLs(listen string) {
+       svcs := &s.handler.Cluster.Services
+       for _, svc := range []*arvados.Service{
+               &svcs.Controller,
+               &svcs.DispatchCloud,
+               &svcs.Keepbalance,
+               &svcs.Keepproxy,
+               &svcs.Keepstore,
+               &svcs.Health,
+               &svcs.Nodemanager,
+               &svcs.RailsAPI,
+               &svcs.WebDAV,
+               &svcs.Websocket,
+               &svcs.Workbench1,
+               &svcs.Workbench2,
+       } {
+               arvadostest.SetServiceURL(svc, "http://localhost"+listen+"/")
+       }
+}
 
-func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+type unhealthyHandler struct{}
+
+func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/_health/ping" {
+               resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
+       } else {
+               http.Error(resp, "not found", http.StatusNotFound)
+       }
+}
+
+type healthyHandler struct{}
+
+func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        if req.URL.Path == "/_health/ping" {
-               time.Sleep(3 * time.Second)
                resp.Write([]byte(`{"health":"OK"}`))
        } else {
                http.Error(resp, "not found", http.StatusNotFound)
        }
 }
 
-func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
-       s.handler.timeout = arvados.Duration(100 * time.Millisecond)
-       srv, listen := s.stubServer(&slowHandler{})
-       defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
-               Keepstore: arvados.SystemServiceInstance{Listen: listen},
+type slowHandler struct{}
+
+func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/_health/ping" {
+               time.Sleep(3 * time.Second)
+               resp.Write([]byte(`{"health":"OK"}`))
+       } else {
+               http.Error(resp, "not found", http.StatusNotFound)
        }
-       s.handler.ServeHTTP(s.resp, s.req)
-       resp := s.checkUnhealthy(c)
-       ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
-       c.Check(ep.Health, check.Equals, "ERROR")
-       c.Check(ep.HTTPStatusCode, check.Equals, 0)
-       rt, err := ep.ResponseTime.Float64()
-       c.Check(err, check.IsNil)
-       c.Check(rt > 0.005, check.Equals, true)
 }
index 7f400ceefcdfe810ab6507047e508757f4ce1630..dd78d416b2d3ce13f14e48eedf13c723a488505f 100644 (file)
@@ -17,7 +17,7 @@ def pam_sm_authenticate(pamh, flags, argv):
 
     try:
         username = pamh.get_user(None)
-    except pamh.exception, e:
+    except pamh.exception as e:
         return e.pam_result
 
     if not username:
index c94f5b41f5b674beaf7b41989d6ec6f0a84329e9..9bc2cef60169b674a51d6ae3cffc24662aaa78df 100755 (executable)
@@ -42,13 +42,6 @@ setup(name='arvados-pam',
           ('share/pam-configs', ['pam-configs/arvados']),
           ('share/doc/arvados-pam', ['LICENSE-2.0.txt', 'README.rst']),
           ('share/doc/arvados-pam/examples', glob.glob('examples/*')),
-
-          # The arvados build scripts used to install data files to
-          # "/usr/data/*" but now install them to "/usr/*". Here, we
-          # install an extra copy in the old location so existing pam
-          # configs can still work. When old systems have had a chance
-          # to update to the new paths, this line can be removed.
-          ('data/lib/security', ['lib/libpam_arvados.py']),
       ],
       install_requires=[
           'arvados-python-client>=0.1.20150801000000',
diff --git a/sdk/python/arvados/commands/federation_migrate.py b/sdk/python/arvados/commands/federation_migrate.py
new file mode 100755 (executable)
index 0000000..1daf6be
--- /dev/null
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.util
+import arvados.errors
+import csv
+import sys
+import argparse
+import hmac
+import urllib.parse
+
+def main():
+
+    parser = argparse.ArgumentParser(description='Migrate users to federated identity; see https://doc.arvados.org/admin/merge-remote-account.html')
+    parser.add_argument('--tokens', type=str, required=True)
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument('--report', type=str, help="Generate report .csv file listing users by email address and their associated Arvados accounts")
+    group.add_argument('--migrate', type=str, help="Consume report .csv and migrate users to designated Arvados accounts")
+    group.add_argument('--check', action="store_true", help="Check that tokens are usable and the federation is well connected")
+    args = parser.parse_args()
+
+    clusters = {}
+    errors = []
+    print("Reading %s" % args.tokens)
+    with open(args.tokens, "rt") as f:
+        for r in csv.reader(f):
+            host = r[0]
+            token = r[1]
+            print("Contacting %s" % (host))
+            arv = arvados.api(host=host, token=token, cache=False)
+            try:
+                cur = arv.users().current().execute()
+                arv.api_client_authorizations().list(limit=1).execute()
+            except arvados.errors.ApiError as e:
+                errors.append("checking token for %s: %s" % (host, e))
+                errors.append('    This script requires a token issued to a trusted client in order to manipulate access tokens.')
+                errors.append('    See "Trusted client setting" in https://doc.arvados.org/install/install-workbench-app.html')
+                errors.append('    and https://doc.arvados.org/api/tokens.html')
+                continue
+
+            if not cur["is_admin"]:
+                errors.append("Not admin of %s" % host)
+                continue
+
+            clusters[arv._rootDesc["uuidPrefix"]] = arv
+
+
+    print("Checking that the federation is well connected")
+    for v in clusters.values():
+        for r in clusters:
+            if r != v._rootDesc["uuidPrefix"] and r not in v._rootDesc["remoteHosts"]:
+                errors.append("%s is missing from remoteHosts of %s" % (r, v._rootDesc["uuidPrefix"]))
+        for r in v._rootDesc["remoteHosts"]:
+            if r != "*" and r not in clusters:
+                print("WARNING: %s is federated with %s but %s is missing from the tokens file or the token is invalid" % (v._rootDesc["uuidPrefix"], r, r))
+
+    if errors:
+        for e in errors:
+            print("ERROR: "+str(e))
+        exit(1)
+
+    if args.check:
+        print("Tokens file passed checks")
+        exit(0)
+
+    if args.report:
+        users = []
+        for c, arv in clusters.items():
+            print("Getting user list from %s" % c)
+            ul = arvados.util.list_all(arv.users().list)
+            for l in ul:
+                if l["uuid"].startswith(c):
+                    users.append(l)
+
+        out = csv.writer(open(args.report, "wt"))
+
+        out.writerow(("email", "user uuid", "primary cluster/user"))
+
+        users = sorted(users, key=lambda u: u["email"]+"::"+u["uuid"])
+
+        accum = []
+        lastemail = None
+        for u in users:
+            if u["uuid"].endswith("-anonymouspublic") or u["uuid"].endswith("-000000000000000"):
+                continue
+            if lastemail is None:
+                lastemail = u["email"]
+            if u["email"] == lastemail:
+                accum.append(u)
+            else:
+                homeuuid = None
+                for a in accum:
+                    if homeuuid is None:
+                        homeuuid = a["uuid"]
+                    if a["uuid"] != homeuuid:
+                        homeuuid = ""
+                for a in accum:
+                    out.writerow((a["email"], a["uuid"], homeuuid[0:5]))
+                lastemail = u["email"]
+                accum = [u]
+
+        homeuuid = None
+        for a in accum:
+            if homeuuid is None:
+                homeuuid = a["uuid"]
+            if a["uuid"] != homeuuid:
+                homeuuid = ""
+        for a in accum:
+            out.writerow((a["email"], a["uuid"], homeuuid[0:5]))
+
+        print("Wrote %s" % args.report)
+
+    if args.migrate:
+        rows = []
+        by_email = {}
+        with open(args.migrate, "rt") as f:
+            for r in csv.reader(f):
+                if r[0] == "email":
+                    continue
+                by_email.setdefault(r[0], [])
+                by_email[r[0]].append(r)
+                rows.append(r)
+        for r in rows:
+            email = r[0]
+            old_user_uuid = r[1]
+            userhome = r[2]
+
+            if userhome == "":
+                print("(%s) Skipping %s, no home cluster specified" % (email, old_user_uuid))
+                continue
+            if old_user_uuid.startswith(userhome):
+                continue
+            candidates = []
+            for b in by_email[email]:
+                if b[1].startswith(userhome):
+                    candidates.append(b)
+            if len(candidates) == 0:
+                if len(userhome) == 5 and userhome not in clusters:
+                    print("(%s) Cannot migrate %s, unknown home cluster %s (typo?)" % (email, old_user_uuid, userhome))
+                else:
+                    print("(%s) No user listed with same email to migrate %s to %s" % (email, old_user_uuid, userhome))
+                continue
+            if len(candidates) > 1:
+                print("(%s) Multiple users listed to migrate %s to %s, use full uuid" % (email, old_user_uuid, userhome))
+                continue
+            new_user_uuid = candidates[0][1]
+
+            # cluster where the migration is happening
+            migratecluster = old_user_uuid[0:5]
+            migratearv = clusters[migratecluster]
+
+            # the user's new home cluster
+            newhomecluster = userhome[0:5]
+            homearv = clusters[newhomecluster]
+
+            # Create a token for the new user and salt it for the
+            # migration cluster, then use it to access the migration
+            # cluster as the new user once before merging to ensure
+            # the new user is known on that cluster.
+            try:
+                newtok = homearv.api_client_authorizations().create(body={
+                    "api_client_authorization": {'owner_uuid': new_user_uuid}}).execute()
+            except arvados.errors.ApiError as e:
+                print("(%s) Could not create API token for %s: %s" % (email, new_user_uuid, e))
+                continue
+
+            salted = 'v2/' + newtok["uuid"] + '/' + hmac.new(newtok["api_token"].encode(),
+                                                             msg=migratecluster.encode(),
+                                                             digestmod='sha1').hexdigest()
+            try:
+                ru = urllib.parse.urlparse(migratearv._rootDesc["rootUrl"])
+                newuser = arvados.api(host=ru.netloc, token=salted).users().current().execute()
+            except arvados.errors.ApiError as e:
+                print("(%s) Error getting user info for %s from %s: %s" % (email, new_user_uuid, migratecluster, e))
+                continue
+
+            try:
+                olduser = migratearv.users().get(uuid=old_user_uuid).execute()
+            except arvados.errors.ApiError as e:
+                print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
+                continue
+
+            if not newuser["is_active"]:
+                print("(%s) Activating user %s on %s" % (email, new_user_uuid, migratecluster))
+                try:
+                    migratearv.users().update(uuid=new_user_uuid, body={"is_active": True}).execute()
+                except arvados.errors.ApiError as e:
+                    print("(%s) Could not activate user %s on %s: %s" % (email, new_user_uuid, migratecluster, e))
+                    continue
+
+            if olduser["is_admin"] and not newuser["is_admin"]:
+                print("(%s) Not migrating %s because user is admin but target user %s is not admin on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+                continue
+
+            print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+
+            try:
+                grp = migratearv.groups().create(body={
+                    "owner_uuid": new_user_uuid,
+                    "name": "Migrated from %s (%s)" % (email, old_user_uuid),
+                    "group_class": "project"
+                }, ensure_unique_name=True).execute()
+                migratearv.users().merge(old_user_uuid=old_user_uuid,
+                                         new_user_uuid=new_user_uuid,
+                                         new_owner_uuid=grp["uuid"],
+                                         redirect_to_new_user=True).execute()
+            except arvados.errors.ApiError as e:
+                print("(%s) Error migrating user: %s" % (email, e))
+
+if __name__ == "__main__":
+    main()
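
A minimal sketch of how this new tool is meant to be driven end to end, based on the argparse options and CSV formats defined above. The hostnames, tokens, and file names below are placeholders:

    import csv
    import subprocess

    # tokens file: one "host,token" row per cluster in the federation
    with open("tokens.csv", "w", newline="") as f:
        w = csv.writer(f)
        w.writerow(["x1111.example.com", "token-for-x1111"])
        w.writerow(["y2222.example.com", "token-for-y2222"])

    base = ["arv-federation-migrate", "--tokens", "tokens.csv"]

    # 1. Verify every token works and the federation is well connected.
    subprocess.run(base + ["--check"], check=True)

    # 2. Generate the per-email report, then hand-edit the third column
    #    ("primary cluster/user") wherever the script left it blank.
    subprocess.run(base + ["--report", "users.csv"], check=True)

    # 3. Apply the edited report.
    subprocess.run(base + ["--migrate", "users.csv"], check=True)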
diff --git a/sdk/python/bin/arv-federation-migrate b/sdk/python/bin/arv-federation-migrate
new file mode 100755 (executable)
index 0000000..a4c0974
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.federation_migrate import main
+main()
index ffca23495c475828184b31e355cfdaa42b6e0bdc..0fc2dde31b8a7851ff3db066062f068ebdbc9ff7 100644 (file)
@@ -37,6 +37,7 @@ setup(name='arvados-python-client',
           'bin/arv-keepdocker',
           'bin/arv-ls',
           'bin/arv-migrate-docker19',
+          'bin/arv-federation-migrate',
           'bin/arv-normalize',
           'bin/arv-put',
           'bin/arv-run',
index 6687ca491a769140aa8c803a5fd2b1a6ce3b1850..fea0578abdeca4d70a87cfadf0b9fe95825eb866 100644 (file)
@@ -413,29 +413,32 @@ def run_controller():
         f.write("""
 Clusters:
   zzzzz:
-    HTTPRequestTimeout: 30s
+    ManagementToken: e687950a23c3a9bceec28c6223a06c79
+    API:
+      RequestTimeout: 30s
     PostgreSQL:
       ConnectionPool: 32
       Connection:
-        host: {}
-        dbname: {}
-        user: {}
-        password: {}
-    NodeProfiles:
-      "*":
-        "arvados-controller":
-          Listen: ":{}"
-        "arvados-api-server":
-          Listen: ":{}"
-          TLS: true
-          Insecure: true
+        host: {dbhost}
+        dbname: {dbname}
+        user: {dbuser}
+        password: {dbpass}
+    TLS:
+      Insecure: true
+    Services:
+      Controller:
+        InternalURLs:
+          "http://localhost:{controllerport}": {{}}
+      RailsAPI:
+        InternalURLs:
+          "https://localhost:{railsport}": {{}}
         """.format(
-            _dbconfig('host'),
-            _dbconfig('database'),
-            _dbconfig('username'),
-            _dbconfig('password'),
-            port,
-            rails_api_port,
+            dbhost=_dbconfig('host'),
+            dbname=_dbconfig('database'),
+            dbuser=_dbconfig('username'),
+            dbpass=_dbconfig('password'),
+            controllerport=port,
+            railsport=rails_api_port,
         ))
     logf = open(_logfilename('controller'), 'a')
     controller = subprocess.Popen(
@@ -632,6 +635,7 @@ def run_arv_git_httpd():
     agh = subprocess.Popen(
         ['arv-git-httpd',
          '-repo-root='+gitdir+'/test',
+         '-management-token=e687950a23c3a9bceec28c6223a06c79',
          '-address=:'+str(gitport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('arv-git-httpd'), 'w') as f:
@@ -657,6 +661,7 @@ def run_keep_web():
         ['keep-web',
          '-allow-anonymous',
          '-attachment-only-host=download',
+         '-management-token=e687950a23c3a9bceec28c6223a06c79',
          '-listen=:'+str(keepwebport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('keep-web'), 'w') as f:
index 6d42956940972da3ba776637345b6f321258a769..804d2a479d3d4701489c8d1bed812c7a6873c252 100644 (file)
@@ -71,6 +71,8 @@ gem 'rails-observers'
 gem 'rails-perftest'
 gem 'rails-controller-testing'
 
+gem 'sass-rails'
+
 # Install any plugin gems
 Dir.glob(File.join(File.dirname(__FILE__), 'lib', '**', "Gemfile")) do |f|
     eval(IO.read(f), binding)
index 13f7564be86576c231d35a6f04da416738a59954..078b2b7f418d1e94ca2b4ab424be702cdad1197b 100644 (file)
@@ -110,6 +110,7 @@ GEM
     faye-websocket (0.10.7)
       eventmachine (>= 0.12.0)
       websocket-driver (>= 0.5.1)
+    ffi (1.9.25)
     globalid (0.4.2)
       activesupport (>= 4.2.0)
     googleauth (0.8.0)
@@ -220,6 +221,9 @@ GEM
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
     rake (12.3.2)
+    rb-fsevent (0.10.3)
+    rb-inotify (0.9.10)
+      ffi (>= 0.5.0, < 2)
     ref (2.0.0)
     request_store (1.4.1)
       rack (>= 1.4)
@@ -231,6 +235,17 @@ GEM
     rvm-capistrano (1.5.6)
       capistrano (~> 2.15.4)
     safe_yaml (1.0.5)
+    sass (3.5.5)
+      sass-listen (~> 4.0.0)
+    sass-listen (4.0.0)
+      rb-fsevent (~> 0.9, >= 0.9.4)
+      rb-inotify (~> 0.9, >= 0.9.7)
+    sass-rails (5.0.7)
+      railties (>= 4.0.0, < 6)
+      sass (~> 3.1)
+      sprockets (>= 2.8, < 4.0)
+      sprockets-rails (>= 2.0, < 4.0)
+      tilt (>= 1.1, < 3)
     signet (0.11.0)
       addressable (~> 2.3)
       faraday (~> 0.9)
@@ -257,6 +272,7 @@ GEM
       ref
     thor (0.20.3)
     thread_safe (0.3.6)
+    tilt (2.0.8)
     tzinfo (1.2.5)
       thread_safe (~> 0.1)
     uglifier (2.7.2)
@@ -299,6 +315,7 @@ DEPENDENCIES
   ruby-prof (~> 0.15.0)
   rvm-capistrano
   safe_yaml
+  sass-rails
   simplecov (~> 0.7.1)
   simplecov-rcov
   sshkey
index 4db96efabdb542ad26ca3cb66a9320854bd73035..c511f0ec514289d128cdc4beb6aedfe2c0bb3863 100644 (file)
Binary files a/services/api/app/assets/images/logo.png and b/services/api/app/assets/images/logo.png differ
index 742a575a93bb95a31791438936df00e99c97b0c4..721ff801c91c5aba35337fbd7a8efab7729d9f14 100644 (file)
@@ -7,7 +7,7 @@ SPDX-License-Identifier: AGPL-3.0 */
  * and any sub-directories. You're free to add application-wide styles to this file and they'll appear at
  * the top of the compiled file, but it's generally better to create a new file per style scope.
  *= require_self
- *= require_tree . 
+ *= require_tree .
 */
 
 .contain-align-left {
@@ -63,8 +63,7 @@ div#header span.beta > span {
     font-size: 0.8em;
 }
 img.curoverse-logo {
-    width: 221px;
-    height: 44px;
+    height: 66px;
 }
 #intropage {
     font-family: Verdana,Arial,sans-serif;
@@ -180,4 +179,3 @@ div#header a.sudo-logout {
     color: #000;
     font-weight: bold;
 }
-
index 77e3c75af28ad37531cd7ea3eb186fb149a3c679..89eabdcee2de14bd1d1ac1f38308a828e3c0f05c 100644 (file)
@@ -468,11 +468,21 @@ class ApplicationController < ActionController::Base
   end
 
   def load_json_value(hash, key, must_be_class=nil)
-    if hash[key].is_a? String
-      hash[key] = SafeJSON.load(hash[key])
-      if must_be_class and !hash[key].is_a? must_be_class
-        raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
-      end
+    return if hash[key].nil?
+
+    val = hash[key]
+    if val.is_a? ActionController::Parameters
+      val = val.to_unsafe_hash
+    elsif val.is_a? String
+      val = SafeJSON.load(val)
+      hash[key] = val
+    end
+    # When assigning a Hash to an ActionController::Parameters and then
+    # retrieving it, we get another ActionController::Parameters instead of
+    # a Hash. This doesn't happen with other types, which is why 'val' is
+    # used for the type checking below.
+    if must_be_class and !val.is_a? must_be_class
+      raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
     end
   end
 
@@ -482,7 +492,7 @@ class ApplicationController < ActionController::Base
   accept_attribute_as_json :properties, Hash
   accept_attribute_as_json :info, Hash
   def accept_attribute_as_json(attr, must_be_class)
-    if params[resource_name] and resource_attrs.is_a? Hash
+    if params[resource_name] and [Hash, ActionController::Parameters].include?(resource_attrs.class)
       if resource_attrs[attr].is_a? Hash
         # Convert symbol keys to strings (in hashes provided by
         # resource_attrs)
index 13e47f76cdf88b17c2ee659dafce1417678ab2ee..313fe5d0a086241ba9ca9cef95c8e29db60ad843 100644 (file)
@@ -26,7 +26,7 @@ class Arvados::V1::SchemaController < ApplicationController
     Rails.cache.fetch 'arvados_v1_rest_discovery' do
       Rails.application.eager_load!
       remoteHosts = {}
-      Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end }
+      Rails.configuration.RemoteClusters.each {|k,v| if k != :"*" then remoteHosts[k] = v["Host"] end }
       discovery = {
         kind: "discovery#restDescription",
         discoveryVersion: "v1",
@@ -67,6 +67,7 @@ class Arvados::V1::SchemaController < ApplicationController
         remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
         websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
         workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
+        workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,
         keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
         gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
         parameters: {
index 18b6b46d2678df8b8094c5f273d8db55a7f9e2a3..4a345f363be8da15055f52d54dcfb929f6687298 100644 (file)
@@ -126,37 +126,65 @@ class Arvados::V1::UsersController < ApplicationController
   end
 
   def merge
-    if !Thread.current[:api_client].andand.is_trusted
-      return send_error("supplied API token is not from a trusted client", status: 403)
-    elsif Thread.current[:api_client_authorization].scopes != ['all']
-      return send_error("cannot merge with a scoped token", status: 403)
-    end
+    if (params[:old_user_uuid] || params[:new_user_uuid])
+      if !current_user.andand.is_admin
+        return send_error("Must be admin to use old_user_uuid/new_user_uuid", status: 403)
+      end
+      if !params[:old_user_uuid] || !params[:new_user_uuid]
+        return send_error("Must supply both old_user_uuid and new_user_uuid", status: 422)
+      end
+      new_user = User.find_by_uuid(params[:new_user_uuid])
+      if !new_user
+        return send_error("User in new_user_uuid not found", status: 422)
+      end
+      @object = User.find_by_uuid(params[:old_user_uuid])
+      if !@object
+        return send_error("User in old_user_uuid not found", status: 422)
+      end
+    else
+      if !Thread.current[:api_client].andand.is_trusted
+        return send_error("supplied API token is not from a trusted client", status: 403)
+      elsif Thread.current[:api_client_authorization].scopes != ['all']
+        return send_error("cannot merge with a scoped token", status: 403)
+      end
 
-    new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
-    if !new_auth
-      return send_error("invalid new_user_token", status: 401)
-    end
-    if !new_auth.api_client.andand.is_trusted
-      return send_error("supplied new_user_token is not from a trusted client", status: 403)
-    elsif new_auth.scopes != ['all']
-      return send_error("supplied new_user_token has restricted scope", status: 403)
+      new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
+      if !new_auth
+        return send_error("invalid new_user_token", status: 401)
+      end
+
+      if new_auth.user.uuid[0..4] == Rails.configuration.ClusterID
+        if !new_auth.api_client.andand.is_trusted
+          return send_error("supplied new_user_token is not from a trusted client", status: 403)
+        elsif new_auth.scopes != ['all']
+          return send_error("supplied new_user_token has restricted scope", status: 403)
+        end
+      end
+      new_user = new_auth.user
+      @object = current_user
     end
-    new_user = new_auth.user
 
-    if current_user.uuid == new_user.uuid
+    if @object.uuid == new_user.uuid
       return send_error("cannot merge user to self", status: 422)
     end
 
+    if !params[:new_owner_uuid]
+      return send_error("missing new_owner_uuid", status: 422)
+    end
+
     if !new_user.can?(write: params[:new_owner_uuid])
       return send_error("cannot move objects into supplied new_owner_uuid: new user does not have write permission", status: 403)
     end
 
     redirect = params[:redirect_to_new_user]
+    if @object.uuid[0..4] != Rails.configuration.ClusterID && redirect
+      return send_error("cannot merge remote user to other with redirect_to_new_user=true", status: 422)
+    end
+
     if !redirect
       return send_error("merge with redirect_to_new_user=false is not yet supported", status: 422)
     end
 
-    @object = current_user
     act_as_system_user do
       @object.merge(new_owner_uuid: params[:new_owner_uuid], redirect_to_user_uuid: redirect && new_user.uuid)
     end
@@ -171,11 +199,17 @@ class Arvados::V1::UsersController < ApplicationController
         type: 'string', required: true,
       },
       new_user_token: {
-        type: 'string', required: true,
+        type: 'string', required: false,
       },
       redirect_to_new_user: {
         type: 'boolean', required: false,
       },
+      old_user_uuid: {
+        type: 'string', required: false,
+      },
+      new_user_uuid: {
+        type: 'string', required: false,
+      }
     }
   end
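
The new uuid-based path above can be exercised from the Python SDK, mirroring the call federation_migrate.py makes; a sketch with placeholder uuids, assuming an admin token:

    import arvados

    # Admin-only variant of users().merge: identify the user to be merged
    # by uuid instead of presenting that user's token.
    arv = arvados.api(host="x1111.example.com", token="ADMIN-TOKEN")
    arv.users().merge(
        old_user_uuid="x1111-tpzed-oldoldoldoldold",
        new_user_uuid="x1111-tpzed-newnewnewnewnew",
        new_owner_uuid="x1111-tpzed-newnewnewnewnew",  # where merged objects land
        redirect_to_new_user=True,  # required; false is rejected as unsupported
    ).execute()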
 
index 6e18cdd4607bb5aa6e5b49b608f1d15882891167..ef0f8868666dfb3bb786dab263270c8911df45e6 100644 (file)
@@ -80,6 +80,16 @@ class UserSessionsController < ApplicationController
     # For the benefit of functional and integration tests:
     @user = user
 
+    if user.uuid[0..4] != Rails.configuration.ClusterID
+      # Actually a remote user
+      # Send them to their home cluster's login
+      rh = Rails.configuration.RemoteClusters[user.uuid[0..4]]
+      remote, return_to_url = params[:return_to].split(',', 2)
+      @remotehomeurl = "#{rh.Scheme || "https"}://#{rh.Host}/login?remote=#{Rails.configuration.ClusterID}&return_to=#{return_to_url}"
+      render
+      return
+    end
+
     # prevent ArvadosModel#before_create and _update from throwing
     # "unauthorized":
     Thread.current[:user] = user
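
A sketch (in Python, all values placeholders) of the redirect URL built above, assuming return_to arrives encoded as "<remote cluster id>,<original url>" per the split(',', 2) call:

    # Ruby's split(',', 2) corresponds to Python's split(',', 1).
    rh_scheme, rh_host = "https", "xhome.example.com"  # user's home cluster
    local_cluster = "zzzzz"                            # this cluster's ID
    return_to = "zzzzz,https://workbench.zzzzz.example.com/"
    remote, return_to_url = return_to.split(",", 1)
    remote_home_url = "%s://%s/login?remote=%s&return_to=%s" % (
        rh_scheme, rh_host, local_cluster, return_to_url)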
index 54a4f369d9a058666c826cdd1a19e272962f8f3e..9f9a20fe33fa3b05bb6afc688a419c5934ce72e8 100644 (file)
@@ -51,11 +51,11 @@ class Blob
       timestamp = opts[:expire]
     else
       timestamp = db_current_time.to_i +
-        (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL)
+        (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL.to_i)
     end
     timestamp_hex = timestamp.to_s(16)
     # => "53163cb4"
-    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)
 
     # Generate a signature.
     signature =
@@ -103,7 +103,7 @@ class Blob
     if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
       raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
     end
-    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)
 
     my_signature =
       generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
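
The .to_i calls are the point of this hunk: BlobSigningTTL is now declared as a duration, and the hex encoding needs a plain integer count of seconds. A sketch of the expiry arithmetic (the 14-day TTL is only an example value):

    import time

    ttl = 14 * 24 * 3600                    # TTL as integer seconds
    timestamp = int(time.time()) + ttl      # expiry as a Unix timestamp
    timestamp_hex = format(timestamp, "x")  # e.g. "53163cb4"

    # Verification side: decode the hex timestamp and compare with "now".
    expired = int(timestamp_hex, 16) < int(time.time())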
index e0f653969948d6187865229be31b02f5e31a5294..775ebdb49486861d73f20b97ba562d77656de765 100644 (file)
@@ -33,7 +33,7 @@ class Collection < ArvadosModel
   validate :past_versions_cannot_be_updated, on: :update
   after_validation :set_file_count_and_total_size
   before_save :set_file_names
-  around_update :manage_versioning
+  around_update :manage_versioning, unless: :is_past_version?
 
   api_accessible :user, extend: :common do |t|
     t.add :name
@@ -281,8 +281,11 @@ class Collection < ArvadosModel
       sync_past_versions if syncable_updates.any?
       if snapshot
         snapshot.attributes = self.syncable_updates
-        snapshot.manifest_text = snapshot.signed_manifest_text
-        snapshot.save
+        leave_modified_by_user_alone do
+          act_as_system_user do
+            snapshot.save
+          end
+        end
       end
     end
   end
@@ -304,7 +307,7 @@ class Collection < ArvadosModel
     updates = self.syncable_updates
     Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_was, self.uuid_was).each do |c|
       c.attributes = updates
-      # Use a different validation context to skip the 'old_versions_cannot_be_updated'
+      # Use a different validation context to skip the 'past_versions_cannot_be_updated'
       # validator, as on this case it is legal to update some fields.
       leave_modified_by_user_alone do
         leave_modified_at_alone do
@@ -322,9 +325,17 @@ class Collection < ArvadosModel
     ['uuid', 'owner_uuid', 'delete_at', 'trash_at', 'is_trashed', 'replication_desired', 'storage_classes_desired']
   end
 
+  def is_past_version?
+    # Check for the '_was' values just in case the update operation
+    # includes a change to current_version_uuid or uuid.
+    !(new_record? || self.current_version_uuid_was == self.uuid_was)
+  end
+
   def should_preserve_version?
     return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys))
 
+    return false if self.is_trashed
+
     idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle
     if !self.preserve_version_was &&
       (idle_threshold < 0 ||
@@ -371,7 +382,7 @@ class Collection < ArvadosModel
       return manifest_text
     else
       token = Thread.current[:token]
-      exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL,
+      exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i,
              trash_at].compact.map(&:to_i).min
       self.class.sign_manifest manifest_text, token, exp
     end
@@ -379,7 +390,7 @@ class Collection < ArvadosModel
 
   def self.sign_manifest manifest, token, exp=nil
     if exp.nil?
-      exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
+      exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i
     end
     signing_opts = {
       api_token: token,
@@ -650,9 +661,7 @@ class Collection < ArvadosModel
   end
 
   def past_versions_cannot_be_updated
-    # We check for the '_was' values just in case the update operation
-    # includes a change on current_version_uuid or uuid.
-    if current_version_uuid_was != uuid_was
+    if is_past_version?
       errors.add(:base, "past versions cannot be updated")
       false
     end
@@ -660,7 +669,7 @@ class Collection < ArvadosModel
 
   def versioning_metadata_updates
     valid = true
-    if (current_version_uuid_was == uuid_was) && current_version_uuid_changed?
+    if !is_past_version? && current_version_uuid_changed?
       errors.add(:current_version_uuid, "cannot be updated")
       valid = false
     end
index 3f296be55003328f0aa2f81a396256bf89e72297..02746f64d4cc56bafe1ee429a2ecf4d4ec0049c6 100644 (file)
@@ -12,6 +12,12 @@ class JsonbType
       nil
     end
 
+    def changed_in_place?(raw_old_value, value)
+      # Compare deserialized values for correctness; comparing serialized
+      # values can report spurious differences in key ordering, whitespace, etc.
+      deserialize(raw_old_value) != value
+    end
+
     def deserialize(value)
       if value.nil?
         self.default_value
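
An illustration, in Python for brevity, of why changed_in_place? compares deserialized values: equal data can serialize differently, so comparing raw strings reports spurious changes:

    import json

    old_raw = '{"a": 1, "b": 2}'   # value as stored in the database
    new_val = {"b": 2, "a": 1}     # same data, different key order

    print(json.dumps(new_val) == old_raw)  # False: spurious "change"
    print(json.loads(old_raw) == new_val)  # True: no real change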
index 989a975924c1bedaa143b75fea3ca830157118d2..fc5ae0a49db5b2fb05036a0587759d92378358cf 100644 (file)
@@ -580,7 +580,7 @@ class User < ArvadosModel
     if self.prefs_changed?
       if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
         profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress
-        ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
+        ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address and !profile_notification_address.empty?
       end
     end
   end
index 302859543c5dd246ff29d4f84e2e2b051e091871..a99b6f165dd74864c160df8597c06eecc15f5477 100644 (file)
@@ -5,14 +5,15 @@ SPDX-License-Identifier: AGPL-3.0 %>
 <!DOCTYPE html>
 <html>
 <head>
-  <title>Server</title>
+  <title>Arvados API Server (<%= Rails.configuration.ClusterID %>)</title>
   <%= stylesheet_link_tag    "application" %>
   <%= javascript_include_tag "application" %>
   <%= csrf_meta_tags %>
 </head>
 <body>
 <div id="header">
-  <div class="apptitle">ARVADOS <span class="beta"><span>BETA</span></span></div>
+  <div class="apptitle">ARVADOS</div>
+  <div>(<%= Rails.configuration.ClusterID %>)</div>
   <div style="float:right">
     <% if current_user %>
     <%= current_user.full_name %>
@@ -23,7 +24,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
     &nbsp;&bull;&nbsp;
     <a class="logout" href="/logout">Log out</a>
     <% else %>
-    <a class="logout" href="/auth/joshid">Log in</a>
+      <!--<a class="logout" href="/auth/joshid">Log in</a>-->
     <% end %>
 
     <% if current_user and session[:real_uid] and session[:switch_back_to] and User.find(session[:real_uid].to_i).verify_userswitch_cookie(session[:switch_back_to]) %>
index 0f3141e0eff0b9fd6c873b0921516a7af94dc89f..b3c6e70d907f4e460096ecbb06aaa6ccc7ddcd87 100644 (file)
@@ -17,9 +17,9 @@ $(function(){
 
     <p>Sorry, something went wrong logging you in. Please try again.</p>
 
-    <p style="float:right;margin-top:1em">
-      <a href="/auth/joshid">Log in here.</a>
-    </p>
+    <!--<p style="float:right;margin-top:1em">
+      <a href="/login">Log in here.</a>
+    </p>-->
 
     <div style="clear:both;height:8em"></div>
   </div>
diff --git a/services/api/app/views/user_sessions/create.html.erb b/services/api/app/views/user_sessions/create.html.erb
new file mode 100644 (file)
index 0000000..545c3e5
--- /dev/null
@@ -0,0 +1,13 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div style="width:40em; margin:2em auto 0 auto">
+  <h1>Login redirect</h1>
+  <p>This login is linked to federated user <b><%= @user.email %></b> (<b><%= @user.uuid %></b>) on cluster <b><%= @user.uuid[0..4] %></b>.  You need to log in again on that cluster.</p>
+  <p>After logging in, you will be returned to this cluster (<b><%=Rails.configuration.ClusterID%></b>).</p>
+  <div style="width: 100%">
+    <div style="float: left"><a href="<%=@remotehomeurl%>">Click here log in on cluster <%= @user.uuid[0..4] %>.</a></div>
+    <div style="float: right"><a href="/logout">Cancel</a></div>
+  </div>
+</div>
index 669beb16e50e42e86fc9637594c264624faeae9a..22a8fed58e2fc584333f5f3684e822a7a20ac21c 100644 (file)
@@ -48,6 +48,13 @@ if $arvados_config_defaults.empty?
   raise "Missing #{::Rails.root.to_s}/config/config.default.yml"
 end
 
+def remove_sample_entries(h)
+  return unless h.is_a? Hash
+  h.delete("SAMPLE")
+  h.each { |k, v| remove_sample_entries(v) }
+end
+remove_sample_entries($arvados_config_defaults)
+
 clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first
 $arvados_config_defaults = clusterConfig
 $arvados_config_defaults["ClusterID"] = clusterID
@@ -106,8 +113,8 @@ arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_v
 arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
 arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
 arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
-arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
-arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+arvcfg.declare_config "Collections.BlobSigningTTL", ActiveSupport::Duration, :blob_signature_ttl
+arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v }
 arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
 arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
 arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
@@ -171,13 +178,13 @@ arvcfg.declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
 dbcfg = ConfigLoader.new
 
 dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
-dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
-dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
-dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
-dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
-dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
-dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template
-dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding
+dbcfg.declare_config "PostgreSQL.Connection.host", String, :host
+dbcfg.declare_config "PostgreSQL.Connection.port", String, :port
+dbcfg.declare_config "PostgreSQL.Connection.user", String, :username
+dbcfg.declare_config "PostgreSQL.Connection.password", String, :password
+dbcfg.declare_config "PostgreSQL.Connection.dbname", String, :database
+dbcfg.declare_config "PostgreSQL.Connection.template", String, :template
+dbcfg.declare_config "PostgreSQL.Connection.encoding", String, :encoding
 
 application_config = {}
 %w(application.default application).each do |cfgfile|
@@ -239,16 +246,16 @@ end
 # rails environments.
 #
 if ::Rails.env.to_s == "test" && db_config["test"].nil?
-  $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test"
+  $arvados_config["PostgreSQL"]["Connection"]["dbname"] = "arvados_test"
 end
 
-if $arvados_config["PostgreSQL"]["Connection"]["Password"].empty?
+if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
   raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
 end
 
-dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"]
-if $arvados_config["PostgreSQL"]["Connection"]["Post"] != 0
-  dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Post"]}"
+dbhost = $arvados_config["PostgreSQL"]["Connection"]["host"]
+if $arvados_config["PostgreSQL"]["Connection"]["port"] != 0
+  dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["port"]}"
 end
 
 #
@@ -257,10 +264,10 @@ end
 # For config migration, we've previously populated the PostgreSQL
 # section of the config from database.yml
 #
-ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:"+
-                      "#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@"+
-                      "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?"+
-                      "template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&"+
+ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["user"]}:"+
+                      "#{$arvados_config["PostgreSQL"]["Connection"]["password"]}@"+
+                      "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["dbname"]}?"+
+                      "template=#{$arvados_config["PostgreSQL"]["Connection"]["template"]}&"+
                       "encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+
                       "pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
 
old mode 100755 (executable)
new mode 100644 (file)
index 61f9b2d..c0cd40d
@@ -2,58 +2,16 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-require "arvados/keep"
-require "group_pdhs"
-
 class AddFileInfoToCollection < ActiveRecord::Migration[4.2]
-  def do_batch(pdhs)
-    pdhs_str = ''
-    pdhs.each do |pdh|
-      pdhs_str << "'" << pdh << "'" << ","
-    end
-
-    collections = ActiveRecord::Base.connection.exec_query(
-      "SELECT DISTINCT portable_data_hash, manifest_text FROM collections "\
-      "WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) "
-    )
-
-    collections.rows.each do |row|
-      manifest = Keep::Manifest.new(row[1])
-      ActiveRecord::Base.connection.exec_query("BEGIN")
-      ActiveRecord::Base.connection.exec_query("UPDATE collections SET file_count=#{manifest.files_count}, "\
-                                               "file_size_total=#{manifest.files_size} "\
-                                               "WHERE portable_data_hash='#{row[0]}'")
-      ActiveRecord::Base.connection.exec_query("COMMIT")
-    end
-  end
-
   def up
     add_column :collections, :file_count, :integer, default: 0, null: false
     add_column :collections, :file_size_total, :integer, limit: 8, default: 0, null: false
 
-    distinct_pdh_count = ActiveRecord::Base.connection.exec_query(
-      "SELECT DISTINCT portable_data_hash FROM collections"
-    ).rows.count
-
-    # Generator that queries for all the distinct pdhs greater than last_pdh
-    ordered_pdh_query = lambda { |last_pdh, &block|
-      pdhs = ActiveRecord::Base.connection.exec_query(
-        "SELECT DISTINCT portable_data_hash FROM collections "\
-        "WHERE portable_data_hash > '#{last_pdh}' "\
-        "ORDER BY portable_data_hash LIMIT 1000"
-      )
-      pdhs.rows.each do |row|
-        block.call(row[0])
-      end
-    }
-
-    batch_size_max = 1 << 28 # 256 MiB
-    GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,
-                                                   distinct_pdh_count,
-                                                   batch_size_max,
-                                                   "AddFileInfoToCollection") do |pdhs|
-      do_batch(pdhs)
-    end
+    puts "Collections now have two new columns, file_count and file_size_total."
+    puts "They were initialized with a zero value. If you are upgrading an Arvados"
+    puts "installation, please run the populate-file-info-columns-in-collections.rb"
+    puts "script to populate the columns. If this is a new installation, that is not"
+    puts "necessary."
   end
 
   def down
index e97f65a97397c86474fa69d190e92cd1fabfe8ce..886c8873891c044270313e3563c73e4fe950c5cb 100644 (file)
@@ -44,7 +44,7 @@ module AuditLogs
   end
 
   def self.tidy_in_background
-    max_age = Rails.configuration.AuditLogs.MaxAge
+    max_age = Rails.configuration.AuditLogs.MaxAge.to_i
     max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch
     return if max_age <= 0 || max_batch <= 0
 
index c5c5cdc76933dc833eb0fb5c8b46995152dc503f..2146d9bc379409fbe2bffaac73dd273c29fb93e3 100644 (file)
@@ -11,7 +11,7 @@ namespace :db do
   desc "Remove old container log entries from the logs table"
 
   task delete_old_container_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
index 3c1c049998377ffe79ac9cb3a2b512d34a6834f9..a1ae2226a071b7f9b8ca0e81148e7ac5ba679b08 100644 (file)
@@ -9,7 +9,7 @@
 namespace :db do
   desc "Remove old job stderr entries from the logs table"
   task delete_old_job_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
index f7faabc4c262c990ee20ee60a9cdc829b1bf8214..c99b08513b64a57b046dccea7905ca032bd3b916 100644 (file)
@@ -65,7 +65,7 @@ module Trashable
       earliest_delete = [
         @validation_timestamp,
         trash_at_was,
-      ].compact.min + Rails.configuration.Collections.BlobSigningTTL.seconds
+      ].compact.min + Rails.configuration.Collections.BlobSigningTTL
 
       # The previous value of delete_at is also an upper bound on the
       # longest-lived permission token. For example, if TTL=14,
@@ -96,7 +96,7 @@ module TrashableController
       @object.update_attributes!(trash_at: db_current_time)
     end
     earliest_delete = (@object.trash_at +
-                       Rails.configuration.Collections.BlobSigningTTL.seconds)
+                       Rails.configuration.Collections.BlobSigningTTL)
     if @object.delete_at > earliest_delete
       @object.update_attributes!(delete_at: earliest_delete)
     end
index 21cd74bae67d3cbf94257284aedf05ab20aaad57..c688ac008b44b21944e86b36cdb3abbb15273e12 100644 (file)
@@ -12,14 +12,19 @@ module UpdatePriority
   #
   # If container priority=0 but there are committed container requests
   # for it with priority>0, update priority.
-  def self.update_priority
+  #
+  # Normally, update_priority is a no-op if another thread/process is
+  # already updating. Test cases that need to check priorities after
+  # updating can force a (possibly overlapping) update in the current
+  # thread/transaction by setting the "nolock" flag. See #14878.
+  def self.update_priority(nolock: false)
     if !File.owned?(Rails.root.join('tmp'))
       Rails.logger.warn("UpdatePriority: not owner of #{Rails.root}/tmp, skipping")
       return
     end
     lockfile = Rails.root.join('tmp', 'update_priority.lock')
     File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
-      return unless f.flock(File::LOCK_NB|File::LOCK_EX)
+      return unless nolock || f.flock(File::LOCK_NB|File::LOCK_EX)
 
       # priority>0 but should be 0:
       ActiveRecord::Base.connection.
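
The same single-updater pattern, sketched in Python: take a non-blocking exclusive lock on a lockfile and skip the round if another process holds it, unless the caller forces an update. Names here are illustrative:

    import fcntl

    def do_update():
        pass  # stand-in for the actual priority fixups

    def update_priority(nolock=False):
        with open("tmp/update_priority.lock", "a") as f:
            try:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                if not nolock:
                    return  # another updater is already running; skip
            do_update()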
diff --git a/services/api/script/populate-file-info-columns-in-collections.rb b/services/api/script/populate-file-info-columns-in-collections.rb
new file mode 100755 (executable)
index 0000000..f7cb024
--- /dev/null
@@ -0,0 +1,97 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Arvados version 1.4.0 introduces two new columns on the collections table named
+#   file_count
+#   file_size_total
+#
+# The database migration that adds these columns does not populate them with
+# data; it initializes them to zero.
+#
+# This script populates the columns for any collection whose file_count is
+# zero. It skips collections that have invalid manifests, printing the details
+# of each collection it skips.
+#
+# Run the script as
+#
+# cd script
+# RAILS_ENV=production bundle exec ./populate-file-info-columns-in-collections.rb
+#
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+require "arvados/keep"
+require "group_pdhs"
+
+def do_batch(pdhs)
+  pdhs_str = ''
+  pdhs.each do |pdh|
+    pdhs_str << "'" << pdh << "'" << ","
+  end
+
+  collections = ActiveRecord::Base.connection.exec_query(
+    "SELECT DISTINCT portable_data_hash, manifest_text FROM collections "\
+    "WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) "
+  )
+  collections.rows.each do |row|
+    begin
+      manifest = Keep::Manifest.new(row[1])
+      ActiveRecord::Base.connection.exec_query("BEGIN")
+      ActiveRecord::Base.connection.exec_query("UPDATE collections SET file_count=#{manifest.files_count}, "\
+                                               "file_size_total=#{manifest.files_size} "\
+                                               "WHERE portable_data_hash='#{row[0]}'")
+      ActiveRecord::Base.connection.exec_query("COMMIT")
+    rescue ArgumentError => detail
+      require 'pp'
+      puts
+      puts "*************** Row detail ***************"
+      puts
+      pp row
+      puts
+      puts "************ Collection detail ***********"
+      puts
+      pp Collection.find_by_portable_data_hash(row[0])
+      puts
+      puts "************** Error detail **************"
+      puts
+      pp detail
+      puts
+      puts "Skipping this collection, continuing!"
+      next
+    end
+  end
+end
+
+
+def main
+
+  distinct_pdh_count = ActiveRecord::Base.connection.exec_query(
+    "SELECT DISTINCT portable_data_hash FROM collections where file_count=0"
+  ).rows.count
+
+  # Generator that queries for all the distinct pdhs greater than last_pdh
+  ordered_pdh_query = lambda { |last_pdh, &block|
+    pdhs = ActiveRecord::Base.connection.exec_query(
+      "SELECT DISTINCT portable_data_hash FROM collections "\
+      "WHERE file_count=0 and portable_data_hash > '#{last_pdh}' "\
+      "ORDER BY portable_data_hash LIMIT 1000"
+    )
+    pdhs.rows.each do |row|
+      block.call(row[0])
+    end
+  }
+
+  batch_size_max = 1 << 28 # 256 MiB
+  GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,
+                                                 distinct_pdh_count,
+                                                 batch_size_max,
+                                                 "AddFileInfoToCollection") do |pdhs|
+    do_batch(pdhs)
+  end
+end
+
+main
index cc545b2fd1a92fbb892f3e8a78dc759996cb8b55..4e8b0559aabf519ac9b598e385f1a5432a9a9f85 100644 (file)
@@ -924,19 +924,47 @@ EOS
     assert_equal 'value1', json_response['properties']['property1']
   end
 
-  test "create collection with properties" do
-    authorize_with :active
-    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
-    post :create, params: {
-      collection: {
-        manifest_text: manifest_text,
-        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
-        properties: {'property_1' => 'value_1'}
+  [
+    {'property_1' => 'value_1'},
+    "{\"property_1\":\"value_1\"}",
+  ].each do |p|
+    test "create collection with valid properties param #{p.inspect}" do
+      authorize_with :active
+      manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+      post :create, params: {
+        collection: {
+          manifest_text: manifest_text,
+          portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+          properties: p
+        }
       }
-    }
-    assert_response :success
-    assert_not_nil json_response['uuid']
-    assert_equal 'value_1', json_response['properties']['property_1']
+      assert_response :success
+      assert_not_nil json_response['uuid']
+      assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
+      assert_equal 'value_1', json_response['properties']['property_1']
+    end
+  end
+
+  [
+    false,
+    [],
+    42,
+    'some string',
+    '["json", "encoded", "array"]',
+  ].each do |p|
+    test "create collection with non-valid properties param #{p.inspect}" do
+      authorize_with :active
+      post :create, params: {
+        collection: {
+          name: "test collection with non-valid properties param '#{p.inspect}'",
+          manifest_text: '',
+          properties: p
+        }
+      }
+      assert_response 422
+      response_errors = json_response['errors']
+      assert_not_nil response_errors, 'Expected error in response'
+    end
   end
 
   [
@@ -1395,4 +1423,20 @@ EOS
     assert_response :success
     assert_equal 3, json_response['version']
   end
+
+  test "delete collection with versioning enabled" do
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second
+
+    col = collections(:collection_owned_by_active)
+    assert_equal 2, col.version
+    assert col.modified_at < Time.now - 1.second
+
+    authorize_with(:active)
+    post :trash, params: {
+      id: col.uuid,
+    }
+    assert_response :success
+    assert_equal col.version, json_response['version'], 'Trashing a collection should not create a new version'
+  end
 end
index 0501da1673ebdff87c5c9900b205dfdde96dce42..60696b98a9c998be7e270fe8bd3fea8cc72bd450 100644 (file)
@@ -927,7 +927,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
            redirect_to_new_user: true,
          })
     assert_response(:success)
-    assert_equal(users(:project_viewer).redirect_to_user_uuid, users(:active).uuid)
+    assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)
 
     auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
     assert_not_nil(auth)
@@ -935,6 +935,82 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     assert_equal(users(:active).uuid, auth.user.uuid)
   end
 
+
+  test "merge 'project_viewer' account into 'active' account using uuids" do
+    authorize_with(:admin)
+    post(:merge, params: {
+           old_user_uuid: users(:project_viewer).uuid,
+           new_user_uuid: users(:active).uuid,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(:success)
+    assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)
+
+    auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
+    assert_not_nil(auth)
+    assert_not_nil(auth.user)
+    assert_equal(users(:active).uuid, auth.user.uuid)
+  end
+
+  test "merge 'project_viewer' account into 'active' account using uuids denied for non-admin" do
+    authorize_with(:active)
+    post(:merge, params: {
+           old_user_uuid: users(:project_viewer).uuid,
+           new_user_uuid: users(:active).uuid,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(403)
+    assert_nil(users(:project_viewer).redirect_to_user_uuid)
+  end
+
+  test "merge 'project_viewer' account into 'active' account using uuids denied missing old_user_uuid" do
+    authorize_with(:admin)
+    post(:merge, params: {
+           new_user_uuid: users(:active).uuid,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+    assert_nil(users(:project_viewer).redirect_to_user_uuid)
+  end
+
+  test "merge 'project_viewer' account into 'active' account using uuids denied missing new_user_uuid" do
+    authorize_with(:admin)
+    post(:merge, params: {
+           old_user_uuid: users(:project_viewer).uuid,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+    assert_nil(users(:project_viewer).redirect_to_user_uuid)
+  end
+
+  test "merge 'project_viewer' account into 'active' account using uuids denied bogus old_user_uuid" do
+    authorize_with(:admin)
+    post(:merge, params: {
+           old_user_uuid: "zzzzz-tpzed-bogusbogusbogus",
+           new_user_uuid: users(:active).uuid,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+    assert_nil(users(:project_viewer).redirect_to_user_uuid)
+  end
+
+  test "merge 'project_viewer' account into 'active' account using uuids denied bogus new_user_uuid" do
+    authorize_with(:admin)
+    post(:merge, params: {
+           old_user_uuid: users(:project_viewer).uuid,
+           new_user_uuid: "zzzzz-tpzed-bogusbogusbogus",
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+    assert_nil(users(:project_viewer).redirect_to_user_uuid)
+  end
+
   NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
                          "last_name", "username"].sort
 
index ab1a3e69de4a10ac6d623769ca126b6cf6909b78..eb44b9b34ead7bc858ce700c441cc3377f4de1b0 100644 (file)
@@ -358,7 +358,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
     assert_not_nil json_response['properties']
     assert_empty json_response['properties']
 
-    # update collection's description
+    # update collection's properties
     put "/arvados/v1/collections/#{json_response['uuid']}",
       params: {
         format: :json,
@@ -366,6 +366,35 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
       },
       headers: auth(:active)
     assert_response :success
+    assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
+    assert_equal 'value_1', json_response['properties']['property_1']
+  end
+
+  test "create collection and update it with json encoded hash properties" do
+    # create collection to be searched for
+    signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\n", api_token(:active))
+    post "/arvados/v1/collections",
+      params: {
+        format: :json,
+        collection: {manifest_text: signed_manifest}.to_json,
+      },
+      headers: auth(:active)
+    assert_response 200
+    assert_not_nil json_response['uuid']
+    assert_not_nil json_response['properties']
+    assert_empty json_response['properties']
+
+    # update collection's properties
+    put "/arvados/v1/collections/#{json_response['uuid']}",
+      params: {
+        format: :json,
+        collection: {
+          properties: "{\"property_1\":\"value_1\"}"
+        }
+      },
+      headers: auth(:active)
+    assert_response :success
+    assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
     assert_equal 'value_1', json_response['properties']['property_1']
   end
 end
index 477f9e27505200b3e080ae29b8362151adb6e21d..08d5b1fb72cb9544ba8ae651e0936826462703d3 100644 (file)
@@ -266,6 +266,67 @@ class CollectionTest < ActiveSupport::TestCase
     end
   end
 
+  # This test exposes a bug related to JSONB attributes; see #15725.
+  test "recently loaded collection shouldn't list changed attributes" do
+    col = Collection.where("properties != '{}'::jsonb").limit(1).first
+    refute col.properties_changed?, 'Properties field should not be seen as changed'
+  end
+
+  [
+    [
+      true,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {:foo=>:bar, :lst=>[1, 3, 5, 7], :hsh=>{'baz'=>'qux', :foobar=>true, 'hsh'=>{:nested=>true}}, :delete_at=>nil},
+    ],
+    [
+      true,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}},
+    ],
+    [
+      true,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'foobar'=>true, 'hsh'=>{'nested'=>true}, 'baz'=>'qux'}},
+    ],
+    [
+      false,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 42], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+    ],
+    [
+      false,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'foo'=>'bar', 'lst'=>[1, 3, 7, 5], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+    ],
+    [
+      false,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>false}}, 'delete_at'=>nil},
+    ],
+    [
+      false,
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>1234567890},
+    ],
+  ].each do |should_be_equal, value_1, value_2|
+    test "JSONB properties #{value_1} is#{should_be_equal ? '' : ' not'} equal to #{value_2}" do
+      act_as_user users(:active) do
+        # Set up initial collection
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+        c.update_attributes!({'properties' => value_1})
+        c.reload
+        assert c.changes.keys.empty?
+        c.properties = value_2
+        if should_be_equal
+          assert c.changes.keys.empty?, "Properties #{value_1.inspect} should be equal to #{value_2.inspect}"
+        else
+          refute c.changes.keys.empty?, "Properties #{value_1.inspect} should not be equal to #{value_2.inspect}"
+        end
+      end
+    end
+  end
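+
+  # Editor's note: the truth table above encodes the comparison rule behind
+  # the JSONB fix. Hash key order (and Ruby string-vs-symbol keys, which
+  # serialize identically) must not register as a change, while list order,
+  # element values, and key additions or removals must. A minimal Python
+  # sketch of that rule, for illustration only (the production logic lives
+  # in the Ruby model layer):
+  #
+  #     import json
+  #
+  #     def jsonb_equal(a, b):
+  #         # Canonicalize both sides by serializing with sorted keys: hash
+  #         # key order is irrelevant in JSONB, but list order and scalar
+  #         # values stay significant.
+  #         return json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)
+  #
+  #     assert jsonb_equal({'foo': 'bar', 'lst': [1, 3]},
+  #                        {'lst': [1, 3], 'foo': 'bar'})
+  #     assert not jsonb_equal({'lst': [1, 3]}, {'lst': [3, 1]})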
+
   test "older versions' modified_at indicate when they're created" do
     Rails.configuration.Collections.CollectionVersioning = true
     Rails.configuration.Collections.PreserveVersionIfIdle = 0
@@ -334,7 +395,6 @@ class CollectionTest < ActiveSupport::TestCase
     ['owner_uuid', 'zzzzz-tpzed-d9tiejq69daie8f', 'zzzzz-tpzed-xurymjxw79nv3jz'],
     ['replication_desired', 2, 3],
     ['storage_classes_desired', ['hot'], ['archive']],
-    ['is_trashed', true, false],
   ].each do |attr, first_val, second_val|
     test "sync #{attr} with older versions" do
       Rails.configuration.Collections.CollectionVersioning = true
@@ -760,7 +820,7 @@ class CollectionTest < ActiveSupport::TestCase
                              name: 'foo',
                              trash_at: db_current_time + 1.years)
       sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i
-      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
+      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i
       assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
       assert_operator sig_exp.to_i, :<=, expect_max_sig_exp
     end
@@ -849,7 +909,7 @@ class CollectionTest < ActiveSupport::TestCase
     test test_name do
       act_as_user users(:active) do
         min_exp = (db_current_time +
-                   Rails.configuration.Collections.BlobSigningTTL.seconds)
+                   Rails.configuration.Collections.BlobSigningTTL)
         if fixture_name == :expired_collection
           # Fixture-finder shorthand doesn't find trashed collections
           # because they're not in the default scope.
index 0dad6ee75ccf64b6484bf8da202b780932ff7a95..69e277cc2c000ae627c4b1193c629ac8cf99e7dc 100644 (file)
@@ -837,7 +837,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_not_nil(trash)
     assert_not_nil(delete)
     assert_in_delta(trash, now + 1.second, 10)
-    assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL.second, 10)
+    assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL, 10)
   end
 
   def check_output_ttl_1y(now, trash, delete)
index 2d28d3fb690e50c5702635fb772dd5d54bec9ad8..c1f60d91d02467571c8d9c720f0534d8164a9cfd 100644 (file)
@@ -10,13 +10,13 @@ class UpdatePriorityTest < ActiveSupport::TestCase
     uuid = containers(:running).uuid
     ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
     assert_equal 0, Container.find_by_uuid(uuid).priority
-    UpdatePriority.update_priority
+    UpdatePriority.update_priority(nolock: true)
     assert_operator 0, :<, Container.find_by_uuid(uuid).priority
 
     uuid = containers(:queued).uuid
     ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
     assert_equal 0, Container.find_by_uuid(uuid).priority
-    UpdatePriority.update_priority
+    UpdatePriority.update_priority(nolock: true)
     assert_operator 0, :<, Container.find_by_uuid(uuid).priority
   end
 
@@ -24,7 +24,7 @@ class UpdatePriorityTest < ActiveSupport::TestCase
     uuid = containers(:running).uuid
     ActiveRecord::Base.connection.exec_query('DELETE FROM container_requests WHERE container_uuid=$1', 'test-setup', [[nil, uuid]])
     assert_operator 0, :<, Container.find_by_uuid(uuid).priority
-    UpdatePriority.update_priority
+    UpdatePriority.update_priority(nolock: true)
     assert_equal 0, Container.find_by_uuid(uuid).priority
   end
 end
index a2b560889f5b40cf488739a05bb77c63644152d8..c0fe38008d9a419f1ccfd17e10bde60803cfa29f 100644 (file)
@@ -111,7 +111,7 @@ func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
        // simulate mounted read-only collection
        s.cp.mounts["/mnt"] = arvados.Mount{
                Kind:             "collection",
-               PortableDataHash: arvadostest.FooPdh,
+               PortableDataHash: arvadostest.FooCollectionPDH,
        }
 
        // simulate mounted writable collection
@@ -125,7 +125,7 @@ func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
        c.Assert(f.Close(), check.IsNil)
        s.cp.mounts["/mnt-w"] = arvados.Mount{
                Kind:             "collection",
-               PortableDataHash: arvadostest.FooPdh,
+               PortableDataHash: arvadostest.FooCollectionPDH,
                Writable:         true,
        }
        s.cp.binds = append(s.cp.binds, bindtmp+":/mnt-w")
@@ -197,7 +197,7 @@ func (s *copierSuite) TestUnsupportedMountKindBelow(c *check.C) {
 func (s *copierSuite) TestWritableMountBelow(c *check.C) {
        s.cp.mounts["/ctr/outdir/mount"] = arvados.Mount{
                Kind:             "collection",
-               PortableDataHash: arvadostest.FooPdh,
+               PortableDataHash: arvadostest.FooCollectionPDH,
                Writable:         true,
        }
        c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/mount", 0755), check.IsNil)
index 21fcf4d674e1015184f2c87413cb0fbb32a9f0d1..2f66b2461ebb81c7ee94399cc4dc426ac2315c33 100644 (file)
@@ -5,67 +5,24 @@
 package main
 
 import (
-       "flag"
-       "fmt"
-       "net/http"
+       "context"
+       "os"
 
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/service"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/health"
-       "git.curoverse.com/arvados.git/sdk/go/httpserver"
-       log "github.com/sirupsen/logrus"
 )
 
-var version = "dev"
-
-func main() {
-       configFile := flag.String("config", arvados.DefaultConfigFile, "`path` to arvados configuration file")
-       getVersion := flag.Bool("version", false, "Print version information and exit.")
-       flag.Parse()
-
-       // Print version information if requested
-       if *getVersion {
-               fmt.Printf("arvados-health %s\n", version)
-               return
-       }
-
-       log.SetFormatter(&log.JSONFormatter{
-               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
-       })
-       log.Printf("arvados-health %s started", version)
+var (
+       version             = "dev"
+       command cmd.Handler = service.Command(arvados.ServiceNameHealth, newHandler)
+)
 
-       cfg, err := arvados.GetConfig(*configFile)
-       if err != nil {
-               log.Fatal(err)
-       }
-       clusterCfg, err := cfg.GetCluster("")
-       if err != nil {
-               log.Fatal(err)
-       }
-       nodeCfg, err := clusterCfg.GetNodeProfile("")
-       if err != nil {
-               log.Fatal(err)
-       }
+func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string) service.Handler {
+       return &health.Aggregator{Cluster: cluster}
+}
 
-       log := log.WithField("Service", "Health")
-       srv := &httpserver.Server{
-               Addr: nodeCfg.Health.Listen,
-               Server: http.Server{
-                       Handler: &health.Aggregator{
-                               Config: cfg,
-                               Log: func(req *http.Request, err error) {
-                                       log.WithField("RemoteAddr", req.RemoteAddr).
-                                               WithField("Path", req.URL.Path).
-                                               WithError(err).
-                                               Info("HTTP request")
-                               },
-                       },
-               },
-       }
-       if err := srv.Start(); err != nil {
-               log.Fatal(err)
-       }
-       log.WithField("Listen", srv.Addr).Info("listening")
-       if err := srv.Wait(); err != nil {
-               log.Fatal(err)
-       }
+func main() {
+       os.Exit(command.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
 }
index d147573eec72d402faec43c21da86a010f13dc94..d6dd389278e7ae4f05faab2450680a2112fb1545 100644 (file)
@@ -45,7 +45,7 @@ func (s *UnitSuite) TestCache(c *check.C) {
                coll, err = cache.Get(arv, arvadostest.FooCollection, false)
                c.Check(err, check.Equals, nil)
                c.Assert(coll, check.NotNil)
-               c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooPdh)
+               c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
                c.Check(coll.ManifestText[:2], check.Equals, ". ")
        }
        s.checkCacheMetrics(c, cache.registry,
@@ -62,10 +62,10 @@ func (s *UnitSuite) TestCache(c *check.C) {
        // lookup.
        arv.ApiToken = arvadostest.ActiveToken
 
-       coll2, err := cache.Get(arv, arvadostest.FooPdh, false)
+       coll2, err := cache.Get(arv, arvadostest.FooCollectionPDH, false)
        c.Check(err, check.Equals, nil)
        c.Assert(coll2, check.NotNil)
-       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
        c.Check(coll2.ManifestText[:2], check.Equals, ". ")
        c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
 
@@ -76,10 +76,10 @@ func (s *UnitSuite) TestCache(c *check.C) {
                "pdh_hits 4",
                "api_calls 2")
 
-       coll2, err = cache.Get(arv, arvadostest.FooPdh, false)
+       coll2, err = cache.Get(arv, arvadostest.FooCollectionPDH, false)
        c.Check(err, check.Equals, nil)
        c.Assert(coll2, check.NotNil)
-       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
        c.Check(coll2.ManifestText[:2], check.Equals, ". ")
 
        s.checkCacheMetrics(c, cache.registry,
@@ -118,7 +118,7 @@ func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
        cache.registry = prometheus.NewRegistry()
 
        for _, forceReload := range []bool{false, true, false, true} {
-               _, err := cache.Get(arv, arvadostest.FooPdh, forceReload)
+               _, err := cache.Get(arv, arvadostest.FooCollectionPDH, forceReload)
                c.Check(err, check.Equals, nil)
        }
 
index 44d0b0ffefa743dc931eb448bcadce510e5abf92..1c93a2b91c0981840c5ac2dde998a55adb1d9b51 100644 (file)
@@ -298,8 +298,8 @@ func (s *IntegrationSuite) TestCadaverByID(c *check.C) {
                c.Check(stdout, check.Matches, `(?ms).*collection is empty.*`)
        }
        for _, path := range []string{
-               "/by_id/" + arvadostest.FooPdh,
-               "/by_id/" + arvadostest.FooPdh + "/",
+               "/by_id/" + arvadostest.FooCollectionPDH,
+               "/by_id/" + arvadostest.FooCollectionPDH + "/",
                "/by_id/" + arvadostest.FooCollection,
                "/by_id/" + arvadostest.FooCollection + "/",
        } {
index 7a015c91f9d07b56926dd480e0b30f47149af1c8..040638623748f8aa57150b314886871703157287 100644 (file)
@@ -59,7 +59,7 @@ func (s *UnitSuite) TestCORSPreflight(c *check.C) {
 }
 
 func (s *UnitSuite) TestInvalidUUID(c *check.C) {
-       bogusID := strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "-"
+       bogusID := strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + "-"
        token := arvadostest.ActiveToken
        for _, trial := range []string{
                "http://keep-web/c=" + bogusID + "/foo",
@@ -186,8 +186,8 @@ func (s *IntegrationSuite) doVhostRequests(c *check.C, authz authorizer) {
                arvadostest.FooCollection + ".example.com/foo",
                arvadostest.FooCollection + "--collections.example.com/foo",
                arvadostest.FooCollection + "--collections.example.com/_/foo",
-               arvadostest.FooPdh + ".example.com/foo",
-               strings.Replace(arvadostest.FooPdh, "+", "-", -1) + "--collections.example.com/foo",
+               arvadostest.FooCollectionPDH + ".example.com/foo",
+               strings.Replace(arvadostest.FooCollectionPDH, "+", "-", -1) + "--collections.example.com/foo",
                arvadostest.FooBarDirCollection + ".example.com/dir1/foo",
        } {
                c.Log("doRequests: ", hostPath)
index a9830bc1de4715d2cfdaa39049106bcf95cce779..ab50641be19c780a7d0b6145353b2611d0b02578 100644 (file)
@@ -164,16 +164,16 @@ func (s *IntegrationSuite) Test200(c *check.C) {
                        dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
                },
                {
-                       host:    strings.Replace(arvadostest.FooPdh, "+", "-", 1) + ".collections.example.com",
+                       host:    strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + ".collections.example.com",
                        path:    "/t=" + arvadostest.ActiveToken + "/foo",
                        dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
                },
                {
-                       path:    "/c=" + arvadostest.FooPdh + "/t=" + arvadostest.ActiveToken + "/foo",
+                       path:    "/c=" + arvadostest.FooCollectionPDH + "/t=" + arvadostest.ActiveToken + "/foo",
                        dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
                },
                {
-                       path:    "/c=" + strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "/t=" + arvadostest.ActiveToken + "/_/foo",
+                       path:    "/c=" + strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + "/t=" + arvadostest.ActiveToken + "/_/foo",
                        dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
                },
                {
index 96f458720d38b56b97fa51fd63e76faa798987bf..4d9e798ac67c71c2a81f51abeb2128b340a6cda6 100644 (file)
@@ -169,7 +169,7 @@ func (v *UnixVolume) DeviceID() string {
 
        fi, err := os.Stat(dev)
        if err != nil {
-               return giveup("stat %q: %s\n", dev, err)
+               return giveup("stat %q: %s", dev, err)
        }
        ino := fi.Sys().(*syscall.Stat_t).Ino
 
@@ -377,18 +377,18 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
        n, err := io.Copy(tmpfile, rdr)
        v.os.stats.TickOutBytes(uint64(n))
        if err != nil {
-               log.Printf("%s: writing to %s: %s\n", v, bpath, err)
+               log.Printf("%s: writing to %s: %s", v, bpath, err)
                tmpfile.Close()
                v.os.Remove(tmpfile.Name())
                return err
        }
        if err := tmpfile.Close(); err != nil {
-               log.Printf("closing %s: %s\n", tmpfile.Name(), err)
+               log.Printf("closing %s: %s", tmpfile.Name(), err)
                v.os.Remove(tmpfile.Name())
                return err
        }
        if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
-               log.Printf("rename %s %s: %s\n", tmpfile.Name(), bpath, err)
+               log.Printf("rename %s %s: %s", tmpfile.Name(), bpath, err)
                return v.os.Remove(tmpfile.Name())
        }
        return nil
@@ -400,14 +400,14 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
 func (v *UnixVolume) Status() *VolumeStatus {
        fi, err := v.os.Stat(v.Root)
        if err != nil {
-               log.Printf("%s: os.Stat: %s\n", v, err)
+               log.Printf("%s: os.Stat: %s", v, err)
                return nil
        }
        devnum := fi.Sys().(*syscall.Stat_t).Dev
 
        var fs syscall.Statfs_t
        if err := syscall.Statfs(v.Root, &fs); err != nil {
-               log.Printf("%s: statfs: %s\n", v, err)
+               log.Printf("%s: statfs: %s", v, err)
                return nil
        }
        // These calculations match the way df calculates disk usage:
@@ -620,7 +620,7 @@ func (v *UnixVolume) IsFull() (isFull bool) {
        if avail, err := v.FreeDiskSpace(); err == nil {
                isFull = avail < MinFreeKilobytes
        } else {
-               log.Printf("%s: FreeDiskSpace: %s\n", v, err)
+               log.Printf("%s: FreeDiskSpace: %s", v, err)
                isFull = false
        }
 
@@ -679,6 +679,7 @@ func (v *UnixVolume) lock(ctx context.Context) error {
        if v.locker == nil {
                return nil
        }
+       t0 := time.Now()
        locked := make(chan struct{})
        go func() {
                v.locker.Lock()
@@ -686,6 +687,7 @@ func (v *UnixVolume) lock(ctx context.Context) error {
        }()
        select {
        case <-ctx.Done():
+               log.Printf("%s: client hung up while waiting for Serialize lock (%s)", v, time.Since(t0))
                go func() {
                        <-locked
                        v.locker.Unlock()
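
Editor's note: the added log line records how long a request waited on the volume's Serialize lock before its client hung up. A rough Python analogue of this cancellable lock acquisition, polling where the Go code selects on ctx.Done() (illustrative only):

    import threading
    import time

    def lock_with_cancel(lock, cancelled, poll=0.1):
        # Retry the lock in short slices so cancellation is noticed
        # promptly; report the total wait when giving up.
        t0 = time.monotonic()
        while not lock.acquire(timeout=poll):
            if cancelled.is_set():
                print('client hung up while waiting for lock (%.1fs)'
                      % (time.monotonic() - t0))
                return False
        return True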
index f5329ebe16213ad1d7fa37aff09212efce299603..3bc905b0857f0bf52ba71944aeab5e2eea2242a6 100644 (file)
@@ -176,7 +176,7 @@ security_groups = idstring1, idstring2
 # size class (since libcloud does not provide any consistent API for exposing
 # this setting).
 # You may also want to define the amount of scratch space (expressed
-# in GB) for Crunch jobs.  You can also override Amazon's provided
+# in MB) for Crunch jobs.  You can also override Amazon's provided
 # data fields (such as price per hour) by setting them here.
 #
 # Additionally, you can ask for a preemptible instance (AWS's spot instance)
@@ -184,19 +184,22 @@ security_groups = idstring1, idstring2
 # both spot & reserved versions of the same size, you can do so by renaming
 # the Size section and specifying the instance type inside it.
 
+# 100 GB scratch space
 [Size m4.large]
 cores = 2
 price = 0.126
-scratch = 100
+scratch = 100000
 
+# 10 GB scratch space
 [Size m4.large.spot]
 instance_type = m4.large
 preemptible = true
 cores = 2
 price = 0.126
-scratch = 100
+scratch = 10000
 
+# 200 GB scratch space
 [Size m4.xlarge]
 cores = 4
 price = 0.252
-scratch = 100
+scratch = 200000
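
Editor's note: each stanza's comment is the configured scratch value divided by 1000, since [Size] sections now take MB. A small illustrative generator makes the unit convention explicit (the function name and layout are assumptions for illustration, not node manager code):

    def size_stanza(name, cores, price, scratch_gb, **extra):
        # Scratch space is expressed in MB inside [Size ...] sections.
        lines = ['[Size %s]' % name]
        lines += ['%s = %s' % (k, v) for k, v in extra.items()]
        lines += ['cores = %d' % cores, 'price = %s' % price,
                  'scratch = %d' % (scratch_gb * 1000)]
        return '\n'.join(lines)

    print(size_stanza('m4.large.spot', 2, 0.126, 10,
                      instance_type='m4.large', preemptible='true'))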
index 4f00d54e7e28d10eb366e47da1e0b2f1957d017f..ef05467810a79a88e1e4f27149fd30531804d318 100644 (file)
@@ -38,7 +38,7 @@ setup(name='arvados-node-manager',
           'apache-libcloud>=2.3.1.dev1',
           'arvados-python-client>=0.1.20170731145219',
           'future',
-          'pykka',
+          'pykka < 2',
           'python-daemon',
           'setuptools',
           'subprocess32>=3.5.1',
index 878119634bbaf23fca3183ab37651e3274147e3e..3e829522af24de67e134166e8dc227b2ba7b9b61 100755 (executable)
@@ -564,7 +564,7 @@ case "$subcmd" in
         ;;
 
     root-cert)
-       CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.pem
+       CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.crt
        if test -n "$1" ; then
            CERT="$1"
        fi
index 1b062ad8d131c141dd55a18bf0a474a6991a0186..6cd2de501e857e03edce332f618f6bc63f80de9b 100755 (executable)
@@ -8,6 +8,8 @@ set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+
 if test ! -s /var/lib/arvados/root-cert.pem ; then
     # req           signing request sub-command
     # -new          new certificate request
@@ -26,7 +28,7 @@ if test ! -s /var/lib/arvados/root-cert.pem ; then
            -nodes \
            -sha256 \
            -x509 \
-           -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=arvbox testing root CA for ${uuid_prefix}" \
+           -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=test root CA for ${uuid_prefix} generated $(date --rfc-3339=seconds)" \
            -extensions x509_ext \
            -config <(cat /etc/ssl/openssl.cnf \
                          <(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
@@ -59,7 +61,7 @@ if test ! -s /var/lib/arvados/server-cert-${localip}.pem ; then
            -new \
            -nodes \
            -sha256 \
-           -subj "/C=US/ST=MA/O=Arvados testing for ${uuid_prefix}/OU=arvbox/CN=localhost" \
+           -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=test server cert for ${uuid_prefix} generated $(date --rfc-3339=seconds)" \
            -reqexts x509_ext \
            -extensions x509_ext \
            -config <(cat /etc/ssl/openssl.cnf \
index 06a9ba7087892e12e1daeab396d82f463c43409b..986ad84966b53554550f78db6f52df96000a1793 100755 (executable)
@@ -18,6 +18,11 @@ if test "$1" = "--only-deps" ; then
 fi
 
 uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+secret_token=$(cat /var/lib/arvados/api_secret_token)
+blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+management_token=$(cat /var/lib/arvados/management_token)
+sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+vm_uuid=$(cat /var/lib/arvados/vm-uuid)
 database_pw=$(cat /var/lib/arvados/api_database_pw)
 
 if test -s /var/lib/arvados/api_rails_env ; then
@@ -31,7 +36,23 @@ mkdir -p /etc/arvados
 cat >/var/lib/arvados/cluster_config.yml <<EOF
 Clusters:
   ${uuid_prefix}:
-    NodeProfiles:
+    ManagementToken: $management_token
+    Services:
+      Workbench1:
+        ExternalURL: "https://$localip:${services[workbench]}"
+      Workbench2:
+        ExternalURL: "https://$localip:${services[workbench2-ssl]}"
+      SSO:
+        ExternalURL: "https://$localip:${services[sso]}"
+      Websocket:
+        ExternalURL: "wss://$localip:${services[websockets-ssl]}/websocket"
+      GitSSH:
+        ExternalURL: "ssh://git@$localip:"
+      GitHTTP:
+        ExternalURL: "http://$localip:${services[arv-git-httpd]}/"
+      WebDAV:
+        ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
+    NodeProfiles:  # to be deprecated in favor of "Services" section
       "*":
         arvados-controller:
           Listen: ":${services[controller]}" # choose a port
@@ -42,11 +63,27 @@ Clusters:
       Connection:
         # All parameters here are passed to the PG client library in a connection string;
         # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
-        Host: localhost
-        User: arvados
-        Password: ${database_pw}
-        DBName: arvados_${database_env}
+        host: localhost
+        user: arvados
+        password: ${database_pw}
+        dbname: arvados_${database_env}
         client_encoding: utf8
+    API:
+      RailsSessionSecretToken: $secret_token
+    Collections:
+      BlobSigningKey: $blob_signing_key
+      DefaultReplication: 1
+    Containers:
+      SupportedDockerImageFormats: ["v2"]
+    Login:
+      ProviderAppSecret: $sso_app_secret
+      ProviderAppID: arvados-server
+    Users:
+      NewUsersAreActive: true
+      AutoAdminFirstUser: true
+      AutoSetupNewUsers: true
+      AutoSetupNewUsersWithVmUUID: $vm_uuid
+      AutoSetupNewUsersWithRepository: true
 EOF
 
 /usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
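
Editor's note: once yml_override.py has applied any local overrides, a quick parse check confirms the generated file is valid YAML and carries the new Services section (a sketch, assuming PyYAML is available):

    import yaml

    with open('/var/lib/arvados/cluster_config.yml') as f:
        cfg = yaml.safe_load(f)
    cluster = next(iter(cfg['Clusters'].values()))
    # The Services block should carry an external URL per component.
    assert cluster['Services']['Workbench1']['ExternalURL'].startswith('https://')
    assert cluster['ManagementToken']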
index 87c427cd29ae0140b34d086f788a2df6e7aa4a48..4330157937410fe08658e28c8235fad697f2de2d 100755 (executable)
@@ -19,7 +19,7 @@ fi
 
 cat > /usr/local/bin/crunch-run.sh <<EOF
 #!/bin/sh
-exec /usr/local/bin/crunch-run -container-enable-networking=always -container-network-mode=host \$@
+exec /usr/local/bin/crunch-run -container-enable-networking=default -container-network-mode=host "\$@"
 EOF
 chmod +x /usr/local/bin/crunch-run.sh
 
index 4014c5c8b040316c4850df4d788476854d06527c..2353e949f7090093a02501afa57779f0dce6f649 100755 (executable)
@@ -24,6 +24,8 @@ http {
      access_log off;
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
+     client_max_body_size 128M;
+
      server {
             listen ${services[doc]} default_server;
             listen [::]:${services[doc]} default_server;
index 2dbef4ab876ab1911c518eded2b17478cd8acca4..e9e1ca4f8c8b0901c1e3792f2eb50d25f74c8fc3 100755 (executable)
@@ -26,6 +26,27 @@ cat <<EOF > /usr/src/workbench2/public/config.json
 }
 EOF
 
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+url_prefix="https://$localip:${services[workbench2-ssl]}/"
+
+set +e
+read -rd $'\000' apiclient <<EOF
+{
+   "url_prefix": "$url_prefix",
+   "is_trusted": true
+}
+EOF
+set -e
+
+clientuuid=$(arv --format=uuid api_client list --filters '[["url_prefix", "=", "'$url_prefix'"]]')
+if [[ -n "$clientuuid" ]] ; then
+    arv api_client update --uuid $clientuuid --api-client "$apiclient"
+else
+    arv api_client create --api-client "$apiclient"
+fi
+
 export HTTPS=false
 # Can't use "yarn start", need to run the dev server script
 # directly so that the TERM signal from "sv restart" gets to the
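
Editor's note: the block above registers the workbench2 dev server as a trusted api_client, updating an existing record when one already matches the url_prefix. The same idempotent upsert through the Python SDK might look like this sketch (api_clients is an admin-only resource; the url_prefix below is a placeholder):

    import arvados

    api = arvados.api('v1')  # needs the superuser token, as in the script
    url_prefix = 'https://localhost:3001/'  # placeholder
    attrs = {'url_prefix': url_prefix, 'is_trusted': True}
    matches = api.api_clients().list(
        filters=[['url_prefix', '=', url_prefix]]).execute()['items']
    if matches:
        api.api_clients().update(uuid=matches[0]['uuid'],
                                 body={'api_client': attrs}).execute()
    else:
        api.api_clients().create(body={'api_client': attrs}).execute()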
diff --git a/tools/keep-xref/keep-xref.py b/tools/keep-xref/keep-xref.py
new file mode 100755 (executable)
index 0000000..7bc4158
--- /dev/null
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+
+from __future__ import print_function, absolute_import
+import argparse
+import arvados
+import arvados.util
+import csv
+import sys
+import logging
+
+lglvl = logging.INFO+1
+logging.basicConfig(level=lglvl, format='%(message)s')
+
+"""
+ Given a list of collections missing blocks (as produced by
+keep-balance), produce a report listing affected collections and
+container requests.
+"""
+
+def rerun_request(arv, container_requests_to_rerun, ct):
+    requests = arvados.util.list_all(arv.container_requests().list, filters=[["container_uuid", "=", ct["uuid"]]])
+    for cr in requests:
+        if cr["requesting_container_uuid"]:
+            rerun_request(arv, container_requests_to_rerun, arv.containers().get(uuid=cr["requesting_container_uuid"]).execute())
+        else:
+            container_requests_to_rerun[cr["uuid"]] = cr
+
+def get_owner(arv, owners, record):
+    uuid = record["owner_uuid"]
+    if uuid not in owners:
+        if uuid[6:11] == "tpzed":
+            owners[uuid] = (arv.users().get(uuid=uuid).execute()["full_name"], uuid)
+        else:
+            grp = arv.groups().get(uuid=uuid).execute()
+            _, ou = get_owner(arv, owners, grp)
+            owners[uuid] = (grp["name"], ou)
+    return owners[uuid]
+
+def main():
+    parser = argparse.ArgumentParser(description='Report collections and container requests affected by missing blocks')
+    parser.add_argument('inp')
+    args = parser.parse_args()
+
+    arv = arvados.api('v1')
+
+    busted_collections = set()
+
+    logging.log(lglvl, "Reading %s", args.inp)
+
+    # Get the list of bad collection PDHs
+    with open(args.inp, "rt") as blocksfile:
+        for line in blocksfile:
+            # Ignore the first item, that's the block id
+            collections = line.rstrip().split(" ")[1:]
+            for c in collections:
+                busted_collections.add(c)
+
+    out = csv.writer(sys.stdout)
+
+    out.writerow(("collection uuid", "container request uuid", "record name", "modified at", "owner uuid", "owner name", "root owner uuid", "root owner name", "notes"))
+
+    logging.log(lglvl, "Finding collections")
+
+    owners = {}
+    container_requests_to_rerun = {}
+    # Get containers that produced these collections
+    i = 0
+    for b in busted_collections:
+        if (i % 100) == 0:
+            logging.log(lglvl, "%d/%d", i, len(busted_collections))
+        i += 1
+        collections_to_delete = arvados.util.list_all(arv.collections().list, filters=[["portable_data_hash", "=", b]])
+        for d in collections_to_delete:
+            t = ""
+            if d["properties"].get("type") not in ("output", "log"):
+                t = "\"type\" was '%s', expected one of 'output' or 'log'" % d["properties"].get("type")
+            ou = get_owner(arv, owners, d)
+            out.writerow((d["uuid"], "", d["name"], d["modified_at"], d["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], t))
+
+        maybe_containers_to_rerun = arvados.util.list_all(arv.containers().list, filters=[["output", "=", b]])
+        for ct in maybe_containers_to_rerun:
+            rerun_request(arv, container_requests_to_rerun, ct)
+
+    logging.log(lglvl, "%d/%d", i, len(busted_collections))
+    logging.log(lglvl, "Finding container requests")
+
+    i = 0
+    for _, cr in container_requests_to_rerun.items():
+        if (i % 100) == 0:
+            logging.log(lglvl, "%d/%d", i, len(container_requests_to_rerun))
+        i += 1
+        ou = get_owner(arv, owners, cr)
+        out.writerow(("", cr["uuid"], cr["name"], cr["modified_at"], cr["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], ""))
+
+    logging.log(lglvl, "%d/%d", i, len(container_requests_to_rerun))
+
+if __name__ == "__main__":
+    main()
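
Editor's note: the report goes to stdout, so a typical invocation is "python keep-xref.py lost_blocks.txt > report.csv". Post-processing the CSV is straightforward; for example, counting the distinct container requests flagged for re-run (report.csv is whatever file you redirected to):

    import csv

    with open('report.csv') as f:
        rows = list(csv.reader(f))[1:]  # skip the header row
    container_requests = {r[1] for r in rows if r[1]}
    print('%d container requests to re-run' % len(container_requests))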
index 5e2ed2e32e9863ff24bf20b263a9ba4218668d25..cfcba1b21888a867698980a3f9434133d02ed607 100644 (file)
                        "revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
                        "revisionTime": "2016-08-13T22:13:03Z"
                },
+               {
+                       "checksumSHA1": "x7IEwuVYTztOJItr3jtePGyFDWA=",
+                       "path": "github.com/imdario/mergo",
+                       "revision": "5ef87b449ca75fbed1bc3765b749ca8f73f1fa69",
+                       "revisionTime": "2019-04-15T13:31:43Z"
+               },
                {
                        "checksumSHA1": "iCsyavJDnXC9OY//p52IWJWy7PY=",
                        "path": "github.com/jbenet/go-context/io",