Merge branch 'master' into 4570-multi-auth-method
author     Peter Amstutz <peter.amstutz@curoverse.com>
           Tue, 6 Jan 2015 13:45:08 +0000 (08:45 -0500)
committer  Peter Amstutz <peter.amstutz@curoverse.com>
           Tue, 6 Jan 2015 13:45:08 +0000 (08:45 -0500)
291 files changed:
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/angular_shim.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/application.js
apps/workbench/app/assets/javascripts/arvados_client.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/event_log.js
apps/workbench/app/assets/javascripts/pipeline_instances.js
apps/workbench/app/assets/javascripts/report_issue.js
apps/workbench/app/assets/javascripts/selection.js.erb
apps/workbench/app/assets/javascripts/tab_panes.js
apps/workbench/app/assets/javascripts/upload_to_collection.js [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/application.css.scss
apps/workbench/app/assets/stylesheets/jobs.css.scss
apps/workbench/app/assets/stylesheets/selection.css [deleted file]
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/collections_controller.rb
apps/workbench/app/controllers/jobs_controller.rb
apps/workbench/app/controllers/pipeline_instances_controller.rb
apps/workbench/app/controllers/search_controller.rb
apps/workbench/app/controllers/virtual_machines_controller.rb
apps/workbench/app/helpers/api_client_authorizations_helper.rb [deleted file]
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/helpers/authorized_keys_helper.rb [deleted file]
apps/workbench/app/helpers/groups_helper.rb [deleted file]
apps/workbench/app/helpers/humans_helper.rb [deleted file]
apps/workbench/app/helpers/job_tasks_helper.rb [deleted file]
apps/workbench/app/helpers/keep_disks_helper.rb [deleted file]
apps/workbench/app/helpers/links_helper.rb [deleted file]
apps/workbench/app/helpers/logs_helper.rb [deleted file]
apps/workbench/app/helpers/nodes_helper.rb [deleted file]
apps/workbench/app/helpers/projects_helper.rb [deleted file]
apps/workbench/app/helpers/provenance_helper.rb
apps/workbench/app/helpers/repositories_helper.rb [deleted file]
apps/workbench/app/helpers/sessions_helper.rb [deleted file]
apps/workbench/app/helpers/specimens_helper.rb [deleted file]
apps/workbench/app/helpers/traits_helper.rb [deleted file]
apps/workbench/app/helpers/user_agreements_helper.rb [deleted file]
apps/workbench/app/helpers/users_helper.rb [deleted file]
apps/workbench/app/helpers/vcf_pipeline_helper.rb [deleted file]
apps/workbench/app/helpers/version_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/virtual_machines_helper.rb [deleted file]
apps/workbench/app/models/collection.rb
apps/workbench/app/views/application/_projects_tree_menu.html.erb
apps/workbench/app/views/application/_report_issue_popup.html.erb
apps/workbench/app/views/application/report_issue_popup.js.erb
apps/workbench/app/views/authorized_keys/_show_help.html.erb [deleted file]
apps/workbench/app/views/collections/_index_tbody.html.erb
apps/workbench/app/views/collections/_sharing_button.html.erb
apps/workbench/app/views/collections/_show_files.html.erb
apps/workbench/app/views/collections/_show_recent.html.erb
apps/workbench/app/views/collections/_show_upload.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_log.html.erb
apps/workbench/app/views/keep_disks/_content_layout.html.erb
apps/workbench/app/views/layouts/application.html.erb
apps/workbench/app/views/layouts/body.html.erb
apps/workbench/app/views/notifications/_ssh_key_notification.html.erb
apps/workbench/app/views/pipeline_instances/_running_component.html.erb
apps/workbench/app/views/pipeline_instances/_show_recent.html.erb
apps/workbench/app/views/pipeline_instances/index.html.erb
apps/workbench/app/views/pipeline_templates/_show_recent.html.erb
apps/workbench/app/views/pipeline_templates/show.html.erb
apps/workbench/app/views/projects/_compute_node_status.html.erb
apps/workbench/app/views/projects/_show_dashboard.html.erb
apps/workbench/app/views/projects/_show_sharing.html.erb
apps/workbench/app/views/projects/_show_tab_contents.html.erb
apps/workbench/app/views/projects/show.html.erb
apps/workbench/app/views/user_agreements/index.html.erb
apps/workbench/app/views/users/_add_ssh_key_popup.html.erb
apps/workbench/app/views/users/_manage_ssh_keys.html.erb
apps/workbench/app/views/users/_show_admin.html.erb
apps/workbench/app/views/users/_tables.html.erb
apps/workbench/app/views/users/setup_popup.js.erb
apps/workbench/config/application.default.yml
apps/workbench/config/database.yml
apps/workbench/config/routes.rb
apps/workbench/test/controllers/actions_controller_test.rb [moved from apps/workbench/test/functional/actions_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/api_client_authorizations_controller_test.rb [moved from apps/workbench/test/functional/api_client_authorizations_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/application_controller_test.rb [moved from apps/workbench/test/functional/application_controller_test.rb with 98% similarity]
apps/workbench/test/controllers/authorized_keys_controller_test.rb [moved from apps/workbench/test/functional/authorized_keys_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/collections_controller_test.rb
apps/workbench/test/controllers/groups_controller_test.rb [moved from apps/workbench/test/functional/groups_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/humans_controller_test.rb [moved from apps/workbench/test/functional/humans_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/job_tasks_controller_test.rb [moved from apps/workbench/test/functional/job_tasks_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/jobs_controller_test.rb [moved from apps/workbench/test/functional/jobs_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/keep_disks_controller_test.rb [moved from apps/workbench/test/functional/keep_disks_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/links_controller_test.rb [moved from apps/workbench/test/functional/links_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/logs_controller_test.rb [moved from apps/workbench/test/functional/logs_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/nodes_controller_test.rb [moved from apps/workbench/test/functional/nodes_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/pipeline_instances_controller_test.rb
apps/workbench/test/controllers/pipeline_templates_controller_test.rb [moved from apps/workbench/test/functional/pipeline_templates_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/projects_controller_test.rb [moved from apps/workbench/test/functional/projects_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/repositories_controller_test.rb [moved from apps/workbench/test/functional/repositories_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/search_controller_test.rb
apps/workbench/test/controllers/sessions_controller_test.rb [moved from apps/workbench/test/functional/sessions_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/specimens_controller_test.rb [moved from apps/workbench/test/functional/specimens_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/traits_controller_test.rb [moved from apps/workbench/test/functional/traits_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/user_agreements_controller_test.rb [moved from apps/workbench/test/functional/user_agreements_controller_test.rb with 100% similarity]
apps/workbench/test/controllers/users_controller_test.rb [moved from apps/workbench/test/functional/users_controller_test.rb with 96% similarity]
apps/workbench/test/controllers/virtual_machines_controller_test.rb [moved from apps/workbench/test/functional/virtual_machines_controller_test.rb with 100% similarity]
apps/workbench/test/diagnostics_test_helper.rb
apps/workbench/test/functional/.gitkeep [deleted file]
apps/workbench/test/functional/collections_controller_test.rb [deleted file]
apps/workbench/test/functional/pipeline_instances_controller_test.rb [deleted file]
apps/workbench/test/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
apps/workbench/test/integration/application_layout_test.rb
apps/workbench/test/integration/collection_upload_test.rb [new file with mode: 0644]
apps/workbench/test/integration/collections_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/integration/projects_test.rb
apps/workbench/test/integration/report_issue_test.rb
apps/workbench/test/integration/user_manage_account_test.rb
apps/workbench/test/integration/websockets_test.rb
apps/workbench/test/performance/browsing_test.rb
apps/workbench/test/performance_test_helper.rb [new file with mode: 0644]
apps/workbench/test/test_helper.rb
apps/workbench/test/unit/api_client_authorization_test.rb [deleted file]
apps/workbench/test/unit/authorized_key_test.rb [deleted file]
apps/workbench/test/unit/human_test.rb [deleted file]
apps/workbench/test/unit/job_task_test.rb [deleted file]
apps/workbench/test/unit/keep_disk_test.rb [deleted file]
apps/workbench/test/unit/log_test.rb [deleted file]
apps/workbench/test/unit/node_test.rb [deleted file]
apps/workbench/test/unit/pipeline_template_test.rb [deleted file]
apps/workbench/test/unit/repository_test.rb [deleted file]
apps/workbench/test/unit/specimen_test.rb [deleted file]
apps/workbench/test/unit/trait_test.rb [deleted file]
apps/workbench/test/unit/user_agreement_test.rb [deleted file]
apps/workbench/test/unit/virtual_machine_test.rb [deleted file]
apps/workbench/vendor/assets/javascripts/jquery.number.min.js [new file with mode: 0644]
crunch_scripts/run-command
doc/_config.yml
doc/_includes/_0_filter_py.liquid
doc/_includes/_concurrent_hash_script_py.liquid
doc/_includes/_run_md5sum_py.liquid
doc/_includes/_tutorial_expectations.liquid
doc/_includes/_tutorial_hash_script_py.liquid
doc/api/schema/Job.html.textile.liquid
doc/css/bootstrap.css
doc/install/create-standard-objects.html.textile.liquid
doc/install/install-keepproxy.html.textile.liquid
doc/install/install-keepstore.html.textile.liquid
doc/install/install-shell-server.html.textile.liquid
doc/sdk/cli/index.html.textile.liquid
doc/sdk/cli/install.html.textile.liquid
doc/sdk/cli/reference.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/subcommands.html.textile.liquid
doc/sdk/python/sdk-python.html.textile.liquid
doc/user/getting_started/check-environment.html.textile.liquid
doc/user/reference/sdk-cli.html.textile.liquid [deleted file]
doc/user/topics/arv-run.html.textile.liquid
doc/user/tutorials/tutorial-firstscript.html.textile.liquid
doc/user/tutorials/tutorial-keep.html.textile.liquid
docker/api/Dockerfile
docker/api/arvados-clients.yml.in
docker/api/setup.sh.in
docker/api/ssh.sh [deleted file]
docker/api/supervisor.conf
docker/api/update-gitolite.rb
docker/arvdock
docker/base/Dockerfile
docker/build_tools/Makefile
docker/build_tools/build.rb
docker/build_tools/config.rb
docker/compute/Dockerfile
docker/compute/ssh.sh [deleted file]
docker/compute/supervisor.conf
docker/config.yml.example
docker/doc/apache2_foreground.sh
docker/java-bwa-samtools/Dockerfile
docker/jobs/Dockerfile
docker/passenger/Dockerfile
docker/shell/Dockerfile
docker/shell/setup.sh.in
docker/slurm/supervisor.conf
docker/sso/apache2_foreground.sh
docker/workbench/apache2_foreground.sh
sdk/cli/.gitignore
sdk/cli/Gemfile.lock [deleted file]
sdk/cli/bin/crunch-job
sdk/cli/test/test_arv-collection-create.rb
sdk/cli/test/test_arv-get.rb
sdk/cli/test/test_arv-put.rb
sdk/cli/test/test_arv-run-pipeline-instance.rb
sdk/cli/test/test_arv-tag.rb
sdk/go/keepclient/keepclient.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
sdk/python/.gitignore
sdk/python/arvados/__init__.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/ls.py [new file with mode: 0755]
sdk/python/bin/arv-get
sdk/python/bin/arv-ls
sdk/python/setup.py
sdk/python/tests/test_arv_ls.py [new file with mode: 0644]
sdk/python/tests/test_collections.py
sdk/python/tests/test_sdk.py [new file with mode: 0644]
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/assets/javascripts/api_client_authorizations.js.coffee [deleted file]
services/api/app/assets/javascripts/api_clients.js.coffee [deleted file]
services/api/app/assets/javascripts/application.js [deleted file]
services/api/app/assets/javascripts/authorized_keys.js.coffee [deleted file]
services/api/app/assets/javascripts/collections.js.coffee [deleted file]
services/api/app/assets/javascripts/commit_ancestors.js.coffee [deleted file]
services/api/app/assets/javascripts/commits.js.coffee [deleted file]
services/api/app/assets/javascripts/groups.js.coffee [deleted file]
services/api/app/assets/javascripts/humans.js.coffee [deleted file]
services/api/app/assets/javascripts/job_tasks.js.coffee [deleted file]
services/api/app/assets/javascripts/jobs.js.coffee [deleted file]
services/api/app/assets/javascripts/keep_disks.js.coffee [deleted file]
services/api/app/assets/javascripts/links.js.coffee [deleted file]
services/api/app/assets/javascripts/logs.js.coffee [deleted file]
services/api/app/assets/javascripts/nodes.js [deleted file]
services/api/app/assets/javascripts/nodes.js.coffee [deleted file]
services/api/app/assets/javascripts/pipeline_instances.js.coffee [deleted file]
services/api/app/assets/javascripts/pipeline_templates.js.coffee [deleted file]
services/api/app/assets/javascripts/repositories.js.coffee [deleted file]
services/api/app/assets/javascripts/specimens.js.coffee [deleted file]
services/api/app/assets/javascripts/traits.js.coffee [deleted file]
services/api/app/assets/javascripts/virtual_machines.js.coffee [deleted file]
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/collections_controller.rb
services/api/app/controllers/arvados/v1/keep_disks_controller.rb
services/api/app/controllers/arvados/v1/nodes_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/database_controller.rb [new file with mode: 0644]
services/api/app/controllers/static_controller.rb
services/api/app/controllers/user_sessions_controller.rb
services/api/app/models/arvados_model.rb
services/api/app/models/database_seeds.rb [new file with mode: 0644]
services/api/app/models/job.rb
services/api/app/models/node.rb
services/api/app/models/user.rb
services/api/config/application.default.yml
services/api/config/routes.rb
services/api/db/migrate/20140422011506_pipeline_instance_state.rb
services/api/db/migrate/20141208164553_owner_uuid_index.rb [new file with mode: 0644]
services/api/db/seeds.rb
services/api/db/structure.sql
services/api/lib/current_api_client.rb
services/api/lib/eventbus.rb
services/api/lib/simulate_job_log.rb [new file with mode: 0644]
services/api/lib/tasks/replay_job_log.rake [new file with mode: 0644]
services/api/script/crunch-dispatch.rb
services/api/test/fixtures/api_client_authorizations.yml
services/api/test/fixtures/collections.yml
services/api/test/fixtures/groups.yml
services/api/test/fixtures/users.yml
services/api/test/functional/application_controller_test.rb
services/api/test/functional/arvados/v1/collections_controller_test.rb
services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
services/api/test/functional/arvados/v1/keep_disks_controller_test.rb
services/api/test/functional/arvados/v1/links_controller_test.rb
services/api/test/functional/arvados/v1/logs_controller_test.rb
services/api/test/functional/arvados/v1/nodes_controller_test.rb
services/api/test/functional/database_controller_test.rb [new file with mode: 0644]
services/api/test/integration/cross_origin_test.rb [new file with mode: 0644]
services/api/test/integration/database_reset_test.rb [new file with mode: 0644]
services/api/test/integration/errors_test.rb
services/api/test/job_logs/crunchstatshort.log [new file with mode: 0644]
services/api/test/test_helper.rb
services/api/test/unit/arvados_model_test.rb
services/api/test/unit/job_test.rb
services/fuse/arvados_fuse/__init__.py
services/fuse/setup.py
services/fuse/tests/test_mount.py
services/keepproxy/keepproxy.go
services/keepproxy/keepproxy_test.go
services/keepstore/handler_test.go
services/keepstore/handlers.go
services/keepstore/logging_router.go
services/nodemanager/.gitignore [changed from file to symlink]
services/nodemanager/arvnodeman/clientactor.py
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
services/nodemanager/arvnodeman/computenode/driver/__init__.py
services/nodemanager/arvnodeman/computenode/driver/ec2.py
services/nodemanager/arvnodeman/config.py
services/nodemanager/arvnodeman/daemon.py
services/nodemanager/arvnodeman/jobqueue.py
services/nodemanager/arvnodeman/launcher.py
services/nodemanager/arvnodeman/nodelist.py
services/nodemanager/doc/ec2.example.cfg
services/nodemanager/setup.py
services/nodemanager/tests/test_computenode_dispatch.py
services/nodemanager/tests/test_computenode_dispatch_slurm.py
services/nodemanager/tests/test_computenode_driver_ec2.py
services/nodemanager/tests/test_daemon.py
services/nodemanager/tests/test_jobqueue.py
services/nodemanager/tests/testutil.py

index 5ab6eace2c8de0777e23daa62cc6de1d0863ce94..49f82f53151b86681be5f7f05f615a0f099c1ee0 100644 (file)
@@ -24,7 +24,7 @@ gem 'coffee-rails'
 # Gems used only for assets and not required
 # in production environments by default.
 group :assets do
-  gem 'sass-rails'
+  gem 'sass-rails', '~> 4.0.4'
 
   # See https://github.com/sstephenson/execjs#readme for more supported runtimes
   gem 'therubyracer', :platforms => :ruby
@@ -37,12 +37,20 @@ group :development do
   gem 'ruby-debug-passenger'
 end
 
-group :test do
-  gem 'rvm-capistrano'
+group :test, :diagnostics do
   gem 'selenium-webdriver'
   gem 'capybara'
   gem 'poltergeist'
   gem 'headless'
+end
+
+group :test, :performance do
+  gem 'rails-perftest'
+  gem 'ruby-prof'
+end
+
+group :test do
+  gem 'rvm-capistrano'
   # Note: "require: false" here tells bunder not to automatically
   # 'require' the packages during application startup. Installation is
   # still mandatory.
@@ -56,6 +64,8 @@ gem 'bootstrap-sass', '~> 3.1.0'
 gem 'bootstrap-x-editable-rails'
 gem 'bootstrap-tab-history-rails'
 
+gem 'angularjs-rails'
+
 gem 'less'
 gem 'less-rails'
 gem 'wiselinks'
@@ -87,3 +97,6 @@ gem 'httpclient', '~> 2.5.0'
 gem 'themes_for_rails', git: 'https://github.com/holtkampw/themes_for_rails', ref: '1fd2d7897d75ae0d6375f4c390df87b8e91ad417'
 
 gem "deep_merge", :require => 'deep_merge/rails_compat'
+
+gem 'morrisjs-rails'
+gem 'raphael-rails'
index 8b9ea947bb0ba9498ab347b673e887e664132414..d29c16ea4973020ffbb32d60ad6d7d54261116c2 100644 (file)
@@ -10,27 +10,27 @@ GEM
   remote: https://rubygems.org/
   specs:
     RedCloth (4.2.9)
-    actionmailer (4.1.1)
-      actionpack (= 4.1.1)
-      actionview (= 4.1.1)
-      mail (~> 2.5.4)
-    actionpack (4.1.1)
-      actionview (= 4.1.1)
-      activesupport (= 4.1.1)
+    actionmailer (4.1.8)
+      actionpack (= 4.1.8)
+      actionview (= 4.1.8)
+      mail (~> 2.5, >= 2.5.4)
+    actionpack (4.1.8)
+      actionview (= 4.1.8)
+      activesupport (= 4.1.8)
       rack (~> 1.5.2)
       rack-test (~> 0.6.2)
-    actionview (4.1.1)
-      activesupport (= 4.1.1)
+    actionview (4.1.8)
+      activesupport (= 4.1.8)
       builder (~> 3.1)
       erubis (~> 2.7.0)
-    activemodel (4.1.1)
-      activesupport (= 4.1.1)
+    activemodel (4.1.8)
+      activesupport (= 4.1.8)
       builder (~> 3.1)
-    activerecord (4.1.1)
-      activemodel (= 4.1.1)
-      activesupport (= 4.1.1)
+    activerecord (4.1.8)
+      activemodel (= 4.1.8)
+      activesupport (= 4.1.8)
       arel (~> 5.0.0)
-    activesupport (4.1.1)
+    activesupport (4.1.8)
       i18n (~> 0.6, >= 0.6.9)
       json (~> 1.7, >= 1.7.7)
       minitest (~> 5.1)
@@ -38,6 +38,7 @@ GEM
       tzinfo (~> 1.1)
     addressable (2.3.6)
     andand (1.3.3)
+    angularjs-rails (1.3.3)
     arel (5.0.1.20140414130214)
     arvados (0.1.20141114230720)
       activesupport (>= 3.2.13)
@@ -49,7 +50,7 @@ GEM
       addressable (>= 2.3.1)
       extlib (>= 0.9.15)
       multi_json (>= 1.0.0)
-    bootstrap-sass (3.1.0.1)
+    bootstrap-sass (3.1.1.1)
       sass (~> 3.2)
     bootstrap-tab-history-rails (0.1.0)
       railties (>= 3.1)
@@ -72,27 +73,27 @@ GEM
       rack (>= 1.0.0)
       rack-test (>= 0.5.4)
       xpath (~> 2.0)
-    childprocess (0.5.1)
+    childprocess (0.5.5)
       ffi (~> 1.0, >= 1.0.11)
     cliver (0.3.2)
-    coffee-rails (4.0.1)
+    coffee-rails (4.1.0)
       coffee-script (>= 2.2.0)
       railties (>= 4.0.0, < 5.0)
-    coffee-script (2.2.0)
+    coffee-script (2.3.0)
       coffee-script-source
       execjs
-    coffee-script-source (1.7.0)
+    coffee-script-source (1.8.0)
     columnize (0.8.9)
     commonjs (0.2.7)
-    daemon_controller (1.1.7)
+    daemon_controller (1.2.0)
     debugger-linecache (1.2.0)
     deep_merge (1.0.1)
     erubis (2.7.0)
-    execjs (2.0.2)
+    execjs (2.2.2)
     extlib (0.9.16)
     faraday (0.8.9)
       multipart-post (~> 1.2.0)
-    ffi (1.9.3)
+    ffi (1.9.6)
     google-api-client (0.6.4)
       addressable (>= 2.3.2)
       autoparse (>= 0.3.3)
@@ -103,12 +104,12 @@ GEM
       multi_json (>= 1.0.0)
       signet (~> 0.4.5)
       uuidtools (>= 2.1.0)
-    headless (1.0.1)
-    highline (1.6.20)
+    headless (1.0.2)
+    highline (1.6.21)
     hike (1.2.3)
-    httpclient (2.5.0)
-    i18n (0.6.9)
-    jquery-rails (3.0.4)
+    httpclient (2.5.3.3)
+    i18n (0.6.11)
+    jquery-rails (3.1.2)
       railties (>= 3.0, < 5.0)
       thor (>= 0.14, < 2.0)
     json (1.8.1)
@@ -116,82 +117,85 @@ GEM
       multi_json (>= 1.5)
     launchy (2.4.3)
       addressable (~> 2.3)
-    less (2.4.0)
+    less (2.6.0)
       commonjs (~> 0.2.7)
-    less-rails (2.4.2)
+    less-rails (2.6.0)
       actionpack (>= 3.1)
-      less (~> 2.4.0)
-    libv8 (3.16.14.3)
-    mail (2.5.4)
-      mime-types (~> 1.16)
-      treetop (~> 1.4.8)
+      less (~> 2.6.0)
+    libv8 (3.16.14.7)
+    mail (2.6.3)
+      mime-types (>= 1.16, < 3)
     metaclass (0.0.4)
-    mime-types (1.25.1)
-    mini_portile (0.6.0)
-    minitest (5.3.3)
+    mime-types (2.4.3)
+    mini_portile (0.6.1)
+    minitest (5.4.3)
     mocha (1.1.0)
       metaclass (~> 0.0.1)
-    multi_json (1.10.0)
+    morrisjs-rails (0.5.1)
+      railties (> 3.1, < 5)
+    multi_json (1.10.1)
     multipart-post (1.2.0)
-    net-scp (1.1.2)
+    net-scp (1.2.1)
       net-ssh (>= 2.6.5)
     net-sftp (2.1.2)
       net-ssh (>= 2.6.5)
-    net-ssh (2.7.0)
+    net-ssh (2.9.1)
     net-ssh-gateway (1.2.0)
       net-ssh (>= 2.6.5)
-    nokogiri (1.6.3.1)
-      mini_portile (= 0.6.0)
-    oj (2.1.7)
-    passenger (4.0.23)
-      daemon_controller (>= 1.1.0)
+    nokogiri (1.6.4.1)
+      mini_portile (~> 0.6.0)
+    oj (2.11.1)
+    passenger (4.0.53)
+      daemon_controller (>= 1.2.0)
       rack
       rake (>= 0.8.1)
     piwik_analytics (1.0.2)
       actionpack
       activesupport
       rails (>= 3.0.0)
-    poltergeist (1.5.0)
+    poltergeist (1.5.1)
       capybara (~> 2.1)
       cliver (~> 0.3.1)
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
-    polyglot (0.3.4)
     rack (1.5.2)
     rack-test (0.6.2)
       rack (>= 1.0)
-    rails (4.1.1)
-      actionmailer (= 4.1.1)
-      actionpack (= 4.1.1)
-      actionview (= 4.1.1)
-      activemodel (= 4.1.1)
-      activerecord (= 4.1.1)
-      activesupport (= 4.1.1)
+    rails (4.1.8)
+      actionmailer (= 4.1.8)
+      actionpack (= 4.1.8)
+      actionview (= 4.1.8)
+      activemodel (= 4.1.8)
+      activerecord (= 4.1.8)
+      activesupport (= 4.1.8)
       bundler (>= 1.3.0, < 2.0)
-      railties (= 4.1.1)
+      railties (= 4.1.8)
       sprockets-rails (~> 2.0)
-    railties (4.1.1)
-      actionpack (= 4.1.1)
-      activesupport (= 4.1.1)
+    rails-perftest (0.0.5)
+    railties (4.1.8)
+      actionpack (= 4.1.8)
+      activesupport (= 4.1.8)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (10.3.1)
+    rake (10.4.0)
+    raphael-rails (2.1.2)
     ref (1.0.5)
     ruby-debug-passenger (0.2.0)
-    rubyzip (1.1.0)
-    rvm-capistrano (1.5.1)
+    ruby-prof (0.15.2)
+    rubyzip (1.1.6)
+    rvm-capistrano (1.5.5)
       capistrano (~> 2.15.4)
-    sass (3.2.12)
-    sass-rails (4.0.3)
+    sass (3.2.19)
+    sass-rails (4.0.4)
       railties (>= 4.0.0, < 5.0)
-      sass (~> 3.2.0)
-      sprockets (~> 2.8, <= 2.11.0)
+      sass (~> 3.2.2)
+      sprockets (~> 2.8, < 2.12)
       sprockets-rails (~> 2.0)
-    selenium-webdriver (2.40.0)
-      childprocess (>= 0.5.0)
+    selenium-webdriver (2.44.0)
+      childprocess (~> 0.5)
       multi_json (~> 1.0)
       rubyzip (~> 1.0)
-      websocket (~> 1.0.4)
+      websocket (~> 1.0)
     signet (0.4.5)
       addressable (>= 2.2.3)
       faraday (~> 0.8.1)
@@ -204,34 +208,31 @@ GEM
     simplecov-rcov (0.2.3)
       simplecov (>= 0.4.1)
     slop (3.6.0)
-    sprockets (2.11.0)
+    sprockets (2.11.3)
       hike (~> 1.2)
       multi_json (~> 1.0)
       rack (~> 1.0)
       tilt (~> 1.1, != 1.3.0)
-    sprockets-rails (2.1.3)
+    sprockets-rails (2.2.0)
       actionpack (>= 3.0)
       activesupport (>= 3.0)
-      sprockets (~> 2.8)
-    sqlite3 (1.3.8)
+      sprockets (>= 2.8, < 4.0)
+    sqlite3 (1.3.10)
     sshkey (1.6.1)
-    therubyracer (0.12.0)
+    therubyracer (0.12.1)
       libv8 (~> 3.16.14.0)
       ref
     thor (0.19.1)
-    thread_safe (0.3.3)
+    thread_safe (0.3.4)
     tilt (1.4.1)
-    treetop (1.4.15)
-      polyglot
-      polyglot (>= 0.3.1)
-    tzinfo (1.1.0)
+    tzinfo (1.2.2)
       thread_safe (~> 0.1)
-    uglifier (2.3.1)
+    uglifier (2.5.3)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
     uuidtools (2.1.5)
-    websocket (1.0.7)
-    websocket-driver (0.3.2)
+    websocket (1.2.1)
+    websocket-driver (0.4.0)
     wiselinks (1.2.1)
     xpath (2.0.0)
       nokogiri (~> 1.3)
@@ -242,6 +243,7 @@ PLATFORMS
 DEPENDENCIES
   RedCloth
   andand
+  angularjs-rails
   arvados (>= 0.1.20141114230720)
   bootstrap-sass (~> 3.1.0)
   bootstrap-tab-history-rails
@@ -257,16 +259,20 @@ DEPENDENCIES
   less-rails
   minitest (>= 5.0.0)
   mocha
+  morrisjs-rails
   multi_json
   oj
   passenger
   piwik_analytics
   poltergeist
   rails (~> 4.1.0)
+  rails-perftest
+  raphael-rails
   ruby-debug-passenger
+  ruby-prof
   rvm-capistrano
   sass
-  sass-rails
+  sass-rails (~> 4.0.4)
   selenium-webdriver
   simplecov (~> 0.7.1)
   simplecov-rcov
diff --git a/apps/workbench/app/assets/javascripts/angular_shim.js b/apps/workbench/app/assets/javascripts/angular_shim.js
new file mode 100644 (file)
index 0000000..a5366e3
--- /dev/null
@@ -0,0 +1,12 @@
+// Compile any new HTML content that was loaded via jQuery.ajax().
+// Currently this only works for tabs because they emit an
+// arv:pane:loaded event after updating the DOM.
+
+$(document).on('arv:pane:loaded', function(event, $updatedElement) {
+    if ($updatedElement) {
+        angular.element($updatedElement).injector().invoke(function($compile) {
+            var scope = angular.element($updatedElement).scope();
+            $compile($updatedElement)(scope);
+        });
+    }
+});
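As a usage note, a minimal sketch (with a hypothetical pane element and response variable) of firing the event this shim listens for after replacing pane content, mirroring the `$pane.trigger('arv:pane:loaded', [$pane])` call added to tab_panes.js further down:

// Minimal sketch, assuming jQuery, Angular, and this shim are loaded;
// the element id and freshHtml variable are hypothetical.
var $pane = $('#example-pane');
$pane.html(freshHtml);                      // freshHtml: assumed AJAX payload
$pane.trigger('arv:pane:loaded', [$pane]);  // shim compiles the new markup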
index 1990b8b0f55c8d2497db67cae6d6da21f714663b..63887b3ab970f3d75f4035169790e05752ea15a8 100644 (file)
 //= require bootstrap3-editable/bootstrap-editable
 //= require bootstrap-tab-history
 //= require wiselinks
+//= require angular
+//= require raphael
+//= require morris
+//= require jquery.number.min
 //= require_tree .
 
 jQuery(function($){
-    $.ajaxSetup({
-        headers: {
-            'X-CSRF-Token': $('meta[name="csrf-token"]').attr('content')
-        }
-    });
-
     $(document).ajaxStart(function(){
       $('.modal-with-loading-spinner .spinner').show();
     }).ajaxStop(function(){
diff --git a/apps/workbench/app/assets/javascripts/arvados_client.js b/apps/workbench/app/assets/javascripts/arvados_client.js
new file mode 100644 (file)
index 0000000..63f1de1
--- /dev/null
@@ -0,0 +1,101 @@
+angular.
+    module('Arvados', []).
+    service('ArvadosClient', ArvadosClient);
+
+ArvadosClient.$inject = ['arvadosApiToken', 'arvadosDiscoveryUri'];
+function ArvadosClient(arvadosApiToken, arvadosDiscoveryUri) {
+    $.extend(this, {
+        apiPromise: apiPromise,
+        uniqueNameForManifest: uniqueNameForManifest
+    });
+    return this;
+    ////////////////////////////////
+
+    var that = this;
+    var promiseDiscovery;
+    var discoveryDoc;
+
+    function apiPromise(controller, action, params) {
+        // Start an API call. Return a promise that will resolve with
+        // the API response.
+        return getDiscoveryDoc().then(function() {
+            var meth = discoveryDoc.resources[controller].methods[action];
+            var data = $.extend({}, params, {_method: meth.httpMethod});
+            $.each(data, function(k, v) {
+                if (typeof(v) === 'object') {
+                    data[k] = JSON.stringify(v);
+                }
+            });
+            var path = meth.path.replace(/{(.*?)}/, function(_, key) {
+                var val = data[key];
+                delete data[key];
+                return encodeURIComponent(val);
+            });
+            return $.ajax({
+                url: discoveryDoc.baseUrl + path,
+                type: 'POST',
+                crossDomain: true,
+                dataType: 'json',
+                data: data,
+                headers: {
+                    Authorization: 'OAuth2 ' + arvadosApiToken
+                }
+            });
+        });
+    }
+
+    function uniqueNameForManifest(manifest, newStreamName, origName) {
+        // Return an (escaped) filename starting with (unescaped)
+        // origName that won't conflict with any existing names in the
+        // manifest if saved under newStreamName. newStreamName must
+        // be exactly as given in the manifest, e.g., "." or "./foo"
+        // or "./foo/bar".
+        //
+        // Example:
+        //
+        // uniqueNameForManifest('./foo [...] 0:0:bar\\040baz.txt\n', '.',
+        //                       'foo/bar baz.txt')
+        // =>
+        // 'foo/bar\\040baz\\040(1).txt'
+        var newName;
+        var nameStub = origName;
+        var suffixInt = null;
+        var ok = false;
+        var lineMatch, linesRe = /(\S+).*/gm;
+        var fileTokenMatch, fileTokensRe = / \d+:\d+:(\S+)/g;
+        while (!ok) {
+            ok = true;
+            // Add ' (N)' before the filename extension, if any.
+            newName = (!suffixInt ? nameStub :
+                       nameStub.replace(/(\.[^.]*)?$/, ' ('+suffixInt+')$1')).
+                replace(/ /g, '\\040');
+            while (ok && null !==
+                   (lineMatch = linesRe.exec(manifest))) {
+                // lineMatch is [theEntireLine, streamName]
+                while (ok && null !==
+                       (fileTokenMatch = fileTokensRe.exec(lineMatch[0]))) {
+                    // fileTokenMatch is [theEntireToken, fileName]
+                    if (lineMatch[1] + '/' + fileTokenMatch[1]
+                        ===
+                        newStreamName + '/' + newName) {
+                        ok = false;
+                    }
+                }
+            }
+            suffixInt = (suffixInt || 0) + 1;
+        }
+        return newName;
+    }
+
+    function getDiscoveryDoc() {
+        if (!promiseDiscovery) {
+            promiseDiscovery = $.ajax({
+                url: arvadosDiscoveryUri,
+                crossDomain: true
+            }).then(function(data, status, xhr) {
+                discoveryDoc = data;
+            });
+        }
+        return promiseDiscovery;
+    }
+}
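A rough usage sketch of this service; the module name, token, host, and uuid below are placeholders, and it assumes the page supplies the injected `arvadosApiToken` and `arvadosDiscoveryUri` values (here provided inline for self-containment):

// Hypothetical sketch: call the API via the discovery document.
angular.module('Example', ['Arvados']).
    value('arvadosApiToken', 'xxxxx-token-placeholder').
    value('arvadosDiscoveryUri',
          'https://api.example/discovery/v1/apis/arvados/v1/rest').
    controller('ExampleController', ['ArvadosClient', function(ArvadosClient) {
        // apiPromise(controller, action, params) resolves the discovery
        // document once, then issues the API call and returns a promise.
        ArvadosClient.apiPromise('collections', 'get',
                                 {uuid: 'zzzzz-4zz18-placeholder00000'}).
            then(function(collection) {
                console.log('manifest:', collection.manifest_text);
            });
    }]);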
index 36361a17d12e3f295910b87be2ff85a6e6077110..29ea74c417cb904f5b5da1cab364c1f1000f2018 100644 (file)
@@ -56,3 +56,315 @@ $(document).on('ajax:complete ready', function() {
         subscribeToEventLog();
     }
 });
+
+/* Assumes existence of:
+  window.jobGraphData = [];
+  window.jobGraphSeries = [];
+  window.jobGraphSortedSeries = [];
+  window.jobGraphMaxima = {};
+ */
+function processLogLineForChart( logLine ) {
+    try {
+        var match = logLine.match(/^(\S+) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+        if( !match ) {
+            match = logLine.match(/^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+            if( match ) {
+                match[1] = (new Date(match[1] + ' UTC')).toISOString().replace('Z','');
+            }
+        }
+        if( match ) {
+            var rawDetailData = '';
+            var datum = null;
+
+            // the timestamp comes first
+            var timestamp = match[1].replace('_','T') + 'Z';
+
+            // we are interested in "-- interval" recordings
+            var intervalMatch = match[6].match(/(.*) -- interval (.*)/);
+            if( intervalMatch ) {
+                var intervalData = intervalMatch[2].trim().split(' ');
+                var dt = parseFloat(intervalData[0]);
+                var dsum = 0.0;
+                for(var i=2; i < intervalData.length; i += 2 ) {
+                    dsum += parseFloat(intervalData[i]);
+                }
+                datum = dsum/dt;
+
+                if( datum < 0 ) {
+                    // not interested in negative deltas
+                    return;
+                }
+
+                rawDetailData = intervalMatch[2];
+
+                // for the series name use the task number (4th term) and then the first word after 'crunchstat:'
+                var series = 'T' + match[4] + '-' + match[5];
+
+                // special calculation for cpus
+                if( /-cpu$/.test(series) ) {
+                    // divide the stat by the number of cpus unless the time count is less than the interval length
+                    if( parseFloat(dsum.toFixed(1)) > parseFloat(dt.toFixed(1)) ) {
+                        var cpuCountMatch = intervalMatch[1].match(/(\d+) cpus/);
+                        if( cpuCountMatch ) {
+                            datum = datum / cpuCountMatch[1];
+                        }
+                    }
+                }
+
+                addJobGraphDatum( timestamp, datum, series, rawDetailData );
+            } else {
+                // we are also interested in memory ("mem") recordings
+                var memoryMatch = match[6].match(/(\d+) cache (\d+) swap (\d+) pgmajfault (\d+) rss/);
+                if( memoryMatch ) {
+                    rawDetailData = match[6];
+                    // one datapoint for rss and one for swap - only show the rawDetailData for rss
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[4]), 'T' + match[4] + "-rss", rawDetailData );
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[2]), 'T' + match[4] + "-swap", '' );
+                } else {
+                    // not interested
+                    return;
+                }
+            }
+
+            window.redraw = true;
+        }
+    } catch( err ) {
+        console.log( 'Ignoring error trying to process log line: ' + err);
+    }
+}
+
+function addJobGraphDatum(timestamp, datum, series, rawDetailData) {
+    // check for new series
+    if( $.inArray( series, jobGraphSeries ) < 0 ) {
+        var newIndex = jobGraphSeries.push(series) - 1;
+        jobGraphSortedSeries.push(newIndex);
+        jobGraphSortedSeries.sort( function(a,b) {
+            var matchA = jobGraphSeries[a].match(/^T(\d+)-(.*)/);
+            var matchB = jobGraphSeries[b].match(/^T(\d+)-(.*)/);
+            var termA = ('000000' + matchA[1]).slice(-6) + matchA[2];
+            var termB = ('000000' + matchB[1]).slice(-6) + matchB[2];
+            return termA > termB ? 1 : -1;
+        });
+        jobGraphMaxima[series] = null;
+        window.recreate = true;
+    }
+
+    if( datum !== 0 && ( jobGraphMaxima[series] === null || jobGraphMaxima[series] < datum ) ) {
+        if( isJobSeriesRescalable(series) ) {
+            // use old maximum to get a scale conversion
+            var scaleConversion = jobGraphMaxima[series]/datum;
+            // set new maximum and rescale the series
+            jobGraphMaxima[series] = datum;
+            rescaleJobGraphSeries( series, scaleConversion );
+        }
+    }
+
+    // scale
+    var scaledDatum = null;
+    if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null && jobGraphMaxima[series] !== 0 ) {
+        scaledDatum = datum/jobGraphMaxima[series]
+    } else {
+        scaledDatum = datum;
+    }
+    // identify x axis point, searching from the end of the array (most recent)
+    var found = false;
+    for( var i = jobGraphData.length - 1; i >= 0; i-- ) {
+        if( jobGraphData[i]['t'] === timestamp ) {
+            found = true;
+            jobGraphData[i][series] = scaledDatum;
+            jobGraphData[i]['raw-'+series] = rawDetailData;
+            break;
+        } else if( jobGraphData[i]['t'] < timestamp  ) {
+            // we've gone far enough back in time and this data is supposed to be sorted
+            break;
+        }
+    }
+    // index counter from previous loop will have gone one too far, so add one
+    var insertAt = i+1;
+    if(!found) {
+        // create a new x point for this previously unrecorded timestamp
+        var entry = { 't': timestamp };
+        entry[series] = scaledDatum;
+        entry['raw-'+series] = rawDetailData;
+        jobGraphData.splice( insertAt, 0, entry );
+        var shifted = [];
+        // now let's see about "scrolling" the graph, dropping entries that are too old (>10 minutes)
+        while( jobGraphData.length > 0
+                 && (Date.parse( jobGraphData[0]['t'] ) + 10*60000 < Date.parse( jobGraphData[jobGraphData.length-1]['t'] )) ) {
+            shifted.push(jobGraphData.shift());
+        }
+        if( shifted.length > 0 ) {
+            // from those that we dropped, were any of them maxima? if so we need to rescale
+            jobGraphSeries.forEach( function(series) {
+                // test that every shifted entry in this series was either not a number (in which case we don't care)
+                // or else approximately (to 2 decimal places) smaller than the scaled maximum (i.e. 1),
+                // because otherwise we just scrolled off something that was a maximum point
+                // and so we need to recalculate a new maximum point by looking at all remaining displayed points in the series
+                if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null
+                      && !shifted.every( function(e) { return( !$.isNumeric(e[series]) || e[series].toFixed(2) < 1.0 ) } ) ) {
+                    // check the remaining displayed points and find the new (scaled) maximum
+                    var seriesMax = null;
+                    jobGraphData.forEach( function(entry) {
+                        if( $.isNumeric(entry[series]) && (seriesMax === null || entry[series] > seriesMax)) {
+                            seriesMax = entry[series];
+                        }
+                    });
+                    if( seriesMax !== null && seriesMax !== 0 ) {
+                        // set new actual maximum, using the new scaled maximum as the conversion factor, and rescale the series
+                        jobGraphMaxima[series] *= seriesMax;
+                        var scaleConversion = 1/seriesMax;
+                        rescaleJobGraphSeries( series, scaleConversion );
+                    }
+                    else {
+                        // we no longer have any data points displaying for this series
+                        jobGraphMaxima[series] = null;
+                    }
+                }
+            });
+        }
+        // add a 10 minute old null data point to keep the chart honest if the oldest point is less than 9.9 minutes old
+        if( jobGraphData.length > 0 ) {
+            var earliestTimestamp = jobGraphData[0]['t'];
+            var mostRecentTimestamp = jobGraphData[jobGraphData.length-1]['t'];
+            if( (Date.parse( earliestTimestamp ) + 9.9*60000 > Date.parse( mostRecentTimestamp )) ) {
+                var tenMinutesBefore = (new Date(Date.parse( mostRecentTimestamp ) - 600*1000)).toISOString();
+                jobGraphData.unshift( { 't': tenMinutesBefore } );
+            }
+        }
+    }
+
+}
+
+function createJobGraph(elementName) {
+    delete jobGraph;
+    var emptyGraph = false;
+    if( jobGraphData.length === 0 ) {
+        // If there is no data we still want to show an empty graph,
+        // so add an empty datum and placeholder series to fool it into displaying itself.
+        // Note that when finally a new series is added, the graph will be recreated anyway.
+        jobGraphData.push( {} );
+        jobGraphSeries.push( '' );
+        emptyGraph = true;
+    }
+    var graphteristics = {
+        element: elementName,
+        data: jobGraphData,
+        ymax: 1.0,
+        yLabelFormat: function () { return ''; },
+        xkey: 't',
+        ykeys: jobGraphSeries,
+        labels: jobGraphSeries,
+        resize: true,
+        hideHover: 'auto',
+        parseTime: true,
+        hoverCallback: function(index, options, content) {
+            var s = "<div class='morris-hover-row-label'>";
+            s += options.data[index][options.xkey];
+            s += "</div> ";
+            for( var i = 0; i < jobGraphSortedSeries.length; i++ ) {
+                var sortedIndex = jobGraphSortedSeries[i];
+                var series = options.ykeys[sortedIndex];
+                var datum = options.data[index][series];
+                var point = '';
+                point += "<div class='morris-hover-point' style='color: ";
+                point += options.lineColors[sortedIndex % options.lineColors.length];
+                point += "'>";
+                var labelMatch = options.labels[sortedIndex].match(/^T(\d+)-(.*)/);
+                point += 'Task ' + labelMatch[1] + ' ' + labelMatch[2];
+                point += ": ";
+                if ( datum !== undefined ) {
+                    if( isJobSeriesRescalable( series ) ) {
+                        datum *= jobGraphMaxima[series];
+                    }
+                    if( parseFloat(datum) !== 0 ) {
+                        if( /-cpu$/.test(series) ){
+                            datum = $.number(datum * 100, 1) + '%';
+                        } else if( datum < 10 ) {
+                            datum = $.number(datum, 2);
+                        } else {
+                            datum = $.number(datum);
+                        }
+                        if(options.data[index]['raw-'+series]) {
+                            datum += ' (' + options.data[index]['raw-'+series] + ')';
+                        }
+                    }
+                    point += datum;
+                } else {
+                    continue;
+                }
+                point += "</div> ";
+                s += point;
+            }
+            return s;
+        }
+    }
+    if( emptyGraph ) {
+        graphteristics['axes'] = false;
+        graphteristics['parseTime'] = false;
+        graphteristics['hideHover'] = 'always';
+    }
+    window.jobGraph = Morris.Line( graphteristics );
+    if( emptyGraph ) {
+        jobGraphData = [];
+        jobGraphSeries = [];
+    }
+}
+
+function rescaleJobGraphSeries( series, scaleConversion ) {
+    if( isJobSeriesRescalable( series ) ) {
+        $.each( jobGraphData, function( i, entry ) {
+            if( entry[series] !== null && entry[series] !== undefined ) {
+                entry[series] *= scaleConversion;
+            }
+        });
+    }
+}
+
+// Note: 'cpu' series are never rescaled; they are already a fraction between 0 and 1.
+function isJobSeriesRescalable( series ) {
+    return !/-cpu$/.test(series);
+}
+
+$(document).on('arv-log-event', '#log_graph_div', function(event, eventData) {
+    if( eventData.properties.text ) {
+        eventData.properties.text.split('\n').forEach( function( logLine ) {
+            processLogLineForChart( logLine );
+        } );
+    }
+} );
+
+$(document).on('ready ajax:complete', function() {
+    $('#log_graph_div').not('.graph-is-setup').addClass('graph-is-setup').each( function( index, graph_div ) {
+        window.jobGraphData = [];
+        window.jobGraphSeries = [];
+        window.jobGraphSortedSeries = [];
+        window.jobGraphMaxima = {};
+        window.recreate = false;
+        window.redraw = false;
+
+        createJobGraph($(graph_div).attr('id'));
+        var object_uuid = $(graph_div).data('object-uuid');
+        // find any listeners registered for this object uuid so we can replay fetched log events to them
+        var matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"]";
+
+        $(document).trigger('ajax:send');
+        $.get('/jobs/' + $(graph_div).data('object-uuid') + '/logs.json', function(data) {
+            data.forEach( function( entry ) {
+                $(matches).trigger('arv-log-event', entry);
+            });
+        });
+
+        setInterval( function() {
+            if( recreate ) {
+                window.recreate = false;
+                window.redraw = false;
+                // series have changed, draw entirely new graph
+                $(graph_div).html('');
+                createJobGraph($(graph_div).attr('id'));
+            } else if( redraw ) {
+                window.redraw = false;
+                jobGraph.setData( jobGraphData );
+            }
+        }, 5000);
+    });
+});
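To make the interval arithmetic in processLogLineForChart concrete, here is a worked trace; the log line is a made-up sample shaped to satisfy the regexes above, not captured output:

// Hypothetical input line (first regex form, '-- interval' variant):
//   2015-01-06_13:45:08 zzzzz-8i9sb-0000000000000000 1234 0 stderr \
//     crunchstat: cpu 123.9000 user 3.5000 sys 4 cpus -- interval \
//     10.0000 seconds 5.5000 user 0.5000 sys
// timestamp = '2015-01-06T13:45:08Z'   // match[1] with '_' -> 'T', plus 'Z'
// intervalData = ['10.0000','seconds','5.5000','user','0.5000','sys']
// dt    = 10.0                  // first term: interval length in seconds
// dsum  = 5.5 + 0.5 = 6.0       // every second term after the length
// datum = dsum/dt = 0.6         // per-second rate over the interval
// series = 'T0-cpu'             // task number + stat category
// Since dsum <= dt, the '4 cpus' divisor in the -cpu special case is skipped.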
index bd87b28028ac2586b90bdc375e4a7563184fd145..e820ba978ec3dddf07dcec83f08fb9573d6a7727 100644 (file)
@@ -98,7 +98,11 @@ $(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(e
     var wasatbottom = ($(this).scrollTop() + $(this).height() >= this.scrollHeight);
 
     if (eventData.event_type == "stderr" || eventData.event_type == "stdout") {
-        $(this).append(eventData.properties.text);
+        if( eventData.prepend ) {
+            $(this).prepend(eventData.properties.text);
+        } else {
+            $(this).append(eventData.properties.text);
+        }
     }
 
     if (wasatbottom) {
@@ -106,23 +110,20 @@ $(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(e
     }
 });
 
-var showhide_compare = function() {
-    var form = $('form#compare')[0];
-    $('input[type=hidden][name="uuids[]"]', form).remove();
-    $('input[type=submit]', form).prop('disabled',true).show();
-    var checked_inputs = $('[data-object-uuid*=-d1hrv-] input[name="uuids[]"]:checked');
-    if (checked_inputs.length >= 2 && checked_inputs.length <= 3) {
-        checked_inputs.each(function(){
-            if(this.checked) {
-                $('input[type=submit]', form).prop('disabled',false).show();
-                $(form).append($('<input type="hidden" name="uuids[]"/>').val(this.value));
-            }
-        });
-    }
-};
-$(document).on('change', '[data-object-uuid*=-d1hrv-] input[name="uuids[]"]', function(e) {
-    if(e.target == this) {
-        showhide_compare();
-    }
-});
-$(document).on('ready ajax:success', showhide_compare);
+// Set up all events for the pipeline instances compare button.
+(function() {
+    var compare_form = '#compare';
+    var compare_inputs = '#comparedInstances :checkbox[name="uuids[]"]';
+    var update_button = function(event) {
+        var $form = $(compare_form);
+        var $checked_inputs = $(compare_inputs).filter(':checked');
+        $(':submit', $form).prop('disabled', (($checked_inputs.length < 2) ||
+                                              ($checked_inputs.length > 3)));
+        $('input[name="uuids[]"]', $form).remove();
+        $form.append($checked_inputs.clone()
+                     .removeAttr('id').attr('type', 'hidden'));
+    };
+    $(document)
+        .on('ready ajax:success', compare_form, update_button)
+        .on('change', compare_inputs, update_button);
+})();
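For reference, the DOM structure these selectors assume, reconstructed from the selectors themselves (so treat the exact ids and nesting as assumptions):

// Assumed markup, inferred from compare_form/compare_inputs above:
//   <form id="compare">
//     <input type="submit">                         // enabled for 2-3 picks
//   </form>
//   <div id="comparedInstances">
//     <input type="checkbox" name="uuids[]" value="zzzzz-d1hrv-...">
//     ...
//   </div>
// On change, checked boxes are cloned into the form as hidden uuids[] inputs.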
index e769d809570743eff729ca15e9bbcc80178c987e..f3c323c63ba72bd05dde4e40a87cdcd46e936511 100644 (file)
@@ -1,7 +1,7 @@
 $(document).
   on('click', "#report-issue-submit", function(e){
     $(this).html('Sending');
-    $(this).attr('disabled', true);
+    $(this).prop('disabled', true);
     var $cancelButton = $('#report-issue-cancel');
     if ($cancelButton) {
       $cancelButton.html('Close');
@@ -21,7 +21,7 @@ $(document).
           $('div').remove('.modal-footer-status');
           $('.modal-footer').append('<div><br/></div><div class="modal-footer-status alert alert-danger"><p class="contain-align-left">We are sorry. We could not submit your report! We really want this to work, though -- please try again.</p></div>');
           $sendButton.html('Send problem report');
-          $sendButton.attr('disabled',false);
+          $sendButton.prop('disabled', false);
         }
         var $cancelButton = $('#report-issue-cancel');
         $cancelButton.html('Cancel');
index 40724bb566b4f8903687edfc9e2e69bf331d4ff4..0068b738ec903067c3352e9ae92d5a6ed82c2b19 100644 (file)
 //= require jquery
 //= require jquery_ujs
 
-/** Javascript for local persistent selection. */
-
-get_selection_list = null;
-form_selection_sources = {};
+/** Javascript for selection. */
 
 jQuery(function($){
-    var storage = localStorage; // sessionStorage
-
-    get_selection_list = function() {
-        if (!storage.persistentSelection) {
-            storage.persistentSelection = JSON.stringify([]);
-        }
-        return JSON.parse(storage.persistentSelection);
-    }
-
-    var put_storage = function(lst) {
-        storage.persistentSelection = JSON.stringify(lst);
-    }
-
-    var add_selection = function(uuid, name, href, type) {
-        var lst = get_selection_list();
-        lst.push({"uuid": uuid, "name": name, "href": href, "type": type});
-        put_storage(lst);
-        update_count();
-    };
-
-    var remove_selection = function(uuid) {
-        var lst = get_selection_list();
-        for (var i = 0; i < lst.length; i++) {
-            if (lst[i].uuid == uuid) {
-                lst.splice(i, 1);
-                i--;
-            }
-        }
-        put_storage(lst);
-        update_count();
-    };
-
-    var remove_selection_click = function(e) {
-        remove_selection($(this).val());
-    };
-
-    var clear_selections = function() {
-        put_storage([]);
-        update_count();
-    }
-
-    var update_count = function(e) {
-        var html;
-        var this_object_uuid = $('#selection-form-content').
-            closest('form').
-            find('input[name=uuid]').val();
-        var lst = get_selection_list();
-        $("#persistent-selection-count").text(lst.length);
-        if (lst.length > 0) {
-            html = '<li><a href="#" class="btn btn-xs btn-info" id="clear_selections_button"><i class="fa fa-fw fa-ban"></i> Clear selections</a></li>';
-            $('#selection-form-content').html(html);
-
-            for (var i = 0; i < lst.length; i++) {
-                $('#selection-form-content > li > table').append("<tr>"
-                                                       + "<td>"
-                                                       + "<input class='remove-selection' name='selection[]' type='checkbox' value='" + lst[i].uuid + "' checked='true' data-stoppropagation='true' />"
-                                                       + "</td>"
-
-                                                       + "<td>"
-                                                       + "<div style='padding-left: 1em'><a href=\"" + lst[i].href + "\">" + lst[i].name + "</a></div>"
-                                                       + "</td>"
-
-                                                       + "<td style=\"vertical-align: top\">"
-                                                       + "<span style=\"padding-right: 1em\">" + lst[i].type + "</span>"
-                                                       + "</td>"
-
-                                                       + "</tr>");
-            }
-        } else {
-            $('#selection-form-content').html("<li class='notification empty'>No selections.</li>");
-        }
-
-        var checkboxes = $('.persistent-selection:checkbox');
-        for (i = 0; i < checkboxes.length; i++) {
-            for (var j = 0; j < lst.length; j++) {
-                if (lst[j].uuid == $(checkboxes[i]).val()) {
-                    checkboxes[i].checked = true;
-                    break;
-                }
-            }
-            if (j == lst.length) {
-                checkboxes[i].checked = false;
-            }
-        }
-
-        $('.remove-selection').on('click', remove_selection_click);
-        $('#clear_selections_button').on('click', clear_selections);
-        $(document).trigger('selections-updated', [lst]);
-    };
-
     $(document).
         on('change', '.persistent-selection:checkbox', function(e) {
-            var inc = 0;
-            if ($(this).is(":checked")) {
-                add_selection($(this).val(), $(this).attr('friendly_name'), $(this).attr('href'), $(this).attr('friendly_type'));
-            }
-            else {
-                remove_selection($(this).val());
-            }
+            $(document).trigger('selections-updated');
         });
-
-    $(window).on('load', clear_selections);
-    $(window).on('storage', update_count);
-
-    $('#selection-form-content').on("click", function(e) {
-        e.stopPropagation();
-    });
 });
 
-add_form_selection_sources = null;
-select_form_sources = null;
-
-(function() {
-    var form_selection_sources = {};
-    add_form_selection_sources = function (src) {
-        for (var i = 0; i < src.length; i++) {
-            var t = form_selection_sources[src[i].type];
-            if (!t) {
-                t = form_selection_sources[src[i].type] = {};
-            }
-            if (!t[src[i].uuid]) {
-                t[src[i].uuid] = src[i];
-            }
-        }
-    };
-
-    select_form_sources = function(type) {
-        var ret = [];
-
-        if (get_selection_list) {
-            var lst = get_selection_list();
-            if (lst.length > 0) {
-                var text = "&horbar; Selections &horbar;";
-                var span = document.createElement('span');
-                span.innerHTML = text;
-                ret.push({text: span.innerHTML, value: "***invalid***"});
-
-                for (var i = 0; i < lst.length; i++) {
-                    if (lst[i].type == type) {
-                        var n = lst[i].name;
-                        n = n.replace(/<span[^>]*>/i, "[");
-                        n = n.replace(/<\/span>/i, "]");
-                        ret.push({text: n, value: lst[i].uuid})
-                    }
-                }
-            }
-        }
-
-        var text = "&horbar; Recent &horbar;";
-        var span = document.createElement('span');
-        span.innerHTML = text;
-        ret.push({text: span.innerHTML, value: "***invalid***"});
-
-        var t = form_selection_sources[type];
-        for (var key in t) {
-            if (t.hasOwnProperty(key)) {
-                var obj = t[key];
-                ret.push({text: obj.name, value: obj.uuid})
-            }
-        }
-        return ret;
-    };
-})();
-
 function dispatch_selection_action() {
     // Build a new "href" attribute for this link by starting with the
     // "data-href" attribute and appending ?foo[]=bar&foo[]=baz (or
@@ -180,7 +19,7 @@ function dispatch_selection_action() {
     var param_name = $(this).attr('data-selection-param-name');
     var href = $(this).attr('data-href');
     if ($(this).closest('.disabled').length > 0) {
-       return false;
+        return false;
     }
     $(this).
         closest('.selection-action-container').
@@ -198,25 +37,25 @@ function dispatch_selection_action() {
 }
 
 function enable_disable_selection_actions() {
-    var $container = $(this).closest('.selection-action-container');
+    var $container = $(this);
     var $checked = $('.persistent-selection:checkbox:checked', $container);
-    $('[data-selection-action]').
+    $('[data-selection-action]', $container).
         closest('div.btn-group-sm').
         find('ul li').
         toggleClass('disabled', ($checked.length == 0));
-    $('[data-selection-action=compare]').
+    $('[data-selection-action=compare]', $container).
         closest('li').
         toggleClass('disabled',
                     ($checked.filter('[value*=-d1hrv-]').length < 2) ||
                     ($checked.not('[value*=-d1hrv-]').length > 0));
     <% unless Group.copies_to_projects? %>
-        $('[data-selection-action=copy]').
+        $('[data-selection-action=copy]', $container).
             closest('li').
             toggleClass('disabled',
                         ($checked.filter('[value*=-j7d0g-]').length > 0) ||
                         ($checked.length < 1));
     <% end %>
-    $('[data-selection-action=combine-project-contents]').
+    $('[data-selection-action=combine-project-contents]', $container).
         closest('li').
         toggleClass('disabled',
                     ($checked.filter('[value*=-4zz18-]').length < 1) ||
@@ -224,8 +63,12 @@ function enable_disable_selection_actions() {
 }
 
 $(document).
-    on('selections-updated ready ajax:complete', function() {
-        var $btn = $('[data-selection-action]');
-        $btn.click(dispatch_selection_action);
-        enable_disable_selection_actions.call($btn);
+    on('selections-updated', function() {
+        $('.selection-action-container').each(enable_disable_selection_actions);
+    }).
+    on('ready ajax:complete', function() {
+        $('[data-selection-action]').
+            off('click', dispatch_selection_action).
+            on('click', dispatch_selection_action);
+        $(this).trigger('selections-updated');
     });
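
A note on the rebinding above: calling off('click', dispatch_selection_action) before on(...) keeps the handler idempotent even though 'ready ajax:complete' can fire many times per page, and iterating over each '.selection-action-container' lets one page host several independent selection toolbars. A minimal sketch of the same pattern, with illustrative names that are not part of this commit:

    function onActionClick(e) {
        // ... dispatch using $(this).attr('data-href') ...
    }

    $(document).on('ready ajax:complete', function() {
        // Unbinding first prevents the handler from stacking up when
        // this event fires repeatedly over the life of the page.
        $('[data-example-action]').
            off('click', onActionClick).
            on('click', onActionClick);
    });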
index 07e46fe65fc845328eb21c0c0bc7dd6042ba5d21..6565ea9f4c17ea46285420a4d91105f167e9040d 100644 (file)
@@ -124,7 +124,7 @@ $(document).on('arv:pane:reload', '[data-pane-content-url]', function(e) {
             $pane.removeClass('pane-loading');
             $pane.addClass('pane-loaded');
             $pane.attr('data-loaded-at', (new Date()).getTime());
-            $pane.trigger('arv:pane:loaded');
+            $pane.trigger('arv:pane:loaded', [$pane]);
 
             if ($pane.hasClass('pane-stale')) {
                 $pane.trigger('arv:pane:reload');
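
Passing [$pane] as trigger()'s second argument hands the pane element to 'arv:pane:loaded' listeners after the event object, so a handler no longer has to work out which pane finished loading. A hedged sketch of a consumer (the handler body is illustrative):

    $(document).on('arv:pane:loaded', function(event, $pane) {
        // $pane is the extra argument supplied by trigger(); it can be
        // more convenient than walking up from event.target.
        console.log('pane loaded:', $pane && $pane.attr('id'));
    });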
diff --git a/apps/workbench/app/assets/javascripts/upload_to_collection.js b/apps/workbench/app/assets/javascripts/upload_to_collection.js
new file mode 100644 (file)
index 0000000..d4333fa
--- /dev/null
@@ -0,0 +1,476 @@
+var app = angular.module('Workbench', ['Arvados']);
+app.controller('UploadToCollection', UploadToCollection);
+app.directive('arvUuid', arvUuid);
+
+function arvUuid() {
+    // Copy the given uuid into the current $scope.
+    return {
+        restrict: 'A',
+        link: function(scope, element, attributes) {
+            scope.uuid = attributes.arvUuid;
+        }
+    };
+}
+
+UploadToCollection.$inject = ['$scope', '$filter', '$q', '$timeout',
+                              'ArvadosClient', 'arvadosApiToken'];
+function UploadToCollection($scope, $filter, $q, $timeout,
+                            ArvadosClient, arvadosApiToken) {
+    $.extend($scope, {
+        uploadQueue: [],
+        uploader: new QueueUploader(),
+        addFilesToQueue: function(files) {
+            // Angular binding doesn't work its usual magic for file
+            // inputs, so we need to $scope.$apply() this update.
+            $scope.$apply(function(){
+                var i, nItemsTodo;
+                // Add these new files after the items already waiting
+                // in the queue -- but before the items that are
+                // 'Done' and have therefore been pushed to the
+                // bottom.
+                nItemsTodo = 0;
+                while (nItemsTodo < $scope.uploadQueue.length &&
+                       $scope.uploadQueue[nItemsTodo].state !== 'Done') {
+                    nItemsTodo++;
+                }
+                for (i=0; i<files.length; i++) {
+                    $scope.uploadQueue.splice(nItemsTodo+i, 0,
+                        new FileUploader(files[i]));
+                }
+            });
+        },
+        go: function() {
+            $scope.uploader.go();
+        },
+        stop: function() {
+            $scope.uploader.stop();
+        },
+        removeFileFromQueue: function(index) {
+            var wasRunning = $scope.uploader.running;
+            $scope.uploadQueue[index].stop();
+            $scope.uploadQueue.splice(index, 1);
+            if (wasRunning)
+                $scope.go();
+        },
+        countInStates: function(want_states) {
+            var found = 0;
+            $.each($scope.uploadQueue, function() {
+                if (want_states.indexOf(this.state) >= 0) {
+                    ++found;
+                }
+            });
+            return found;
+        }
+    });
+    ////////////////////////////////
+
+    var keepProxy;
+
+    function SliceReader(_slice) {
+        var that = this;
+        $.extend(this, {
+            go: go
+        });
+        ////////////////////////////////
+        var _deferred;
+        var _reader;
+        function go() {
+            // Return a promise, which will be resolved with the
+            // requested slice data.
+            _deferred = $.Deferred();
+            _reader = new FileReader();
+            _reader.onload = resolve;
+            _reader.onerror = _deferred.reject;
+            _reader.onprogress = _deferred.notify;
+            _reader.readAsArrayBuffer(_slice.blob);
+            return _deferred.promise();
+        }
+        function resolve() {
+            if (_reader.result.byteLength !== _slice.size) {
+                // Sometimes we get an onload event even though the read
+                // did not return the desired number of bytes. We treat
+                // that as a failure.
+                _deferred.reject(
+                    null, "Read error",
+                    "Short read: wanted " + _slice.size +
+                        ", received " + _reader.result.length);
+                return;
+            }
+            return _deferred.resolve(_reader.result);
+        }
+    }
+
+    function SliceUploader(_label, _data, _dataSize) {
+        $.extend(this, {
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;
+        var _failCount = 0;
+        var _failMax = 3;
+        var _jqxhr;
+        function go() {
+            // Send data to the Keep proxy. Retry a few times on
+            // fail. Return a promise that will get resolved with
+            // resolve(locator) when the block is accepted by the
+            // proxy.
+            _deferred = $.Deferred();
+            goSend();
+            return _deferred.promise();
+        }
+        function stop() {
+            _failMax = 0;
+            _jqxhr.abort();
+            _deferred.reject({
+                textStatus: 'stopped',
+                err: 'interrupted at slice '+_label
+            });
+        }
+        function goSend() {
+            _jqxhr = $.ajax({
+                url: proxyUriBase(),
+                type: 'POST',
+                crossDomain: true,
+                headers: {
+                    'Authorization': 'OAuth2 '+arvadosApiToken,
+                    'Content-Type': 'application/octet-stream',
+                    'X-Keep-Desired-Replicas': '2'
+                },
+                xhr: function() {
+                    // Make an xhr that reports upload progress
+                    var xhr = $.ajaxSettings.xhr();
+                    if (xhr.upload) {
+                        xhr.upload.onprogress = onSendProgress;
+                    }
+                    return xhr;
+                },
+                processData: false,
+                data: _data
+            });
+            _jqxhr.then(onSendResolve, onSendReject);
+        }
+        function onSendProgress(xhrProgressEvent) {
+            _deferred.notify(xhrProgressEvent.loaded, _dataSize);
+        }
+        function onSendResolve(data, textStatus, jqxhr) {
+            _deferred.resolve(data, _dataSize);
+        }
+        function onSendReject(xhr, textStatus, err) {
+            if (++_failCount < _failMax) {
+                // TODO: it would be nice to tell the user that a retry is happening.
+                console.log('slice ' + _label + ': ' +
+                            textStatus + ', retry ' + _failCount);
+                goSend();
+            } else {
+                _deferred.reject(
+                    {xhr: xhr, textStatus: textStatus, err: err});
+            }
+        }
+        function proxyUriBase() {
+            return ((keepProxy.service_ssl_flag ? 'https' : 'http') +
+                    '://' + keepProxy.service_host + ':' +
+                    keepProxy.service_port + '/');
+        }
+    }
+
+    function FileUploader(file) {
+        $.extend(this, {
+            file: file,
+            locators: [],
+            progress: 0.0,
+            state: 'Queued',    // Queued, Uploading, Paused, Uploaded, Done
+            statistics: null,
+            go: go,
+            stop: stop          // User wants to stop.
+        });
+        ////////////////////////////////
+        var that = this;
+        var _currentUploader;
+        var _currentSlice;
+        var _deferred;
+        var _maxBlobSize = Math.pow(2,26);
+        var _bytesDone = 0;
+        var _queueTime = Date.now();
+        var _startTime;
+        var _startByte;
+        var _finishTime;
+        var _readPos = 0;       // number of bytes confirmed uploaded
+        function go() {
+            if (_deferred)
+                _deferred.reject({textStatus: 'restarted'});
+            _deferred = $.Deferred();
+            that.state = 'Uploading';
+            _startTime = Date.now();
+            _startByte = _readPos;
+            setProgress();
+            goSlice();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            if (_deferred) {
+                that.state = 'Paused';
+                _deferred.reject({textStatus: 'stopped', err: 'interrupted'});
+            }
+            if (_currentUploader) {
+                _currentUploader.stop();
+                _currentUploader = null;
+            }
+        }
+        function goSlice() {
+            // Ensure _deferred gets resolved or rejected -- either
+            // right here, or when a promise arranged here is
+            // fulfilled.
+            _currentSlice = nextSlice();
+            if (!_currentSlice) {
+                // All slices have been uploaded, but the work won't
+                // be truly Done until the target collection has been
+                // updated by the QueueUploader. This state is called:
+                that.state = 'Uploaded';
+                setProgress(_readPos);
+                _currentUploader = null;
+                _deferred.resolve([that]);
+                return;
+            }
+            _currentUploader = new SliceUploader(
+                _readPos.toString(),
+                _currentSlice.blob,
+                _currentSlice.size);
+            _currentUploader.go().then(
+                onUploaderResolve,
+                onUploaderReject,
+                onUploaderProgress);
+        }
+        function onUploaderResolve(locator, dataSize) {
+            var sizeHint = (''+locator).split('+')[1];
+            if (!locator || parseInt(sizeHint) !== dataSize) {
+                console.log("onUploaderResolve, but locator '" + locator +
+                            "' with size hint '" + sizeHint +
+                            "' does not look right for dataSize=" + dataSize);
+                return onUploaderReject({
+                    textStatus: "error",
+                    err: "Bad response from slice upload"
+                });
+            }
+            that.locators.push(locator);
+            _readPos += dataSize;
+            _currentUploader = null;
+            goSlice();
+        }
+        function onUploaderReject(reason) {
+            that.state = 'Paused';
+            setProgress(_readPos);
+            _currentUploader = null;
+            if (_deferred)
+                _deferred.reject(reason);
+        }
+        function onUploaderProgress(sliceDone, sliceSize) {
+            setProgress(_readPos + sliceDone);
+        }
+        function nextSlice() {
+            var size = Math.min(
+                _maxBlobSize,
+                that.file.size - _readPos);
+            setProgress(_readPos);
+            if (size === 0) {
+                return false;
+            }
+            var blob = that.file.slice(
+                _readPos, _readPos+size,
+                'application/octet-stream; charset=x-user-defined');
+            return {blob: blob, size: size};
+        }
+        function setProgress(bytesDone) {
+            var kBps;
+            if (that.file.size == 0)
+                that.progress = 100;
+            else
+                that.progress = Math.min(100, 100 * bytesDone / that.file.size);
+            if (bytesDone > _startByte) {
+                kBps = (bytesDone - _startByte) /
+                    (Date.now() - _startTime);
+                that.statistics = (
+                    '' + $filter('number')(bytesDone/1024, '0') + ' KiB ' +
+                        'at ~' + $filter('number')(kBps, '0') + ' KiB/s');
+                if (that.state === 'Paused') {
+                    that.statistics += ', paused';
+                } else if (that.state === 'Uploading') {
+                    that.statistics += ', ETA ' +
+                        $filter('date')(
+                            new Date(
+                                Date.now() + (that.file.size - bytesDone) / kBps),
+                            'shortTime');
+                }
+            } else {
+                that.statistics = that.state;
+            }
+            if (that.state === 'Uploaded') {
+                // 'Uploaded' gets reported as 'finished', which is a
+                // little misleading because the collection hasn't
+                // been updated yet. But FileUploader's portion of the
+                // work (and the time when it makes sense to show
+                // speed and ETA) is finished.
+                that.statistics += ', finished ' +
+                    $filter('date')(Date.now(), 'shortTime');
+                _finishTime = Date.now();
+            }
+            if (_deferred)
+                _deferred.notify();
+        }
+    }
+
+    function QueueUploader() {
+        $.extend(this, {
+            state: 'Idle',      // Idle, Running, Stopped, Failed
+            stateReason: null,
+            statusSuccess: null,
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;          // the one we promise to go()'s caller
+        var _deferredAppend;    // tracks current appendToCollection
+        function go() {
+            if (_deferred) return _deferred.promise();
+            if (_deferredAppend) return _deferredAppend.promise();
+            _deferred = $.Deferred();
+            that.state = 'Running';
+            ArvadosClient.apiPromise(
+                'keep_services', 'list',
+                {filters: [['service_type','=','proxy']]}).
+                then(doQueueWithProxy);
+            onQueueProgress();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            that.state = 'Stopped';
+            if (_deferred) {
+                _deferred.reject({});
+            }
+            for (var i=0; i<$scope.uploadQueue.length; i++)
+                $scope.uploadQueue[i].stop();
+            onQueueProgress();
+        }
+        function doQueueWithProxy(data) {
+            keepProxy = data.items[0];
+            if (!keepProxy) {
+                that.state = 'Failed';
+                that.stateReason =
+                    'There seems to be no Keep proxy service available.';
+                _deferred.reject(null, 'error', that.stateReason);
+                return;
+            }
+            return doQueueWork();
+        }
+        function doQueueWork() {
+            // If anything is not Done, do it.
+            if ($scope.uploadQueue.length > 0 &&
+                $scope.uploadQueue[0].state !== 'Done') {
+                if (_deferred) {
+                    that.stateReason = null;
+                    return $scope.uploadQueue[0].go().
+                        then(appendToCollection, null, onQueueProgress).
+                        then(doQueueWork, onQueueReject);
+                } else {
+                    // Queue work has been stopped. Just update the
+                    // view.
+                    onQueueProgress();
+                    return;
+                }
+            }
+            // If everything is Done, resolve the promise and clean
+            // up. Note this can happen even after the _deferred
+            // promise has been rejected: specifically, when stop() is
+            // called too late to prevent completion of the last
+            // upload. In that case we want to update state to "Idle",
+            // rather than leave it at "Stopped".
+            onQueueResolve();
+        }
+        function onQueueReject(reason) {
+            if (!_deferred) {
+                // Outcome has already been decided (by stop()).
+                return;
+            }
+
+            that.state = 'Failed';
+            that.stateReason = (
+                (reason.textStatus || 'Error') +
+                    (reason.xhr && reason.xhr.options
+                     ? (' (from ' + reason.xhr.options.url + ')')
+                     : '') +
+                    ': ' +
+                    (reason.err || ''));
+            if (reason.xhr && reason.xhr.responseText)
+                that.stateReason += ' -- ' + reason.xhr.responseText;
+            _deferred.reject(reason);
+            onQueueProgress();
+        }
+        function onQueueResolve() {
+            that.state = 'Idle';
+            that.stateReason = 'Done!';
+            if (_deferred)
+                _deferred.resolve();
+            onQueueProgress();
+        }
+        function onQueueProgress() {
+            // Ensure updates happen after FileUploader promise callbacks.
+            $timeout(function(){$scope.$apply();});
+        }
+        function appendToCollection(uploads) {
+            _deferredAppend = $.Deferred();
+            ArvadosClient.apiPromise(
+                'collections', 'get',
+                { uuid: $scope.uuid }).
+                then(function(collection) {
+                    var manifestText = '';
+                    $.each(uploads, function(_, upload) {
+                        var locators = upload.locators;
+                        if (locators.length === 0) {
+                            // Every stream must have at least one
+                            // data locator, even if it is zero bytes
+                            // long:
+                            locators = ['d41d8cd98f00b204e9800998ecf8427e+0'];
+                        }
+                        var filename = ArvadosClient.uniqueNameForManifest(
+                            collection.manifest_text,
+                            '.', upload.file.name);
+                        collection.manifest_text += '. ' +
+                            locators.join(' ') +
+                            ' 0:' + upload.file.size.toString() + ':' +
+                            filename +
+                            '\n';
+                    });
+                    return ArvadosClient.apiPromise(
+                        'collections', 'update',
+                        { uuid: $scope.uuid,
+                          collection:
+                          { manifest_text:
+                            collection.manifest_text }
+                        });
+                }).
+                then(function() {
+                    // Mark the completed upload(s) as Done and push
+                    // them to the bottom of the queue.
+                    var i, qLen = $scope.uploadQueue.length;
+                    for (i=0; i<qLen; i++) {
+                        if (uploads.indexOf($scope.uploadQueue[i]) >= 0) {
+                            $scope.uploadQueue[i].state = 'Done';
+                            $scope.uploadQueue.push.apply(
+                                $scope.uploadQueue,
+                                $scope.uploadQueue.splice(i, 1));
+                            --i;
+                            --qLen;
+                        }
+                    }
+                }).
+                then(_deferredAppend.resolve,
+                     _deferredAppend.reject);
+            return _deferredAppend.promise().
+                always(function() {
+                    _deferredAppend = null;
+                });
+        }
+    }
+}
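
For readers skimming this new file: FileUploader slices each File into blobs of at most 2^26 bytes (64 MiB), sends each slice to the Keep proxy, and collects the returned locators; QueueUploader then appends one manifest stream line per file. A condensed sketch of that flow, assuming a hypothetical putBlock(blob) that resolves with a Keep locator (it stands in for SliceUploader and is not part of this commit):

    function uploadFileSketch(file, putBlock) {
        var maxBlobSize = Math.pow(2, 26);  // 64 MiB, matching FileUploader above
        var locators = [];
        var pos = 0;
        function next() {
            if (pos >= file.size) {
                // Every stream needs at least one data locator; empty
                // files use the locator of the zero-length block.
                if (locators.length === 0)
                    locators = ['d41d8cd98f00b204e9800998ecf8427e+0'];
                // One manifest stream line per file:
                // ". <locator> [<locator> ...] 0:<size>:<name>\n"
                return $.Deferred().resolve(
                    '. ' + locators.join(' ') +
                    ' 0:' + file.size + ':' + file.name + '\n').promise();
            }
            var size = Math.min(maxBlobSize, file.size - pos);
            return putBlock(file.slice(pos, pos + size)).then(function(locator) {
                locators.push(locator);
                pos += size;
                return next();  // upload slices sequentially
            });
        }
        return next();
    }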
index 7dbeac9d4ee6b59773ad842d60c572090196f898..9bc93e32bd49574ff998e60980c6417cb98d4770 100644 (file)
@@ -11,6 +11,7 @@
  *= require_self
  *= require bootstrap
  *= require bootstrap3-editable/bootstrap-editable
+ *= require morris
  *= require_tree .
  */
 
@@ -47,6 +48,9 @@ table.table-justforlayout {
     font-size: .8em;
     color: #888;
 }
+.lighten {
+    color: #888;
+}
 .arvados-filename,
 .arvados-uuid {
     font-size: .8em;
@@ -200,11 +204,8 @@ table.table-fixed-header-row tbody {
     top:1.5em;
 }
 
-/* Setting the height needs to be fixed with javascript. */
 .dropdown-menu {
-    padding-right: 20px;
-    max-height: 440px;
-    width: 400px;
+    max-height: 30em;
     overflow-y: auto;
 }
 
@@ -275,3 +276,7 @@ span.editable-textile {
 .arv-log-refresh-control {
   display: none;
 }
+
+[ng\:cloak], [ng-cloak], .ng-cloak {
+    display: none !important;
+}
index f76c70bdc9a203d009618d42d742106bf84991e2..cf93f20350a6e3b14b1899c3907fb07750b40fb7 100644 (file)
@@ -8,3 +8,8 @@
     font-size: .8em;
     border: 2px solid black;
 }
+
+.morris-hover-point {
+    text-align: left;
+    width: 100%;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/selection.css b/apps/workbench/app/assets/stylesheets/selection.css
deleted file mode 100644 (file)
index 5e0da41..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#persistent-selection-list {
-    width: 500px;
-}
-
-#selection-form-content > li > a, #selection-form-content > li > button {
-    margin: 3px 20px;
-}
-
-#selection-form-content li table tr {
-    padding: 3px 20px;
-    line-height: 1.42857;
-    border-top: 1px solid rgb(221, 221, 221);
-}
-
-#selection-form-content li table tr:last-child {
-    border-bottom: 1px solid rgb(221, 221, 221);
-}
index 3270cfb376a1f30be9c214e42fe283a5a81cdc9b..c7575176dec97fa4762bb3d730fd3ec0ef95b7ca 100644 (file)
@@ -256,7 +256,9 @@ class ApplicationController < ActionController::Base
         elsif request.method.in? ['GET', 'HEAD']
           render
         else
-          redirect_to params[:return_to] || @object
+          redirect_to (params[:return_to] ||
+                       polymorphic_url(@object,
+                                       anchor: params[:redirect_to_anchor]))
         end
       }
       f.js { render }
@@ -320,16 +322,11 @@ class ApplicationController < ActionController::Base
     @new_resource_attrs ||= {}
     @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
     @object ||= model_class.new @new_resource_attrs, params["options"]
+
     if @object.save
-      respond_to do |f|
-        f.json { render json: @object.attributes.merge(href: url_for(action: :show, id: @object)) }
-        f.html {
-          redirect_to @object
-        }
-        f.js { render }
-      end
+      show
     else
-      self.render_error status: 422
+      render_error status: 422
     end
   end
 
index 39f637e2274d5ac12037363c8051fd17b63d5d9e..f4aa0395f3d273a2948f3415be6ad4bfaf539673 100644 (file)
@@ -14,7 +14,9 @@ class CollectionsController < ApplicationController
   RELATION_LIMIT = 5
 
   def show_pane_list
-    %w(Files Provenance_graph Used_by Advanced)
+    panes = %w(Files Upload Provenance_graph Used_by Advanced)
+    panes = panes - %w(Upload) unless (@object.editable? rescue false)
+    panes
   end
 
   def set_persistent
@@ -233,10 +235,7 @@ class CollectionsController < ApplicationController
 
   def sharing_popup
     @search_sharing = search_scopes
-    respond_to do |format|
-      format.html
-      format.js
-    end
+    render("sharing_popup.js", content_type: "text/javascript")
   end
 
   helper_method :download_link
@@ -246,18 +245,15 @@ class CollectionsController < ApplicationController
   end
 
   def share
-    a = ApiClientAuthorization.create(scopes: sharing_scopes)
-    @search_sharing = search_scopes
-    render 'sharing_popup'
+    ApiClientAuthorization.create(scopes: sharing_scopes)
+    sharing_popup
   end
 
   def unshare
-    @search_sharing = search_scopes
-    @search_sharing.each do |s|
+    search_scopes.each do |s|
       s.destroy
     end
-    @search_sharing = search_scopes
-    render 'sharing_popup'
+    sharing_popup
   end
 
   protected
index 536518277f1ca86da6256af0887a5be9ee7bc560..08fb94d2f085d4d7eb777f81cea43d84f9f2dbcf 100644 (file)
@@ -1,4 +1,5 @@
 class JobsController < ApplicationController
+  include JobsHelper
 
   def generate_provenance(jobs)
     return if params['tab_pane'] != "Provenance"
@@ -56,6 +57,20 @@ class JobsController < ApplicationController
     super
   end
 
+  def logs
+    @logs = Log.select(%w(event_type object_uuid event_at properties))
+               .order('event_at DESC')
+               .filter([["event_type",  "=", "stderr"],
+                        ["object_uuid", "in", [@object.uuid]]])
+               .limit(500)
+               .results
+               .to_a
+               .map{ |e| e.serializable_hash.merge({ 'prepend' => true }) }
+    respond_to do |format|
+      format.json { render json: @logs }
+    end
+  end
+
   def index_pane_list
     if params[:uuid]
       %w(Recent Provenance)
index 3326527cc8912e9ea45dc72d1a5277d548e91550..25f5ee421c58dc860806b9a0f6b726e2e8816406 100644 (file)
@@ -86,7 +86,7 @@ class PipelineInstancesController < ApplicationController
                 obj = Collection.find value_info_value
                 if value_info_partitioned
                   value_info[:value] = obj.portable_data_hash + value_info_partitioned[1] + value_info_partitioned[2]
-                  value_info[:selection_name] = obj.name + value_info_partitioned[1] + value_info_partitioned[2]
+                  value_info[:selection_name] = obj.name ? obj.name + value_info_partitioned[1] + value_info_partitioned[2] : obj.name
                 else
                   value_info[:value] = obj.portable_data_hash
                   value_info[:selection_name] = obj.name
@@ -174,12 +174,9 @@ class PipelineInstancesController < ApplicationController
   end
 
   def show
-    @pipelines = [@object]
-
-    if params[:compare]
-      PipelineInstance.where(uuid: params[:compare]).each do |p|
-        @pipelines << p
-      end
+    # the #show action can also be called by #compare, which does its own work to set up @pipelines
+    unless defined? @pipelines
+      @pipelines = [@object]
     end
 
     provenance, pips = graph(@pipelines)
@@ -259,18 +256,7 @@ class PipelineInstancesController < ApplicationController
     end
 
     if params['tab_pane'] == "Graph"
-      provenance, pips = graph(@objects)
-
       @pipelines = @objects
-
-      if provenance
-        @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
-          :request => request,
-          :all_script_parameters => true,
-          :combine_jobs => :script_and_version,
-          :script_version_nodes => true,
-          :pips => pips }
-      end
     end
 
     @object = @objects.first
index 6f209a5a9e0d11fc0a1a0fbaab0b6fe8b47834cc..9e2ff1b00b7e38c31129a3694833023aaebbc825 100644 (file)
@@ -21,6 +21,8 @@ class SearchController < ApplicationController
   end
 
   def next_page_href with_params={}
-    super with_params.merge(last_object_class: @objects.last.class.to_s)
+    super with_params.merge(last_object_class: @objects.last.class.to_s,
+                            project_uuid: params[:project_uuid],
+                            filters: @filters.to_json)
   end
 end
index c3512e2e4d1028635cda1a51b1c09f2a2b63ade5..a62ba81b0941cf46870ddc7f27bf3b97692fd172 100644 (file)
@@ -1,7 +1,4 @@
 class VirtualMachinesController < ApplicationController
-  def index_pane_list
-    %w(recent help)
-  end
   def index
     @objects ||= model_class.all
     @vm_logins = {}
diff --git a/apps/workbench/app/helpers/api_client_authorizations_helper.rb b/apps/workbench/app/helpers/api_client_authorizations_helper.rb
deleted file mode 100644 (file)
index 98ddddc..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module ApiClientAuthorizationsHelper
-end
index c8c4ccc1a96a89facdbc580ab7f9c40eb7f80505..f1502afee10cf02edece673f9d05c26275ee3fcc 100644 (file)
@@ -307,64 +307,17 @@ module ApplicationHelper
       end
     end
 
-    if dataclass.andand.is_a?(Class)
-      datatype = 'select'
-    elsif dataclass == 'number'
-      datatype = 'number'
-    elsif attrvalue.is_a? Array
-      # TODO: find a way to edit arrays with x-editable
-      return attrvalue
-    elsif attrvalue.is_a? Fixnum or attrvalue.is_a? Float
+    if dataclass == 'number' or attrvalue.is_a? Fixnum or attrvalue.is_a? Float
       datatype = 'number'
     elsif attrvalue.is_a? String
       datatype = 'text'
+    elsif attrvalue.is_a?(Array) or dataclass.andand.is_a?(Class)
+      # TODO: find a way to edit with x-editable
+      return attrvalue
     end
 
-    # preload data
-    preload_uuids = []
-    items = []
-    selectables = []
-
-    attrtext = attrvalue
-    if dataclass.is_a? Class and dataclass < ArvadosBase
-      objects = get_n_objects_of_class dataclass, 10
-      objects.each do |item|
-        items << item
-        preload_uuids << item.uuid
-      end
-      if attrvalue and !attrvalue.empty?
-        preload_uuids << attrvalue
-      end
-      preload_links_for_objects preload_uuids
-
-      if attrvalue and !attrvalue.empty?
-        links_for_object(attrvalue).each do |link|
-          if link.link_class.in? ["tag", "identifier"]
-            attrtext += " [#{link.name}]"
-          end
-        end
-        selectables.append({name: attrtext, uuid: attrvalue, type: dataclass.to_s})
-      end
-      itemuuids = []
-      items.each do |item|
-        itemuuids << item.uuid
-        selectables.append({name: item.uuid, uuid: item.uuid, type: dataclass.to_s})
-      end
-
-      itemuuids.each do |itemuuid|
-        links_for_object(itemuuid).each do |link|
-          if link.link_class.in? ["tag", "identifier"]
-            selectables.each do |selectable|
-              if selectable['uuid'] == link.head_uuid
-                selectable['name'] += ' [' + link.name + ']'
-              end
-            end
-          end
-        end
-      end
-    end
-
-    lt = link_to attrtext, '#', {
+    # When datatype is a String or Fixnum, link_to the attrvalue
+    lt = link_to attrvalue, '#', {
       "data-emptytext" => "none",
       "data-placement" => "bottom",
       "data-type" => datatype,
@@ -379,16 +332,6 @@ module ApplicationHelper
       :id => id
     }.merge(htmloptions)
 
-    lt += raw("\n<script>")
-
-    if selectables.any?
-      lt += raw("add_form_selection_sources(#{selectables.to_json});\n")
-    end
-
-    lt += raw("$('[data-name=\"#{dn}\"]').editable({source: function() { return select_form_sources('#{dataclass}'); } });\n")
-
-    lt += raw("</script>")
-
     lt
   end
 
diff --git a/apps/workbench/app/helpers/authorized_keys_helper.rb b/apps/workbench/app/helpers/authorized_keys_helper.rb
deleted file mode 100644 (file)
index 9a486f2..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module AuthorizedKeysHelper
-end
diff --git a/apps/workbench/app/helpers/groups_helper.rb b/apps/workbench/app/helpers/groups_helper.rb
deleted file mode 100644 (file)
index c091b2f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module GroupsHelper
-end
diff --git a/apps/workbench/app/helpers/humans_helper.rb b/apps/workbench/app/helpers/humans_helper.rb
deleted file mode 100644 (file)
index ca84af0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module HumansHelper
-end
diff --git a/apps/workbench/app/helpers/job_tasks_helper.rb b/apps/workbench/app/helpers/job_tasks_helper.rb
deleted file mode 100644 (file)
index b08a1ae..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module JobTasksHelper
-end
diff --git a/apps/workbench/app/helpers/keep_disks_helper.rb b/apps/workbench/app/helpers/keep_disks_helper.rb
deleted file mode 100644 (file)
index 9cf6b4a..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module KeepDisksHelper
-end
diff --git a/apps/workbench/app/helpers/links_helper.rb b/apps/workbench/app/helpers/links_helper.rb
deleted file mode 100644 (file)
index f6bc988..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module LinksHelper
-end
diff --git a/apps/workbench/app/helpers/logs_helper.rb b/apps/workbench/app/helpers/logs_helper.rb
deleted file mode 100644 (file)
index 99736f0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module LogsHelper
-end
diff --git a/apps/workbench/app/helpers/nodes_helper.rb b/apps/workbench/app/helpers/nodes_helper.rb
deleted file mode 100644 (file)
index 673b561..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module NodesHelper
-end
diff --git a/apps/workbench/app/helpers/projects_helper.rb b/apps/workbench/app/helpers/projects_helper.rb
deleted file mode 100644 (file)
index db5c5ce..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module ProjectsHelper
-end
index e8850d5b47e6e41d10330a58b32baac52219b800..a4723a3ec16848d4eb2cd46dab2ea61d783a2bef 100644 (file)
@@ -133,7 +133,10 @@ module ProvenanceHelper
         gr += edge(uuid, job[:output], {label: "output" })
       end
 
-      gr += edge(uuid, job[:log], {label: "log"}) if job[:log] and !edge_opts[:no_log]
+      if job[:log] and !edge_opts[:no_log]
+        gr += describe_node(job[:log])
+        gr += edge(uuid, job[:log], {label: "log"})
+      end
 
       gr
     end
diff --git a/apps/workbench/app/helpers/repositories_helper.rb b/apps/workbench/app/helpers/repositories_helper.rb
deleted file mode 100644 (file)
index 2860b5a..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module RepositoriesHelper
-end
diff --git a/apps/workbench/app/helpers/sessions_helper.rb b/apps/workbench/app/helpers/sessions_helper.rb
deleted file mode 100644 (file)
index 309f8b2..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module SessionsHelper
-end
diff --git a/apps/workbench/app/helpers/specimens_helper.rb b/apps/workbench/app/helpers/specimens_helper.rb
deleted file mode 100644 (file)
index 8c30d97..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module SpecimensHelper
-end
diff --git a/apps/workbench/app/helpers/traits_helper.rb b/apps/workbench/app/helpers/traits_helper.rb
deleted file mode 100644 (file)
index a4260eb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module TraitsHelper
-end
diff --git a/apps/workbench/app/helpers/user_agreements_helper.rb b/apps/workbench/app/helpers/user_agreements_helper.rb
deleted file mode 100644 (file)
index ab8d3d3..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module UserAgreementsHelper
-end
diff --git a/apps/workbench/app/helpers/users_helper.rb b/apps/workbench/app/helpers/users_helper.rb
deleted file mode 100644 (file)
index 2310a24..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module UsersHelper
-end
diff --git a/apps/workbench/app/helpers/vcf_pipeline_helper.rb b/apps/workbench/app/helpers/vcf_pipeline_helper.rb
deleted file mode 100644 (file)
index 9a3d6e7..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-module VcfPipelineHelper
-  require 'csv'
-
-  def reset_vcf_pipeline_instance(pi, input_manifest)
-    params = {
-      'PICARD_ZIP' => '7a4073e29bfa87154b7102e75668c454+83+K@van',
-      'GATK_BUNDLE' => '0a37aaf212464efa2a77ff9ba51c0148+10524+K@van',
-      'GATK_TAR_BZ2' => '482ebab0408e173370c499f0b7c00878+93+K@van',
-      'BWA' => '73be5598809c66f260fedd253c8608bd+67+K@van',
-      'SAM' => '55d2115faa608eb95dab4f875b7511b1+72+K@van',
-      'REGION_PADDING' => '10',
-      'REGIONS' => 'e52c086f41c2f089d88ec2bbd45355d3+87+K@van/SeqCap_EZ_Exome_v2.hg19.bed',
-      'STAND_CALL_CONF' => '4.0',
-      'STAND_EMIT_CONF' => '4.0',
-      "bwa/INPUT" => input_manifest
-    }
-    pi.components = PipelineTemplate.find(pi.pipeline_uuid).components
-    pi.update_job_parameters(params)
-    pi.active = true
-    pi.success = nil
-  end
-
-  def vcf_pipeline_summary(pi)
-    stats = {}
-    collection_link = Link.
-      where(head_uuid: pi.uuid,
-            link_class: 'client-defined',
-            name: 'vcffarm-pipeline-invocation').
-      last
-    if collection_link
-      stats[:collection_uuid] = collection_link.tail_uuid
-    else
-      pi.components[:steps].each do |step|
-        if step[:name] == 'bwa'
-          step[:params].each do |param|
-            if param[:name] == 'INPUT'
-              stats[:collection_uuid] = param[:data_locator] || param[:value]
-              break
-            end
-          end
-        end
-      end
-    end
-    if stats[:collection_uuid]
-      Link.where(tail_uuid: stats[:collection_uuid],
-                 head_kind: Group)[0..0].each do |c2p|
-        stats[:project_uuid] = c2p.head_uuid
-        group = Group.find stats[:project_uuid]
-        stats[:project_name] = group.name rescue nil
-      end
-      Link.where(tail_uuid: stats[:collection_uuid],
-                 head_kind: Specimen)[0..0].each do |c2s|
-        stats[:specimen_uuid] = c2s.head_uuid
-        specimen = Specimen.find stats[:specimen_uuid]
-        stats[:specimen_id] = specimen.properties[:specimen_id] rescue nil
-      end
-    end
-    stats[:runtime] = {}
-    stats[:alignment_for_step] = {}
-    stats[:alignment] = {}
-    stats[:coverage] = []
-    pi.components[:steps].each do |step|
-      if step[:warehousejob]
-        if step[:name] == 'bwa' and step[:warehousejob][:starttime]
-          stats[:runtime][:started_at] = step[:warehousejob][:starttime]
-        end
-        if step[:warehousejob][:finishtime]
-          stats[:runtime][:finished_at] =
-            [ step[:warehousejob][:finishtime],
-              stats[:runtime][:finished_at] ].compact.max
-        end
-      end
-      if step[:name] == 'picard-casm' and
-          step[:complete] and
-          step[:output_data_locator]
-        tsv = IO.
-          popen("whget -r #{step[:output_data_locator]}/ -").
-          readlines.
-          collect { |x| x.strip.split "\t" }
-        casm = {}
-        head = []
-        tsv.each do |data|
-          if data.size < 4 or data[0].match /^\#/
-            next
-          elsif data[0] == 'CATEGORY' or data[1].match /[^\d\.]/
-            head = data
-          elsif data[0] == 'PAIR'
-            head.each_with_index do |name, index|
-              x = data[index]
-              if x and x.match /^\d+$/
-                x = x.to_i
-              elsif x and x.match /^\d+\.\d+$/
-                x = x.to_f
-              end
-              name = name.downcase.to_sym
-              casm[name] ||= []
-              casm[name] << x
-            end
-          end
-        end
-        stats[:picard_alignment_summary] = casm
-      end
-      if step[:name] == 'gatk-stats' and
-          step[:complete] and
-          step[:output_data_locator]
-        csv = IO.
-          popen("whget #{step[:output_data_locator]}/mincoverage_nlocus.csv").
-          readlines.
-          collect { |x| x.strip.split ',' }
-        csv.each do |depth, nlocus, percent|
-          stats[:coverage][depth.to_i] = nlocus.to_i
-        end
-      end
-      if step[:name] == 'gatk-realign' and
-          step[:complete] and
-          step[:output_data_locator]
-        logs = IO.
-          popen("whget #{step[:warehousejob][:metakey]}").
-          readlines.
-          collect(&:strip)
-        logs.each do |logline|
-          if (re = logline.match /\s(\d+) stderr INFO .* (\d+) reads were filtered out.*of (\d+) total/)
-            stats[:alignment_for_step][re[1]] ||= {}
-            stats[:alignment_for_step][re[1]][:filtered_reads] = re[2].to_i
-            stats[:alignment_for_step][re[1]][:total_reads] = re[3].to_i
-          elsif (re = logline.match /(\d+) reads.* failing BadMate/)
-            stats[:alignment][:bad_mate_reads] = re[1].to_i
-          elsif (re = logline.match /(\d+) reads.* failing MappingQualityZero/)
-            stats[:alignment][:mapq0_reads] = re[1].to_i
-          end
-        end
-      end
-      if step[:name] == 'gatk-merge-call' and
-          step[:complete] and
-          step[:output_data_locator]
-        stats[:vcf_file_name] = "#{stats[:project_name]}-#{stats[:specimen_id]}-#{step[:output_data_locator][0..31]}.vcf"
-        logs = IO.
-          popen("whget #{step[:warehousejob][:metakey]}").
-          readlines.
-          collect(&:strip)
-        logs.each do |logline|
-          if (re = logline.match /(\d+) reads were filtered out.*of (\d+) total/)
-            stats[:alignment][:filtered_reads] = re[1].to_i
-            stats[:alignment][:total_realigned_reads] = re[2].to_i
-          elsif (re = logline.match /(\d+) reads.* failing BadMate/)
-            stats[:alignment][:bad_mate_reads] = re[1].to_i
-          elsif (re = logline.match /(\d+) reads.* failing UnmappedRead/)
-            stats[:alignment][:unmapped_reads] = re[1].to_i
-          end
-        end
-
-        stats[:chromosome_calls] = {}
-        tsv = IO.
-          popen("whget #{step[:output_data_locator]}/merged.vcf | egrep -v '^#' | cut -f1 | uniq -c").
-          readlines.
-          collect { |x| x.strip.split }
-        tsv.each do |n_variants, sequence_name|
-          stats[:chromosome_calls][sequence_name] = n_variants.to_i
-        end
-
-        stats[:inferred_sex] = false
-        calls = stats[:chromosome_calls]
-        if calls['X'] and calls['X'] > 200
-          if !calls['Y']
-            stats[:inferred_sex] = 'female'
-          elsif calls['Y'] * 60 < calls['X']
-            # if Y < X/60 they are presumed to be misalignments
-            stats[:inferred_sex] = 'female'
-          elsif calls['Y'] * 25 > calls['X']
-            # if Y > X/25 we presume a Y chromosome was present
-            stats[:inferred_sex] = 'male'
-          end
-        end
-      end
-    end
-    stats[:alignment][:total_reads] = 0
-    stats[:alignment][:filtered_reads] ||= 0
-    stats[:alignment][:bad_mate_reads] ||= 0
-    stats[:alignment][:mapq0_reads] ||= 0
-    stats[:alignment_for_step].values.each do |a4s|
-      stats[:alignment][:total_reads] += (a4s[:total_reads] || 0)
-      stats[:alignment][:filtered_reads] += (a4s[:filtered_reads] || 0)
-      stats[:alignment][:bad_mate_reads] += (a4s[:bad_mate_reads] || 0)
-      stats[:alignment][:mapq0_reads] += (a4s[:mapq0_reads] || 0)
-    end
-
-    if stats[:collection_uuid]
-      csv = CSV.parse IO.
-        popen("whget #{stats[:collection_uuid]}/SampleSheet.csv -").
-        read
-      if !csv.empty?
-        pivoted = []
-        csv[0].each_with_index do |head, col|
-          pivoted << csv.collect { |row| row[col] }
-        end
-        stats[:source_data_csv_columns] = pivoted
-      end
-    end
-
-    picardas = stats[:picard_alignment_summary]
-    stats[:summary_csv_columns] =
-      [['PROJECT', stats[:project_name]],
-       ['SPECIMEN', stats[:specimen_id]],
-       ['VCF_FILE_NAME', stats[:vcf_file_name]],
-       ['INFERRED_SEX', stats[:inferred_sex]],
-       ['SOURCE_DATA', stats[:collection_uuid]],
-       ['PIPELINE_UUID', pi.pipeline_uuid],
-       ['PIPELINE_RUN_UUID', pi.uuid],
-       ['PIPELINE_RUN_START', (stats[:runtime][:started_at] rescue nil)],
-       ['PIPELINE_RUN_FINISH', (stats[:runtime][:finished_at] rescue nil)],
-       ['N_READS_RAW',
-        (n_raw = picardas[:total_reads].inject(0,:+) rescue nil)],
-       ['N_READS_MAPPED',
-        (n_mapped = picardas[:reads_aligned_in_pairs].inject(0,:+) rescue nil)],
-       ['PERCENT_READS_MAPPED',
-        (100.0 * n_mapped / n_raw rescue nil)],
-       ['N_READS_ON_TARGET',
-        (n_on_target = stats[:alignment][:total_reads] - stats[:alignment][:filtered_reads] rescue nil)],
-       ['PERCENT_READS_ON_TARGET',
-        (100.0 * n_on_target / n_raw rescue nil)],
-       ['PERCENT_TARGET_COVERAGE_1X',
-        (100.0 * stats[:coverage][1] / stats[:coverage][0] rescue nil)],
-       ['PERCENT_TARGET_COVERAGE_10X',
-        (100.0 * stats[:coverage][10] / stats[:coverage][0] rescue nil)],
-       ['PERCENT_TARGET_COVERAGE_20X',
-        (100.0 * stats[:coverage][20] / stats[:coverage][0] rescue nil)],
-       ['PERCENT_TARGET_COVERAGE_50X',
-        (100.0 * stats[:coverage][50] / stats[:coverage][0] rescue nil)],
-       ['PERCENT_TARGET_COVERAGE_100X',
-        (100.0 * stats[:coverage][100] / stats[:coverage][0] rescue nil)]]
-
-    stats
-  end
-end
diff --git a/apps/workbench/app/helpers/version_helper.rb b/apps/workbench/app/helpers/version_helper.rb
new file mode 100644 (file)
index 0000000..6cae78f
--- /dev/null
@@ -0,0 +1,30 @@
+module VersionHelper
+  # api_version returns the git commit hash for the API server's
+  # current version.  It is extracted from api_version_text, which
+  # returns the source_version provided by the discovery document and
+  # may have the word "-modified" appended to it (if the API server is
+  # running from a locally modified repository).
+
+  def api_version
+    api_version_text.sub(/[^[:xdigit:]].*/, '')
+  end
+
+  def api_version_text
+    arvados_api_client.discovery[:source_version]
+  end
+
+  # wb_version and wb_version_text provide the same strings for the
+  # code version that this Workbench is currently running.
+
+  def wb_version
+    Rails.configuration.source_version
+  end
+
+  def wb_version_text
+    wb_version + (Rails.configuration.local_modified or '')
+  end
+
+  def version_link_target version
+    "https://arvados.org/projects/arvados/repository/changes?rev=#{version}"
+  end
+end
diff --git a/apps/workbench/app/helpers/virtual_machines_helper.rb b/apps/workbench/app/helpers/virtual_machines_helper.rb
deleted file mode 100644 (file)
index cbb398d..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-module VirtualMachinesHelper
-end
index b5347dce00f24c64a388baac1dd9011935ab99c9..686b816c08e2343419ea6e44f0f983a05cb0d6ce 100644 (file)
@@ -90,20 +90,8 @@ class Collection < ArvadosBase
     end
   end
 
-  def portable_data_hash
-    if self[:portable_data_hash].nil?
-      return self[:uuid]
-    else
-      super
-    end
-  end
-
   def friendly_link_name lookup=nil
-    if self.respond_to? :name
-      self.name
-    else
-      self.portable_data_hash
-    end
+    name || portable_data_hash
   end
 
   def textile_attributes
index 600c6ab96fcfca4f8a5980746e9a7fca8ffa75e9..4de3c2330ed55407c6a01053906a3ddc37dc91b6 100644 (file)
@@ -1,25 +1,25 @@
-              <li>
-                <%= project_link_to.call({object: current_user, depth: 0}) do %>
-                  <span style="padding-left: 0"></span>Home
-                <% end %>
-              </li>
-              <% my_project_tree.each do |pnode| %>
-                <% next if pnode[:object].class != Group %>
-                <li>
-                  <%= project_link_to.call pnode do %>
-                    <span style="padding-left: <%= pnode[:depth] %>em"></span><%= pnode[:object].name %>
-                  <% end %>
-                </li>
-              <% end %>
-              <li class="divider" />
-              <li role="presentation" class="dropdown-header">
-                Projects shared with me
-              </li>
-              <% shared_project_tree.each do |pnode| %>
-                <% next if pnode[:object].class != Group %>
-                <li>
-                  <%= project_link_to.call pnode do %>
-                    <span style="padding-left: <%= pnode[:depth]-1 %>em"></span><i class="fa fa-fw fa-share-alt" style="color:#aaa"></i> <%= pnode[:object].name %>
-                  <% end %>
-                </li>
-              <% end %>
+<li>
+  <%= project_link_to.call({object: current_user, depth: 0}) do %>
+    <span style="padding-left: 0">Home</span>
+  <% end %>
+</li>
+<% my_project_tree.each do |pnode| %>
+  <% next if pnode[:object].class != Group %>
+  <li>
+    <%= project_link_to.call pnode do %>
+      <span style="padding-left: <%= pnode[:depth] %>em"></span><%= pnode[:object].name %>
+    <% end %>
+  </li>
+<% end %>
+<li class="divider" />
+<li role="presentation" class="dropdown-header">
+  Projects shared with me
+</li>
+<% shared_project_tree.each do |pnode| %>
+  <% next if pnode[:object].class != Group %>
+  <li>
+    <%= project_link_to.call pnode do %>
+      <span style="padding-left: <%= pnode[:depth]-1 %>em"></span><i class="fa fa-fw fa-share-alt" style="color:#aaa"></i> <%= pnode[:object].name %>
+    <% end %>
+  </li>
+<% end %>
index fe47fb08a5d0b355a83f90cd2f96abd9f3f7530c..315116bbc8926e1c8dfd9953a8bf32561b575090 100644 (file)
@@ -1,20 +1,20 @@
 <%
-  api_version = arvados_api_client.discovery[:source_version]
   generated_at = arvados_api_client.discovery[:generatedAt]
-  wb_version = Rails.configuration.source_version
-  wb_version += Rails.configuration.local_modified if Rails.configuration.local_modified
   arvados_base = Rails.configuration.arvados_v1_base
   support_email = Rails.configuration.support_email_address
 
+  api_version_link = link_to api_version_text, version_link_target(api_version)
+  wb_version_link = link_to wb_version_text, version_link_target(wb_version)
+
   additional_info = {}
   additional_info['Current location'] = params[:current_location]
   additional_info['User UUID'] = current_user.uuid if current_user
 
   additional_info_str = additional_info.map {|k,v| "#{k}=#{v}"}.join("\n")
 
-  additional_info['api_version'] = api_version
+  additional_info['api_version'] = api_version_text
   additional_info['generated_at'] = generated_at
-  additional_info['workbench_version'] = wb_version
+  additional_info['workbench_version'] = wb_version_text
   additional_info['arvados_base'] = arvados_base
   additional_info['support_email'] = support_email
   additional_info['error_message'] = params[:error_message] if params[:error_message]
         <div class="form-group">
           <label for="wb_version" class="col-sm-4 control-label"> Workbench version </label>
           <div class="col-sm-8">
-            <p class="form-control-static" name="wb_version"><%=wb_version%></p>
+            <p class="form-control-static" name="wb_version"><%= wb_version_link %></p>
           </div>
         </div>
 
         <div class="form-group">
           <label for="server_version" class="col-sm-4 control-label"> API version </label>
           <div class="col-sm-8">
-            <p class="form-control-static" name="server_version"><%=api_version%></p>
+            <p class="form-control-static" name="server_version"><%= api_version_link %></p>
           </div>
         </div>
 
index 6795c091fc332a3007e0c575e2f6fc42d2916aa2..73830ee0494b6e6039cdf3fb2a3c2c52c717f415 100644 (file)
@@ -3,14 +3,10 @@ $("#report-issue-modal-window .modal").modal('show');
 
 // Disable the submit button on modal loading
 $submit = $('#report-issue-submit');
-$submit.attr('disabled', true);
+$submit.prop('disabled', true);
 
 // capture events to enable submit button when applicable
 $('#report_issue_text').bind('input propertychange', function() {
   var problem_desc = document.forms["report-issue-form"]["report_issue_text"].value;
-  if (problem_desc === null || problem_desc === "") {
-    $submit.attr('disabled', true)
-  } else {
-    $submit.removeAttr('disabled');
-  }
+  $submit.prop('disabled', (problem_desc === null) || (problem_desc === ""));
 });
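
The attr()-to-prop() change above is the substance of this hunk: 'disabled' is a live DOM property, so prop('disabled', boolean) toggles the button's actual state in one call, whereas the attribute API needs a separate removeAttr() to re-enable. A minimal sketch of the difference (the element ids are illustrative, not from this commit):

    var $btn = $('#example-submit');

    // Property API: one boolean write covers both enable and disable.
    $btn.prop('disabled', $('#example-text').val() === '');

    // Attribute API (the old style): disabling and re-enabling are
    // two different calls.
    $btn.attr('disabled', true);
    $btn.removeAttr('disabled');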
diff --git a/apps/workbench/app/views/authorized_keys/_show_help.html.erb b/apps/workbench/app/views/authorized_keys/_show_help.html.erb
deleted file mode 100644 (file)
index 42fff3c..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-<p>
-  More information about how to log in to VMs:
-</p>
-<ul>
-  <li>
-    <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
-        "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
-        target: "_blank"%>.
-  </li>
-</ul>
index 25d8796b41889ac913953d75dfeb22a710d0f70f..5d2fe2cbd0667b20d7e740db4c0d3c6fa49b6409 100644 (file)
@@ -2,11 +2,16 @@
 
 <tr class="collection" data-object-uuid="<%= c.uuid %>">
   <td>
-    <% friendly_name = c.friendly_link_name  %>
-    <% @collection_info[c.uuid][:tag_links].each do |tag_link| %>
-      <% friendly_name += raw(" <span class='label label-info'>#{tag_link.name}</span>") %>
-    <% end %>
-    <%= render :partial => "selection_checkbox", :locals => {:object => c, :friendly_name => friendly_name} %>
+    <%=
+       friendly_name = c.friendly_link_name
+       @collection_info[c.uuid][:tag_links].each do |tag_link|
+         friendly_name += raw(" <span class='label label-info'>#{tag_link.name}</span>")
+       end
+       render partial: "selection_checkbox", locals: {
+         object: c,
+         friendly_name: friendly_name
+       }
+    %>
 
     <%= render :partial => "show_object_button", :locals => {object: c, size: 'xs'} %>
   </td>
index fc81e705e4b4096d4f18d34e96cd07e5a3f2a4ca..4050e3aee4d124c7f976ba9148ea7415b32637dd 100644 (file)
@@ -1,21 +1,17 @@
-<%# a nil @search_sharing means we got an AccessForbiddenException and should
-disable this feature entirely. %>
-<% if @search_sharing != nil %>
-  <% if @search_sharing.any? %>
-    <div>Shared at:
-      <span class="pull-right">
-        <%= link_to "Unshare", unshare_collection_url, {
-              class: 'btn-xs btn-info',
-              remote: true,
-              method: 'post'
-            } %></span>
-      <div class="smaller-text" style="word-break: break-all"><%= link_to download_link, download_link %></div>
-    </div>
-  <% else %>
-    <%= link_to "Create sharing link", share_collection_url, {
-          class: 'btn-xs btn-info',
-          remote: true,
-          method: 'post'
-        } %>
-  <% end %>
+<% button_attrs = {
+     class: 'btn btn-xs btn-info',
+     remote: true,
+     method: :post,
+   } %>
+<% if @search_sharing.nil? %>
+  <p>Your API token is not authorized to manage collection sharing links.</p>
+<% elsif @search_sharing.empty? %>
+  <%= button_to("Create sharing link", {action: "share"}, button_attrs) %>
+<% else %>
+  <div>
+    <% button_attrs[:class] += " pull-right" %>
+    <%= button_to("Unshare", {action: "unshare"}, button_attrs) %>
+    Shared at:
+    <div class="smaller-text" style="clear: both; word-break: break-all"><%= link_to download_link, download_link %></div>
+  </div>
 <% end %>
index 76d8731a9353e4ae55ae68734d176258e8f2b9ed..603dc34f4fc6ab038ad727b95688ee8e9a6831fe 100644 (file)
@@ -1,21 +1,10 @@
 <script>
-// The "each" loop in select_all_files() and unselect_all_files()
-// is needed because .trigger("change") does not play well with clippy.
-// Once clippy has been retired, we should be able to compress this
-// into .filter(":visible").prop("checked", true).trigger("change").
-//
 function select_all_files() {
-  $("#collection_files :checkbox").filter(":visible").each(
-    function() {
-      $(this).prop("checked", true).trigger("change");
-    });
+  $("#collection_files :checkbox").filter(":visible").prop("checked", true).trigger("change");
 }
 
 function unselect_all_files() {
-  $("#collection_files :checkbox").filter(":visible").each(
-    function() {
-      $(this).prop("checked", false).trigger("change");
-    });
+  $("#collection_files :checkbox").filter(":visible").prop("checked", false).trigger("change");
 }
 </script>
 
@@ -38,12 +27,12 @@ function unselect_all_files() {
         <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
         <ul class="dropdown-menu" role="menu">
           <li><%= link_to "Create new collection with selected files", '#',
+                  method: :post,
                   'data-href' => combine_selected_path(
                     action_data: {current_project_uuid: @object.owner_uuid}.to_json
                   ),
                   'data-selection-param-name' => 'selection[]',
                   'data-selection-action' => 'combine-collections',
-                  'method' => 'post',
                   'data-toggle' => 'dropdown'
             %></li>
         </ul>
@@ -75,7 +64,7 @@ function unselect_all_files() {
     <% if size.nil?  # This is a subdirectory. %>
       <% dirstack.push(File.join(dirname, filename)) %>
       <div class="collection_files_row">
-       <div class="collection_files_name><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
+       <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
       </div>
       <ul class="collection_files">
     <% else %>
@@ -107,12 +96,12 @@ function unselect_all_files() {
           <% end %>
       <% if CollectionsHelper::is_image(filename) %>
           <i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
-       </div>
         <div class="collection_files_inline">
           <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
                       link_params.merge(disposition: 'inline'),
                       {title: file_path}) %>
         </div>
+       </div>
       <% else %>
           <i class="fa fa-fw fa-file" href="<%=@object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
        </div>
index 6ebb3b2a28e67632b134959e612be7fa8c319637..39651cc419fcd6eb026ef11d299d122ecbf5625d 100644 (file)
@@ -5,10 +5,10 @@
         <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
         <ul class="dropdown-menu" role="menu">
           <li><%= link_to "Create new collection with selected collections", '#',
+                  method: :post,
                   'data-href' => combine_selected_path,
                   'data-selection-param-name' => 'selection[]',
                   'data-selection-action' => 'combine-collections',
-                  'method' => 'post',
                   'data-toggle' => 'dropdown'
             %></li>
         </ul>
diff --git a/apps/workbench/app/views/collections/_show_upload.html.erb b/apps/workbench/app/views/collections/_show_upload.html.erb
new file mode 100644 (file)
index 0000000..40bef52
--- /dev/null
@@ -0,0 +1,66 @@
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+     ></div>
+<div ng-cloak ng-controller="UploadToCollection" arv-uuid="<%= @object.uuid %>">
+  <div class="panel panel-primary">
+    <div class="panel-body">
+      <div class="row">
+        <div class="col-sm-4">
+          <input type="file" multiple id="file_selector" ng-model="incoming" onchange="angular.element(this).scope().addFilesToQueue(this.files); $(this).val('');">
+          <div class="btn-group btn-group-sm" role="group" style="margin-top: 1.5em">
+            <button type="button" class="btn btn-default" ng-click="stop()" ng-disabled="uploader.state !== 'Running'"><i class="fa fa-fw fa-pause"></i> Pause</button>
+            <button type="button" class="btn btn-primary" ng-click="go()" ng-disabled="uploader.state === 'Running' || countInStates(['Paused', 'Queued']) === 0"><i class="fa fa-fw fa-play"></i> Start</button>
+          </div>
+        </div>
+        <div class="col-sm-8">
+          <div ng-show="uploader.state === 'Running'"
+               class="alert alert-info"
+               ><i class="fa fa-gear"></i>
+            Upload in progress.
+            <span ng-show="countInStates(['Done']) > 0">
+              {{countInStates(['Done'])}} file{{countInStates(['Done'])>1?'s':''}} finished.
+            </span>
+          </div>
+          <div ng-show="uploader.state === 'Idle' && uploader.stateReason"
+               class="alert alert-success"
+               ><i class="fa fa-fw fa-flag-checkered"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Failed'"
+               class="alert alert-danger"
+               ><i class="fa fa-fw fa-warning"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Stopped'"
+               class="alert alert-info"
+               ><i class="fa fa-fw fa-info"></i> &nbsp; Paused. Click the Start button to resume uploading.
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+  <div ng-repeat="upload in uploadQueue" class="row" ng-class="{lighten: upload.state==='Done'}">
+    <div class="col-sm-1">
+      <button class="btn btn-xs btn-default"
+              ng-show="upload.state!=='Done'"
+              ng-click="removeFileFromQueue($index)"
+              title="cancel"><i class="fa fa-fw fa-times"></i></button>
+      <span class="label label-success label-info"
+            ng-show="upload.state==='Done'">finished</span>
+    </div>
+    <div class="col-sm-4 nowrap" style="overflow-x:hidden;text-overflow:ellipsis">
+      <span title="{{upload.file.name}}">
+        {{upload.file.name}}
+      </span>
+    </div>
+    <div class="col-sm-1" style="text-align: right">
+      {{upload.file.size/1024 | number:0}}&nbsp;KiB
+    </div>
+    <div class="col-sm-2">
+      <div class="progress">
+        <span class="progress-bar" style="width: {{upload.progress}}%"></span>
+      </div>
+    </div>
+    <div class="col-sm-4" ng-class="{lighten: upload.state !== 'Uploading'}">
+      {{upload.statistics}}
+    </div>
+  </div>
+</div>
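
Note: the throttle arithmetic in the comment near the top of this new file
checks out; "1001 nights" in milliseconds is

    1001 * 24 * 60 * 60 * 1000   # => 86486400000

which effectively disables auto-refresh for this panel.
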
index 8082d6f5a44f42d21fe07830c2978df45c5e3961..315c8c1831242faedef0c0a1a10b1991f88e06c7 100644 (file)
@@ -1,11 +1,13 @@
 <% if !@object.log %>
 
-<% log_history = stderr_log_history([@object.uuid]) %>
+<div id="log_graph_div"
+     class="arv-log-event-listener"
+     data-object-uuid="<%= @object.uuid %>"></div>
 
 <div id="event_log_div"
      class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
      data-object-uuid="<%= @object.uuid %>"
-     ><%= log_history.join("\n") %></div>
+     ></div>
 
 <%# Applying a long throttle suppresses the auto-refresh of this
     partial that would normally be triggered by arv-log-event. %>
@@ -56,7 +58,7 @@ var makeFilter = function() {
   });
 }
 
-<% if @object.log %>
+<% if @object.log and !@object.log.empty? %>
   <% logcollection = Collection.find @object.log %>
   <% if logcollection %>
     log_size = <%= logcollection.files[0][2] %>
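
Note: the added .empty? guard matters because Ruby treats only nil and
false as falsey, so a job whose log attribute is the empty string would
still take a bare "if @object.log" branch. A minimal illustration:

    log = ""
    puts "truthy"  if log                 # prints: "" is truthy in Ruby
    puts "has log" if log && !log.empty?  # correctly skipped
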
index 56177ae6e57cef33c7c9b959a8287eb0f96a5008..2cf3291c5c7173c3072c942ae2c9529f4a5ced10 100644 (file)
@@ -1,8 +1,5 @@
 <% unless @histogram_pretty_date.nil? %>
   <% content_for :tab_panes do %>
-  <%# We use protocol-relative paths here to avoid browsers refusing to load javascript over http in a page that was loaded over https. %>
-  <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/raphael/2.1.2/raphael-min.js' %>
-  <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/morris.js/0.4.3/morris.min.js' %>
   <script type="text/javascript">
     $(document).ready(function(){
       $.renderHistogram(<%= raw @cache_age_histogram.to_json %>);
index 324714e5346efe574fa6e74465c7fdeb2827b9ec..cdc47c17169401995a24a77c9e2a8cdcbc46d72f 100644 (file)
@@ -1,5 +1,5 @@
 <!DOCTYPE html>
-<html>
+<html ng-app="Workbench">
 <head>
   <meta charset="utf-8">
   <title>
@@ -23,6 +23,8 @@
   <%= csrf_meta_tags %>
   <%= yield :head %>
   <%= javascript_tag do %>
+    angular.module('Arvados').value('arvadosApiToken', '<%=Thread.current[:arvados_api_token]%>');
+    angular.module('Arvados').value('arvadosDiscoveryUri', '<%= Rails.configuration.arvados_v1_base.sub '/arvados/v1', '/discovery/v1/apis/arvados/v1/rest' %>');
   <%= yield :js %>
   <% end %>
   <style>
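
Note: the discovery URI handed to Angular is derived from the configured
API base by plain string substitution. Assuming arvados_v1_base holds the
usual endpoint shape, the transformation is:

    base = "https://api.example.org/arvados/v1"   # hypothetical value
    base.sub('/arvados/v1', '/discovery/v1/apis/arvados/v1/rest')
    # => "https://api.example.org/discovery/v1/apis/arvados/v1/rest"
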
index 9e966e148a9de3e54254259a7d1e911a35aaaaf7..824e370c582d6ad2ea80f12ac8b342bb099a2464 100644 (file)
                 <%= current_user.email %> <span class="caret"></span>
               </a>
               <ul class="dropdown-menu" role="menu">
-                <li role="presentation" class="dropdown-header">
-                  My account
-                </li>
                 <% if current_user.is_active %>
                 <li role="menuitem"><a href="/manage_account" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage account</a></li>
                 <% if Rails.configuration.user_profile_form_fields %>
                   <li role="menuitem"><a href="/users/<%=current_user.uuid%>/profile" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage profile</a></li>
                 <% end %>
-                <li role="presentation" class="divider"></li>
                 <% end %>
                 <li role="menuitem"><a href="<%= logout_path %>" role="menuitem"><i class="fa fa-sign-out fa-fw"></i> Log out</a></li>
                 <% if current_user.is_active and
               Projects
               <span class="caret"></span>
             </a>
-            <ul class="dropdown-menu" role="menu">
-            <%= render partial: "projects_tree_menu", locals: {
+            <ul class="dropdown-menu" style="min-width: 20em" role="menu">
+              <li>
+                <%= link_to projects_path(options: {ensure_unique_name: true}), method: :post, class: 'btn btn-xs btn-default pull-right' do %>
+                  <i class="fa fa-plus"></i> Add a new project
+                <% end %>
+              </li>
+              <%= render partial: "projects_tree_menu", locals: {
                   :project_link_to => Proc.new do |pnode, &block|
                     link_to(project_path(pnode[:object].uuid),
                       data: { 'object-uuid' => pnode[:object].uuid,
                               'name' => 'name' },
                       &block)
                   end,
-                  :top_button => Proc.new do %>
-                    <% link_to projects_path, method: 'post', class: 'btn btn-xs btn-default pull-right' do %>
-                      <i class="fa fa-plus"></i> New project
-                    <% end %>
-                  <% end %>
-            <% } %>
+              } %>
             </ul>
           </li>
           <% if @name_link or @object %>
index 1dedb4dc17ee94f84bd7bc16393472573cc4b7c9..989db3daff21d66de26ca13477bd2f5520c0de0a 100644 (file)
@@ -1,20 +1,6 @@
-   <%= image_tag "dax.png", class: "dax" %> 
+   <%= image_tag "dax.png", class: "dax" %>
     <div>
-      Hi, I noticed that you have not yet set up an SSH public key for use with Arvados.  
-      <%= link_to "Click here to learn about SSH keys in Arvados.",
-         "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html", 
-         style: "font-weight: bold",
-         target: "_blank" %>
-      When you have an SSH key you would like to use, paste the SSH public key
-      in the text box.
+      Hi, I noticed that you have not yet set up an SSH public key for use with Arvados.
+      <%= link_to "Click here to set up an SSH public key for use with Arvados.",
+      "/manage_account", style: "font-weight: bold" %>
     </div>
-    <%= form_for AuthorizedKey.new, remote: true do |f| %>
-      <div class="row-fluid">
-          <%= hidden_field_tag :return_to, request.original_url %>
-          <%= hidden_field_tag :disable_element, 'input[type=submit]' %>
-          <%= f.text_area :public_key, rows: 4, placeholder: "Paste your public key here", style: "width: 100%" %>
-      </div>
-      <div class="row-fluid" style="padding-top: 0; padding-bottom: 15px">
-          <%= f.submit :Save, value: raw("&check;"), class: "btn btn-primary pull-right" %>
-      </div>
-<% end %>
index 9176652ca34a9a43ad59fda8dd3f8b9941966c0f..c916ee95306ea9c1cc752f32e7b0d0031bf6f499 100644 (file)
@@ -4,9 +4,9 @@
     <div class="container-fluid">
       <div class="row-fluid">
         <%# column offset 0 %>
-        <div class="col-md-3">
+        <div class="col-md-3" style="word-break:break-all;">
           <h4 class="panel-title">
-            <a data-toggle="collapse" href="#collapse<%= i %>" style="white-space: nowrap;">
+            <a data-toggle="collapse" href="#collapse<%= i %>">
               <%= pj[:name] %> <span class="caret"></span>
             </a>
           </h4>
index e1a7cea0ada087f0f63ef77f5783aaa4c2cba30b..7d1fd39f7681a77754e0a7ca187b42f22e88330b 100644 (file)
@@ -1,4 +1,4 @@
-<%= form_tag do |f| %>
+<%= form_tag({}, {id: "comparedInstances"}) do |f| %>
 
 <table class="table table-condensed table-fixedlayout arv-recent-pipeline-instances">
   <colgroup>
index e538815b228adb84c6264cc2153c0b25eacd0722..1181b3e467978ed108cdb0d6704311c2cf5cf9e8 100644 (file)
@@ -9,8 +9,7 @@
   </div>
 
   <%= form_tag({action: 'compare', controller: params[:controller], method: 'get'}, {method: 'get', id: 'compare', class: 'pull-right small-form-margin'}) do |f| %>
-  <%= submit_tag 'Compare 2 or 3 selected', {class: 'btn btn-primary', disabled: true, style: 'display: none'} %>
-  &nbsp;
+    <%= submit_tag 'Compare 2 or 3 selected', {class: 'btn btn-primary', disabled: true} %>
   <% end rescue nil %>
 
 <% end %>
index 252b93ad45664ac041c7016ab6b82c09bcbf4ba2..a1749a7de91303d3e4d2db5f905dd942aef4b0e3 100644 (file)
@@ -38,7 +38,7 @@
                                                    'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (ob.name.andand.size.andand>0 ? " using the pipeline template *#{ob.name}*" : ""),
                                                    'success' => 'redirect-to-created-object'
                                                   }.to_json),
-                { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: 'get' }
+                { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: :get }
             ) do %>
                <i class="fa fa-fw fa-play"></i> Run
               <% end %>
index 02ebd6d9cd09d5aecdd59338714932e8d8f16067..0faa48fed3091ce825e993989b25d62e2a63e619 100644 (file)
@@ -10,7 +10,7 @@
                                                    'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (@object.name.andand.size.andand>0 ? " using the pipeline template *#{@object.name}*" : ""),
                                                    'success' => 'redirect-to-created-object'
                                                   }.to_json),
-                { class: "btn btn-primary btn-sm", remote: true, method: 'get', title: 'Run this pipeline' }
+                { class: "btn btn-primary btn-sm", remote: true, title: 'Run this pipeline' }
                ) do %>
                    <i class="fa fa-gear"></i> Run this pipeline
                  <% end %>
index da5871cb7a662f3018434830cf6b922640750a6a..527dc643e545123b8f1194ac254015882d29fd26 100644 (file)
@@ -38,7 +38,7 @@
   <div>    
   </div>
 <% else %>
-  There are currently no jobs in the queue.
+  There are currently no jobs in your queue.
 <% end %>
 
 <h4>Node status</h4>
index 0d71f947c758aca3829326ef4230f280a529fbd3..1fbe5057be0a9cf528553eda276297770d437010 100644 (file)
@@ -5,13 +5,13 @@
         <div class="panel-heading"><span class="panel-title">Active pipelines</span>
           <span class="pull-right">
     <%= link_to(
-         choose_pipeline_templates_path(
-           title: 'Choose a pipeline to run:',
-           action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
-           action_href: pipeline_instances_path,
-           action_method: 'post',
-           action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
-         { class: "btn btn-primary btn-xs", remote: true, method: 'get' }) do %>
+          choose_pipeline_templates_path(
+            title: 'Choose a pipeline to run:',
+            action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+            action_href: pipeline_instances_path,
+            action_method: 'post',
+            action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-xs", remote: true }) do %>
       <i class="fa fa-fw fa-gear"></i> Run a pipeline...
     <% end %>
     </span>
index cc862c425c168e5ab61e4a12827bed2df517adbc..480f401f7ad2750bb35e0499ca68502932a79839 100644 (file)
@@ -45,7 +45,7 @@
       action_href: share_with_project_path,
       action_name: 'Add',
       action_data: {selection_param: 'uuids[]', success: 'tab-refresh'}.to_json),
-      class: "btn btn-primary btn-sm", remote: true, method: 'get') do %>
+      class: "btn btn-primary btn-sm", remote: true) do %>
   <i class="fa fa-fw fa-plus"></i> Share with <%= share_class %>&hellip;
   <% end %>
 
index 1ef7fbd5979db07c28fc5f0bd20c2c1878df7257..fe9595a527b20dc96f038e745550052bf50e0d0c 100644 (file)
@@ -3,16 +3,16 @@
   <div class="row">
     <div class="col-sm-5">
       <div class="btn-group btn-group-sm">
-        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection <span class="caret"></span></button>
         <ul class="dropdown-menu" role="menu">
           <li><%= link_to "Create new collection with selected collections", '#',
                   'data-href' => combine_selected_path(
                     action_data: {current_project_uuid: @object.uuid}.to_json
                   ),
                   'id' => 'combine_selections_button',
+                  method: :post,
                   'data-selection-param-name' => 'selection[]',
                   'data-selection-action' => 'combine-project-contents',
-                  'method' => 'post',
                   'data-toggle' => 'dropdown'
             %></li>
           <li><%= link_to "Compare selected", 'action',
@@ -20,7 +20,7 @@
                   'data-selection-param-name' => 'uuids[]',
                   'data-selection-action' => 'compare'
             %></li>
-          <li><%= link_to "Copy selected", '#',
+          <li><%= link_to "Copy selected...", '#',
                   'data-href' => choose_projects_path(
                     title: 'Copy selected items to...',
                     editable: true,
@@ -36,7 +36,8 @@
                   'data-selection-param-name' => 'selection[]',
                   'data-selection-action' => 'copy'
             %></li>
-          <li><%= link_to "Move selected", '#',
+          <% if @object.editable? %>
+          <li><%= link_to "Move selected...", '#',
                   'data-href' => choose_projects_path(
                     title: 'Move selected items to...',
                     editable: true,
                   'data-selection-action' => 'move'
             %></li>
           <li><%= link_to "Remove selected", '#',
+                  method: :delete,
                   'data-href' => url_for(action: :remove_items),
                   'data-selection-param-name' => 'item_uuids[]',
                   'data-selection-action' => 'remove',
                   'data-remote' => true,
-                  'method' => 'delete',
                   'data-toggle' => 'dropdown'
             %></li>
+          <% end %>
         </ul>
       </div>
     </div>
index 0429f33b4102920ab424f13a887321af9dd4f7f7..0cab11797c72c978b06c5e742aa9c81abb48ba95 100644 (file)
@@ -6,28 +6,42 @@
 
 <% content_for :tab_line_buttons do %>
   <% if @object.editable? %>
+    <div class="btn-group btn-group-sm">
+      <button type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown"><i class="fa fa-fw fa-plus"></i> Add data <span class="caret"></span></button>
+      <ul class="dropdown-menu pull-right" role="menu">
+        <li>
+          <%= link_to(
+                choose_collections_path(
+                  title: 'Choose a collection to copy into this project:',
+                  multiple: true,
+                  action_name: 'Copy',
+                  action_href: actions_path(id: @object.uuid),
+                  action_method: 'post',
+                  action_data: {selection_param: 'selection[]', copy_selections_into_project: @object.uuid, success: 'page-refresh'}.to_json),
+                { remote: true, data: {'event-after-select' => 'page-refresh', 'toggle' => 'dropdown'} }) do %>
+            <i class="fa fa-fw fa-clipboard"></i> Copy data from another project
+          <% end %>
+        </li>
+        <li>
+          <%= link_to(collections_path(options: {ensure_unique_name: true}, collection: {manifest_text: "", name: "New collection", owner_uuid: @object.uuid}, redirect_to_anchor: 'Upload'), {
+              method: 'post',
+              data: {toggle: 'dropdown'}}) do %>
+            <i class="fa fa-fw fa-upload"></i> Upload files from my computer
+          <% end %>
+        </li>
+      </ul>
+    </div>
     <%= link_to(
-         choose_collections_path(
-           title: 'Add data to project:',
-           multiple: true,
-           action_name: 'Add',
-           action_href: actions_path(id: @object.uuid),
-           action_method: 'post',
-           action_data: {selection_param: 'selection[]', copy_selections_into_project: @object.uuid, success: 'page-refresh'}.to_json),
-         { class: "btn btn-primary btn-sm", remote: true, method: 'get', title: "Add data to this project", data: {'event-after-select' => 'page-refresh'} }) do %>
-      <i class="fa fa-fw fa-plus"></i> Add data...
-    <% end %>
-    <%= link_to(
-         choose_pipeline_templates_path(
-           title: 'Choose a pipeline to run:',
-           action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
-           action_href: pipeline_instances_path,
-           action_method: 'post',
-           action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
-         { class: "btn btn-primary btn-sm", remote: true, method: 'get', title: "Run a pipeline in this project" }) do %>
+          choose_pipeline_templates_path(
+            title: 'Choose a pipeline to run:',
+            action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+            action_href: pipeline_instances_path,
+            action_method: 'post',
+            action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline in this project" }) do %>
       <i class="fa fa-fw fa-gear"></i> Run a pipeline...
     <% end %>
-    <%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: 'post', title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
+    <%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: :post, title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
       <i class="fa fa-fw fa-plus"></i>
       Add a subproject
     <% end %>
index d73ccffef8ddde3d6961975f3b64bf9cc4e8901f..4c80cafd8dfb22447dfda4aab20e9287c58d341b 100644 (file)
@@ -9,7 +9,7 @@ User agreements
 <% end %>
 <% end %>
 
-<%= form_for(unsigned_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}, method: 'post'}) do |f| %>
+<%= form_for(unsigned_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}, method: :post}) do |f| %>
 <%= hidden_field_tag :return_to, request.url %>
 <div id="open_user_agreement">
   <div class="alert alert-info">
index efa8cae5c98f7356b92216fb0c71d1d5d0b16811..98f54ef252ed97b61ec5b9b66cf7e2449e2e4e14 100644 (file)
@@ -14,9 +14,9 @@
 
       <div class="modal-body">
         <div> <%= link_to "Click here to learn about SSH keys in Arvados.",
-                 "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html", 
-                 style: "font-weight: bold",
-                 target: "_blank" %>
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>
         </div>
         <div class="form-group">
           <label for="public_key">Public Key</label>
index 17b64b27f9f7c7d74141bc55ea52de7ba3b080ff..1ea8f0bf87f698a5199cd357163c7f07df973796 100644 (file)
@@ -2,9 +2,9 @@
   <% if !@my_ssh_keys.any? %>
      <p> You have not yet set up an SSH public key for use with Arvados. </p>
      <p>  <%= link_to "Click here",
-                 "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html", 
-                 style: "font-weight: bold",
-                 target: "_blank" %>  to learn about SSH keys in Arvados.
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>  to learn about SSH keys in Arvados.
      </p>
      <p> When you have an SSH key you would like to use, add it using the <b>Add</b> button. </p>
   <% else %>
@@ -43,7 +43,7 @@
               <% end %>
             </td>
             <td>
-              <%= link_to(authorized_key_path(id: key[:uuid]), method: 'delete', class: 'btn btn-sm', data: {confirm: "Really delete key?"}) do %>
+              <%= link_to(authorized_key_path(id: key[:uuid]), method: :delete, class: 'btn btn-sm', data: {confirm: "Really delete key?"}) do %>
                   <i class="fa fa-fw fa-trash-o"></i>
               <% end %>
             </td>
index 262dfa06876cf72cccab68317cd92d8779c340a7..a34d7e6949ba1df3f9073896db5d2e61a67a9be8 100644 (file)
@@ -83,7 +83,7 @@
                 <small>group&rarr;user</small>
               </label>
               <label class="checkbox-inline">
-                <%= group.name || '(unnamed)' %> <span class="deemphasize">(owned by <%= User.find(group.owner_uuid).andand.full_name %>)</span>
+                <%= group.name || '(unnamed)' %> <span class="deemphasize">(owned by <%= User.find?(group.owner_uuid).andand.full_name %>)</span>
               </label>
             </div>
           <% end.empty? and begin %>
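
Note: User.find is swapped for User.find? so the page still renders when a
group's owner is not readable by the current user. A hedged sketch of the
difference, assuming find? is the non-raising lookup variant:

    User.find(group.owner_uuid)    # raises when the record is missing or unreadable
    User.find?(group.owner_uuid)   # returns nil instead (assumed), so the
                                   # .andand.full_name chain degrades to nil
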
index 45ca939281431a2aaee8301a80d489d77771e2a0..b0f57530de9d054756b2f238edffe9b68fd356d3 100644 (file)
@@ -1,7 +1,7 @@
 <% if current_user.andand.is_active %>
   <div>
     <strong>Recent jobs</strong>
-    <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
+    <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
     <%= link_to raw("Show all jobs &rarr;"), jobs_path, class: 'pull-right' %>
     <% if not current_user.andand.is_active or @my_jobs.empty? %>
       <p>(None)</p>
 
 <div>
   <strong>Recent pipeline instances</strong>
-  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
   <%= link_to raw("Show all pipeline instances &rarr;"), pipeline_instances_path, class: 'pull-right' %>
   <% if not current_user.andand.is_active or @my_pipelines.empty? %>
     <p>(None)</p>
 
 <div>
   <strong>Recent collections</strong>
-  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
   <%= link_to raw("Show all collections &rarr;"), collections_path, class: 'pull-right' %>
   <div class="pull-right" style="padding-right: 1em; width: 30%;">
     <%= form_tag collections_path,
index 5671cc2d9c87d595d0b499382af5db5fa05d914e..b1aa796cd14642aea9fa7414779fe5e58064fcb3 100644 (file)
@@ -9,7 +9,7 @@ var email_value = document.forms["setup_form"]["email"].value;
 var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
 if ((email_disabled == false) && (email_value == null || email_value == "" ||
         prefix_value == null || prefix_value == "")) {
-  $register.attr('disabled', true);
+  $register.prop('disabled', true);
 }
 
 // capture events to enable submit button when applicable
@@ -22,7 +22,7 @@ $input.on('keyup paste mouseleave', function() {
 
   var emailRegExp = /^([\w-\.]+@([\w-]+\.)+[\w-]{2,4})?$/;
   var validEmail = false;
+
   if (emailRegExp.test(email_value )) {
     validEmail = true;
   }
@@ -32,7 +32,7 @@ $input.on('keyup paste mouseleave', function() {
     trigger = true;
   }
 
-  trigger ? $register.attr('disabled', true) : $register.removeAttr('disabled');
+  $register.prop('disabled', trigger);
 });
 
 // reset form input fields, for the next time around
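
Note: switching from .attr('disabled', ...) to .prop('disabled', ...) is
the idiomatic jQuery fix here: since jQuery 1.6, .attr() deals with the
HTML attribute while .prop() sets the live DOM property that actually
enables or disables the control, and passing the boolean in directly also
makes the old ternary unnecessary.
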
index 8d5c6160a526ee1bcd90a5f6380638c5e144c361..dd306414b4cf80ce03d2f187c70b9ae2ace6613c 100644 (file)
@@ -27,6 +27,13 @@ diagnostics:
       template_uuid: zzzzz-p5p6p-1xbobfobk94ppbv
       input_paths: [zzzzz-4zz18-nz98douzhaa3jh2, zzzzz-4zz18-gpw9o5wpcti3nib]
 
+# Below is a sample configuration for performance testing.
+# Set the Workbench URL in "arvados_workbench_url" and the
+# test user's API token in "user_token".
+performance:
+  arvados_workbench_url: https://localhost:3031
+  user_token: eu33jurqntstmwo05h1jr3eblmi961e802703y6657s8zb14r
+
 development:
   cache_classes: false
   eager_load: true
@@ -38,7 +45,7 @@ development:
   assets.debug: true
   profiling_enabled: true
   site_name: Arvados Workbench (dev)
-  local_modified: <%= '-modified' if `git status -s` %>
+  local_modified: "<%= '-modified' if `git status -s` != '' %>"
 
   # API server configuration
   arvados_login_base: ~
@@ -170,7 +177,7 @@ common:
   user_profile_form_message: Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.
 
   # source_version
-  source_version: "<%= `git log -n 1 --format=%h` %>"
+  source_version: "<%= `git log -n 1 --format=%h`.strip %>"
   local_modified: false
 
   # report notification to and from addresses
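
Note: both config fixes above address Ruby backtick semantics. Backticks
return a String, and any String (including "") is truthy, so the old
'-modified' check marked every build as modified; the new code compares
against the empty string. Likewise, command output keeps its trailing
newline, hence the added .strip. A minimal illustration:

    `git status -s`                        # => "" in a clean tree, still truthy
    '-modified' if `git status -s` != ''   # only set when the tree is dirty
    `git log -n 1 --format=%h`.strip       # drop the trailing newline
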
index dd7669cb281555304f789cc7971281ba1b93d9cc..34a3224cfc2526d936cda60a6c3882dbc760d2a6 100644 (file)
@@ -30,3 +30,10 @@ diagnostics:
   database: db/diagnostics.sqlite3
   pool: 5
   timeout: 5000
+
+# Note: The "performance" database configuration is not actually used.
+performance:
+  adapter: sqlite3
+  database: db/diagnostics.sqlite3
+  pool: 5
+  timeout: 5000
index 9f9696facecac5be44dc62f7506e2cb371985f44..86cdc38171263f25d8ecd34f9b7cdc009bcc4691 100644 (file)
@@ -22,6 +22,7 @@ ArvadosWorkbench::Application.routes.draw do
   resources :job_tasks
   resources :jobs do
     post 'cancel', :on => :member
+    get 'logs', :on => :member
   end
   match '/logout' => 'sessions#destroy', via: [:get, :post]
   get '/logged_out' => 'sessions#index'
similarity index 98%
rename from apps/workbench/test/functional/application_controller_test.rb
rename to apps/workbench/test/controllers/application_controller_test.rb
index c2828020bdca966400301c68b486b505a20c20ff..d0d9c5dfd142357a7c024a57f0e0bce10cbd4efb 100644 (file)
@@ -1,6 +1,10 @@
 require 'test_helper'
 
 class ApplicationControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
 
   setup do
     @user_dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('users')['active']['uuid'])
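
Note: reset_api_fixtures acts as a class-level switch that trades test
isolation for speed on read-only suites. A hypothetical sketch of the
mechanism, assuming the helper records per-class flags that the suite's
teardown hooks consult (the real logic lives in test_helper.rb and may
differ):

    def self.reset_api_fixtures(when_to_reset, flag)
      @reset_api_fixtures ||= {}
      @reset_api_fixtures[when_to_reset] = flag
    end
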
index a5c6033616a4a59fbf66a586b556af4af15579c4..14db674292c666b1558f27a809b66f6569840174 100644 (file)
@@ -1,8 +1,255 @@
 require 'test_helper'
 
 class CollectionsControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
   include PipelineInstancesHelper
 
+  NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
+
+  def stub_file_content
+    # For the duration of the current test case, stub file download
+    # content with a randomized (but recognizable) string. Return the
+    # string so the test case can use it in assertions.
+    txt = 'the quick brown fox ' + rand(2**32).to_s
+    @controller.stubs(:file_enumerator).returns([txt])
+    txt
+  end
+
+  def collection_params(collection_name, file_name=nil)
+    uuid = api_fixture('collections')[collection_name.to_s]['uuid']
+    params = {uuid: uuid, id: uuid}
+    params[:file] = file_name if file_name
+    params
+  end
+
+  def assert_hash_includes(actual_hash, expected_hash, msg=nil)
+    expected_hash.each do |key, value|
+      assert_equal(value, actual_hash[key], msg)
+    end
+  end
+
+  def assert_no_session
+    assert_hash_includes(session, {arvados_api_token: nil},
+                         "session includes unexpected API token")
+  end
+
+  def assert_session_for_auth(client_auth)
+    api_token =
+      api_fixture('api_client_authorizations')[client_auth.to_s]['api_token']
+    assert_hash_includes(session, {arvados_api_token: api_token},
+                         "session token does not belong to #{client_auth}")
+  end
+
+  def show_collection(params, session={}, response=:success)
+    params = collection_params(params) if not params.is_a? Hash
+    session = session_for(session) if not session.is_a? Hash
+    get(:show, params, session)
+    assert_response response
+  end
+
+  test "viewing a collection" do
+    show_collection(:foo_file, :active)
+    assert_equal([['.', 'foo', 3]], assigns(:object).files)
+  end
+
+  test "viewing a collection fetches related projects" do
+    show_collection({id: api_fixture('collections')["foo_file"]['portable_data_hash']}, :active)
+    assert_includes(assigns(:same_pdh).map(&:owner_uuid),
+                    api_fixture('groups')['aproject']['uuid'],
+                    "controller did not find linked project")
+  end
+
+  test "viewing a collection fetches related permissions" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:permissions).map(&:uuid),
+                    api_fixture('links')['bar_file_readable_by_active']['uuid'],
+                    "controller did not find permission link")
+  end
+
+  test "viewing a collection fetches jobs that output it" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:output_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find output job")
+  end
+
+  test "viewing a collection fetches jobs that logged it" do
+    show_collection(:baz_file, :active)
+    assert_includes(assigns(:log_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find logger job")
+  end
+
+  test "viewing a collection fetches logs about it" do
+    show_collection(:foo_file, :active)
+    assert_includes(assigns(:logs).map(&:uuid),
+                    api_fixture('logs')['log4']['uuid'],
+                    "controller did not find related log")
+  end
+
+  test "sharing auths available to admin" do
+    show_collection("collection_owned_by_active", "admin_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to owner" do
+    show_collection("collection_owned_by_active", "active_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to reader" do
+    show_collection("foo_collection_in_aproject",
+                    "project_viewer_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "viewing collection files with a reader token" do
+    params = collection_params(:foo_file)
+    params[:reader_token] = api_fixture("api_client_authorizations",
+                                        "active_all_collections", "api_token")
+    get(:show_file_links, params)
+    assert_response :success
+    assert_equal([['.', 'foo', 3]], assigns(:object).files)
+    assert_no_session
+  end
+
+  test "fetching collection file with reader token" do
+    expected = stub_file_content
+    params = collection_params(:foo_file, "foo")
+    params[:reader_token] = api_fixture("api_client_authorizations",
+                                        "active_all_collections", "api_token")
+    get(:show_file, params)
+    assert_response :success
+    assert_equal(expected, @response.body,
+                 "failed to fetch a Collection file with a reader token")
+    assert_no_session
+  end
+
+  test "reader token Collection links end with trailing slash" do
+    # Testing the fix for #2937.
+    session = session_for(:active_trustedclient)
+    post(:share, collection_params(:foo_file), session)
+    assert(@controller.download_link.ends_with? '/',
+           "Collection share link does not end with slash for wget")
+  end
+
+  test "getting a file from Keep" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:active)
+    expect_content = stub_file_content
+    get(:show_file, params, sess)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep")
+  end
+
+  test "can't get a file from Keep without permission" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:spectator)
+    get(:show_file, params, sess)
+    assert_response 404
+  end
+
+  test "trying to get a nonexistent file from Keep returns a 404" do
+    params = collection_params(:foo_file, 'gone')
+    sess = session_for(:admin)
+    get(:show_file, params, sess)
+    assert_response 404
+  end
+
+  test "getting a file from Keep with a good reader token" do
+    params = collection_params(:foo_file, 'foo')
+    read_token = api_fixture('api_client_authorizations')['active']['api_token']
+    params[:reader_token] = read_token
+    expect_content = stub_file_content
+    get(:show_file, params)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep using a reader token")
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  test "trying to get from Keep with an unscoped reader token prompts login" do
+    params = collection_params(:foo_file, 'foo')
+    params[:reader_token] =
+      api_fixture('api_client_authorizations')['active_noscope']['api_token']
+    get(:show_file, params)
+    assert_response :redirect
+  end
+
+  test "can get a file with an unpermissioned auth but in-scope reader token" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:expired)
+    read_token = api_fixture('api_client_authorizations')['active']['api_token']
+    params[:reader_token] = read_token
+    expect_content = stub_file_content
+    get(:show_file, params, sess)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep using a reader token")
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  test "inactive user can retrieve user agreement" do
+    ua_collection = api_fixture('collections')['user_agreement']
+    # Here we don't test whether the agreement can be retrieved from
+    # Keep. We only test that show_file decides to send file content,
+    # so we use the file content stub.
+    stub_file_content
+    get :show_file, {
+      uuid: ua_collection['uuid'],
+      file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
+    }, session_for(:inactive)
+    assert_nil(assigns(:unsigned_user_agreements),
+               "Did not skip check_user_agreements filter " +
+               "when showing the user agreement.")
+    assert_response :success
+  end
+
+  test "requesting nonexistent Collection returns 404" do
+    show_collection({uuid: NONEXISTENT_COLLECTION, id: NONEXISTENT_COLLECTION},
+                    :active, 404)
+  end
+
+  test "use a reasonable read buffer even if client requests a huge range" do
+    fakefiledata = mock
+    IO.expects(:popen).returns(fakefiledata)
+    fakefiledata.expects(:read).twice.with() do |length|
+      # Fail the test if read() is called with length>1MiB:
+      length < 2**20
+      ## Force the ActionController::Live thread to lose the race to
+      ## verify that @response.body.length actually waits for the
+      ## response (see below):
+      # sleep 3
+    end.returns("foo\n", nil)
+    fakefiledata.expects(:close)
+    foo_file = api_fixture('collections')['foo_file']
+    @request.headers['Range'] = 'bytes=0-4294967296/*'
+    get :show_file, {
+      uuid: foo_file['uuid'],
+      file: foo_file['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
+    }, session_for(:active)
+    # Wait for the whole response to arrive before deciding whether
+    # mocks' expectations were met. Otherwise, Mocha will fail the
+    # test depending on how slowly the ActionController::Live thread
+    # runs.
+    @response.body.length
+  end
+
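
Note: the huge-range test above pins down streaming behavior: file content
should be read in bounded chunks no matter how large a Range the client
requests. A hedged sketch of the loop being exercised (names are
assumptions, not the actual controller code):

    CHUNK = 2**20   # read at most 1 MiB at a time
    while (buf = filedata.read(CHUNK))
      response.stream.write(buf)
    end
    filedata.close
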
+  test "show file in a subdirectory of a collection" do
+    params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
+    expect_content = stub_file_content
+    get(:show_file, params, session_for(:user1_with_load))
+    assert_response :success
+    assert_equal(expect_content, @response.body, "failed to get a correct file from Keep")
+  end
+
   test 'provenance graph' do
     use_token 'admin'
 
index b518b077bcf212582756ef0bce79394151fe061d..ac36f197f4306e5d0ac82f77a47cf8d7c0b1d977 100644 (file)
@@ -3,39 +3,158 @@ require 'test_helper'
 class PipelineInstancesControllerTest < ActionController::TestCase
   include PipelineInstancesHelper
 
-  test "one" do
-    r = [{started_at: 1, finished_at: 3}]
-    assert_equal 2, determine_wallclock_runtime(r)
-
-    r = [{started_at: 1, finished_at: 5}]
-    assert_equal 4, determine_wallclock_runtime(r)
-
-    r = [{started_at: 1, finished_at: 2}, {started_at: 3, finished_at: 5}]
-    assert_equal 3, determine_wallclock_runtime(r)
+  def create_instance_long_enough_to(instance_attrs={})
+    # Create a 'two_part' pipeline instance with the given attributes.
+    pt_fixture = api_fixture('pipeline_templates')['two_part']
+    post :create, {
+      pipeline_instance: instance_attrs.merge({
+        pipeline_template_uuid: pt_fixture['uuid']
+      }),
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    pi_uuid = assigns(:object).uuid
+    assert_not_nil assigns(:object)
+
+    # Hand the new instance to the test body.
+    yield pi_uuid, pt_fixture
+
+    # delete the pipeline instance
+    use_token :active
+    PipelineInstance.where(uuid: pi_uuid).first.destroy
+  end
 
-    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2}]
-    assert_equal 3, determine_wallclock_runtime(r)
+  test "pipeline instance components populated after create" do
+    create_instance_long_enough_to do |new_instance_uuid, template_fixture|
+      assert_equal(template_fixture['components'].to_json,
+                   assigns(:object).components.to_json)
+    end
+  end
 
-    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2},
-         {started_at: 2, finished_at: 4}]
-    assert_equal 4, determine_wallclock_runtime(r)
+  test "can render pipeline instance with tagged collections" do
+    # Make sure to pass in a tagged collection to test that part of the rendering behavior.
+    get(:show,
+        {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
 
-    r = [{started_at: 1, finished_at: 5}, {started_at: 2, finished_at: 3}]
-    assert_equal 4, determine_wallclock_runtime(r)
+  test "update script_parameters one at a time using merge param" do
+    template_fixture = api_fixture('pipeline_templates')['two_part']
+    post :update, {
+      id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
+      pipeline_instance: {
+        components: {
+          "part-two" => {
+            script_parameters: {
+              integer_with_value: {
+                value: 9
+              },
+              plain_string: {
+                value: 'quux'
+              },
+            }
+          }
+        }
+      },
+      merge: true,
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    orig_params = template_fixture['components']['part-two']['script_parameters']
+    new_params = assigns(:object).components[:'part-two'][:script_parameters]
+    orig_params.keys.each do |k|
+      unless %w(integer_with_value plain_string).index(k)
+        assert_equal orig_params[k].to_json, new_params[k.to_sym].to_json
+      end
+    end
+  end
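
Note: the merge test above leans on the API server's merge semantics: with
merge: true, only the keys supplied under components should be overwritten
and sibling script_parameters preserved. A minimal sketch of that kind of
deep merge, offered as an assumption about the server's behavior rather
than its actual implementation:

    def deep_merge(base, patch)
      base.merge(patch) do |_key, old, new|
        old.is_a?(Hash) && new.is_a?(Hash) ? deep_merge(old, new) : new
      end
    end
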
 
-    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 4}]
-    assert_equal 4, determine_wallclock_runtime(r)
+  test "component rendering copes with unexpected components format" do
+    get(:show,
+        {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
 
-    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5}]
-    assert_equal 4, determine_wallclock_runtime(r)
+  test "dates in JSON components are parsed" do
+    get(:show,
+        {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
+        session_for(:active))
+    assert_response :success
+    assert_not_nil assigns(:object)
+    assert_not_nil assigns(:object).components[:foo][:job]
+    assert assigns(:object).components[:foo][:job][:started_at].is_a? Time
+    assert assigns(:object).components[:foo][:job][:finished_at].is_a? Time
+  end
 
-    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
-         {started_at: 5, finished_at: 8}]
-    assert_equal 7, determine_wallclock_runtime(r)
+  # The next two tests ensure that a pipeline instance can be copied
+  # when the template has components that do not exist in the
+  # instance (ticket #4000).
+
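
Note, following the comment above: with components: 'use_latest' the copy
plausibly overlays the instance's components onto the latest template's,
so parameters survive from the instance where present and fall back to the
template elsewhere. A one-line sketch under that assumption (template and
instance are hypothetical locals):

    # 'foo' keeps the instance's parameters; 'bar' falls back to the template.
    merged_components = template.components.merge(instance.components)
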
+  test "copy pipeline instance with components=use_latest" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_latest',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Component 'foo' has script parameters only in the pipeline instance.
+    # Component 'bar' is present only in the pipeline_template.
+    # Test that the copied pipeline instance includes parameters for
+    # component 'foo' from the source instance, and parameters for
+    # component 'bar' from the source template.
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
+  end
 
-    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
-         {started_at: 6, finished_at: 8}]
-    assert_equal 6, determine_wallclock_runtime(r)
+  test "copy pipeline instance on newer template works with script=use_same" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_same',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Test that relevant component parameters were copied from both
+    # the source instance and source template, respectively (see
+    # previous test)
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
   end
 
   test "generate graph" do
index bfbf22d8b6958559f02137ab65ea425827104acc..a09d966a184c219cd5fad8a5dc69abea06d1a86c 100644 (file)
@@ -1,7 +1,42 @@
 require 'test_helper'
 
 class SearchControllerTest < ActionController::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  include Rails.application.routes.url_helpers
+
+  test 'Get search dialog' do
+    xhr :get, :choose, {
+      format: :js,
+      title: 'Search',
+      action_name: 'Show',
+      action_href: url_for(host: 'localhost', controller: :actions, action: :show),
+      action_data: {}.to_json,
+    }, session_for(:active)
+    assert_response :success
+  end
+
+  test 'Get search results for all projects' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+    }, session_for(:active)
+    assert_response :success
+    assert_not_empty(json_response['content'],
+                     'search results for all projects should not be empty')
+  end
+
+  test 'Get search results for empty project' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+      project_uuid: api_fixture('groups')['empty_project']['uuid'],
+    }, session_for(:active)
+    assert_response :success
+    assert_empty(json_response['content'],
+                 'search results for empty project should be empty')
+  end
 end
similarity index 96%
rename from apps/workbench/test/functional/users_controller_test.rb
rename to apps/workbench/test/controllers/users_controller_test.rb
index a734391e98e6367edb3c0766e001e0a3ef0000a6..213a2a53c1630db44f3da2e1d1568670ec84a6d2 100644 (file)
@@ -1,7 +1,7 @@
 require 'test_helper'
 
 class UsersControllerTest < ActionController::TestCase
-  test "valid token works in functional test" do
+  test "valid token works in controller test" do
     get :index, {}, session_for(:active)
     assert_response :success
   end
index 01d351a2b6a82f68227d12609cdbb02a67598ea7..c7433bb247450464fb42fa780e309ce09fdb27b7 100644 (file)
@@ -5,6 +5,9 @@ require 'yaml'
 # When "RAILS_ENV=test" is used, tests in the "diagnostics" directory
 # will not be executed.
 
+# Command to run diagnostics tests:
+#   RAILS_ENV=diagnostics bundle exec rake TEST=test/diagnostics/**/*.rb
+
 class DiagnosticsTest < ActionDispatch::IntegrationTest
 
   # Prepends workbench URL to the path provided and visits that page
diff --git a/apps/workbench/test/functional/.gitkeep b/apps/workbench/test/functional/.gitkeep
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/apps/workbench/test/functional/collections_controller_test.rb b/apps/workbench/test/functional/collections_controller_test.rb
deleted file mode 100644 (file)
index 6c64ac9..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-require 'test_helper'
-
-class CollectionsControllerTest < ActionController::TestCase
-  NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
-
-  def stub_file_content
-    # For the duration of the current test case, stub file download
-    # content with a randomized (but recognizable) string. Return the
-    # string, the test case can use it in assertions.
-    txt = 'the quick brown fox ' + rand(2**32).to_s
-    @controller.stubs(:file_enumerator).returns([txt])
-    txt
-  end
-
-  def collection_params(collection_name, file_name=nil)
-    uuid = api_fixture('collections')[collection_name.to_s]['uuid']
-    params = {uuid: uuid, id: uuid}
-    params[:file] = file_name if file_name
-    params
-  end
-
-  def assert_hash_includes(actual_hash, expected_hash, msg=nil)
-    expected_hash.each do |key, value|
-      assert_equal(value, actual_hash[key], msg)
-    end
-  end
-
-  def assert_no_session
-    assert_hash_includes(session, {arvados_api_token: nil},
-                         "session includes unexpected API token")
-  end
-
-  def assert_session_for_auth(client_auth)
-    api_token =
-      api_fixture('api_client_authorizations')[client_auth.to_s]['api_token']
-    assert_hash_includes(session, {arvados_api_token: api_token},
-                         "session token does not belong to #{client_auth}")
-  end
-
-  def show_collection(params, session={}, response=:success)
-    params = collection_params(params) if not params.is_a? Hash
-    session = session_for(session) if not session.is_a? Hash
-    get(:show, params, session)
-    assert_response response
-  end
-
-  test "viewing a collection" do
-    show_collection(:foo_file, :active)
-    assert_equal([['.', 'foo', 3]], assigns(:object).files)
-  end
-
-  test "viewing a collection fetches related projects" do
-    show_collection({id: api_fixture('collections')["foo_file"]['portable_data_hash']}, :active)
-    assert_includes(assigns(:same_pdh).map(&:owner_uuid),
-                    api_fixture('groups')['aproject']['uuid'],
-                    "controller did not find linked project")
-  end
-
-  test "viewing a collection fetches related permissions" do
-    show_collection(:bar_file, :active)
-    assert_includes(assigns(:permissions).map(&:uuid),
-                    api_fixture('links')['bar_file_readable_by_active']['uuid'],
-                    "controller did not find permission link")
-  end
-
-  test "viewing a collection fetches jobs that output it" do
-    show_collection(:bar_file, :active)
-    assert_includes(assigns(:output_of).map(&:uuid),
-                    api_fixture('jobs')['foobar']['uuid'],
-                    "controller did not find output job")
-  end
-
-  test "viewing a collection fetches jobs that logged it" do
-    show_collection(:baz_file, :active)
-    assert_includes(assigns(:log_of).map(&:uuid),
-                    api_fixture('jobs')['foobar']['uuid'],
-                    "controller did not find logger job")
-  end
-
-  test "viewing a collection fetches logs about it" do
-    show_collection(:foo_file, :active)
-    assert_includes(assigns(:logs).map(&:uuid),
-                    api_fixture('logs')['log4']['uuid'],
-                    "controller did not find related log")
-  end
-
-  test "viewing collection files with a reader token" do
-    params = collection_params(:foo_file)
-    params[:reader_token] = api_fixture("api_client_authorizations",
-                                        "active_all_collections", "api_token")
-    get(:show_file_links, params)
-    assert_response :success
-    assert_equal([['.', 'foo', 3]], assigns(:object).files)
-    assert_no_session
-  end
-
-  test "fetching collection file with reader token" do
-    expected = stub_file_content
-    params = collection_params(:foo_file, "foo")
-    params[:reader_token] = api_fixture("api_client_authorizations",
-                                        "active_all_collections", "api_token")
-    get(:show_file, params)
-    assert_response :success
-    assert_equal(expected, @response.body,
-                 "failed to fetch a Collection file with a reader token")
-    assert_no_session
-  end
-
-  test "reader token Collection links end with trailing slash" do
-    # Testing the fix for #2937.
-    session = session_for(:active_trustedclient)
-    post(:share, collection_params(:foo_file), session)
-    assert(@controller.download_link.ends_with? '/',
-           "Collection share link does not end with slash for wget")
-  end
-
-  test "getting a file from Keep" do
-    params = collection_params(:foo_file, 'foo')
-    sess = session_for(:active)
-    expect_content = stub_file_content
-    get(:show_file, params, sess)
-    assert_response :success
-    assert_equal(expect_content, @response.body,
-                 "failed to get a correct file from Keep")
-  end
-
-  test "can't get a file from Keep without permission" do
-    params = collection_params(:foo_file, 'foo')
-    sess = session_for(:spectator)
-    get(:show_file, params, sess)
-    assert_response 404
-  end
-
-  test "trying to get a nonexistent file from Keep returns a 404" do
-    params = collection_params(:foo_file, 'gone')
-    sess = session_for(:admin)
-    get(:show_file, params, sess)
-    assert_response 404
-  end
-
-  test "getting a file from Keep with a good reader token" do
-    params = collection_params(:foo_file, 'foo')
-    read_token = api_fixture('api_client_authorizations')['active']['api_token']
-    params[:reader_token] = read_token
-    expect_content = stub_file_content
-    get(:show_file, params)
-    assert_response :success
-    assert_equal(expect_content, @response.body,
-                 "failed to get a correct file from Keep using a reader token")
-    assert_not_equal(read_token, session[:arvados_api_token],
-                     "using a reader token set the session's API token")
-  end
-
-  test "trying to get from Keep with an unscoped reader token prompts login" do
-    params = collection_params(:foo_file, 'foo')
-    params[:reader_token] =
-      api_fixture('api_client_authorizations')['active_noscope']['api_token']
-    get(:show_file, params)
-    assert_response :redirect
-  end
-
-  test "can get a file with an unpermissioned auth but in-scope reader token" do
-    params = collection_params(:foo_file, 'foo')
-    sess = session_for(:expired)
-    read_token = api_fixture('api_client_authorizations')['active']['api_token']
-    params[:reader_token] = read_token
-    expect_content = stub_file_content
-    get(:show_file, params, sess)
-    assert_response :success
-    assert_equal(expect_content, @response.body,
-                 "failed to get a correct file from Keep using a reader token")
-    assert_not_equal(read_token, session[:arvados_api_token],
-                     "using a reader token set the session's API token")
-  end
-
-  test "inactive user can retrieve user agreement" do
-    ua_collection = api_fixture('collections')['user_agreement']
-    # Here we don't test whether the agreement can be retrieved from
-    # Keep. We only test that show_file decides to send file content,
-    # so we use the file content stub.
-    stub_file_content
-    get :show_file, {
-      uuid: ua_collection['uuid'],
-      file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
-    }, session_for(:inactive)
-    assert_nil(assigns(:unsigned_user_agreements),
-               "Did not skip check_user_agreements filter " +
-               "when showing the user agreement.")
-    assert_response :success
-  end
-
-  test "requesting nonexistent Collection returns 404" do
-    show_collection({uuid: NONEXISTENT_COLLECTION, id: NONEXISTENT_COLLECTION},
-                    :active, 404)
-  end
-
-  test "use a reasonable read buffer even if client requests a huge range" do
-    fakefiledata = mock
-    IO.expects(:popen).returns(fakefiledata)
-    fakefiledata.expects(:read).twice.with() do |length|
-      # Fail the test if read() is called with length>1MiB:
-      length < 2**20
-      ## Force the ActionController::Live thread to lose the race to
-      ## verify that @response.body.length actually waits for the
-      ## response (see below):
-      # sleep 3
-    end.returns("foo\n", nil)
-    fakefiledata.expects(:close)
-    foo_file = api_fixture('collections')['foo_file']
-    @request.headers['Range'] = 'bytes=0-4294967296/*'
-    get :show_file, {
-      uuid: foo_file['uuid'],
-      file: foo_file['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
-    }, session_for(:active)
-    # Wait for the whole response to arrive before deciding whether
-    # mocks' expectations were met. Otherwise, Mocha will fail the
-    # test depending on how slowly the ActionController::Live thread
-    # runs.
-    @response.body.length
-  end
-
-  test "show file in a subdirectory of a collection" do
-    params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
-    expect_content = stub_file_content
-    get(:show_file, params, session_for(:user1_with_load))
-    assert_response :success
-    assert_equal(expect_content, @response.body, "failed to get a correct file from Keep")
-  end
-end
diff --git a/apps/workbench/test/functional/pipeline_instances_controller_test.rb b/apps/workbench/test/functional/pipeline_instances_controller_test.rb
deleted file mode 100644 (file)
index a14d419..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-require 'test_helper'
-
-class PipelineInstancesControllerTest < ActionController::TestCase
-  def create_instance_long_enough_to(instance_attrs={})
-    # create 'two_part' pipeline with the given instance attributes
-    pt_fixture = api_fixture('pipeline_templates')['two_part']
-    post :create, {
-      pipeline_instance: instance_attrs.merge({
-        pipeline_template_uuid: pt_fixture['uuid']
-      }),
-      format: :json
-    }, session_for(:active)
-    assert_response :success
-    pi_uuid = assigns(:object).uuid
-    assert_not_nil assigns(:object)
-
-    # yield
-    yield pi_uuid, pt_fixture
-
-    # delete the pipeline instance
-    use_token :active
-    PipelineInstance.where(uuid: pi_uuid).first.destroy
-  end
-
-  test "pipeline instance components populated after create" do
-    create_instance_long_enough_to do |new_instance_uuid, template_fixture|
-      assert_equal(template_fixture['components'].to_json,
-                   assigns(:object).components.to_json)
-    end
-  end
-
-  test "can render pipeline instance with tagged collections" do
-    # Make sure to pass in a tagged collection to test that part of the rendering behavior.
-    get(:show,
-        {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
-        session_for(:active))
-    assert_response :success
-  end
-
-  test "update script_parameters one at a time using merge param" do
-      template_fixture = api_fixture('pipeline_templates')['two_part']
-      post :update, {
-        id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
-        pipeline_instance: {
-          components: {
-            "part-two" => {
-              script_parameters: {
-                integer_with_value: {
-                  value: 9
-                },
-                plain_string: {
-                  value: 'quux'
-                },
-              }
-            }
-          }
-        },
-        merge: true,
-        format: :json
-      }, session_for(:active)
-      assert_response :success
-      assert_not_nil assigns(:object)
-      orig_params = template_fixture['components']['part-two']['script_parameters']
-      new_params = assigns(:object).components[:'part-two'][:script_parameters]
-      orig_params.keys.each do |k|
-        unless %w(integer_with_value plain_string).index(k)
-          assert_equal orig_params[k].to_json, new_params[k.to_sym].to_json
-        end
-      end
-  end
-
-  test "component rendering copes with unexpected components format" do
-    get(:show,
-        {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
-        session_for(:active))
-    assert_response :success
-  end
-
-  test "dates in JSON components are parsed" do
-    get(:show,
-        {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
-        session_for(:active))
-    assert_response :success
-    assert_not_nil assigns(:object)
-    assert_not_nil assigns(:object).components[:foo][:job]
-    assert assigns(:object).components[:foo][:job][:started_at].is_a? Time
-    assert assigns(:object).components[:foo][:job][:finished_at].is_a? Time
-  end
-
-  # The next two tests ensure that a pipeline instance can be copied
-  # when the template has components that do not exist in the
-  # instance (ticket #4000).
-
-  test "copy pipeline instance with components=use_latest" do
-    post(:copy,
-         {
-           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
-           components: 'use_latest',
-           script: 'use_latest',
-           pipeline_instance: {
-             state: 'RunningOnServer'
-           }
-         },
-         session_for(:active))
-    assert_response 302
-    assert_not_nil assigns(:object)
-
-    # Component 'foo' has script parameters only in the pipeline instance.
-    # Component 'bar' is present only in the pipeline_template.
-    # Test that the copied pipeline instance includes parameters for
-    # component 'foo' from the source instance, and parameters for
-    # component 'bar' from the source template.
-    #
-    assert_not_nil assigns(:object).components[:foo]
-    foo = assigns(:object).components[:foo]
-    assert_not_nil foo[:script_parameters]
-    assert_not_nil foo[:script_parameters][:input]
-    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
-
-    assert_not_nil assigns(:object).components[:bar]
-    bar = assigns(:object).components[:bar]
-    assert_not_nil bar[:script_parameters]
-    assert_not_nil bar[:script_parameters][:input]
-    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
-  end
-
-  test "copy pipeline instance on newer template works with script=use_same" do
-    post(:copy,
-         {
-           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
-           components: 'use_latest',
-           script: 'use_same',
-           pipeline_instance: {
-             state: 'RunningOnServer'
-           }
-         },
-         session_for(:active))
-    assert_response 302
-    assert_not_nil assigns(:object)
-
-    # Test that relevant component parameters were copied from both
-    # the source instance and source template, respectively (see
-    # previous test)
-    #
-    assert_not_nil assigns(:object).components[:foo]
-    foo = assigns(:object).components[:foo]
-    assert_not_nil foo[:script_parameters]
-    assert_not_nil foo[:script_parameters][:input]
-    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
-
-    assert_not_nil assigns(:object).components[:bar]
-    bar = assigns(:object).components[:bar]
-    assert_not_nil bar[:script_parameters]
-    assert_not_nil bar[:script_parameters][:input]
-    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
-  end
-end
diff --git a/apps/workbench/test/helpers/pipeline_instances_helper_test.rb b/apps/workbench/test/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..a785683
--- /dev/null
@@ -0,0 +1,38 @@
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
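+  # These cases pin down the expected semantics of
+  # determine_wallclock_runtime: overlapping [started_at, finished_at]
+  # intervals are merged so concurrent time is counted once, and gaps
+  # between intervals are excluded from the total.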
+  test "one" do
+    r = [{started_at: 1, finished_at: 3}]
+    assert_equal 2, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 2}, {started_at: 3, finished_at: 5}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2},
+         {started_at: 2, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}, {started_at: 2, finished_at: 3}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 5, finished_at: 8}]
+    assert_equal 7, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 6, finished_at: 8}]
+    assert_equal 6, determine_wallclock_runtime(r)
+  end
+end
index 69e346d078e0a4b3a744eb52493e0afce46266cb..093915905498f9cd529690614e36f54751119813 100644 (file)
@@ -3,6 +3,11 @@ require 'selenium-webdriver'
 require 'headless'
 
 class ApplicationLayoutTest < ActionDispatch::IntegrationTest
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
   setup do
     headless = Headless.new
     headless.start
diff --git a/apps/workbench/test/integration/collection_upload_test.rb b/apps/workbench/test/integration/collection_upload_test.rb
new file mode 100644 (file)
index 0000000..9a26373
--- /dev/null
@@ -0,0 +1,100 @@
+require 'integration_helper'
+
+class CollectionUploadTest < ActionDispatch::IntegrationTest
+  setup do
+    Headless.new.start
+  end
+
+  setup do
+    testfiles.each do |filename, content|
+      open(testfile_path(filename), 'w') do |io|
+        io.write content
+      end
+    end
+  end
+
+  teardown do
+    testfiles.each do |filename, _|
+      File.unlink(testfile_path filename)
+    end
+  end
+
+  test "Create new collection using upload button" do
+    Capybara.current_driver = :poltergeist
+    visit page_with_token 'active', aproject_path
+    find('.btn', text: 'Add data').click
+    click_link 'Upload files from my computer'
+    # Should be looking at a new empty collection.
+    assert_text 'New collection'
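+    # d41d8cd98f00b204e9800998ecf8427e+0 is the portable data hash of
+    # an empty collection (the MD5 of the empty string, plus size 0).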
+    assert_text 'd41d8cd98f00b204e9800998ecf8427e+0'
+    # The "Upload" tab should be active and loaded.
+    assert_selector 'div#Upload.active div.panel'
+  end
+
+  test "No Upload tab on non-writable collection" do
+    Capybara.current_driver = :poltergeist
+    visit(page_with_token 'active',
+          '/collections/'+api_fixture('collections')['user_agreement']['uuid'])
+    assert_no_selector '.nav-tabs a', text: 'Upload'
+  end
+
+  test "Upload two empty files with the same name" do
+    Capybara.current_driver = :selenium
+    visit page_with_token 'active', sandbox_path
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div', text: 'empty.txt'
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div.row div span[title]', text: 'empty.txt', count: 2
+    click_button 'Start'
+    assert_text :visible, 'Done!'
+    visit sandbox_path+'.json'
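+    # Expect two zero-length entries; the second upload should have
+    # been renamed "empty (1).txt", with the space manifest-encoded
+    # as \040.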
+    assert_match /_text":"\. d41d8\S+ 0:0:empty.txt\\n\. d41d8\S+ 0:0:empty\\\\040\(1\).txt\\n"/, body
+  end
+
+  test "Upload non-empty files, report errors" do
+    Capybara.current_driver = :selenium
+    visit page_with_token 'active', sandbox_path
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('a')
+    attach_file 'file_selector', testfile_path('foo.txt')
+    assert_selector 'button:not([disabled])', text: 'Start'
+    click_button 'Start'
+    if "test environment does not have a keepproxy yet, see #4534"
+      using_wait_time 20 do
+        assert_text :visible, 'error'
+      end
+    else
+      assert_text :visible, 'Done!'
+      visit sandbox_path+'.json'
+      assert_match /_text":"\. 0cc1\S+ 0:1:a\\n\. acbd\S+ 0:3:foo.txt\\n"/, body
+    end
+  end
+
+  protected
+
+  def aproject_path
+    '/projects/' + api_fixture('groups')['aproject']['uuid']
+  end
+
+  def sandbox_uuid
+    api_fixture('collections')['upload_sandbox']['uuid']
+  end
+
+  def sandbox_path
+    '/collections/' + sandbox_uuid
+  end
+
+  def testfiles
+    {
+      'empty.txt' => '',
+      'a' => 'a',
+      'foo.txt' => 'foo'
+    }
+  end
+
+  def testfile_path filename
+    # Must be an absolute path. https://github.com/jnicklas/capybara/issues/621
+    File.join Dir.getwd, 'tmp', filename
+  end
+end
index f4fc4cb5ce6621f0152a5524971e1f33ae9401b5..201be6d77696671b022bca6cd44f0aa03e8dd9d3 100644 (file)
@@ -44,6 +44,35 @@ class CollectionsTest < ActionDispatch::IntegrationTest
     assert(page.has_link?('foo'), "Collection page did not include file link")
   end
 
+  def check_sharing(want_state, link_regexp)
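+    # want_state is :on or :off; link_regexp should match the download
+    # link that appears (or disappears) with the sharing state.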
+    # We specifically want to click buttons.  See #4291.
+    if want_state == :off
+      click_button "Unshare"
+      text_assertion = :assert_no_text
+      link_assertion = :assert_empty
+    else
+      click_button "Create sharing link"
+      text_assertion = :assert_text
+      link_assertion = :refute_empty
+    end
+    using_wait_time(Capybara.default_wait_time * 3) do
+      send(text_assertion, "Shared at:")
+    end
+    send(link_assertion, all("a").select { |a| a[:href] =~ link_regexp })
+  end
+
+  test "creating and uncreating a sharing link" do
+    Capybara.current_driver = Capybara.javascript_driver
+    coll_uuid = api_fixture("collections", "collection_owned_by_active", "uuid")
+    download_link_re =
+      Regexp.new(Regexp.escape("/collections/download/#{coll_uuid}/"))
+    visit page_with_token("active_trustedclient", "/collections/#{coll_uuid}")
+    within "#sharing-button" do
+      check_sharing(:on, download_link_re)
+      check_sharing(:off, download_link_re)
+    end
+  end
+
   test "can download an entire collection with a reader token" do
     CollectionsController.any_instance.
       stubs(:file_enumerator).returns(["foo\n", "file\n"])
@@ -78,7 +107,7 @@ class CollectionsTest < ActionDispatch::IntegrationTest
   test "can view empty collection" do
     uuid = 'd41d8cd98f00b204e9800998ecf8427e+0'
     visit page_with_token('active', "/collections/#{uuid}")
-    assert page.has_text?('This collection is empty')
+    assert page.has_text?(/This collection is empty|The following collections have this content/)
   end
 
   test "combine selected collections into new collection" do
@@ -282,7 +311,7 @@ class CollectionsTest < ActionDispatch::IntegrationTest
     assert_checkboxes_state('[value*="file1"]', true, 'checkboxes for file1 should be selected after filtering')
     assert_checkboxes_state('[value*="file2"]', true, 'checkboxes for file2 should be selected after filtering')
     assert_checkboxes_state('[value*="file3"]', false, 'checkboxes for file3 should be clear after filtering')
+
     # Select all files, then filter, then "unselect all", then unfilter
     page.find_field('file_regex').set("\b") # backspace
     find('button#select-all').click
index 6bb78906bb10e3ccf8fad4442bae9ba7a3917cec..e9c84c158b945bf0d589a0da11a8614ccdae1c3c 100644 (file)
@@ -34,10 +34,11 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
     find("#projects-menu").click
     find('.dropdown-menu a,button', text: 'A Project').click
     find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
     within('.modal-dialog') do
       wait_for_ajax
       first('span', text: 'foo_tag').click
-      find('.btn', text: 'Add').click
+      find('.btn', text: 'Copy').click
     end
     using_wait_time(Capybara.default_wait_time * 3) do
       wait_for_ajax
@@ -87,14 +88,14 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
     # are saved in the desired places. (#4015)
 
     # foo_collection_in_aproject is the collection tagged with foo_tag.
-    col = api_fixture('collections', 'foo_collection_in_aproject')
+    collection = api_fixture('collections', 'foo_collection_in_aproject')
     click_link 'Advanced'
     click_link 'API response'
     api_response = JSON.parse(find('div#advanced_api_response pre').text)
     input_params = api_response['components']['part-one']['script_parameters']['input']
-    assert_equal input_params['value'], col['portable_data_hash']
-    assert_equal input_params['selection_name'], col['name']
-    assert_equal input_params['selection_uuid'], col['uuid']
+    assert_equal input_params['value'], collection['portable_data_hash']
+    assert_equal input_params['selection_name'], collection['name']
+    assert_equal input_params['selection_uuid'], collection['uuid']
 
     # "Run" button is now enabled
     page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
@@ -124,22 +125,23 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
     find("#projects-menu").click
     find('.dropdown-menu a,button', text: 'A Project').click
     find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
     within('.modal-dialog') do
       wait_for_ajax
       first('span', text: 'foo_tag').click
-      find('.btn', text: 'Add').click
+      find('.btn', text: 'Copy').click
     end
     using_wait_time(Capybara.default_wait_time * 3) do
       wait_for_ajax
     end
 
-    create_and_run_pipeline_in_aproject true, 'Two Part Pipeline Template', false
+    create_and_run_pipeline_in_aproject true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
   end
 
   # Create a pipeline instance from outside of a project
   test 'Run a pipeline from dashboard' do
     visit page_with_token('active_trustedclient')
-    create_and_run_pipeline_in_aproject false, 'Two Part Pipeline Template', false
+    create_and_run_pipeline_in_aproject false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
   end
 
   test 'view pipeline with job and see graph' do
@@ -221,20 +223,13 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   end
 
   [
-    ['active', false, false, false, 'Two Part Pipeline Template', false],
-    ['active', false, false, true, 'Two Part Pipeline Template', false],
-    ['active', true, false, false, 'Two Part Pipeline Template', false],
-    ['active', true, true, false, 'Two Part Pipeline Template', false],
-    ['active', true, false, true, 'Two Part Pipeline Template', false],
-    ['active', true, true, true, 'Two Part Pipeline Template', false],
-    ['project_viewer', false, false, true, 'Two Part Pipeline Template', false],
-    ['project_viewer', true, false, true, 'Two Part Pipeline Template', false],
-    ['project_viewer', true, true, true, 'Two Part Pipeline Template', false],
-    ['active', false, false, false, 'Two Part Template with dataclass File', true],
-    ['active', false, false, true, 'Two Part Template with dataclass File', true],
-  ].each do |user, with_options, choose_options, in_aproject, template_name, choose_file|
-    test "Rerun pipeline instance as #{user} using options #{with_options} #{choose_options}
-          in #{in_aproject} with #{template_name} with file #{choose_file}" do
+    [true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [true, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [false, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [true, 'Two Part Pipeline Template', 'collection_with_no_name_in_aproject', false],
+  ].each do |in_aproject, template_name, collection, choose_file|
+    test "Run pipeline instance in #{in_aproject} with #{template_name} with #{collection} file #{choose_file}" do
       visit page_with_token('active')
 
       # need bigger modal size when choosing a file from collection
@@ -245,7 +240,48 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
         find('.dropdown-menu a,button', text: 'A Project').click
       end
 
-      create_and_run_pipeline_in_aproject in_aproject, template_name, choose_file
+      create_and_run_pipeline_in_aproject in_aproject, template_name, collection, choose_file
+      instance_path = current_path
+
+      # Pause the pipeline
+      find('a,button', text: 'Pause').click
+      assert page.has_text? 'Paused'
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+      page.assert_selector 'a,button', text: 'Re-run with latest'
+      page.assert_selector 'a,button', text: 'Re-run options'
+
+      # Verify that the newly created instance is created in the right project.
+      assert page.has_text? 'Home'
+      if in_aproject
+        assert page.has_text? 'A Project'
+      else
+        assert page.has_no_text? 'A Project'
+      end
+    end
+  end
+
+  [
+    ['active', false, false, false],
+    ['active', false, false, true],
+    ['active', true, false, false],
+    ['active', true, true, false],
+    ['active', true, false, true],
+    ['active', true, true, true],
+    ['project_viewer', false, false, true],
+    ['project_viewer', true, true, true],
+  ].each do |user, with_options, choose_options, in_aproject|
+    test "Rerun pipeline instance as #{user} using options #{with_options} #{choose_options} in #{in_aproject}" do
+      visit page_with_token('active')
+
+      # need bigger modal size when choosing a file from collection
+      Capybara.current_session.driver.browser.manage.window.resize_to(1024, 768)
+
+      if in_aproject
+        find("#projects-menu").click
+        find('.dropdown-menu a,button', text: 'A Project').click
+      end
+
+      create_and_run_pipeline_in_aproject in_aproject, 'Two Part Pipeline Template', 'foo_collection_in_aproject'
       instance_path = current_path
 
       # Pause the pipeline
@@ -283,8 +319,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       # Verify that the newly created instance is created in the right project.
       # In case of the project_viewer user, since the user cannot write to the project,
       # the pipeline should have been created in the user's Home project.
-      rerun_instance_path = current_path
-      assert_not_equal instance_path, rerun_instance_path, 'Rerun instance path expected to be different'
+      assert_not_equal instance_path, current_path, 'Rerun instance path expected to be different'
       assert page.has_text? 'Home'
       if in_aproject && (user != 'project_viewer')
         assert page.has_text? 'A Project'
@@ -295,7 +330,10 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   end
 
   # Create and run a pipeline for 'Two Part Pipeline Template' in 'A Project'
-  def create_and_run_pipeline_in_aproject in_aproject, template_name, choose_file
+  def create_and_run_pipeline_in_aproject in_aproject, template_name, collection_fixture, choose_file=false
+    # collection in aproject to be used as input
+    collection = api_fixture('collections', collection_fixture)
+
     # create a pipeline instance
     find('.btn', text: 'Run a pipeline').click
     within('.modal-dialog') do
@@ -320,7 +358,16 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
         click_link "A Project"
         wait_for_ajax
       end
-      first('span', text: 'foo_tag').click
+
+      if collection_fixture == 'foo_collection_in_aproject'
+        first('span', text: 'foo_tag').click
+      elsif collection['name']
+        first('span', text: "#{collection['name']}").click
+      else
+        collection_uuid = collection['uuid']
+        find("div[data-object-uuid=#{collection_uuid}]").click
+      end
+
       if choose_file
         wait_for_ajax
         find('.preview-selectable', text: 'foo').click
@@ -334,20 +381,17 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
 
     # Ensure that the collection's portable_data_hash, uuid and name
     # are saved in the desired places. (#4015)
-
-    # foo_collection_in_aproject is the collection tagged with foo_tag.
-    col = api_fixture('collections', 'foo_collection_in_aproject')
     click_link 'Advanced'
     click_link 'API response'
     api_response = JSON.parse(find('div#advanced_api_response pre').text)
     input_params = api_response['components']['part-one']['script_parameters']['input']
     assert_equal(input_params['selection_uuid'], collection['uuid'], "Expected input param uuid not found")
+    assert_equal(input_params['selection_uuid'], collection['uuid'], "Not found expected input param uuid")
     if choose_file
-      assert_equal(input_params['value'], col['portable_data_hash']+'/foo', "Not found expected input file param value")
-      assert_equal(input_params['selection_name'], col['name']+'/foo', "Not found expected input file param name")
+      assert_equal(input_params['value'], collection['portable_data_hash']+'/foo', "Expected input file param value not found")
+      assert_equal(input_params['selection_name'], collection['name']+'/foo', "Expected input file param name not found")
     else
-      assert_equal(input_params['value'], col['portable_data_hash'], "Not found expected input param value")
-      assert_equal(input_params['selection_name'], col['name'], "Not found expected input param name")
+      assert_equal(input_params['value'], collection['portable_data_hash'], "Expected input param value not found")
+      assert_equal(input_params['selection_name'], collection['name'], "Expected input selection name not found")
     end
 
     # "Run" button present and enabled
index 5da61536f010cdc9cfc1aee35f2d91e93cd7a3fa..97e1a542bafcfad5f7a7e930c48c21f9417ec149 100644 (file)
@@ -4,12 +4,18 @@ require 'headless'
 
 class ProjectsTest < ActionDispatch::IntegrationTest
   setup do
-    Capybara.current_driver = Capybara.javascript_driver
+    headless = Headless.new
+    headless.start
+    Capybara.current_driver = :selenium
+
+    # project tests need bigger page size to be able to see all the buttons
+    Capybara.current_session.driver.browser.manage.window.resize_to(1152, 768)
   end
 
   test 'Check collection count for A Project in the tab pane titles' do
     project_uuid = api_fixture('groups')['aproject']['uuid']
     visit page_with_token 'active', '/projects/' + project_uuid
+    wait_for_ajax
     collection_count = page.all("[data-pk*='collection']").count
     assert_selector '#Data_collections-tab span', text: "(#{collection_count})"
   end
@@ -48,8 +54,7 @@ class ProjectsTest < ActionDispatch::IntegrationTest
 
     # visit project page
     visit current_path
-    assert(has_no_text?('.container-fluid', text: '*Textile description for A project*'),
-           "Description is not rendered properly")
+    assert_no_text '*Textile description for A project*'
     assert(find?('.container-fluid', text: 'Textile description for A project'),
            "Description update did not survive page refresh")
     assert(find?('.container-fluid', text: 'And a new paragraph in description'),
@@ -132,7 +137,7 @@ class ProjectsTest < ActionDispatch::IntegrationTest
     click_link 'Other objects'
     within '.selection-action-container' do
       find '.editable', text: 'Now I have a new name.'
-      page.assert_no_selector '.editable', text: 'Now I have a name.'
+      assert_no_selector '.editable', text: 'Now I have a name.'
     end
   end
 
@@ -142,7 +147,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
     find(".dropdown-menu a", text: "Home").click
     find('.btn', text: "Add a subproject").click
 
-    # within('.editable', text: 'New project') do
     within('h2') do
       find('.fa-pencil').click
       find('.editable-input input').set('Project 1234')
@@ -222,7 +226,9 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       assert(has_link?("Write"),
              "failed to change access level on new share")
       click_on "Revoke"
+      page.driver.browser.switch_to.alert.accept
     end
+    wait_for_ajax
     using_wait_time(Capybara.default_wait_time * 3) do
       assert(page.has_no_text?(name),
              "new share row still exists after being revoked")
@@ -278,7 +284,7 @@ class ProjectsTest < ActionDispatch::IntegrationTest
     ['Remove',api_fixture('collections')['collection_in_aproject_with_same_name_as_in_home_project'],
       api_fixture('groups')['aproject'],nil,true],
   ].each do |action, my_collection, src, dest=nil, expect_name_change=nil|
-    test "selection #{action} #{expect_name_change} for project" do
+    test "selection #{action} -> #{expect_name_change.inspect} for project" do
       perform_selection_action src, dest, my_collection, action
 
       case action
@@ -289,8 +295,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
         find(".dropdown-menu a", text: dest['name']).click
         assert page.has_text?(my_collection['name']), 'Collection not found in dest project after copy'
 
-        # now remove it from destination project to restore to original state
-        perform_selection_action dest, nil, my_collection, 'Remove'
       when 'Move'
         assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after move'
         visit page_with_token 'active', '/'
@@ -298,8 +302,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
         find(".dropdown-menu a", text: dest['name']).click
         assert page.has_text?(my_collection['name']), 'Collection not found in dest project after move'
 
-        # move it back to src project to restore to original state
-        perform_selection_action dest, src, my_collection, action
       when 'Remove'
         assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after remove'
         visit page_with_token 'active', '/'
@@ -324,7 +326,7 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       find('input[type=checkbox]').click
     end
 
-    click_button 'Selection...'
+    click_button 'Selection'
 
     within('.selection-action-container') do
       assert page.has_text?("Compare selected"), "Compare selected link text not found"
@@ -356,13 +358,13 @@ class ProjectsTest < ActionDispatch::IntegrationTest
     find("#projects-menu").click
     find(".dropdown-menu a", text: my_project['name']).click
 
-    click_button 'Selection...'
+    click_button 'Selection'
     within('.selection-action-container') do
-      page.assert_selector 'li.disabled', text: 'Create new collection with selected collections'
-      page.assert_selector 'li.disabled', text: 'Compare selected'
-      page.assert_selector 'li.disabled', text: 'Copy selected'
-      page.assert_selector 'li.disabled', text: 'Move selected'
-      page.assert_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li.disabled', text: 'Remove selected'
     end
 
     # select collection and verify links are enabled
@@ -375,17 +377,17 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       find('input[type=checkbox]').click
     end
 
-    click_button 'Selection...'
+    click_button 'Selection'
     within('.selection-action-container') do
-      page.assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
-      page.assert_selector 'li', text: 'Create new collection with selected collections'
-      page.assert_selector 'li.disabled', text: 'Compare selected'
-      page.assert_no_selector 'li.disabled', text: 'Copy selected'
-      page.assert_selector 'li', text: 'Copy selected'
-      page.assert_no_selector 'li.disabled', text: 'Move selected'
-      page.assert_selector 'li', text: 'Move selected'
-      page.assert_no_selector 'li.disabled', text: 'Remove selected'
-      page.assert_selector 'li', text: 'Remove selected'
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
     end
 
     # select subproject and verify that copy action is disabled
@@ -400,15 +402,15 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       find('input[type=checkbox]').click
     end
 
-    click_button 'Selection...'
+    click_button 'Selection'
     within('.selection-action-container') do
-      page.assert_selector 'li.disabled', text: 'Create new collection with selected collections'
-      page.assert_selector 'li.disabled', text: 'Compare selected'
-      page.assert_selector 'li.disabled', text: 'Copy selected'
-      page.assert_no_selector 'li.disabled', text: 'Move selected'
-      page.assert_selector 'li', text: 'Move selected'
-      page.assert_no_selector 'li.disabled', text: 'Remove selected'
-      page.assert_selector 'li', text: 'Remove selected'
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
     end
 
     # select subproject and a collection and verify that copy action is still disabled
@@ -430,15 +432,87 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       find('input[type=checkbox]').click
     end
 
-    click_button 'Selection...'
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # When project tabs are switched, only options applicable to the current tab's selections are enabled.
+  test "verify selection options when tabs are switched" do
+    my_project = api_fixture('groups')['aproject']
+    my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+    my_subproject = api_fixture('groups')['asubproject']
+
+    # Select items in two different tabs, then switch between tabs and
+    # verify that the available actions track the current tab's selections.
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    # Select a sub-project
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Select a collection
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Go back to Subprojects tab
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # Go back to Data collections tab
+    click_link 'Data collections'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # "Move selected" and "Remove selected" options should not be available when current user cannot write to the project
+  test "move selected and remove selected actions not available when current user cannot write to project" do
+    my_project = api_fixture('groups')['anonymously_accessible_project']
+    visit page_with_token 'active', "/projects/#{my_project['uuid']}"
+
+    click_button 'Selection'
     within('.selection-action-container') do
-      page.assert_selector 'li.disabled', text: 'Create new collection with selected collections'
-      page.assert_selector 'li.disabled', text: 'Compare selected'
-      page.assert_selector 'li.disabled', text: 'Copy selected'
-      page.assert_no_selector 'li.disabled', text: 'Move selected'
-      page.assert_selector 'li', text: 'Move selected'
-      page.assert_no_selector 'li.disabled', text: 'Remove selected'
-      page.assert_selector 'li', text: 'Remove selected'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Compare selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li', text: 'Move selected'
+      assert_no_selector 'li', text: 'Remove selected'
     end
   end
 
@@ -459,7 +533,7 @@ class ProjectsTest < ActionDispatch::IntegrationTest
         find('input[type=checkbox]').click
       end
 
-      click_button 'Selection...'
+      click_button 'Selection'
       within('.selection-action-container') do
         click_link 'Create new collection with selected collections'
       end
@@ -493,10 +567,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
                    item_list_parameter,
                    sorted = false,
                    sort_parameters = nil)
-    headless = Headless.new
-    headless.start
-    Capybara.current_driver = :selenium
-
     project_uuid = api_fixture('groups')[project_name]['uuid']
     visit page_with_token 'user1_with_load', '/projects/' + project_uuid
 
@@ -634,4 +704,44 @@ class ProjectsTest < ActionDispatch::IntegrationTest
     end
   end
 
+  test "error while loading tab" do
+    original_arvados_v1_base = Rails.configuration.arvados_v1_base
+
+    visit page_with_token 'active', '/projects/' + api_fixture('groups')['aproject']['uuid']
+
+    # Point to a bad api server url to generate error
+    Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+    click_link 'Other objects'
+    within '#Other_objects' do
+      # Error
+      assert_selector('a', text: 'Reload tab')
+
+      # Now point back to the orig api server and reload tab
+      Rails.configuration.arvados_v1_base = original_arvados_v1_base
+      click_link 'Reload tab'
+      assert_no_selector('a', text: 'Reload tab')
+      assert_selector('button', text: 'Selection')
+      within '.selection-action-container' do
+        assert_selector 'tr[data-kind="arvados#trait"]'
+      end
+    end
+  end
+
+  test "add new project using projects dropdown" do
+    # Add two projects via the projects dropdown and verify that each
+    # gets a distinct default name.
+    visit page_with_token 'active', '/'
+
+    # Add a new project
+    find("#projects-menu").click
+    click_link 'Add a new project'
+    assert_text 'New project'
+    assert_text 'No description provided'
+
+    # Add one more new project
+    find("#projects-menu").click
+    click_link 'Add a new project'
+    match = /New project \(\d\)/.match page.text
+    assert match, 'Expected project name not found'
+    assert_text 'No description provided'
+  end
 end
index 4bf7d5747828a92f19130b5f9b1847b25a48a0f8..ac9e596f8cb65053b6a217646757c5db2288ad80 100644 (file)
@@ -37,6 +37,12 @@ class ReportIssueTest < ActionDispatch::IntegrationTest
       assert page.has_no_text?('Describe the problem?'), 'Found text - Describe the problem'
       assert page.has_button?('Close'), 'No button - Close'
       assert page.has_no_button?('Send problem report'), 'Found button - Send problem report'
+      history_links = all('a').select do |a|
+        a[:href] =~ %r!^https://arvados.org/projects/arvados/repository/changes\?rev=[0-9a-f]+$!
+      end
+      assert_operator(2, :<=, history_links.count,
+                      "Should have found at least two links to revision " +
+                      "history in #{history_links.inspect}")
       click_button 'Close'
     end
 
index ff2b7dd012273ede52954d6b97a6cad57c9236fa..a4defda806fcc9357683ce8ceb760d77fc0a24ff 100644 (file)
@@ -24,9 +24,19 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
       assert page.has_text?('Repositories'), 'No text - Repositories'
       assert page.has_text?('SSH Keys'), 'No text - SSH Keys'
       assert page.has_text?('Current Token'), 'No text - Current Token'
-
       assert page.has_text?('The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados'), 'No text - Arvados API token'
+      add_and_verify_ssh_key
+    else  # inactive user
+      within('.navbar-fixed-top') do
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+        end
+      end
+    end
+  end
 
+  def add_and_verify_ssh_key
       click_link 'Add new SSH key'
 
       within '.modal-content' do
@@ -52,14 +62,6 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
 
       # key must be added. look for it in the refreshed page
       assert page.has_text?('added_in_test'), 'No text - added_in_test'
-    else  # inactive user
-      within('.navbar-fixed-top') do
-        find('a', text: "#{user['email']}").click
-        within('.dropdown-menu') do
-          assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
-        end
-      end
-    end
   end
 
   [
@@ -73,4 +75,30 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
       verify_manage_account user
     end
   end
+
+  [
+    ['inactive_but_signed_user_agreement', true],
+    ['active', false],
+  ].each do |user, notifications|
+    test "test manage account for #{user} with notifications #{notifications}" do
+      visit page_with_token(user)
+      click_link 'notifications-menu'
+      if notifications
+        assert_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+        click_link('Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Add new SSH key')
+
+        add_and_verify_ssh_key
+
+        # No more SSH notification
+        click_link 'notifications-menu'
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      else
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_no_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      end
+    end
+  end
 end
index 341975fe2a23bca1ebbf0b5f86b5bd18bc6adfda..c22b3ff58fc79bd941809f603bb113232d5b7445 100644 (file)
@@ -157,4 +157,45 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     Thread.current[:arvados_api_token] = nil
   end
 
+  test "live log charting" do
+    uuid = api_fixture("jobs")['running']['uuid']
+
+    visit page_with_token "admin", "/jobs/#{uuid}"
+    click_link "Log"
+
+    api = ArvadosApiClient.new
+
+    # Should give 45.3%: interval (user + sys) CPU seconds over the
+    # interval length, spread across 8 CPUs, as a percentage:
+    # (((35.39 + 0.86) / 10.0002) / 8) * 100, rounded to 1 decimal place.
+    text = "2014-11-07_23:33:51 #{uuid} 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys"
+
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+    api.api("logs", "", {log: {
+                object_uuid: uuid,
+                event_type: "stderr",
+                properties: {"text" => text}}})
+    wait_for_ajax
+
+    # Check datapoint 1 instead of datapoint 0: the chart prepends a
+    # "dummy" datapoint with no actual stats, timestamped 10 minutes
+    # before the real one, so the x-axis shows a full 10 minutes of
+    # time even though there is only a single real datapoint.
+    cpu_stat = page.evaluate_script("jobGraphData[1]['T1-cpu']")
+
+    assert_equal 45.3, (cpu_stat.to_f*100).round(1)
+
+    Thread.current[:arvados_api_token] = nil
+  end
+
+  test "live log charting from replayed log" do
+    uuid = api_fixture("jobs")['running']['uuid']
+
+    visit page_with_token "admin", "/jobs/#{uuid}"
+    click_link "Log"
+
+    ApiServerForTests.new.run_rake_task("replay_job_log", "test/job_logs/crunchstatshort.log,1.0,#{uuid}")
+    wait_for_ajax
+
+    # see above comment as to why we use datapoint 1 rather than 0
+    cpu_stat = page.evaluate_script("jobGraphData[1]['T1-cpu']")
+
+    assert_equal 45.3, (cpu_stat.to_f*100).round(1)
+  end
+
 end
index 3fea27b91670e04c83a15269790a44b41b7d9ce1..ec299e295528b6c35632fa39d6119435dc9059a6 100644 (file)
@@ -1,12 +1,49 @@
+# http://guides.rubyonrails.org/v3.2.13/performance_testing.html
+
 require 'test_helper'
 require 'rails/performance_test_help'
+require 'performance_test_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class BrowsingTest < WorkbenchPerformanceTest
+  self.profile_options = { :runs => 5,
+                           :metrics => [:wall_time],
+                           :output => 'tmp/performance',
+                           :formats => [:flat] }
+
+  setup do
+    headless = Headless.new
+    headless.start
+    Capybara.current_driver = :selenium
+    Capybara.current_session.driver.browser.manage.window.resize_to(1024, 768)
+  end
+
+  test "home page" do
+    visit_page_with_token
+    wait_for_ajax
+    assert_text 'Dashboard'
+    assert_selector 'a', text: 'Run a pipeline'
+  end
+
+  test "search for hash" do
+    visit_page_with_token
+    wait_for_ajax
+    assert_text 'Dashboard'
 
-class BrowsingTest < ActionDispatch::PerformanceTest
-  # Refer to the documentation for all available options
-  # self.profile_options = { :runs => 5, :metrics => [:wall_time, :memory]
-  #                          :output => 'tmp/performance', :formats => [:flat] }
+    within('.navbar-fixed-top') do
+      page.find_field('search').set('hash')
+      wait_for_ajax
+      page.find('.glyphicon-search').click
+    end
 
-  def test_homepage
-    get '/'
+    # In the search dialog now. Expect at least one item in the result display.
+    within '.modal-content' do
+      wait_for_ajax
+      assert_text 'All projects'
+      assert_text 'Search'
+      assert(page.has_selector?(".selectable[data-object-uuid]"))
+      click_button 'Cancel'
+    end
   end
 end
diff --git a/apps/workbench/test/performance_test_helper.rb b/apps/workbench/test/performance_test_helper.rb
new file mode 100644 (file)
index 0000000..7d335d8
--- /dev/null
@@ -0,0 +1,32 @@
+require 'integration_helper'
+
+# Performance tests can run in two different ways:
+#
+# 1. Similar to other integration tests using the command:
+#     RAILS_ENV=test bundle exec rake test:benchmark
+#
+# 2. Against a configured workbench url using "RAILS_ENV=performance".
+#     RAILS_ENV=performance bundle exec rake test:benchmark
+
+class WorkbenchPerformanceTest < ActionDispatch::PerformanceTest
+
+  # When running in "RAILS_ENV=performance" mode, uses performance
+  # config params.  In this mode, prepends the configured workbench
+  # URL to the given path and visits that page using the configured
+  # "user_token".
+  def visit_page_with_token path='/'
+    if Rails.env == 'performance'
+      token = Rails.configuration.user_token
+      workbench_url = Rails.configuration.arvados_workbench_url
+      if workbench_url.end_with? '/'
+        workbench_url = workbench_url[0, workbench_url.size-1]
+      end
+    else
+      token = 'active'
+      workbench_url = ''
+    end
+
+    visit page_with_token(token, (workbench_url + path))
+  end
+
+end
index ab2ac395b4c487b85643d6df5989934acd7e630f..2b480f9b3934813400a50850a46dd6367861f751 100644 (file)
@@ -1,4 +1,4 @@
-ENV["RAILS_ENV"] = "test" if (ENV["RAILS_ENV"] != "diagnostics")
+ENV["RAILS_ENV"] = "test" if (ENV["RAILS_ENV"] != "diagnostics" and ENV["RAILS_ENV"] != "performance")
 
 unless ENV["NO_COVERAGE_TEST"]
   begin
@@ -182,6 +182,12 @@ class ApiServerForTests
       end
     end
   end
+
+  def run_rake_task(task_name, arg_string)
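+    # Equivalent to running "bundle exec rake <task_name>[<arg_string>]"
+    # inside the API server directory.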
+    Dir.chdir(ARV_API_SERVER_DIR) do
+      _system('bundle', 'exec', 'rake', "#{task_name}[#{arg_string}]")
+    end
+  end
 end
 
 class ActionController::TestCase
@@ -192,7 +198,7 @@ class ActionController::TestCase
   def check_counter action
     @counter += 1
     if @counter == 2
-      assert_equal 1, 2, "Multiple actions in functional test"
+      assert_equal 1, 2, "Multiple actions in controller test"
     end
   end
 
@@ -204,6 +210,74 @@ class ActionController::TestCase
   end
 end
 
+# Test classes can call reset_api_fixtures(when_to_reset,flag) to
+# override the default. Example:
+#
+# class MySuite < ActionDispatch::IntegrationTest
+#   reset_api_fixtures :after_each_test, false
+#   reset_api_fixtures :after_suite, true
+#   ...
+# end
+#
+# The default behavior is reset_api_fixtures(:after_each_test,true).
+#
+class ActiveSupport::TestCase
+
+  def self.inherited subclass
+    subclass.class_eval do
+      class << self
+        attr_accessor :want_reset_api_fixtures
+      end
+      @want_reset_api_fixtures = {
+        after_each_test: true,
+        after_suite: false,
+        before_suite: false,
+      }
+    end
+    super
+  end
+  # Existing subclasses of ActiveSupport::TestCase (ones that already
+  # existed before we set up the self.inherited hook above) will not
+  # get their own instance variable. They're not real test cases
+  # anyway, so we give them a "don't reset anywhere" stub.
+  def self.want_reset_api_fixtures
+    {}
+  end
+
+  def self.reset_api_fixtures where, t=true
+    if not want_reset_api_fixtures.has_key? where
+      raise ArgumentError, "There is no #{where.inspect} hook"
+    end
+    self.want_reset_api_fixtures[where] = t
+  end
+
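+  # Wrap Minitest's suite runner so the :before_suite and :after_suite
+  # resets fire around the whole suite.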
+  def self.run *args
+    reset_api_fixtures_now if want_reset_api_fixtures[:before_suite]
+    result = super
+    reset_api_fixtures_now if want_reset_api_fixtures[:after_suite]
+    result
+  end
+
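+  # Minitest calls after_teardown once per test; honor the
+  # :after_each_test flag here.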
+  def after_teardown
+    if self.class.want_reset_api_fixtures[:after_each_test]
+      self.class.reset_api_fixtures_now
+    end
+    super
+  end
+
+  protected
+  def self.reset_api_fixtures_now
+    # Never try to reset fixtures when we're just using test
+    # infrastructure to run performance/diagnostics suites.
+    return unless Rails.env == 'test'
+
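+    # POST to the API server's database/reset endpoint as a trusted
+    # admin client.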
+    auth = api_fixture('api_client_authorizations')['admin_trustedclient']
+    Thread.current[:arvados_api_token] = auth['api_token']
+    ArvadosApiClient.new.api(nil, '../../database/reset', {})
+    Thread.current[:arvados_api_token] = nil
+  end
+end
+
 # If it quacks like a duck, it must be a HTTP request object.
 class RequestDuck
   def self.host
@@ -219,6 +293,20 @@ class RequestDuck
   end
 end
 
+# Example:
+#
+# apps/workbench$ RAILS_ENV=test bundle exec irb -Ilib:test
+# > load 'test/test_helper.rb'
+# > singletest 'integration/collection_upload_test.rb', 'Upload two empty files'
+#
+def singletest test_class_file, test_name
+  load File.join('test', test_class_file)
+  Minitest.run ['-v', '-n', "test_#{test_name.gsub ' ', '_'}"]
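+  # Forget the class we just loaded and empty Minitest's runnable list
+  # so a later singletest call doesn't re-run this suite as well.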
+  Object.send(:remove_const,
+              test_class_file.gsub(/.*\/|\.rb$/, '').camelize.to_sym)
+  ::Minitest::Runnable.runnables.reject! { true }
+end
+
 if ENV["RAILS_ENV"].eql? 'test'
   ApiServerForTests.new.run
   ApiServerForTests.new.run ["--websockets"]
diff --git a/apps/workbench/test/unit/api_client_authorization_test.rb b/apps/workbench/test/unit/api_client_authorization_test.rb
deleted file mode 100644 (file)
index b5b07d1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class ApiClientAuthorizationTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/authorized_key_test.rb b/apps/workbench/test/unit/authorized_key_test.rb
deleted file mode 100644 (file)
index b8d9b67..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class AuthorizedKeyTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/human_test.rb b/apps/workbench/test/unit/human_test.rb
deleted file mode 100644 (file)
index 2863cbf..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class HumanTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/job_task_test.rb b/apps/workbench/test/unit/job_task_test.rb
deleted file mode 100644 (file)
index 3c7c1e7..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class JobTaskTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/keep_disk_test.rb b/apps/workbench/test/unit/keep_disk_test.rb
deleted file mode 100644 (file)
index 42ab63b..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-require 'test_helper'
-
-class KeepDiskTest < ActiveSupport::TestCase
-end
diff --git a/apps/workbench/test/unit/log_test.rb b/apps/workbench/test/unit/log_test.rb
deleted file mode 100644 (file)
index f2afee2..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class LogTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/node_test.rb b/apps/workbench/test/unit/node_test.rb
deleted file mode 100644 (file)
index ccc3765..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class NodeTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/pipeline_template_test.rb b/apps/workbench/test/unit/pipeline_template_test.rb
deleted file mode 100644 (file)
index a28a2c9..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class PipelineTemplateTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/repository_test.rb b/apps/workbench/test/unit/repository_test.rb
deleted file mode 100644 (file)
index 327170c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class RepositoryTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/specimen_test.rb b/apps/workbench/test/unit/specimen_test.rb
deleted file mode 100644 (file)
index a9abc8c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class SpecimenTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/trait_test.rb b/apps/workbench/test/unit/trait_test.rb
deleted file mode 100644 (file)
index 45df2ed..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class TraitTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/user_agreement_test.rb b/apps/workbench/test/unit/user_agreement_test.rb
deleted file mode 100644 (file)
index 7c0ac65..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class UserAgreementTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/test/unit/virtual_machine_test.rb b/apps/workbench/test/unit/virtual_machine_test.rb
deleted file mode 100644 (file)
index 69258b5..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-require 'test_helper'
-
-class VirtualMachineTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
-end
diff --git a/apps/workbench/vendor/assets/javascripts/jquery.number.min.js b/apps/workbench/vendor/assets/javascripts/jquery.number.min.js
new file mode 100644 (file)
index 0000000..4fce02b
--- /dev/null
@@ -0,0 +1,2 @@
+/*! jQuery number 2.1.5 (c) github.com/teamdf/jquery-number | opensource.teamdf.com/license */
+(function(e){"use strict";function t(e,t){if(this.createTextRange){var n=this.createTextRange();n.collapse(true);n.moveStart("character",e);n.moveEnd("character",t-e);n.select()}else if(this.setSelectionRange){this.focus();this.setSelectionRange(e,t)}}function n(e){var t=this.value.length;e=e.toLowerCase()=="start"?"Start":"End";if(document.selection){var n=document.selection.createRange(),r,i,s;r=n.duplicate();r.expand("textedit");r.setEndPoint("EndToEnd",n);i=r.text.length-n.text.length;s=i+n.text.length;return e=="Start"?i:s}else if(typeof this["selection"+e]!="undefined"){t=this["selection"+e]}return t}var r={codes:{46:127,188:44,109:45,190:46,191:47,192:96,220:92,222:39,221:93,219:91,173:45,187:61,186:59,189:45,110:46},shifts:{96:"~",49:"!",50:"@",51:"#",52:"$",53:"%",54:"^",55:"&",56:"*",57:"(",48:")",45:"_",61:"+",91:"{",93:"}",92:"|",59:":",39:'"',44:"<",46:">",47:"?"}};e.fn.number=function(i,s,o,u){u=typeof u==="undefined"?",":u;o=typeof o==="undefined"?".":o;s=typeof s==="undefined"?0:s;var a="\\u"+("0000"+o.charCodeAt(0).toString(16)).slice(-4),f=new RegExp("[^"+a+"0-9]","g"),l=new RegExp(a,"g");if(i===true){if(this.is("input:text")){return this.on({"keydown.format":function(i){var a=e(this),f=a.data("numFormat"),l=i.keyCode?i.keyCode:i.which,c="",h=n.apply(this,["start"]),p=n.apply(this,["end"]),d="",v=false;if(r.codes.hasOwnProperty(l)){l=r.codes[l]}if(!i.shiftKey&&l>=65&&l<=90){l+=32}else if(!i.shiftKey&&l>=69&&l<=105){l-=48}else if(i.shiftKey&&r.shifts.hasOwnProperty(l)){c=r.shifts[l]}if(c=="")c=String.fromCharCode(l);if(l!=8&&l!=45&&l!=127&&c!=o&&!c.match(/[0-9]/)){var m=i.keyCode?i.keyCode:i.which;if(m==46||m==8||m==127||m==9||m==27||m==13||(m==65||m==82||m==80||m==83||m==70||m==72||m==66||m==74||m==84||m==90||m==61||m==173||m==48)&&(i.ctrlKey||i.metaKey)===true||(m==86||m==67||m==88)&&(i.ctrlKey||i.metaKey)===true||m>=35&&m<=39||m>=112&&m<=123){return}i.preventDefault();return false}if(h==0&&p==this.value.length||a.val()==0){if(l==8){h=p=1;this.value="";f.init=s>0?-1:0;f.c=s>0?-(s+1):0;t.apply(this,[0,0])}else if(c==o){h=p=1;this.value="0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0}else if(l==45){h=p=2;this.value="-0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0;t.apply(this,[2,2])}else{f.init=s>0?-1:0;f.c=s>0?-s:0}}else{f.c=p-this.value.length}f.isPartialSelection=h==p?false:true;if(s>0&&c==o&&h==this.value.length-s-1){f.c++;f.init=Math.max(0,f.init);i.preventDefault();v=this.value.length+f.c}else if(l==45&&(h!=0||this.value.indexOf("-")==0)){i.preventDefault()}else if(c==o){f.init=Math.max(0,f.init);i.preventDefault()}else if(s>0&&l==127&&h==this.value.length-s-1){i.preventDefault()}else if(s>0&&l==8&&h==this.value.length-s){i.preventDefault();f.c--;v=this.value.length+f.c}else if(s>0&&l==127&&h>this.value.length-s-1){if(this.value==="")return;if(this.value.slice(h,h+1)!="0"){d=this.value.slice(0,h)+"0"+this.value.slice(h+1);a.val(d)}i.preventDefault();v=this.value.length+f.c}else if(s>0&&l==8&&h>this.value.length-s){if(this.value==="")return;if(this.value.slice(h-1,h)!="0"){d=this.value.slice(0,h-1)+"0"+this.value.slice(h);a.val(d)}i.preventDefault();f.c--;v=this.value.length+f.c}else if(l==127&&this.value.slice(h,h+1)==u){i.preventDefault()}else if(l==8&&this.value.slice(h-1,h)==u){i.preventDefault();f.c--;v=this.value.length+f.c}else 
if(s>0&&h==p&&this.value.length>s+1&&h>this.value.length-s-1&&isFinite(+c)&&!i.metaKey&&!i.ctrlKey&&!i.altKey&&c.length===1){if(p===this.value.length){d=this.value.slice(0,h-1)}else{d=this.value.slice(0,h)+this.value.slice(h+1)}this.value=d;v=h}if(v!==false){t.apply(this,[v,v])}a.data("numFormat",f)},"keyup.format":function(r){var i=e(this),o=i.data("numFormat"),u=r.keyCode?r.keyCode:r.which,a=n.apply(this,["start"]),f=n.apply(this,["end"]),l;if(a===0&&f===0&&(u===189||u===109)){i.val("-"+i.val());a=1;o.c=1-this.value.length;o.init=1;i.data("numFormat",o);l=this.value.length+o.c;t.apply(this,[l,l])}if(this.value===""||(u<48||u>57)&&(u<96||u>105)&&u!==8&&u!==46&&u!==110)return;i.val(i.val());if(s>0){if(o.init<1){a=this.value.length-s-(o.init<0?1:0);o.c=a-this.value.length;o.init=1;i.data("numFormat",o)}else if(a>this.value.length-s&&u!=8){o.c++;i.data("numFormat",o)}}if(u==46&&!o.isPartialSelection){o.c++;i.data("numFormat",o)}l=this.value.length+o.c;t.apply(this,[l,l])},"paste.format":function(t){var n=e(this),r=t.originalEvent,i=null;if(window.clipboardData&&window.clipboardData.getData){i=window.clipboardData.getData("Text")}else if(r.clipboardData&&r.clipboardData.getData){i=r.clipboardData.getData("text/plain")}n.val(i);t.preventDefault();return false}}).each(function(){var t=e(this).data("numFormat",{c:-(s+1),decimals:s,thousands_sep:u,dec_point:o,regex_dec_num:f,regex_dec:l,init:this.value.indexOf(".")?true:false});if(this.value==="")return;t.val(t.val())})}else{return this.each(function(){var t=e(this),n=+t.text().replace(f,"").replace(l,".");t.number(!isFinite(n)?0:+n,s,o,u)})}}return this.text(e.number.apply(window,arguments))};var i=null,s=null;if(e.isPlainObject(e.valHooks.text)){if(e.isFunction(e.valHooks.text.get))i=e.valHooks.text.get;if(e.isFunction(e.valHooks.text.set))s=e.valHooks.text.set}else{e.valHooks.text={}}e.valHooks.text.get=function(t){var n=e(t),r,s,o=n.data("numFormat");if(!o){if(e.isFunction(i)){return i(t)}else{return undefined}}else{if(t.value==="")return"";r=+t.value.replace(o.regex_dec_num,"").replace(o.regex_dec,".");return(t.value.indexOf("-")===0?"-":"")+(isFinite(r)?r:0)}};e.valHooks.text.set=function(t,n){var r=e(t),i=r.data("numFormat");if(!i){if(e.isFunction(s)){return s(t,n)}else{return undefined}}else{var o=e.number(n,i.decimals,i.dec_point,i.thousands_sep);return t.value=o}};e.number=function(e,t,n,r){r=typeof r==="undefined"?",":r;n=typeof n==="undefined"?".":n;t=!isFinite(+t)?0:Math.abs(t);var i="\\u"+("0000"+n.charCodeAt(0).toString(16)).slice(-4);var s="\\u"+("0000"+r.charCodeAt(0).toString(16)).slice(-4);e=(e+"").replace(".",n).replace(new RegExp(s,"g"),"").replace(new RegExp(i,"g"),".").replace(new RegExp("[^0-9+-Ee.]","g"),"");var o=!isFinite(+e)?0:+e,u="",a=function(e,t){var n=Math.pow(10,t);return""+Math.round(e*n)/n};u=(t?a(o,t):""+Math.round(o)).split(".");if(u[0].length>3){u[0]=u[0].replace(/\B(?=(?:\d{3})+(?!\d))/g,r)}if((u[1]||"").length<t){u[1]=u[1]||"";u[1]+=(new Array(t-u[1].length+1)).join("0")}return u.join(n)}})(jQuery)
index 13ae918895d6192553107ae00273e247c6eb4c34..c07debd787eecfc6696c5d614d69c013f8028dd7 100755 (executable)
@@ -434,8 +434,8 @@ if "task.vwd" in taskp:
 else:
     outcollection = robust_put.upload(outdir, logger)
 
-# Success if no non-zero return codes
-success = any(rcode) and not any([status != 0 for status in rcode.values()])
+# Success if we ran any subprocess, and they all exited 0.
+success = rcode and all(status == 0 for status in rcode.itervalues())
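+# Illustrative evaluations of the expression above (hypothetical rcode
+# contents, shown for documentation only; the real keys come from the
+# subprocesses that actually ran):
+#   rcode = {}           -> success is {} (falsy): nothing ran, task fails.
+#   rcode = {'cmd': 0}   -> success is True: every recorded status is zero.
+#   rcode = {'cmd': 1}   -> success is False: a subprocess exited nonzero.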
 
 api.job_tasks().update(uuid=arvados.current_task()['uuid'],
                                      body={
@@ -444,4 +444,4 @@ api.job_tasks().update(uuid=arvados.current_task()['uuid'],
                                          'progress':1.0
                                      }).execute()
 
-sys.exit(rcode)
+sys.exit(0 if success else 1)
index 6d2c895a483486277554f662b12fe463bc8c663a..af5160f52602af292cf6470987200510a91dc03b 100644 (file)
@@ -33,7 +33,6 @@ navbar:
     - Run a pipeline on the command line:
       - user/topics/running-pipeline-command-line.html.textile.liquid
       - user/topics/arv-run.html.textile.liquid
-      - user/reference/sdk-cli.html.textile.liquid
     - Develop a new pipeline:
       - user/tutorials/intro-crunch.html.textile.liquid
       - user/tutorials/running-external-program.html.textile.liquid
@@ -70,6 +69,7 @@ navbar:
     - CLI:
       - sdk/cli/index.html.textile.liquid
       - sdk/cli/install.html.textile.liquid
+      - sdk/cli/reference.html.textile.liquid
       - sdk/cli/subcommands.html.textile.liquid
   api:
     - Concepts:
index 035c4818c9cf4e6eb7130500e9beec507a7da423..831e1b8fddf66783bb43ccf38eb764b26df432c6 100644 (file)
@@ -14,22 +14,17 @@ collection = arvados.CollectionReader(this_task_input)
 # Create an object to write a new collection as output
 out = arvados.CollectionWriter()
 
-# Set the name of output file within the collection
-out.set_current_file_name("0-filter.txt")
-
-# Get an iterator over the files listed in the collection
-all_files = collection.all_files()
-
-# Iterate over each file
-for input_file in all_files:
-    for ln in input_file.readlines():
-        if ln[0] == '0':
-            out.write(ln)
-
-# Commit the output to keep.  This returns a Keep id.
-output_id = out.finish()
-
-# Set the output for this task to the Keep id
-this_task.set_output(output_id)
+# Create a new file in the output collection
+with out.open('0-filter.txt') as out_file:
+    # Iterate over every input file in the input collection
+    for input_file in collection.all_files():
+        # Output every line in the file that starts with '0'
+        out_file.writelines(line for line in input_file if line.startswith('0'))
+
+# Commit the output to Keep.
+output_locator = out.finish()
+
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
 
 # Done!
index a914e0482ae902644c2af11c74a2209e6f752084..691ed567c3e43a6b3c6c839e76d7049f50a76811 100644 (file)
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import hashlib
+import os
 import arvados
 
 # Jobs consist of one or more tasks.  A task is a single invocation of
@@ -11,7 +12,7 @@ this_task = arvados.current_task()
 
 # Tasks have a sequence number for ordering.  All tasks
 # with the current sequence number must finish successfully
-# before tasks in the next sequence are started. 
+# before tasks in the next sequence are started.
 # The first task has sequence number 0
 if this_task['sequence'] == 0:
     # Get the "input" field from "script_parameters" on the task object
@@ -21,7 +22,7 @@ if this_task['sequence'] == 0:
     cr = arvados.CollectionReader(job_input)
 
     # Loop over each stream in the collection (a stream is a subset of
-    # files that logically represents a directory
+    # files that logically represents a directory)
     for s in cr.all_streams():
 
         # Loop over each file in the stream
@@ -62,29 +63,21 @@ else:
 
     collection = arvados.CollectionReader(this_task_input)
 
-    out = arvados.CollectionWriter()
-    out.set_current_file_name("md5sum.txt")
-
     # There should only be one file in the collection, so get the
-    # first one.  collection.all_files() returns an iterator so we
-    # need to make it into a list for indexed access.
-    input_file = list(collection.all_files())[0]
+    # first one from the all files iterator.
+    input_file = next(collection.all_files())
+    output_path = os.path.normpath(os.path.join(input_file.stream_name(),
+                                                input_file.name))
 
     # Everything after this is the same as the first tutorial.
     digestor = hashlib.new('md5')
-
-    while True:
-        buf = input_file.read(2**20)
-        if len(buf) == 0:
-            break
+    for buf in input_file.readall():
         digestor.update(buf)
 
-    hexdigest = digestor.hexdigest()
-    file_name = input_file.name()
-    if input_file.stream_name() != '.':
-        file_name = os.join(input_file.stream_name(), file_name)
-    out.write("%s %s\n" % (hexdigest, file_name))
-    output_id = out.finish()
-    this_task.set_output(output_id)
+    out = arvados.CollectionWriter()
+    with out.open('md5sum.txt') as out_file:
+        out_file.write("{} {}\n".format(digestor.hexdigest(), output_path))
+
+    this_task.set_output(out.finish())
 
 # Done!
index 16516a8852556fa374f8b7c05bc5b9cb6dff80c0..46152f17db17ac22dd12964bebcc63e035af19f9 100644 (file)
@@ -3,7 +3,8 @@
 import arvados
 
 # Automatically parallelize this job by running one task per file.
-arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=True)
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
 
 # Get the input file for the task
 input_file = arvados.get_task_param_mount('input')
@@ -13,6 +14,6 @@ stdoutdata, stderrdata = arvados.util.run_command(['md5sum', input_file])
 
 # Save the standard output (stdoutdata) to "md5sum.txt" in the output collection
 out = arvados.CollectionWriter()
-out.set_current_file_name("md5sum.txt")
-out.write(stdoutdata)
+with out.open('md5sum.txt') as out_file:
+    out_file.write(stdoutdata)
 arvados.current_task().set_output(out.finish())
index 6a8b5b51b70eb22ef72fa4967e4129134f461fbc..a371d2489a6165e17ffb01d7248559944ba9afd3 100644 (file)
@@ -1 +1,3 @@
-*This tutorial assumes either that you are logged into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/index.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html*
+{% include 'notebox_begin' %}
+This tutorial assumes either that you are logged into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+{% include 'notebox_end' %}
index b9c7f31532088892958a886ef77f7c89b239192c..ede28091de45f1b14556b721722153c3b1f3d70f 100644 (file)
@@ -1,45 +1,45 @@
 #!/usr/bin/env python
 
 import hashlib      # Import the hashlib module to compute MD5.
+import os           # Import the os module for basic path manipulation
 import arvados      # Import the Arvados sdk module
 
 # Automatically parallelize this job by running one task per file.
 # This means that if the input consists of many files, each file will
-# be processed in parallel on different nodes enabling the job to 
+# be processed in parallel on different nodes enabling the job to
 # be completed quicker.
-arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True, 
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
                                           input_as_path=True)
 
+# Get object representing the current task
+this_task = arvados.current_task()
+
 # Create the message digest object that will compute the MD5 hash
 digestor = hashlib.new('md5')
 
 # Get the input file for the task
-input_file = arvados.get_task_param_mount('input')
+input_id, input_path = this_task['parameters']['input'].split('/', 1)
 
-# Open the input file for reading
-with open(input_file) as f:
-    while True:
-        buf = f.read(2**20)      # read a 1 megabyte block from the file
-        if len(buf) == 0:        # break when there is no more data left
-            break
-        digestor.update(buf)     # update the MD5 hash object
+# Open the input collection
+input_collection = arvados.CollectionReader(input_id)
 
-# Get object representing the current task
-this_task = arvados.current_task()
+# Open the input file for reading
+with input_collection.open(input_path) as input_file:
+    for buf in input_file.readall():  # Iterate the file's data blocks
+        digestor.update(buf)          # Update the MD5 hash object
 
- # Write a new collection as output
+# Write a new collection as output
 out = arvados.CollectionWriter()
 
- # Set output file within the collection
-out.set_current_file_name("md5sum.txt")
-
-# Write an output line with the MD5 value and input
-out.write("%s %s\n" % (digestor.hexdigest(), this_task['parameters']['input']))
+# Write an output file with one line: the MD5 value and input path
+with out.open('md5sum.txt') as out_file:
+    out_file.write("{} {}/{}\n".format(digestor.hexdigest(), input_id,
+                                       os.path.normpath(input_path)))
 
- # Commit the output to keep.  This returns a Keep id.
-output_id = out.finish()
+# Commit the output to Keep.
+output_locator = out.finish()
 
-# Set the output for this task to the Keep id
-this_task.set_output(output_id) 
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
 
 # Done!
index e2363ac818dccd3b781ca64f6d1e2adee7156ae4..80f5de6f06ff9eae7b31947bdfc3147605027cae 100644 (file)
@@ -53,7 +53,7 @@ h3. Runtime constraints
 
 table(table table-bordered table-condensed).
 |_. Key|_. Type|_. Description|_. Implemented|
-|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository.  See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit.||
+|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository.  See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit.  If you use this, you must also specify a @docker_image@ constraint (see below).  In order to install the Python SDK successfully, Crunch must be able to find and run virtualenv inside the container.|&#10003;|
 |docker_image|string|The Docker image that this Job needs to run.  If specified, Crunch will create a Docker container from this image, and run the Job's script inside that.  The Keep mount and work directories will be available as volumes inside this container.  The image must be uploaded to Arvados using @arv keep docker@.  You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id.  Alternatively, you may specify the UUID or portable data hash of the image Collection, returned by @arv keep docker@.|&#10003;|
 |min_nodes|integer||&#10003;|
 |max_nodes|integer|||
index 14cc1f48e5785640efbc182459c7ebc47bef140d..16b635c1fa623ba7237f0975d887ad3d513fe8f9 100644 (file)
@@ -4473,7 +4473,7 @@ a.thumbnail.active {
 }
 .alert-info {
   color: #31708f;
-  background-color: #d9edf7;
+  background-color: #edf6fa;
   border-color: #bce8f1;
 }
 .alert-info hr {
@@ -4481,6 +4481,10 @@ a.thumbnail.active {
 }
 .alert-info .alert-link {
   color: #245269;
+  font-weight: bold;
+}
+.alert-info a {
+  font-weight: bold;
 }
 .alert-warning {
   color: #8a6d3b;
index 92b0aded5cd0f342489ba49aea4f1a943de14460..4e105e82ec392d2a809381b736ee47228c082430 100644 (file)
@@ -1,29 +1,11 @@
 ---
 layout: default
 navsection: installguide
-title: Create standard objects
+title: Add an Arvados repository
 
 ...
 
-
-Next, we're going to use the Arvados CLI tools on the <strong>shell server</strong> to create some standard objects.
-
-h3. "All users" group
-
-The convention is to add every active user to this group. We give it a distinctive UUID that looks like an IP broadcast address.
-
-<notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
-~$ <span class="userinput">read -rd $'\000' newgroup &lt;&lt;EOF; arv group create --group "$newgroup"</span>
-<span class="userinput">{
- "uuid":"$prefix-j7d0g-fffffffffffffff",
- "name":"All users"
-}</span>
-EOF
-</code></pre></notextile>
-
-h3. "arvados" repository
+Next, we're going to use the Arvados CLI tools on the <strong>shell server</strong> to create a few Arvados objects. These objects set up a hosted clone of the arvados repository on this cluster.
 
 This will be readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
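+
+For example, once the repository object exists, a job can reference the bundled scripts directly (a sketch; the script name and input UUID below are illustrative):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job create --job '{"script":"hash.py","repository":"arvados","script_version":"master","script_parameters":{"input":"qr1hi-4zz18-xxxxxxxxxxxxxxx"}}'</span>
+</code></pre></notextile>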
 
index 646b6433a986d0c1049432a1c7c101bc4d2faad1..43c1c6726596704707a165f27a5e317fd2987dbc 100644 (file)
@@ -23,11 +23,10 @@ h2. Install Keepproxy
 First add the Arvados apt repository, and then install the Keepproxy package.
 
 <notextile>
-<pre><code>~$ <span class="userinput">echo "# apt.arvados.org" > /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" >> /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">/usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">/usr/bin/apt-get update</span>
-~$ <span class="userinput">/usr/bin/apt-get install keepproxy</span>
+<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
+~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get install keepproxy</span>
 </code></pre>
 </notextile>
 
index 0c684eafd74389325b4c7ecd7de69e65119f9f86..7fb810d841913c34603126229baae98af7cb0ddf 100644 (file)
@@ -22,11 +22,10 @@ h2. Install Keepstore
 First add the Arvados apt repository, and then install the Keepstore package.
 
 <notextile>
-<pre><code>~$ <span class="userinput">echo "# apt.arvados.org" > /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" >> /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">/usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">/usr/bin/apt-get update</span>
-~$ <span class="userinput">/usr/bin/apt-get install keepstore</span>
+<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
+~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get install keepstore</span>
 </code></pre>
 </notextile>
 
index 537f1a4443d626c997fc1fcc529624fc427563f4..25ddf7b05761b56dbcba12a906b68fef4da2f05d 100644 (file)
@@ -14,4 +14,4 @@ Please follow the "API token guide":{{site.baseurl}}/user/reference/api-tokens.h
 
 h2. Install the SDKs
 
-Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html and the "Command line SDK":{{site.baseurl}}/sdk/cli/index.html
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html and the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html
index 08880b1b3c6e0829833e94de37f4fe39289cc8cd..3d44250541a406b3520ffb8b8c3fb54479f48426 100644 (file)
@@ -6,11 +6,11 @@ title: "Overview"
 
 ...
 
-The @arv@ CLI tool provides a generic set of wrappers so you can make API calls easily. Additionally, it provides access to a number of subcommands.
+The @arv@ CLI tool provides a set of wrappers to make API calls. Additionally, it provides access to a number of subcommands.
 
-h3. Wrapper for API calls
+h3. Wrappers for API calls
 
-See the "command line interface":{{site.baseurl}}/user/reference/sdk-cli.html page in the user guide.
+See the "arv reference":{{site.baseurl}}/sdk/cli/reference.html page.
 
 h3. Subcommands
 
index 18e1f705e0ecb76b0b61f851d16b20d964f41456..df5507702443103be54c9db56122404c9de9b05b 100644 (file)
@@ -6,9 +6,7 @@ title: "Installation"
 
 ...
 
-If you are logged in to an Arvados VM, the @arv@ tool should be installed.
-
-To use @arv@ elsewhere, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
+To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
 
 h4. Prerequisites: Ruby &gt;= 2.1.0 and curl libraries
 
diff --git a/doc/sdk/cli/reference.html.textile.liquid b/doc/sdk/cli/reference.html.textile.liquid
new file mode 100644 (file)
index 0000000..bc5cf1e
--- /dev/null
@@ -0,0 +1,80 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "arv reference"
+...
+
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
+
+h3. Usage
+
+@arv [global_options] resource_type resource_method [method_parameters]@
+
+h4. Global options
+
+- @--format=json@ := Output response as JSON. This is the default format.
+
+- @--format=yaml@ := Output response as YAML.
+
+- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
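+
+For example, @--format=uuid@ makes it easy to feed object UUIDs to other commands (a sketch; the UUIDs shown are placeholders):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv --format=uuid collection list --limit 3</span>
+qr1hi-4zz18-xxxxxxxxxxxxxxx
+qr1hi-4zz18-yyyyyyyyyyyyyyy
+qr1hi-4zz18-zzzzzzzzzzzzzzz
+</code></pre></notextile>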
+
+
+h3. Resource types and methods
+
+Get list of resource types
+@arv --resources@
+
+Get list of resource methods for the "user" resource type
+@arv user --help@
+
+
+h3. Basic examples
+
+Get record for current user
+@arv user current@
+
+Get entire record for some specific user
+@arv user get --uuid 6dnxa-tpzed-iimd25zhzh84gbk@
+
+Update user record
+@arv user update --uuid 6dnxa-tpzed-iimd25zhzh84gbk --first-name "Bob"@
+
+Get list of groups
+@arv group list@
+
+Delete a group
+@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
+
+
+h3. Common commands
+
+Most @arv@ resources accept the following commands:
+
+* @get@
+* @list@
+* @create@
+* @update@
+* @delete@
+
+
+h4. @list@
+
+Arguments accepted by the @list@ subcommand include:
+
+<pre>
+    --limit, -l <i>:     Maximum number of resources to return.
+   --offset, -o <i>:     Number of resources to skip before first returned record.
+  --filters, -f <s>:     Conditions for filtering resources.
+    --order, -r <s>:     Order in which to return matching resources.
+   --select, -s <s>:     Select which fields to return.
+     --distinct, -d:     Return each distinct object.
+</pre>
+
+The @--filters@ option takes a string containing a JSON list of filters to apply to the returned resources. Each filter is a three-element list of _[field, operator, value]_, where the _operator_ may be one of @=@, @<@, @<=@, @>@, @>=@, @!=@, @like@, or @ilike@.
+
+Example:
+
+@arv collection list --filters '[["name", "=", "PGP VAR inputs"], ["created_at", ">=", "2014-10-01"]]'@
+
+will return a list of all collections visible to the current user which are named "PGP VAR inputs" and were created on or after October 1, 2014.
index 11b79e3774ae3872a37e14a90e98ec8e5f5c65fc..5d82f7ac01dcc859cf4a0edc7d4f061a554c90d4 100644 (file)
@@ -6,7 +6,7 @@ title: "arv subcommands"
 
 ...
 
-The @arv@ CLI tool provides access to a number of subcommands which are described on this page.
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
 
 h3(#arv-create). arv create
 
@@ -161,6 +161,154 @@ Available methods: ls, get, put, less, check, docker
 </pre>
 </notextile>
 
+h3(#arv-keep-ls). arv keep ls
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep ls --help</code>
+usage: arv-ls [-h] [--retries RETRIES] [-s] locator
+
+List contents of a manifest
+
+positional arguments:
+  locator            Collection UUID or locator
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  -s                 List file sizes, in KiB.
+</pre>
+</notextile>
+
+h3(#arv-keep-get). arv keep get
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep get --help</code>
+usage: arv-get [-h] [--retries RETRIES]
+               [--progress | --no-progress | --batch-progress]
+               [--hash HASH | --md5sum] [-n] [-r] [-f | --skip-existing]
+               locator [destination]
+
+Copy data from Keep to a local file or pipe.
+
+positional arguments:
+  locator            Collection locator, optionally with a file path or
+                     prefix.
+  destination        Local file or directory where the data is to be written.
+                     Default: /dev/stdout.
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  --progress         Display human-readable progress on stderr (bytes and, if
+                     possible, percentage of total data size). This is the
+                     default behavior when it is not expected to interfere
+                     with the output: specifically, stderr is a tty _and_
+                     either stdout is not a tty, or output is being written to
+                     named files rather than stdout.
+  --no-progress      Do not display human-readable progress on stderr.
+  --batch-progress   Display machine-readable progress on stderr (bytes and,
+                     if known, total data size).
+  --hash HASH        Display the hash of each file as it is read from Keep,
+                     using the given hash algorithm. Supported algorithms
+                     include md5, sha1, sha224, sha256, sha384, and sha512.
+  --md5sum           Display the MD5 hash of each file as it is read from
+                     Keep.
+  -n                 Do not write any data -- just read from Keep, and report
+                     md5sums if requested.
+  -r                 Retrieve all files in the specified collection/prefix.
+                     This is the default behavior if the "locator" argument
+                     ends with a forward slash.
+  -f                 Overwrite existing files while writing. The default
+                     behavior is to refuse to write *anything* if any of the
+                     output files already exist. As a special case, -f is not
+                     needed to write to /dev/stdout.
+  --skip-existing    Skip files that already exist. The default behavior is to
+                     refuse to write *anything* if any files exist that would
+                     have to be overwritten. This option causes even devices,
+                     sockets, and fifos to be skipped.
+</pre>
+</notextile>
+
+h3(#arv-keep-put). arv keep put
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep put --help</code>
+usage: arv-put [-h] [--max-manifest-depth N | --normalize]
+               [--as-stream | --stream | --as-manifest | --in-manifest | --manifest | --as-raw | --raw]
+               [--use-filename FILENAME] [--filename FILENAME]
+               [--portable-data-hash] [--project-uuid UUID] [--name NAME]
+               [--progress | --no-progress | --batch-progress]
+               [--resume | --no-resume] [--retries RETRIES]
+               [path [path ...]]
+
+Copy data from the local filesystem to Keep.
+
+positional arguments:
+  path                  Local file or directory. Default: read from standard
+                        input.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --max-manifest-depth N
+                        Maximum depth of directory tree to represent in the
+                        manifest structure. A directory structure deeper than
+                        this will be represented as a single stream in the
+                        manifest. If N=0, the manifest will contain a single
+                        stream. Default: -1 (unlimited), i.e., exactly one
+                        manifest stream per filesystem directory that contains
+                        files.
+  --normalize           Normalize the manifest by re-ordering files and
+                        streams after writing data.
+  --as-stream           Synonym for --stream.
+  --stream              Store the file content and display the resulting
+                        manifest on stdout. Do not write the manifest to Keep
+                        or save a Collection object in Arvados.
+  --as-manifest         Synonym for --manifest.
+  --in-manifest         Synonym for --manifest.
+  --manifest            Store the file data and resulting manifest in Keep,
+                        save a Collection object in Arvados, and display the
+                        manifest locator (Collection uuid) on stdout. This is
+                        the default behavior.
+  --as-raw              Synonym for --raw.
+  --raw                 Store the file content and display the data block
+                        locators on stdout, separated by commas, with a
+                        trailing newline. Do not store a manifest.
+  --use-filename FILENAME
+                        Synonym for --filename.
+  --filename FILENAME   Use the given filename in the manifest, instead of the
+                        name of the local file. This is useful when "-" or
+                        "/dev/stdin" is given as an input file. It can be used
+                        only if there is exactly one path given and it is not
+                        a directory. Implies --manifest.
+  --portable-data-hash  Print the portable data hash instead of the Arvados
+                        UUID for the collection created by the upload.
+  --project-uuid UUID   Store the collection in the specified project, instead
+                        of your Home project.
+  --name NAME           Save the collection with the specified name.
+  --progress            Display human-readable progress on stderr (bytes and,
+                        if possible, percentage of total data size). This is
+                        the default behavior when stderr is a tty.
+  --no-progress         Do not display human-readable progress on stderr, even
+                        if stderr is a tty.
+  --batch-progress      Display machine-readable progress on stderr (bytes
+                        and, if known, total data size).
+  --resume              Continue interrupted uploads from cached state
+                        (default).
+  --no-resume           Do not continue interrupted uploads from cached state.
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+</pre>
+</notextile>
+
+
 h3(#arv-pipeline-run). arv pipeline run
 
 @arv pipeline run@ can be used to start a pipeline run from the command line.
@@ -209,7 +357,7 @@ Options:
 
 h3(#arv-run). arv run
 
-The @arv-run@ command creates Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvado compute nodes.
+The @arv-run@ command creates Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
 
 The User Guide has a page on "using arv-run":{{site.baseurl}}/user/topics/arv-run.html.
 
index 89b77c9b656ef54e3a5a2857bb51828ac1793425..ead804e8d0eee5216edf951584cb4d45dee2943d 100644 (file)
@@ -37,7 +37,26 @@ $ <code class="userinput">sudo pip install --pre arvados-python-client</code>
 </pre>
 </notextile>
 
-h4. Option 2: build and install from source
+h4. Option 2: install from distribution packages (Debian/Ubuntu only)
+
+First add @http://apt.arvados.org@ to your list of apt repositories:
+
+<notextile>
+<pre>
+$ <code class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</code>
+</pre>
+</notextile>
+
+Then install the package:
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo apt-get update</code>
+$ <code class="userinput">sudo apt-get install python-arvados-python-client</code>
+</pre>
+</notextile>
+
+h4. Option 3: build and install from source
 
 <notextile>
 <pre>
index 82b3fa24c18895eb1279265a6150509e9acae19b..46156b7f85e2b5e102b42cb5d4ef158aab18c79d 100644 (file)
@@ -4,7 +4,7 @@ navsection: userguide
 title: "Checking your environment"
 ...
 
-First, log into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/index.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
+First, log into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
 
 Check that you are able to access the Arvados API server using @arv user current@.  If it is able to access the API server, it will print out information about your account:
 
diff --git a/doc/user/reference/sdk-cli.html.textile.liquid b/doc/user/reference/sdk-cli.html.textile.liquid
deleted file mode 100644 (file)
index e696250..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: default
-navsection: userguide
-title: "Command line reference"
-...
-
-*First, you should be logged into an Arvados VM instance ("Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login), and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html*
-
-h3. Usage
-
-@arv [global_options] resource_type resource_method [method_parameters]@
-
-h4. Global options
-
-- @--format=json@ := Output response as JSON. This is the default format.
-
-- @--format=yaml@ := Output response as YAML
-
-- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
-
-
-h3. Resource types and methods
-
-Get list of resource types
-@arv --resources@
-
-Get list of resource methods for the "user" resource type
-@arv user --help@
-
-
-h3. Basic examples
-
-Get record for current user
-@arv user current@
-
-Get entire record for some specific user
-@arv user get --uuid 6dnxa-tpzed-iimd25zhzh84gbk@
-
-Update user record
-@arv user update --uuid 6dnxa-tpzed-iimd25zhzh84gbk --first-name "Bob"@
-
-Get list of groups
-@arv group list@
-
-Delete a group
-@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
index 9854dc22e39da548031ab5b25e999bd84cb3e28e..300ff2ff0431185cba11fa0d696c21f3dbe4c4db 100644 (file)
@@ -4,7 +4,7 @@ navsection: userguide
 title: "Using arv-run"
 ...
 
-The @arv-run@ command enables you create Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvado compute nodes.
+The @arv-run@ command enables you to create Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
 
 {% include 'tutorial_expectations' %}
 
@@ -148,7 +148,7 @@ h2. Additional options
 
 * @--docker-image IMG@ : By default, commands run inside a Docker container created from the latest "arvados/jobs" Docker image.  Use this option to specify a different image to use.  Note: the Docker image must be uploaded to Arvados using @arv keep docker@.
 * @--dry-run@ : Print out the final Arvados pipeline generated by @arv-run@ without submitting it.
-* @--local@ : By default, the pipeline will be submitted to your configured Arvado instance.  Use this option to run the command locally using @arv-run-pipeline-instance --run-jobs-here@.
+* @--local@ : By default, the pipeline will be submitted to your configured Arvados instance.  Use this option to run the command locally using @arv-run-pipeline-instance --run-jobs-here@.
 * @--ignore-rcode@ : Some commands use non-zero exit codes to indicate nonfatal conditions (e.g. @grep@ returns 1 when no match is found).  Set this to indicate that commands that return non-zero return codes should not be considered failed.
 * @--no-wait@ : Do not wait and display logs after submitting command, just exit.
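+
+For example, a hypothetical search across FASTA files can combine @--ignore-rcode@ with @grep@, so that files containing no matches do not mark their tasks as failed (a sketch; the pattern and file names are illustrative):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-run --ignore-rcode grep -H -n GATTACA \< *.fa</span>
+</code></pre></notextile>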
 
index b8b90caaf069686c31aeb5cfcd5fddca08c11fa1..6fe88fe156c5a78577b9a271730e7da938d721a6 100644 (file)
@@ -76,25 +76,28 @@ You can now run your script on your local workstation or VM using @arv-crunch-jo
 2014-08-06_15:16:26 qr1hi-8i9sb-qyrat80ef927lam 14473 1 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 on localhost.1 exit 0 signal 0 success=true
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 success in 10 seconds
-2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 output 50cafdb29cc21dd6eaec85ba9e0c6134+56+Aef0f991b80fa0b75f802e58e70b207aa184d24ff@53f4bbd3
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 output 8c20281b9840f624a486e4f1a78a1da8+105+A234be74ceb5ea31db6e11b6be26f3eb76d288ad0@54987018
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  wait for last 0 children to finish
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 2 done, 0 running, 0 todo
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  release job allocation
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  Freeze not implemented
 2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  collate
-2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output uuid qr1hi-4zz18-n91qrqfp3zivexo
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output hash c1b44b6dc41ef334cf1136033ca950e6+54
 2014-08-06_15:16:37 qr1hi-8i9sb-qyrat80ef927lam 14473  finish
 2014-08-06_15:16:38 qr1hi-8i9sb-qyrat80ef927lam 14473  log manifest is 7fe8cf1d45d438a3ca3ac4a184b7aff4+83
 </code></pre>
 </notextile>
 
-Although the job runs locally, the output of the job has been saved to Keep, the Arvados file store.  The "output" line (third from the bottom) provides the "Keep locator":{{site.baseurl}}/user/tutorials/tutorial-keep-get.html to which the script's output has been saved.  Copy the output identifier and use @arv-ls@ to list the contents of your output collection, and @arv-get@ to download it to the current directory:
+Although the job runs locally, the output of the job has been saved to Keep, the Arvados file store.  The "output uuid" line (fourth from the bottom) provides the UUID of the Arvados collection where the script's output has been saved.  Copy the output identifier and use @arv-ls@ to list the contents of your output collection, and @arv-get@ to download it to the current directory:
 
 <notextile>
-<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-ls d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4</span>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-ls qr1hi-4zz18-n91qrqfp3zivexo</span>
 ./md5sum.txt
-~/tutorial/crunch_scripts$ <span class="userinput">arv-get d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4/ .</span>
+~/tutorial/crunch_scripts$ <span class="userinput">arv-get qr1hi-4zz18-n91qrqfp3zivexo/ .</span>
+0 MiB / 0 MiB 100.0%
 ~/tutorial/crunch_scripts$ <span class="userinput">cat md5sum.txt</span>
-44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/./var-GS000016015-ASM.tsv.bz2
+44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2
 </code></pre>
 </notextile>
 
index dc67339f7f04e2f7c7387670fb84e85ce3c9dc2a..ada6d1fbab302a40fd74eabd54eac6aae866f1db 100644 (file)
@@ -4,53 +4,53 @@ navsection: userguide
 title: "Uploading data"
 ...
 
-This tutorial describes how to to upload new Arvados data collections using the command line tool @arv-put@.  This example uses a freely available TSV file containing variant annotations from "Personal Genome Project (PGP)":http://www.pgp-hms.org participant "hu599905.":https://my.pgp-hms.org/profile/hu599905
+This tutorial describes how to upload new Arvados data collections using the command line tool @arv keep put@.
 
 notextile. <div class="spaced-out">
 
-# Begin by installing the "Arvados Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on the system from which you will upload the data (such as your workstation, or a server containing data from your sequencer).  This will install the Arvados file upload tool, @arv-put@.  Alternately, you can log into an Arvados VM (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
-# On system from which you will upload data, configure the environment with the Arvados instance host name and authentication token as decribed in "Getting an API token.":{{site.baseurl}}/user/reference/api-tokens.html  (If you are logged into an Arvados VM, you can skip this step.)
-# Download the following example file.  (If you are uploading your own data, you can skip this step.)
-<notextile>
-<pre><code>~$ <span class="userinput">curl -o var-GS000016015-ASM.tsv.bz2 'https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2'</span>
-  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-100  216M  100  216M    0     0  10.0M      0  0:00:21  0:00:21 --:--:-- 9361k
-</code></pre>
-</notextile>
-# Now upload the file to Keep using @arv-put@:
+{% include 'tutorial_expectations' %}
+
+h3. Upload
+
+To upload a file to Keep using @arv keep put@:
 <notextile>
-<pre><code>~$ <span class="userinput">arv-put var-GS000016015-ASM.tsv.bz2</span>
+<pre><code>~$ <span class="userinput">arv keep put var-GS000016015-ASM.tsv.bz2</span>
 216M / 216M 100.0%
 Collection saved as ...
 qr1hi-4zz18-xxxxxxxxxxxxxxx
 </code></pre>
 </notextile>
 
-* The output value @qr1hi-4zz18-xxxxxxxxxxxxxxx@ is the uuid of the Arvados collection created.
-
-Now visit the Workbench *Dashboard*.  Click on *Projects*<span class="caret"></span> dropdown menu in the top navigation menu, select your *Home* project.  Your newly uploaded collection should appear near the top of the *Data collections* tab.  The collection locator printed by @arv-put@ will appear under the *name* column.
-
-To move the collection to a different project, check the box at the left of the collection row.  Pull down the *Selection...*<span class="caret"></span> menu near the top of the page tab, and select *Move selected*. This will open a dialog box where you can select a destination project for the collection.  Click a project, then finally the <span class="btn btn-sm btn-primary">Move</span> button.
-
-!{{ site.baseurl }}/images/workbench-move-selected.png!
-
-Click on the *<i class="fa fa-fw fa-archive"></i> Show* button next to the collection's listing on a project page to go to the Workbench page for your collection.  On this page, you can see the collection's contents, download individual files, and set sharing options.
-
-notextile. </div>
+The output value @qr1hi-4zz18-xxxxxxxxxxxxxxx@ is the UUID of the Arvados collection created.
 
-h2(#dir). Putting a directory
+The file used in this example is a freely available TSV file containing variant annotations from "Personal Genome Project (PGP)":http://www.pgp-hms.org participant "hu599905":https://my.pgp-hms.org/profile/hu599905, downloadable "here":https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2.
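+
+If you want to follow along with the same file, you can first fetch it with curl:
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -o var-GS000016015-ASM.tsv.bz2 'https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2'</span>
+</code></pre></notextile>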
 
-If you give @arv-put@ a directory, it will recursively upload the entire directory:
+<notextile><a name="dir"></a></notextile>It is also possible to upload an entire directory with @arv keep put@:
 
 <notextile>
 <pre><code>~$ <span class="userinput">mkdir tmp</span>
 ~$ <span class="userinput">echo "hello alice" > tmp/alice.txt</span>
 ~$ <span class="userinput">echo "hello bob" > tmp/bob.txt</span>
 ~$ <span class="userinput">echo "hello carol" > tmp/carol.txt</span>
-~$ <span class="userinput">arv-put tmp</span>
+~$ <span class="userinput">arv keep put tmp</span>
 0M / 0M 100.0%
 Collection saved as ...
 qr1hi-4zz18-yyyyyyyyyyyyyyy
 </code></pre>
 </notextile>
+
+In both examples, the @arv keep put@ command created a collection. The first collection contains the single uploaded file. The second collection contains the entire uploaded directory.
+
+@arv keep put@ accepts quite a few optional command line arguments, which are described on the "arv subcommands":{{site.baseurl}}/sdk/cli/subcommands.html#arv-keep-put page.
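+
+For instance, @--project-uuid@ and @--name@ let you file the new collection directly in a specific project (a sketch; the project UUID shown is a placeholder):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx --name "PGP variant annotations" var-GS000016015-ASM.tsv.bz2</span>
+216M / 216M 100.0%
+Collection saved as ...
+qr1hi-4zz18-xxxxxxxxxxxxxxx
+</code></pre></notextile>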
+
+h3. Locate your collection in Workbench
+
+Visit the Workbench *Dashboard*.  Click on *Projects*<span class="caret"></span> dropdown menu in the top navigation menu, select your *Home* project.  Your newly uploaded collection should appear near the top of the *Data collections* tab.  The collection locator printed by @arv keep put@ will appear under the *name* column.
+
+To move the collection to a different project, check the box at the left of the collection row.  Pull down the *Selection...*<span class="caret"></span> menu near the top of the page tab, and select *Move selected*. This will open a dialog box where you can select a destination project for the collection.  Click a project, then finally the <span class="btn btn-sm btn-primary">Move</span> button.
+
+!{{ site.baseurl }}/images/workbench-move-selected.png!
+
+Click on the *<i class="fa fa-fw fa-archive"></i> Show* button next to the collection's listing on a project page to go to the Workbench page for your collection.  On this page, you can see the collection's contents, download individual files, and set sharing options.
+
+notextile. </div>
index ee9198e7c3d4eca12c182c6b65279bf221cc8087..abd2114302ac5f3595a3a0e0a031383342f601ff 100644 (file)
@@ -8,7 +8,7 @@ RUN apt-get update -qq
 RUN apt-get install -qqy \
     procps postgresql postgresql-server-dev-9.1 apache2 slurm-llnl munge \
     supervisor sudo libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
-    libjson-perl cron
+    libjson-perl cron openssh-server
 
 ADD munge.key /etc/munge/
 RUN chown munge:munge /etc/munge/munge.key && chmod 600 /etc/munge/munge.key
@@ -71,7 +71,6 @@ ADD update-gitolite.rb /usr/local/arvados/
 
 # Supervisor.
 ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
-ADD ssh.sh /usr/local/bin/ssh.sh
 ADD generated/setup.sh /usr/local/bin/setup.sh
 ADD generated/setup-gitolite.sh /usr/local/bin/setup-gitolite.sh
 ADD crunch-dispatch-run.sh /usr/local/bin/crunch-dispatch-run.sh
index babfc4e5c98ea2af0e761cfe2f141d295d0e25cf..59ff352a1889a5edc3fcd7fe27343e889c772231 100644 (file)
@@ -3,3 +3,4 @@ production:
   gitolite_tmp: 'gitolite-tmp'
   arvados_api_host: 'api'
   arvados_api_token: '@@API_SUPERUSER_SECRET@@'
+  arvados_api_host_insecure: true
index cba475988334d6d502d978cff3a1cbd1ee1e428e..7af6afb237e364ffafeceb826c039dfa802b5fd4 100755 (executable)
@@ -8,15 +8,6 @@ export ARVADOS_API_HOST=api
 export ARVADOS_API_HOST_INSECURE=yes
 export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
 
-# All users group
-prefix=`arv --format=uuid user current | cut -d- -f1`
-read -rd $'\000' newgroup <<EOF; arv group create --group "$newgroup"
-{
- "uuid":"$prefix-j7d0g-fffffffffffffff",
- "name":"All users"
-}
-EOF
-
 # Arvados repository object
 all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"
 repo_uuid=`arv --format=uuid repository create --repository '{"name":"arvados","fetch_url":"git@api:arvados.git","push_url":"git@api:arvados.git"}'`
diff --git a/docker/api/ssh.sh b/docker/api/ssh.sh
deleted file mode 100755 (executable)
index 664414b..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-echo $ENABLE_SSH
-
-# Start ssh daemon if requested via the ENABLE_SSH env variable
-if [[ ! "$ENABLE_SSH" =~ (0|false|no|f|^$) ]]; then
-echo "STARTING"
-  /etc/init.d/ssh start
-fi
-
index e85bb72658ee48dbb464a5aa088e0403c7ca1054..b01dc1c11051a723e35f8f56dfc8ce8c0a58d7f3 100644 (file)
@@ -1,15 +1,17 @@
 [program:ssh]
 user=root
-command=/usr/local/bin/ssh.sh
+command=/etc/init.d/ssh start
 startsecs=0
 
 [program:postgres]
 user=postgres
 command=/usr/lib/postgresql/9.1/bin/postgres -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf
+autorestart=true
 
 [program:apache2]
 command=/etc/apache2/foreground.sh
 stopsignal=6
+autorestart=true
 
 [program:munge]
 user=root
@@ -39,3 +41,4 @@ startsecs=0
 [program:crunch-dispatch]
 user=root
 command=/usr/local/bin/crunch-dispatch-run.sh
+autorestart=true
index 779099aac83cf88e2892178da47fa9ca9e39b735..2c46a0d0b5a3c71b8989df9e1c80909fce2830ec 100755 (executable)
@@ -4,6 +4,7 @@ require 'rubygems'
 require 'pp'
 require 'arvados'
 require 'active_support/all'
+require 'yaml'
 
 # This script does the actual gitolite config management on disk.
 #
@@ -34,6 +35,11 @@ gitolite_admin = File.join(File.expand_path(File.dirname(__FILE__)) + '/' + gito
 
 ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
 ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
+if cp_config['arvados_api_host_insecure']
+  ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
+else
+  ENV.delete('ARVADOS_API_HOST_INSECURE')
+end
 
 keys = ''
 
index 31e405bb410213dc798ccbeb9d85b916177369b6..142ba27e3141cb6bb5a47667b99e7167dcba965f 100755 (executable)
@@ -1,6 +1,5 @@
 #!/bin/bash
 
-ENABLE_SSH=false
 DOCKER=`which docker.io`
 
 if [[ "$DOCKER" == "" ]]; then
@@ -22,7 +21,6 @@ function usage {
     echo >&2 "  -v, --vm                      Shell server"
     echo >&2 "  -n, --nameserver              Nameserver"
     echo >&2 "  -k, --keep                    Keep servers"
-    echo >&2 "  --ssh                         Enable SSH access to server containers"
     echo >&2 "  -h, --help                    Display this help and exit"
     echo >&2
     echo >&2 "  If no options are given, the action is applied to all servers."
@@ -65,11 +63,6 @@ function start_container {
     fi
     local image=$5
 
-    if $ENABLE_SSH
-    then
-      args="$args -e ENABLE_SSH=$ENABLE_SSH"
-    fi
-
     `$DOCKER ps |grep -P "$name[^/]" -q`
     if [[ "$?" == "0" ]]; then
       echo "You have a running container with name $name -- skipping."
@@ -86,15 +79,6 @@ function start_container {
     if [ "$?" != "0" -o "$container" = "" ]; then
       echo "Unable to start container"
       exit 1
-    fi
-    if [ "$name" == "compute" -o "$ENABLE_SSH" != "false" ];
-    then
-      ip=$(ip_address $container )
-      echo
-      echo "You can ssh into the container with:"
-      echo
-      echo "    ssh root@$ip"
-      echo
     else
       echo "Started container: $container"
     fi
@@ -155,7 +139,7 @@ function do_start {
 
     # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
     local TEMP=`getopt -o d::s::a::cw::nkvh \
-                  --long doc::,sso::,api::,compute,workbench::,nameserver,keep,vm,help,ssh \
+                  --long doc::,sso::,api::,compute,workbench::,nameserver,keep,vm,help \
                   -n "$0" -- "$@"`
 
     if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
@@ -206,11 +190,6 @@ function do_start {
                 start_keep=true
                 shift
                 ;;
-            --ssh)
-                # ENABLE_SSH is a global variable
-                ENABLE_SSH=true
-                shift
-                ;;
             --)
                 shift
                 break
index 2959d503b048bb74361376eb9d282b92cf2dab16..325b7792a039add3ec4c60d24a38f93f98cfb40f 100644 (file)
@@ -14,12 +14,11 @@ ADD apt.arvados.org.list /etc/apt/sources.list.d/
 RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
 RUN apt-get update -qq
 
-RUN apt-get install -qqy openssh-server apt-utils git curl \
+RUN apt-get install -qqy apt-utils git curl \
              libcurl3 libcurl3-gnutls libcurl4-openssl-dev locales \
              postgresql-server-dev-9.1 python-arvados-python-client
 
 RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
-    /bin/mkdir -p /root/.ssh && \
     /bin/sed -ri 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
     /usr/sbin/locale-gen && \
     curl -L https://get.rvm.io | bash -s stable && \
@@ -36,6 +35,3 @@ RUN /usr/local/rvm/bin/rvm-exec default gem update --system && \
     /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
     /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
     /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile
-
-ADD generated/id_rsa.pub /root/.ssh/authorized_keys
-RUN chown root:root /root/.ssh/authorized_keys
index f3dd90c6c741db3a1a3dd0ec0e98bfbce0323283..d92349c50be7f387faeaa798f91867d0c8b6bf5f 100644 (file)
@@ -148,10 +148,7 @@ $(SSO_GENERATED): $(SSO_GENERATED_IN)
 $(KEEP_GENERATED): $(KEEP_GENERATED_IN)
        $(CONFIG_RB) keep
 
-# The docker build -q option suppresses verbose build output.
-# Necessary to prevent failure on building warehouse; see
-# https://github.com/dotcloud/docker/issues/3172
-DOCKER_BUILD = $(DOCKER) build -q --rm=true
+DOCKER_BUILD = $(DOCKER) build --rm=true
 
 # ============================================================
 # The main Arvados servers: api, doc, workbench, compute
index a79157fdb4235246f411069fe888cccf4c640685..e8f58097d864610e2641142584f061c9c94cefc2 100755 (executable)
@@ -85,7 +85,6 @@ def main options
       config['ARVADOS_USER_NAME'] = user_name
       config['API_HOSTNAME'] = generate_api_hostname
       config['API_WORKBENCH_ADDRESS'] = 'false'
-      config['PUBLIC_KEY_PATH'] = find_or_create_ssh_key(config['API_HOSTNAME'])
       config.each_key do |var|
         config_out.write "#{var}: #{config[var]}\n"
       end
@@ -166,22 +165,6 @@ def docker_ok?(docker_path)
   return system "#{docker_path} images > /dev/null 2>&1"
 end
 
-# find_or_create_ssh_key arvados_name
-#   Returns the SSH public key appropriate for this Arvados instance,
-#   generating one if necessary.
-#
-def find_or_create_ssh_key arvados_name
-  ssh_key_file = "#{ENV['HOME']}/.ssh/arvados_#{arvados_name}_id_rsa"
-  unless File.exists? ssh_key_file
-    system 'ssh-keygen',
-           '-f', ssh_key_file,
-           '-C', "arvados@#{arvados_name}",
-           '-P', ''
-  end
-
-  return "#{ssh_key_file}.pub"
-end
-
 # install_docker
 #   Determines which Docker package is suitable for this Linux distro
 #   and installs it, resolving any dependencies.
index d8bf256124eeeb387f6af39128b47b4ab35f9aac..296bc206ff90560bdc0f16ab4d89887b89372623 100755 (executable)
@@ -72,12 +72,3 @@ Dir.glob(globdir + '/*.in') do |template_file|
     end
   end
 end
-
-# Copy the ssh public key file to base/generated (if a path is given)
-generated_dir = File.join('base/generated')
-Dir.mkdir(generated_dir) unless Dir.exists? generated_dir
-if (!config['PUBLIC_KEY_PATH'].nil? and
-    File.readable? config['PUBLIC_KEY_PATH'])
-  FileUtils.cp(config['PUBLIC_KEY_PATH'],
-               File.join(generated_dir, 'id_rsa.pub'))
-end
index 1dd3889a1e59f39dbcb58181519feed5c2d7a5e5..462115cab0d8b07d7759da951b186e143663acdf 100644 (file)
@@ -18,7 +18,6 @@ RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid
 
 # Supervisor.
 ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
-ADD ssh.sh /usr/local/bin/ssh.sh
 ADD generated/setup.sh /usr/local/bin/setup.sh
 ADD wrapdocker /usr/local/bin/wrapdocker.sh
 
diff --git a/docker/compute/ssh.sh b/docker/compute/ssh.sh
deleted file mode 100755 (executable)
index 664414b..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-echo $ENABLE_SSH
-
-# Start ssh daemon if requested via the ENABLE_SSH env variable
-if [[ ! "$ENABLE_SSH" =~ (0|false|no|f|^$) ]]; then
-echo "STARTING"
-  /etc/init.d/ssh start
-fi
-
index 7fc34fc2c98473b60f74825865f70a73a819eea7..615e55a953606e948f7628d68b6ab8e37bebd390 100644 (file)
@@ -1,8 +1,3 @@
-[program:ssh]
-user=root
-command=/usr/local/bin/ssh.sh
-startsecs=0
-
 [program:munge]
 user=root
 command=/etc/init.d/munge start
index 6ba5bcf3113cd482f763ee1ec2ac34206f73af83..4210ec3e161ad2b1fbaa355d9f6b249a741bf00b 100644 (file)
@@ -1,12 +1,6 @@
 # Configuration for the Rails databases (database names,
 # usernames and passwords).
 
-# Path to a public ssh key you would like to have installed in the
-# root account of the generated images. This is useful for debugging.
-# To enable the SSH server, set the ENABLE_SSH environment variable to
-# true when starting the container.
-PUBLIC_KEY_PATH:
-
 # Username for your Arvados user. This will be used as your shell login name
 # as well as the name for your git repository.
 ARVADOS_USER_NAME:
index c3facfe6f246578db9825ee46f697e1a583ba291..fc6028ea8391b3445e13adc5de4439fe7207ec2d 100755 (executable)
@@ -3,10 +3,5 @@
 read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
 trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
 
-# Start ssh daemon if requested via the ENABLE_SSH env variable
-if [[ ! "$ENABLE_SSH" =~ (0|false|no|f|^$) ]]; then
-  /etc/init.d/ssh start
-fi
-
 source /etc/apache2/envvars
 /usr/sbin/apache2 -D FOREGROUND
index 5dae9a628a88d68ab454cc1e63264ab9ebe711d3..713ef2116a6fe7a7936ce6abc34877f3fc3de348 100644 (file)
@@ -6,14 +6,14 @@ USER root
 RUN apt-get update -qq
 RUN apt-get install -qqy openjdk-7-jre-headless && \
     cd /tmp && \
-    curl --location http://downloads.sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
+    curl --location http://cache.arvados.org/sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
     tar xjf bwa-0.7.9a.tar.bz2 && \
     cd bwa-0.7.9a && \
     make && \
     (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
     rm -r /tmp/bwa-0.7.9a* && \
     cd /tmp && \
-    curl --location http://downloads.sourceforge.net/project/samtools/samtools/0.1.19/samtools-0.1.19.tar.bz2 -o samtools-0.1.19.tar.bz2 && \
+    curl --location http://cache.arvados.org/sourceforge.net/project/samtools/samtools/0.1.19/samtools-0.1.19.tar.bz2 -o samtools-0.1.19.tar.bz2 && \
     tar xjf samtools-0.1.19.tar.bz2 && \
     cd samtools-0.1.19 && \
     make && \
index e140f277529498e1a4d9ce91f31d988f6b722b5f..313dd3662a2e80192db76e69e3243f9d56a2b90f 100644 (file)
@@ -3,7 +3,8 @@ MAINTAINER Brett Smith <brett@curoverse.com>
 
 # Install dependencies and set up system.
 # The FUSE packages help ensure that we can install the Python SDK (arv-mount).
-RUN /usr/bin/apt-get install -q -y python-dev python-llfuse python-pip \
+RUN /usr/bin/apt-get install -q -y \
+      python-dev python-llfuse python-pip python-virtualenv \
       libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl dtrx \
       fuse libattr1-dev libfuse-dev && \
     /usr/sbin/adduser --disabled-password \
index 0201b4925376b9c1504f68fe9d54728e7f685110..5e0fd76b04955b8fb94f23ed327a6ac4439872f1 100644 (file)
@@ -12,7 +12,7 @@ RUN apt-get install -qqy \
         libapr1-dev libaprutil1-dev
 
 RUN cd /usr/src/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --auto
+    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --auto --languages ruby
 
 RUN cd /usr/src/arvados/services/api && \
     /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --snippet > /etc/apache2/conf.d/passenger
index 539ff942dd0db07ebd1e30296ad40ac0eaa13896..8235159999934244e319db16c8b7e472f58c9390 100644 (file)
@@ -7,7 +7,7 @@ RUN apt-get update -qq
 RUN apt-get install -qqy \
     python-pip python-pyvcf python-gflags python-google-api-python-client \
     python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse \
-    crunchstat python-arvados-fuse cron vim supervisor
+    crunchstat python-arvados-fuse cron vim supervisor openssh-server
 
 ADD fuse.conf /etc/fuse.conf
 RUN chmod 644 /etc/fuse.conf
index 2815201614ab2bfb986779da7e4346526df95905..03beb4b06b14745f3f2237e40df22504408187cc 100755 (executable)
@@ -5,8 +5,6 @@ USER_NAME="@@ARVADOS_USER_NAME@@"
 useradd $USER_NAME -s /bin/bash
 mkdir /home/$USER_NAME/.ssh -p
 
-cp ~root/.ssh/authorized_keys /home/$USER_NAME/.ssh/authorized_keys
-
 # Install our token
 mkdir -p /home/$USER_NAME/.config/arvados;
 echo "ARVADOS_API_HOST=api" >> /home/$USER_NAME/.config/arvados/settings.conf
index 6563b547df45025e5f2786985dec74a2dd46dcad..64f86b19e61c0c5989f738f3e3b221032553905b 100644 (file)
@@ -1,8 +1,3 @@
-[program:ssh]
-user=root
-command=/usr/local/bin/ssh.sh
-startsecs=0
-
 [program:munge]
 user=root
 command=/etc/init.d/munge start
@@ -10,5 +5,3 @@ command=/etc/init.d/munge start
 [program:slurm]
 user=root
 command=/etc/init.d/slurm-llnl start
-
-
index c3facfe6f246578db9825ee46f697e1a583ba291..fc6028ea8391b3445e13adc5de4439fe7207ec2d 100755 (executable)
@@ -3,10 +3,5 @@
 read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
 trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
 
-# Start ssh daemon if requested via the ENABLE_SSH env variable
-if [[ ! "$ENABLE_SSH" =~ (0|false|no|f|^$) ]]; then
-  /etc/init.d/ssh start
-fi
-
 source /etc/apache2/envvars
 /usr/sbin/apache2 -D FOREGROUND
index 37650ab5daf9e8f929cad41bec98e011d34b1835..5475ff0a199e9b29bc6891968fa2f30f32b85770 100755 (executable)
@@ -3,13 +3,8 @@
 read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
 trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
 
-# Start ssh daemon if requested via the ENABLE_SSH env variable
-if [[ ! "$ENABLE_SSH" =~ (0|false|no|f|^$) ]]; then
-  /etc/init.d/ssh start
-fi
-
 # Override the default API server address if necessary.
-if [[ "$API_PORT_443_TCP_ADDR" != "" ]]; then 
+if [[ "$API_PORT_443_TCP_ADDR" != "" ]]; then
   sed -i "s/localhost:9900/$API_PORT_443_TCP_ADDR/" /usr/src/arvados/apps/workbench/config/application.yml
 fi
 
index beb0627be7380d123214ad82578b61d3de9b0f74..51463cf587fa994af06dbdf73a8b7d663db77f0c 100644 (file)
@@ -1,2 +1,3 @@
 arvados-cli*gem
 tmp
+Gemfile.lock
diff --git a/sdk/cli/Gemfile.lock b/sdk/cli/Gemfile.lock
deleted file mode 100644 (file)
index 2db90e0..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-PATH
-  remote: .
-  specs:
-    arvados-cli (0.1.20140922151840)
-      activesupport (~> 3.2, >= 3.2.13)
-      andand (~> 1.3, >= 1.3.3)
-      arvados (~> 0.1, >= 0.1.0)
-      curb (~> 0.8)
-      google-api-client (~> 0.6, >= 0.6.3)
-      json (~> 1.7, >= 1.7.7)
-      jwt (>= 0.1.5, < 1.0.0)
-      oj (~> 2.0, >= 2.0.3)
-      trollop (~> 2.0)
-
-GEM
-  remote: https://rubygems.org/
-  specs:
-    activesupport (3.2.19)
-      i18n (~> 0.6, >= 0.6.4)
-      multi_json (~> 1.0)
-    addressable (2.3.6)
-    andand (1.3.3)
-    arvados (0.1.20140117103233)
-      activesupport (>= 3.2.13)
-      google-api-client (>= 0.6.3)
-      json (>= 1.7.7)
-    autoparse (0.3.3)
-      addressable (>= 2.3.1)
-      extlib (>= 0.9.15)
-      multi_json (>= 1.0.0)
-    curb (0.8.6)
-    extlib (0.9.16)
-    faraday (0.9.0)
-      multipart-post (>= 1.2, < 3)
-    google-api-client (0.7.1)
-      addressable (>= 2.3.2)
-      autoparse (>= 0.3.3)
-      extlib (>= 0.9.15)
-      faraday (>= 0.9.0)
-      jwt (>= 0.1.5)
-      launchy (>= 2.1.1)
-      multi_json (>= 1.0.0)
-      retriable (>= 1.4)
-      signet (>= 0.5.0)
-      uuidtools (>= 2.1.0)
-    i18n (0.6.11)
-    json (1.8.1)
-    jwt (0.1.13)
-      multi_json (>= 1.5)
-    launchy (2.4.2)
-      addressable (~> 2.3)
-    minitest (5.2.3)
-    multi_json (1.10.1)
-    multipart-post (2.0.0)
-    oj (2.10.2)
-    rake (10.1.1)
-    retriable (1.4.1)
-    signet (0.5.1)
-      addressable (>= 2.2.3)
-      faraday (>= 0.9.0.rc5)
-      jwt (>= 0.1.5)
-      multi_json (>= 1.0.0)
-    trollop (2.0)
-    uuidtools (2.1.5)
-
-PLATFORMS
-  ruby
-
-DEPENDENCIES
-  arvados-cli!
-  minitest (>= 5.0.0)
-  rake
index 0d35d53f9d2b924ea8b583fda5b5a3a682be09fb..820d142e26ec08374d54ce43772b56aba8dab250 100755 (executable)
@@ -86,6 +86,7 @@ use POSIX ':sys_wait_h';
 use POSIX qw(strftime);
 use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
 use Arvados;
+use Cwd qw(realpath);
 use Data::Dumper;
 use Digest::MD5 qw(md5_hex);
 use Getopt::Long;
@@ -197,6 +198,16 @@ $Job->{'runtime_constraints'} ||= {};
 $Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
 my $max_ncpus = $Job->{'runtime_constraints'}->{'max_tasks_per_node'};
 
+my $gem_versions = `gem list --quiet arvados-cli 2>/dev/null`;
+if ($? == 0) {
+  $gem_versions =~ s/^arvados-cli \(/ with arvados-cli Gem version(s) /;
+  chomp($gem_versions);
+  chop($gem_versions);  # Strip the closing parenthesis
+} else {
+  $gem_versions = "";
+}
+Log(undef,
+    "running from " . ((-e $0) ? realpath($0) : "stdin") . $gem_versions);
 
 Log (undef, "check slurm allocation");
 my @slot;
@@ -305,7 +316,6 @@ my @jobstep_tomerge = ();
 my $jobstep_tomerge_level = 0;
 my $squeue_checked;
 my $squeue_kill_checked;
-my $output_in_keep = 0;
 my $latest_refresh = scalar time;
 
 
@@ -335,13 +345,9 @@ if (!$have_slurm)
   must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
 }
 
-
-my $build_script;
-do {
-  local $/ = undef;
-  $build_script = <DATA>;
-};
+my $build_script = handle_readall(\*DATA);
 my $nodelist = join(",", @node);
+my $git_tar_count = 0;
 
 if (!defined $no_clear_tmp) {
   # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
@@ -363,8 +369,51 @@ if (!defined $no_clear_tmp) {
   Log (undef, "Cleanup command exited ".exit_status_s($?));
 }
 
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my ($docker_locator, $docker_stream, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+  ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
+  if (!$docker_hash)
+  {
+    croak("No Docker image hash found from locator $docker_locator");
+  }
+  $docker_stream =~ s/^\.//;
+  my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+    arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
+fi
+};
+  my $docker_pid = fork();
+  if ($docker_pid == 0)
+  {
+    srun (["srun", "--nodelist=" . join(',', @node)],
+          ["/bin/sh", "-ec", $docker_install_script]);
+    exit ($?);
+  }
+  while (1)
+  {
+    last if $docker_pid == waitpid (-1, WNOHANG);
+    freeze_if_want_freeze ($docker_pid);
+    select (undef, undef, undef, 0.1);
+  }
+  if ($? != 0)
+  {
+    croak("Installing Docker image from $docker_locator exited "
+          .exit_status_s($?));
+  }
+
+  if ($Job->{arvados_sdk_version}) {
+    # The job also specifies an Arvados SDK version.  Add the SDKs to the
+    # tar file for the build script to install.
+    Log(undef, sprintf("Packing Arvados SDK version %s for installation",
+                       $Job->{arvados_sdk_version}));
+    add_git_archive("git", "--git-dir=$git_dir", "archive",
+                    "--prefix=.arvados.sdk/",
+                    $Job->{arvados_sdk_version}, "sdk");
+  }
+}
 
-my $git_archive;
 if (!defined $git_dir && $Job->{'script_version'} =~ m{^/}) {
   # If script_version looks like an absolute path, *and* the --git-dir
   # argument was not given -- which implies we were not invoked by
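
The hunk above moves Docker image installation ahead of the install phase.  The embedded srun script fetches the image from Keep only when it is not already loaded; a rough Python rendering of that check-then-load logic, for illustration (the docker binary path mirrors the hard-coded default above, and nothing here is part of the patch):

```python
# Sketch of the check-then-load logic crunch-job runs via srun on each node:
# skip the Keep download when the image hash is already loaded locally.
import subprocess

def ensure_docker_image(locator, stream, image_hash,
                        docker_bin='/usr/bin/docker.io'):
    loaded = subprocess.check_output([docker_bin, 'images', '-q', '--no-trunc'])
    if image_hash in loaded.split():
        return  # image already present on this node
    # Stream the saved image tarball out of Keep straight into `docker load`.
    arv_get = subprocess.Popen(
        ['arv-get', '%s%s/%s.tar' % (locator, stream, image_hash)],
        stdout=subprocess.PIPE)
    subprocess.check_call([docker_bin, 'load'], stdin=arv_get.stdout)
    arv_get.stdout.close()
    if arv_get.wait() != 0:
        raise RuntimeError('arv-get failed for %s' % locator)
```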
@@ -518,12 +567,10 @@ else {
   }
 
   $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
-  $git_archive = `$gitcmd archive ''\Q$commit\E`;
-  if ($?) {
-    croak("Error: $gitcmd archive exited ".exit_status_s($?));
-  }
+  add_git_archive("$gitcmd archive ''\Q$commit\E");
 }
 
+my $git_archive = combined_git_archive();
 if (!defined $git_archive) {
   Log(undef, "Skip install phase (no git archive)");
   if ($have_slurm) {
@@ -553,48 +600,10 @@ else {
   }
   my $install_exited = $?;
   Log (undef, "Install script exited ".exit_status_s($install_exited));
-  exit (1) if $install_exited != 0;
-}
-
-if (!$have_slurm)
-{
-  # Grab our lock again (we might have deleted and re-created CRUNCH_TMP above)
-  must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
-}
-
-# If this job requires a Docker image, install that.
-my $docker_bin = "/usr/bin/docker.io";
-my ($docker_locator, $docker_stream, $docker_hash);
-if ($docker_locator = $Job->{docker_image_locator}) {
-  ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
-  if (!$docker_hash)
-  {
-    croak("No Docker image hash found from locator $docker_locator");
-  }
-  $docker_stream =~ s/^\.//;
-  my $docker_install_script = qq{
-if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
-    arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
-fi
-};
-  my $docker_pid = fork();
-  if ($docker_pid == 0)
-  {
-    srun (["srun", "--nodelist=" . join(',', @node)],
-          ["/bin/sh", "-ec", $docker_install_script]);
-    exit ($?);
-  }
-  while (1)
-  {
-    last if $docker_pid == waitpid (-1, WNOHANG);
-    freeze_if_want_freeze ($docker_pid);
-    select (undef, undef, undef, 0.1);
-  }
-  if ($? != 0)
-  {
-    croak("Installing Docker image from $docker_locator exited "
-          .exit_status_s($?));
+  foreach my $tar_filename (map { tar_filename_n($_); } (1..$git_tar_count)) {
+    unlink($tar_filename);
   }
+  exit (1) if $install_exited != 0;
 }
 
 foreach (qw (script script_version script_parameters runtime_constraints))
@@ -823,7 +832,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     {
       $main::please_info = 0;
       freeze();
-      collate_output();
+      create_output_collection();
       save_meta(1);
       update_progress_stats();
     }
@@ -885,7 +894,7 @@ while (%proc)
     $main::please_continue = 0;
     goto THISROUND;
   }
-  $main::please_info = 0, freeze(), collate_output(), save_meta(1) if $main::please_info;
+  $main::please_info = 0, freeze(), create_output_collection(), save_meta(1) if $main::please_info;
   readfrompipes ();
   if (!reapchildren())
   {
@@ -922,28 +931,14 @@ goto ONELEVEL if !defined $main::success;
 
 release_allocation();
 freeze();
-my $collated_output = &collate_output();
+my $collated_output = &create_output_collection();
 
 if (!$collated_output) {
-  Log(undef, "output undef");
+  Log (undef, "Failed to write output collection");
 }
 else {
-  eval {
-    open(my $orig_manifest, '-|', 'arv-get', $collated_output)
-        or die "failed to get collated manifest: $!";
-    my $orig_manifest_text = '';
-    while (my $manifest_line = <$orig_manifest>) {
-      $orig_manifest_text .= $manifest_line;
-    }
-    my $output = api_call("collections/create", collection => {
-      'manifest_text' => $orig_manifest_text});
-    Log(undef, "output uuid " . $output->{uuid});
-    Log(undef, "output hash " . $output->{portable_data_hash});
-    $Job->update_attributes('output' => $output->{portable_data_hash});
-  };
-  if ($@) {
-    Log (undef, "Failed to register output manifest: $@");
-  }
+  Log(undef, "output hash " . $collated_output);
+  $Job->update_attributes('output' => $collated_output);
 }
 
 Log (undef, "finish");
@@ -1275,14 +1270,24 @@ sub fetch_block
   return $output_block;
 }
 
-sub collate_output
+# create_output_collection generates a new collection containing the
+# output of each successfully completed task, and returns the
+# portable_data_hash for the new collection.
+#
+sub create_output_collection
 {
   Log (undef, "collate");
 
   my ($child_out, $child_in);
-  my $pid = open2($child_out, $child_in, 'arv-put', '--raw',
-                  '--retries', retry_count());
-  my $joboutput;
+  my $pid = open2($child_out, $child_in, 'python', '-c',
+                  'import arvados; ' .
+                  'import sys; ' .
+                  'print arvados.api()' .
+                  '.collections()' .
+                  '.create(body={"manifest_text":sys.stdin.read()})' .
+                  '.execute()["portable_data_hash"]'
+      );
+
   for (@jobstep)
   {
     next if (!exists $_->{'arvados_task'}->{'output'} ||
@@ -1290,17 +1295,10 @@ sub collate_output
     my $output = $_->{'arvados_task'}->{output};
     if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
     {
-      $output_in_keep ||= $output =~ / [0-9a-f]{32}\S*\+K/;
       print $child_in $output;
     }
-    elsif (@jobstep == 1)
-    {
-      $joboutput = $output;
-      last;
-    }
     elsif (defined (my $outblock = fetch_block ($output)))
     {
-      $output_in_keep ||= $outblock =~ / [0-9a-f]{32}\S*\+K/;
       print $child_in $outblock;
     }
     else
@@ -1311,15 +1309,14 @@ sub collate_output
   }
   $child_in->close;
 
-  if (!defined $joboutput) {
-    my $s = IO::Select->new($child_out);
-    if ($s->can_read(120)) {
-      sysread($child_out, $joboutput, 64 * 1024 * 1024);
-      chomp($joboutput);
-      # TODO: Ensure exit status == 0.
-    } else {
-      Log (undef, "timed out reading from 'arv-put'");
-    }
+  my $joboutput;
+  my $s = IO::Select->new($child_out);
+  if ($s->can_read(120)) {
+    sysread($child_out, $joboutput, 64 * 1024 * 1024);
+    chomp($joboutput);
+    # TODO: Ensure exit status == 0.
+  } else {
+    Log (undef, "timed out while creating output collection");
   }
   # TODO: kill $pid instead of waiting, now that we've decided to
   # ignore further output.
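
create_output_collection now registers the combined manifest through the Python SDK instead of @arv-put --raw@.  Written out as a standalone script, the embedded one-liner amounts to the following (a sketch; error handling omitted):

```python
# Standalone rendering of the Python one-liner crunch-job pipes the combined
# manifest into: create a collection and print its portable_data_hash.
import sys
import arvados

manifest_text = sys.stdin.read()
collection = arvados.api().collections().create(
    body={'manifest_text': manifest_text}).execute()
print(collection['portable_data_hash'])
```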
@@ -1469,7 +1466,7 @@ sub croak
   my $message = "@_ at $file line $line\n";
   Log (undef, $message);
   freeze() if @jobstep_todo;
-  collate_output() if @jobstep_todo;
+  create_output_collection() if @jobstep_todo;
   cleanup();
   save_meta();
   die;
@@ -1521,7 +1518,7 @@ sub freeze_if_want_freeze
       }
     }
     freeze();
-    collate_output();
+    create_output_collection();
     cleanup();
     save_meta();
     exit 1;
@@ -1702,7 +1699,7 @@ sub api_call {
     if ($next_try_at < time) {
       $retry_msg = "Retrying.";
     } else {
-      my $next_try_fmt = strftime("%Y-%m-%d %H:%M:%S", $next_try_at);
+      my $next_try_fmt = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($next_try_at);
       $retry_msg = "Retrying at $next_try_fmt.";
     }
     Log(undef, "API method $method_name failed: $errmsg. $retry_msg");
@@ -1728,17 +1725,87 @@ sub exit_status_s {
   return $s;
 }
 
+sub handle_readall {
+  # Pass in a glob reference to a file handle.
+  # Read all its contents and return them as a string.
+  my $fh_glob_ref = shift;
+  local $/ = undef;
+  return <$fh_glob_ref>;
+}
+
+sub tar_filename_n {
+  my $n = shift;
+  return sprintf("%s/git.%s.%d.tar", $ENV{CRUNCH_TMP}, $job_id, $n);
+}
+
+sub add_git_archive {
+  # Pass in a git archive command as a string or list, a la system().
+  # This method will save its output to be included in the archive sent to the
+  # build script.
+  my $git_input;
+  $git_tar_count++;
+  if (!open(GIT_ARCHIVE, ">", tar_filename_n($git_tar_count))) {
+    croak("Failed to save git archive: $!");
+  }
+  my $git_pid = open2(">&GIT_ARCHIVE", $git_input, @_);
+  close($git_input);
+  waitpid($git_pid, 0);
+  close(GIT_ARCHIVE);
+  if ($?) {
+    croak("Failed to save git archive: git exited " . exit_status_s($?));
+  }
+}
+
+sub combined_git_archive {
+  # Combine all saved tar archives into a single archive, then return its
+  # contents in a string.  Return undef if no archives have been saved.
+  if ($git_tar_count < 1) {
+    return undef;
+  }
+  my $base_tar_name = tar_filename_n(1);
+  foreach my $tar_to_append (map { tar_filename_n($_); } (2..$git_tar_count)) {
+    my $tar_exit = system("tar", "-Af", $base_tar_name, $tar_to_append);
+    if ($tar_exit != 0) {
+      croak("Error preparing build archive: tar -A exited " .
+            exit_status_s($tar_exit));
+    }
+  }
+  if (!open(GIT_TAR, "<", $base_tar_name)) {
+    croak("Could not open build archive: $!");
+  }
+  my $tar_contents = handle_readall(\*GIT_TAR);
+  close(GIT_TAR);
+  return $tar_contents;
+}
+
 __DATA__
 #!/usr/bin/perl
-
-# checkout-and-build
+#
+# This is crunch-job's internal dispatch script.  crunch-job running on the API
+# server invokes this script on individual compute nodes, or localhost if we're
+# running a job locally.  It gets called in two modes:
+#
+# * No arguments: Installation mode.  Read a tar archive from the DATA
+#   file handle; it includes the Crunch script's source code, and
+#   maybe SDKs as well.  Those should be installed in the proper
+#   locations.  This runs outside of any Docker container, so don't try to
+#   introspect Crunch's runtime environment.
+#
+# * With arguments: Crunch script run mode.  This script should set up the
+#   environment, then run the command specified in the arguments.  This runs
+#   inside the job's Docker container, if one is in use.
 
 use Fcntl ':flock';
 use File::Path qw( make_path remove_tree );
+use POSIX qw(getcwd);
+
+# Map SDK subdirectories to the path environments they belong to.
+my %SDK_ENVVARS = ("perl/lib" => "PERLLIB", "ruby/lib" => "RUBYLIB");
 
 my $destdir = $ENV{"CRUNCH_SRC"};
 my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
 my $repo = $ENV{"CRUNCH_SRC_URL"};
+my $install_dir = $ENV{"CRUNCH_INSTALL"} || (getcwd() . "/opt");
 my $job_work = $ENV{"JOB_WORK"};
 my $task_work = $ENV{"TASK_WORK"};
 
@@ -1753,43 +1820,110 @@ if ($task_work) {
   remove_tree($task_work, {keep_root => 1});
 }
 
-my @git_archive_data = <DATA>;
-if (!@git_archive_data) {
-  # Nothing to extract -> nothing to install.
-  run_argv_and_exit();
+open(STDOUT_ORIG, ">&", STDOUT);
+open(STDERR_ORIG, ">&", STDERR);
+open(STDOUT, ">>", "$destdir.log");
+open(STDERR, ">&", STDOUT);
+
+### Crunch script run mode
+if (@ARGV) {
+  # We want to do routine logging during task 0 only.  This gives the user
+  # the information they need, but avoids repeating the information for every
+  # task.
+  my $Log;
+  if ($ENV{TASK_SEQUENCE} eq "0") {
+    $Log = sub {
+      my $msg = shift;
+      printf STDERR_ORIG "[Crunch] $msg\n", @_;
+    };
+  } else {
+    $Log = sub { };
+  }
+
+  my $python_src = "$install_dir/python";
+  my $venv_dir = "$job_work/.arvados.venv";
+  my $venv_built = -e "$venv_dir/bin/activate";
+  if ((!$venv_built) and (-d $python_src) and can_run("virtualenv")) {
+    shell_or_die("virtualenv", "--quiet", "--system-site-packages",
+                 "--python=python2.7", $venv_dir);
+    shell_or_die("$venv_dir/bin/pip", "--quiet", "install", $python_src);
+    $venv_built = 1;
+    $Log->("Built Python SDK virtualenv");
+  }
+
+  if ($venv_built) {
+    $Log->("Running in Python SDK virtualenv");
+    my $orig_argv = join(" ", map { quotemeta($_); } @ARGV);
+    @ARGV = ("/bin/sh", "-ec",
+             ". \Q$venv_dir/bin/activate\E; exec $orig_argv");
+  } elsif (-d $python_src) {
+    $Log->("Warning: virtualenv not found inside Docker container default " +
+           "\$PATH. Can't install Python SDK.");
+  }
+
+  while (my ($sdk_dir, $sdk_envkey) = each(%SDK_ENVVARS)) {
+    my $sdk_path = "$install_dir/$sdk_dir";
+    if (-d $sdk_path) {
+      if ($ENV{$sdk_envkey}) {
+        $ENV{$sdk_envkey} = "$sdk_path:" . $ENV{$sdk_envkey};
+      } else {
+        $ENV{$sdk_envkey} = $sdk_path;
+      }
+      $Log->("Arvados SDK added to %s", $sdk_envkey);
+    }
+  }
+
+  close(STDOUT);
+  close(STDERR);
+  open(STDOUT, ">&", STDOUT_ORIG);
+  open(STDERR, ">&", STDERR_ORIG);
+  exec(@ARGV);
+  die "Cannot exec `@ARGV`: $!";
 }
 
+### Installation mode
 open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
 flock L, LOCK_EX;
 if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
   # This version already installed -> nothing to do.
-  run_argv_and_exit();
+  exit(0);
 }
 
 unlink "$destdir.commit";
-open STDERR_ORIG, ">&STDERR";
-open STDOUT, ">", "$destdir.log";
-open STDERR, ">&STDOUT";
-
 mkdir $destdir;
-open TARX, "|-", "tar", "-C", $destdir, "-xf", "-";
-print TARX @git_archive_data;
+open TARX, "|-", "tar", "-xC", $destdir;
+{
+  local $/ = undef;
+  print TARX <DATA>;
+}
 if(!close(TARX)) {
-  die "'tar -C $destdir -xf -' exited $?: $!";
+  die "'tar -xC $destdir' exited $?: $!";
 }
 
-my $pwd;
-chomp ($pwd = `pwd`);
-my $install_dir = $ENV{"CRUNCH_INSTALL"} || "$pwd/opt";
 mkdir $install_dir;
 
-for my $src_path ("$destdir/arvados/sdk/python") {
-  if (-d $src_path) {
-    shell_or_die ("virtualenv", $install_dir);
-    shell_or_die ("cd $src_path && ./build.sh && $install_dir/bin/python setup.py install");
+my $sdk_root = "$destdir/.arvados.sdk/sdk";
+if (-d $sdk_root) {
+  foreach my $sdk_lang (("python",
+                         map { (split /\//, $_, 2)[0]; } keys(%SDK_ENVVARS))) {
+    if (-d "$sdk_root/$sdk_lang") {
+      if (!rename("$sdk_root/$sdk_lang", "$install_dir/$sdk_lang")) {
+        die "Failed to install $sdk_lang SDK: $!";
+      }
+    }
   }
 }
 
+my $python_dir = "$install_dir/python";
+if ((-d $python_dir) and can_run("python2.7") and
+    (system("python2.7", "$python_dir/setup.py", "--quiet", "egg_info") != 0)) {
+  # egg_info failed, probably when it asked git for a build tag.
+  # Specify no build tag.
+  open(my $pysdk_cfg, ">>", "$python_dir/setup.cfg");
+  print $pysdk_cfg "\n[egg_info]\ntag_build =\n";
+  close($pysdk_cfg);
+}
+
 if (-e "$destdir/crunch_scripts/install") {
     shell_or_die ("$destdir/crunch_scripts/install", $install_dir);
 } elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
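
In run mode the dispatch script prepends each installed SDK directory to the matching search-path variable (PERLLIB for perl/lib, RUBYLIB for ruby/lib) so the job's pinned SDK shadows any system copy.  The same prepend, sketched in Python:

```python
# Sketch of the PATH-style prepend done for each SDK directory.  Mirrors the
# %SDK_ENVVARS map in the dispatch script above; not part of this patch.
import os

SDK_ENVVARS = {'perl/lib': 'PERLLIB', 'ruby/lib': 'RUBYLIB'}

def export_sdk_paths(install_dir):
    for sdk_dir, envkey in SDK_ENVVARS.items():
        sdk_path = os.path.join(install_dir, sdk_dir)
        if os.path.isdir(sdk_path):
            prev = os.environ.get(envkey)
            # Prepend so the job's pinned SDK wins over system copies.
            os.environ[envkey] = sdk_path + (':' + prev if prev else '')
```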
@@ -1807,16 +1941,12 @@ if ($commit) {
 
 close L;
 
-run_argv_and_exit();
-
-sub run_argv_and_exit
-{
-  if (@ARGV) {
-    exec(@ARGV);
-    die "Cannot exec `@ARGV`: $!";
-  } else {
-    exit 0;
-  }
+sub can_run {
+  my $command_name = shift;
+  open(my $which, "-|", "which", $command_name);
+  while (<$which>) { }
+  close($which);
+  return ($? == 0);
 }
 
 sub shell_or_die
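
The new can_run() helper shells out to @which@ to probe for a command on $PATH.  A Python counterpart, for comparison (distutils is used because this code targets Python 2-era systems; the helper is a sketch, not part of the patch):

```python
# Counterpart of the dispatch script's can_run() helper: return True when a
# command is available on $PATH.
from distutils.spawn import find_executable

def can_run(command_name):
    return find_executable(command_name) is not None
```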
index c513be098674318d4604b2c54bd286f2974dfb65..18bef403b761f52701fdc86b2919dac44de59e13 100644 (file)
@@ -1,11 +1,14 @@
 require 'minitest/autorun'
 require 'digest/md5'
+require 'active_support/core_ext'
 
 class TestCollectionCreate < Minitest::Test
   def setup
   end
 
   def test_small_collection
+    skip "Waiting unitl #4534 is implemented"
+
     uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s
     out, err = capture_subprocess_io do
       assert_arv('--format', 'uuid', 'collection', 'create', '--collection', {
@@ -13,7 +16,7 @@ class TestCollectionCreate < Minitest::Test
                    manifest_text: foo_manifest
                  }.to_json)
     end
-    assert_equal uuid+"\n", out
+    assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
     assert_equal '', err
     $stderr.puts err
   end
index 3d378e740c10f476df1d76a0f397276cd06f1fc6..67dd399a2456fe4a7c2a2a2cf4d86401d409e6d6 100644 (file)
@@ -30,10 +30,14 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_dev_stdout
+    skip "Waiting unitl #4534 is implemented"
+
     test_file_to_stdout('/dev/stdout')
   end
 
   def test_file_to_stdout(specify_stdout_as='-')
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as
     end
@@ -42,6 +46,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'
@@ -52,30 +58,34 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file_no_overwrite_file
+    skip "Waiting unitl #4534 is implemented"
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
     out, err = capture_subprocess_io do
       assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
     end
-    assert_match /^ERROR:/, err
+    assert_match /Error:/, err
     assert_equal '', out
     assert_equal 'baz', IO.read('tmp/foo')
   end
 
   def test_file_to_file_no_overwrite_file_in_dir
+    skip "Waiting unitl #4534 is implemented"
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
     out, err = capture_subprocess_io do
       assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
     end
-    assert_match /^ERROR:/, err
+    assert_match /Error:/, err
     assert_equal '', out
     assert_equal 'baz', IO.read('tmp/foo')
   end
 
   def test_file_to_file_force_overwrite
+    skip "Waiting unitl #4534 is implemented"
+
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
@@ -89,6 +99,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file_skip_existing
+    skip "Waiting unitl #4534 is implemented"
+
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
@@ -102,6 +114,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_dir
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/'
@@ -128,22 +142,28 @@ class TestArvGet < Minitest::Test
   end
 
   def test_nonexistent_block
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6'
     end
     assert_equal '', out
-    assert_match /^ERROR:/, err
+    assert_match /Error:/, err
   end
 
   def test_nonexistent_manifest
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6/', 'tmp/'
     end
     assert_equal '', out
-    assert_match /^ERROR:/, err
+    assert_match /Error:/, err
   end
 
   def test_manifest_root_to_dir
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'
@@ -154,6 +174,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_manifest_root_to_dir_noslash
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'
@@ -164,6 +186,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_display_md5sum
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
@@ -174,6 +198,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_md5sum_nowrite
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
@@ -184,6 +210,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_sha1_nowrite
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'
@@ -194,16 +222,22 @@ class TestArvGet < Minitest::Test
   end
 
   def test_block_to_file
+    skip "Waiting unitl #4534 is implemented"
+
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator, 'tmp/foo'
     end
     assert_equal '', err
     assert_equal '', out
-    assert_equal foo_manifest, IO.read('tmp/foo')
+
+    digest = Digest::MD5.hexdigest('foo')
+    assert_match(/^\. #{digest}\+3(.*) 0:3:foo$/, IO.read('tmp/foo'))
   end
 
   def test_create_directory_tree
+    skip "Waiting unitl #4534 is implemented"
+
     `rm -rf ./tmp/arv-get-test/`
     Dir.mkdir './tmp/arv-get-test'
     out, err = capture_subprocess_io do
@@ -215,6 +249,8 @@ class TestArvGet < Minitest::Test
   end
 
   def test_create_partial_directory_tree
+    skip "Waiting unitl #4534 is implemented"
+
     `rm -rf ./tmp/arv-get-test/`
     Dir.mkdir './tmp/arv-get-test'
     out, err = capture_subprocess_io do
@@ -240,10 +276,6 @@ class TestArvGet < Minitest::Test
                  "should exit #{if expect then 0 else 'non-zero' end}")
   end
 
-  def foo_manifest
-    ". #{Digest::MD5.hexdigest('foo')}+3 0:3:foo\n"
-  end
-
   def remove_tmp_foo
     begin
       File.unlink('tmp/foo')
index 27809aadedc0d301d458730b8e6e479d3f5fb6a0..73513db56cb17ee5f6d88d151205f437e6d22107 100644 (file)
@@ -22,6 +22,8 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_stdin
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       r,w = IO.pipe
       wpid = fork do
@@ -39,6 +41,8 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_file
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--raw', './tmp/foo')
     end
@@ -48,6 +52,8 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_empty_file
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--raw', './tmp/empty_file')
     end
@@ -77,15 +83,19 @@ class TestArvPut < Minitest::Test
   end
 
   def test_filename_arg_with_empty_file
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--filename', 'foo', './tmp/empty_file')
     end
     $stderr.write err
     assert_match '', err
-    assert_equal "aa4f15cbf013142a7d98b1e273f9c661+45\n", out
+    assert match_collection_uuid(out)
   end
 
   def test_as_stream
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--as-stream', './tmp/foo')
     end
@@ -95,20 +105,24 @@ class TestArvPut < Minitest::Test
   end
 
   def test_progress
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--manifest', '--progress', './tmp/foo')
     end
     assert_match /%/, err
-    assert_equal foo_manifest_locator+"\n", out
+    assert match_collection_uuid(out)
   end
 
   def test_batch_progress
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       assert arv_put('--manifest', '--batch-progress', './tmp/foo')
     end
     assert_match /: 0 written 3 total/, err
     assert_match /: 3 written 3 total/, err
-    assert_equal foo_manifest_locator+"\n", out
+    assert match_collection_uuid(out)
   end
 
   def test_progress_and_batch_progress
@@ -122,14 +136,20 @@ class TestArvPut < Minitest::Test
   end
 
   def test_read_from_implicit_stdin
+    skip "Waiting unitl #4534 is implemented"
+
     test_read_from_stdin(specify_stdin_as='--manifest')
   end
 
   def test_read_from_dev_stdin
+    skip "Waiting unitl #4534 is implemented"
+
     test_read_from_stdin(specify_stdin_as='/dev/stdin')
   end
 
   def test_read_from_stdin(specify_stdin_as='-')
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       r,w = IO.pipe
       wpid = fork do
@@ -144,20 +164,26 @@ class TestArvPut < Minitest::Test
     end
     $stderr.write err
     assert_match '', err
-    assert_equal foo_manifest_locator+"\n", out
+    assert match_collection_uuid(out)
   end
 
   def test_read_from_implicit_stdin_implicit_manifest
+    skip "Waiting unitl #4534 is implemented"
+
     test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,
                                            expect_filename='stdin')
   end
 
   def test_read_from_dev_stdin_implicit_manifest
+    skip "Waiting unitl #4534 is implemented"
+
     test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')
   end
 
   def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',
                                              expect_filename=nil)
+    skip "Waiting unitl #4534 is implemented"
+
     expect_filename = expect_filename || specify_stdin_as.split('/').last
     out, err = capture_subprocess_io do
       r,w = IO.pipe
@@ -174,8 +200,7 @@ class TestArvPut < Minitest::Test
     end
     $stderr.write err
     assert_match '', err
-    assert_equal(foo_manifest_locator(expect_filename)+"\n",
-                 out)
+    assert match_collection_uuid(out)
   end
 
   protected
@@ -191,4 +216,8 @@ class TestArvPut < Minitest::Test
     Digest::MD5.hexdigest(foo_manifest(filename)) +
       "+#{foo_manifest(filename).length}"
   end
+
+  def match_collection_uuid(uuid)
+    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(uuid)
+  end
 end
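
The exact-locator assertions above are loosened into match_collection_uuid, which accepts a 5-character cluster prefix, the collection-type infix @4zz18@, and a 15-character suffix (or, via the trailing @?@, an empty line).  The shape it checks, restated in Python as a sketch without the empty-string escape hatch:

```python
# The UUID shape match_collection_uuid accepts, minus the empty-string
# alternative the Ruby regexp's trailing `?` currently tolerates.
import re

COLLECTION_UUID_RE = re.compile(r'^[0-9a-z]{5}-4zz18-[0-9a-z]{15}$')

def is_collection_uuid(text):
    return COLLECTION_UUID_RE.match(text.strip()) is not None
```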
index cac89b37bc0555c4929c6efadf873c32aed01297..8c8d1d8331ae05fcbda64a65289732188c66bcd8 100644 (file)
@@ -5,6 +5,8 @@ class TestRunPipelineInstance < Minitest::Test
   end
 
   def test_run_pipeline_instance_get_help
+    skip "Waiting unitl #4534 is implemented"
+
     out, err = capture_subprocess_io do
       system ('arv-run-pipeline-instance -h')
     end
index 0e6aa2b1561f8b1ca5e85da9062efbfd22f72a42..a5a1c94fff29227e0944afcae08383529cfe0b33 100644 (file)
@@ -9,6 +9,8 @@ end
 class TestArvTag < Minitest::Test
 
   def test_no_args
+    skip "Waiting unitl #4534 is implemented"
+
     # arv-tag exits with failure if run with no args
     out, err = capture_subprocess_io do
       assert_equal false, arv_tag
@@ -19,6 +21,8 @@ class TestArvTag < Minitest::Test
 
   # Test adding and removing a single tag on a single object.
   def test_single_tag_single_obj
+    skip "TBD"
+
     # Add a single tag.
     tag_uuid, err = capture_subprocess_io do
       assert arv_tag '--short', 'add', 'test_tag1', '--object', 'uuid1'
@@ -53,6 +57,8 @@ class TestArvTag < Minitest::Test
 
   # Test adding and removing a single tag with multiple objects.
   def test_single_tag_multi_objects
+    skip "TBD"
+
     out, err = capture_subprocess_io do
       assert arv_tag('add', 'test_tag1',
                      '--object', 'uuid1',
index 326c2a06ae1a5620af8a388ffb9392357dc7d15e..23af470f78efc8137d0913cc4ccf611138ae0daf 100644 (file)
@@ -2,11 +2,11 @@
 package keepclient
 
 import (
-       "git.curoverse.com/arvados.git/sdk/go/streamer"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "crypto/md5"
        "errors"
        "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
        "io"
        "io/ioutil"
        "log"
@@ -15,6 +15,7 @@ import (
        "strings"
        "sync"
        "sync/atomic"
+       "time"
        "unsafe"
 )
 
@@ -47,8 +48,8 @@ func MakeKeepClient(arv *arvadosclient.ArvadosClient) (kc KeepClient, err error)
                Arvados:       arv,
                Want_replicas: 2,
                Using_proxy:   false,
-               Client:        &http.Client{Transport: &http.Transport{}}}
-
+               Client:        &http.Client{},
+       }
        err = (&kc).DiscoverKeepServers()
 
        return kc, err
@@ -131,6 +132,10 @@ func (this KeepClient) AuthorizedGet(hash string,
        timestamp string) (reader io.ReadCloser,
        contentLength int64, url string, err error) {
 
+       // Take the hash of the locator and current timestamp to identify this
+       // specific transaction in log statements.
+       requestId := fmt.Sprintf("%x", md5.Sum([]byte(hash+time.Now().String())))[0:8]
+
        // Calculate the ordering for asking servers
        sv := NewRootSorter(this.ServiceRoots(), hash).GetSortedRoots()
 
@@ -150,12 +155,19 @@ func (this KeepClient) AuthorizedGet(hash string,
 
                req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
 
+               log.Printf("[%v] Begin download %s", requestId, url)
+
                var resp *http.Response
-               if resp, err = this.Client.Do(req); err != nil {
+               if resp, err = this.Client.Do(req); err != nil {
+                       log.Printf("[%v] Download %v error: \"%v\"", requestId, url, err)
+                       continue
+               }
+               if resp.StatusCode != http.StatusOK {
+                       respbody, _ := ioutil.ReadAll(&io.LimitedReader{resp.Body, 4096})
+                       response := strings.TrimSpace(string(respbody))
+                       log.Printf("[%v] Download %v status code: %v response: \"%v\"",
+                               requestId, url, resp.StatusCode, response)
                        continue
                }
 
                if resp.StatusCode == http.StatusOK {
+                       log.Printf("[%v] Download %v status code: %v", requestId, url, resp.StatusCode)
                        return HashCheckingReader{resp.Body, md5.New(), hash}, resp.ContentLength, url, nil
                }
        }
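
Each download attempt is now tagged with a short request ID, the first eight hex digits of an MD5 over the locator plus the current timestamp, so every log line for one transaction can be correlated.  The same scheme in Python, as a sketch:

```python
# Sketch of the request-ID scheme used above: 8 hex digits of
# md5(locator + timestamp), good enough to correlate log lines.
import hashlib
import time

def request_id(locator):
    seed = locator + str(time.time())
    return hashlib.md5(seed.encode()).hexdigest()[:8]
```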
index 5f9915d95c0967f488e8f50f0bb00d7959c53cba..8487e00786d93d4acece1fcf83c065629e499944 100644 (file)
@@ -1,11 +1,11 @@
 package keepclient
 
 import (
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       "git.curoverse.com/arvados.git/sdk/go/streamer"
        "crypto/md5"
        "flag"
        "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
        . "gopkg.in/check.v1"
        "io"
        "io/ioutil"
@@ -110,24 +110,22 @@ func (this StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request
        this.handled <- fmt.Sprintf("http://%s", req.Host)
 }
 
-func RunBogusKeepServer(st http.Handler, port int) (listener net.Listener, url string) {
+func RunFakeKeepServer(st http.Handler) (ks KeepServer) {
        var err error
-       listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: port})
+       ks.listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0})
        if err != nil {
-               panic(fmt.Sprintf("Could not listen on tcp port %v", port))
+               panic(fmt.Sprintf("Could not listen on any port"))
        }
-
-       url = fmt.Sprintf("http://localhost:%d", port)
-
-       go http.Serve(listener, st)
-       return listener, url
+       ks.url = fmt.Sprintf("http://%s", ks.listener.Addr().String())
+       go http.Serve(ks.listener, st)
+       return
 }
 
 func UploadToStubHelper(c *C, st http.Handler, f func(KeepClient, string,
        io.ReadCloser, io.WriteCloser, chan uploadStatus)) {
 
-       listener, url := RunBogusKeepServer(st, 2990)
-       defer listener.Close()
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
 
        arv, _ := arvadosclient.MakeArvadosClient()
        arv.ApiToken = "abc123"
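
RunFakeKeepServer replaces the hard-coded test ports: binding to port 0 asks the kernel for any free port, and the real address is read back from the listener.  The same trick, sketched in Python:

```python
# Sketch of the port-0 trick RunFakeKeepServer uses: let the OS pick a free
# port, then recover the address actually bound.
import socket

def listen_on_free_port():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))  # port 0 means "any free port"
    sock.listen(1)
    host, port = sock.getsockname()
    return sock, 'http://%s:%d' % (host, port)
```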
@@ -137,7 +135,7 @@ func UploadToStubHelper(c *C, st http.Handler, f func(KeepClient, string,
        reader, writer := io.Pipe()
        upload_status := make(chan uploadStatus)
 
-       f(kc, url, reader, writer, upload_status)
+       f(kc, ks.url, reader, writer, upload_status)
 }
 
 func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
@@ -154,7 +152,7 @@ func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
                func(kc KeepClient, url string, reader io.ReadCloser,
                        writer io.WriteCloser, upload_status chan uploadStatus) {
 
-                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")))
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), "TestUploadToStubKeepServer")
 
                        writer.Write([]byte("foo"))
                        writer.Close()
@@ -186,7 +184,7 @@ func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
 
                        br1 := tr.MakeStreamReader()
 
-                       go kc.uploadToKeepServer(url, st.expectPath, br1, upload_status, 3)
+                       go kc.uploadToKeepServer(url, st.expectPath, br1, upload_status, 3, "TestUploadToStubKeepServerBufferReader")
 
                        writer.Write([]byte("foo"))
                        writer.Close()
@@ -221,7 +219,7 @@ func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
                func(kc KeepClient, url string, reader io.ReadCloser,
                        writer io.WriteCloser, upload_status chan uploadStatus) {
 
-                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3)
+                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, "TestFailedUploadToStubKeepServer")
 
                        writer.Write([]byte("foo"))
                        writer.Close()
@@ -240,12 +238,11 @@ type KeepServer struct {
        url      string
 }
 
-func RunSomeFakeKeepServers(st http.Handler, n int, port int) (ks []KeepServer) {
+func RunSomeFakeKeepServers(st http.Handler, n int) (ks []KeepServer) {
        ks = make([]KeepServer, n)
 
        for i := 0; i < n; i += 1 {
-               boguslistener, bogusurl := RunBogusKeepServer(st, port+i)
-               ks[i] = KeepServer{boguslistener, bogusurl}
+               ks[i] = RunFakeKeepServer(st)
        }
 
        return ks
@@ -270,11 +267,11 @@ func (s *StandaloneSuite) TestPutB(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks := RunSomeFakeKeepServers(st, 5, 2990)
+       ks := RunSomeFakeKeepServers(st, 5)
 
-       for i := 0; i < len(ks); i += 1 {
-               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = ks[i].url
-               defer ks[i].listener.Close()
+       for i, k := range ks {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
        }
 
        kc.SetServiceRoots(service_roots)
@@ -313,11 +310,11 @@ func (s *StandaloneSuite) TestPutHR(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks := RunSomeFakeKeepServers(st, 5, 2990)
+       ks := RunSomeFakeKeepServers(st, 5)
 
-       for i := 0; i < len(ks); i += 1 {
-               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = ks[i].url
-               defer ks[i].listener.Close()
+       for i, k := range ks {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
        }
 
        kc.SetServiceRoots(service_roots)
@@ -367,8 +364,8 @@ func (s *StandaloneSuite) TestPutWithFail(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks1 := RunSomeFakeKeepServers(st, 4, 2990)
-       ks2 := RunSomeFakeKeepServers(fh, 1, 2995)
+       ks1 := RunSomeFakeKeepServers(st, 4)
+       ks2 := RunSomeFakeKeepServers(fh, 1)
 
        for i, k := range ks1 {
                service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
@@ -423,8 +420,8 @@ func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
-       ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
 
        for i, k := range ks1 {
                service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
@@ -441,7 +438,7 @@ func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
 
        c.Check(err, Equals, InsufficientReplicasError)
        c.Check(replicas, Equals, 1)
-       c.Check(<-st.handled, Matches, ".*2990")
+       c.Check(<-st.handled, Equals, ks1[0].url)
 
        log.Printf("TestPutWithTooManyFail done")
 }
@@ -471,19 +468,19 @@ func (s *StandaloneSuite) TestGet(c *C) {
                "abc123",
                []byte("foo")}
 
-       listener, url := RunBogusKeepServer(st, 2990)
-       defer listener.Close()
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
        kc, _ := MakeKeepClient(&arv)
        arv.ApiToken = "abc123"
-       kc.SetServiceRoots(map[string]string{"x":url})
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
 
        r, n, url2, err := kc.Get(hash)
        defer r.Close()
        c.Check(err, Equals, nil)
        c.Check(n, Equals, int64(3))
-       c.Check(url2, Equals, fmt.Sprintf("%s/%s", url, hash))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
 
        content, err2 := ioutil.ReadAll(r)
        c.Check(err2, Equals, nil)
@@ -497,13 +494,13 @@ func (s *StandaloneSuite) TestGetFail(c *C) {
 
        st := FailHandler{make(chan string, 1)}
 
-       listener, url := RunBogusKeepServer(st, 2990)
-       defer listener.Close()
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
        kc, _ := MakeKeepClient(&arv)
        arv.ApiToken = "abc123"
-       kc.SetServiceRoots(map[string]string{"x":url})
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
 
        r, n, url2, err := kc.Get(hash)
        c.Check(err, Equals, BlockNotFound)
@@ -527,13 +524,13 @@ func (s *StandaloneSuite) TestChecksum(c *C) {
 
        st := BarHandler{make(chan string, 1)}
 
-       listener, url := RunBogusKeepServer(st, 2990)
-       defer listener.Close()
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
        kc, _ := MakeKeepClient(&arv)
        arv.ApiToken = "abc123"
-       kc.SetServiceRoots(map[string]string{"x":url})
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
 
        r, n, _, err := kc.Get(barhash)
        _, err = ioutil.ReadAll(r)
@@ -568,8 +565,8 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
-       ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
 
        for i, k := range ks1 {
                service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
@@ -588,7 +585,7 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
        // the choice of block content "waz" and the UUIDs of the fake
        // servers, so we just tried different strings until we found
        // an example that passes this Assert.)
-       c.Assert(NewRootSorter(service_roots, hash).GetSortedRoots()[0], Matches, ".*299[1-4]")
+       c.Assert(NewRootSorter(service_roots, hash).GetSortedRoots()[0], Not(Equals), ks1[0].url)
 
        r, n, url2, err := kc.Get(hash)
 
@@ -665,7 +662,7 @@ func (s *StandaloneSuite) TestPutProxy(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+       ks1 := RunSomeFakeKeepServers(st, 1)
 
        for i, k := range ks1 {
                service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
@@ -696,7 +693,7 @@ func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
        arv.ApiToken = "abc123"
        service_roots := make(map[string]string)
 
-       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+       ks1 := RunSomeFakeKeepServers(st, 1)
 
        for i, k := range ks1 {
                service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
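
These test changes drop the hard-coded listen ports (2990, 2995, ...) and instead assert against the URL each fake server reports, which suggests the fakes now bind to an OS-assigned port. A minimal Python sketch of that trick, for reference (names here are illustrative, not part of the SDK):

    import socket

    # Bind to port 0 and let the OS pick any free port, then read back
    # the port that was actually assigned.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    port = sock.getsockname()[1]
    url = 'http://127.0.0.1:{}'.format(port)
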
index e12214450c12527f9e447d77025df0d542cb208d..c24849e687a8d11cf2e5d2154fdd62d0e470ec83 100644 (file)
@@ -3,15 +3,17 @@ package keepclient
 
 import (
        "crypto/md5"
-       "git.curoverse.com/arvados.git/sdk/go/streamer"
        "errors"
        "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
        "io"
        "io/ioutil"
        "log"
+       "net"
        "net/http"
        "os"
        "strings"
+       "time"
 )
 
 type keepDisk struct {
@@ -22,15 +24,65 @@ type keepDisk struct {
        SvcType  string `json:"service_type"`
 }
 
-func Md5String(s string) (string) {
+func Md5String(s string) string {
        return fmt.Sprintf("%x", md5.Sum([]byte(s)))
 }
 
+// Set timeouts that apply when connecting to keepproxy services (assumed
+// to be reached over the Internet).
+func (this *KeepClient) setClientSettingsProxy() {
+       if this.Client.Timeout == 0 {
+               // Maximum time to wait for a complete response
+               this.Client.Timeout = 300 * time.Second
+
+               // TCP and TLS connection settings
+               this.Client.Transport = &http.Transport{
+                       Dial: (&net.Dialer{
+                               // The maximum time to wait to set up
+                               // the initial TCP connection.
+                               Timeout: 30 * time.Second,
+
+                               // The TCP keep alive heartbeat
+                               // interval.
+                               KeepAlive: 120 * time.Second,
+                       }).Dial,
+
+                       TLSHandshakeTimeout: 10 * time.Second,
+               }
+       }
+
+}
+
+// Set timeouts that apply when connecting to keepstore services directly
+// (assumed to be on the local network).
+func (this *KeepClient) setClientSettingsStore() {
+       if this.Client.Timeout == 0 {
+               // Maximum time to wait for a complete response
+               this.Client.Timeout = 20 * time.Second
+
+               // TCP and TLS connection timeouts
+               this.Client.Transport = &http.Transport{
+                       Dial: (&net.Dialer{
+                               // The maximum time to wait to set up
+                               // the initial TCP connection.
+                               Timeout: 2 * time.Second,
+
+                               // The TCP keep alive heartbeat
+                               // interval.
+                               KeepAlive: 180 * time.Second,
+                       }).Dial,
+
+                       TLSHandshakeTimeout: 4 * time.Second,
+               }
+       }
+}
+
 func (this *KeepClient) DiscoverKeepServers() error {
        if prx := os.Getenv("ARVADOS_KEEP_PROXY"); prx != "" {
-               sr := map[string]string{"proxy":prx}
+               sr := map[string]string{"proxy": prx}
                this.SetServiceRoots(sr)
                this.Using_proxy = true
+               this.setClientSettingsProxy()
                return nil
        }
 
@@ -70,6 +122,12 @@ func (this *KeepClient) DiscoverKeepServers() error {
                }
        }
 
+       if this.Using_proxy {
+               this.setClientSettingsProxy()
+       } else {
+               this.setClientSettingsStore()
+       }
+
        this.SetServiceRoots(service_roots)
 
        return nil
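
The two profiles above encode a policy: generous timeouts for keepproxy traffic crossing the Internet (30s dial, 300s total), tight ones for keepstore on the local network (2s dial, 20s total). A rough Python analogue, assuming the requests library, where the timeout tuple is (connect, read) -- note requests' read timeout is per socket read rather than a whole-response deadline, so this is only an approximation:

    import requests

    PROXY_TIMEOUT = (30, 300)  # keepproxy: WAN latency expected
    STORE_TIMEOUT = (2, 20)    # keepstore: local network

    def fetch_block(url, using_proxy=False):
        timeout = PROXY_TIMEOUT if using_proxy else STORE_TIMEOUT
        return requests.get(url, timeout=timeout)
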
@@ -84,21 +142,29 @@ type uploadStatus struct {
 }
 
 func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
-       upload_status chan<- uploadStatus, expectedLength int64) {
-
-       log.Printf("Uploading %s to %s", hash, host)
+       upload_status chan<- uploadStatus, expectedLength int64, requestId string) {
 
        var req *http.Request
        var err error
        var url = fmt.Sprintf("%s/%s", host, hash)
        if req, err = http.NewRequest("PUT", url, nil); err != nil {
+               log.Printf("[%v] Error creating request PUT %v error: %v", requestId, url, err.Error())
                upload_status <- uploadStatus{err, url, 0, 0, ""}
                body.Close()
                return
        }
 
+       req.ContentLength = expectedLength
        if expectedLength > 0 {
-               req.ContentLength = expectedLength
+               // http.Client.Do will close the body ReadCloser when it is
+               // done with it.
+               req.Body = body
+       } else {
+               // "For client requests, a value of 0 means unknown if Body is
+               // not nil."  In this case we do want the body to be empty, so
+               // don't set req.Body.  However, we still need to close the
+               // body ReadCloser.
+               body.Close()
        }
 
        req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
@@ -108,12 +174,10 @@ func (this KeepClient) uploadToKeepServer(host string, hash string, body io.Read
                req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
        }
 
-       req.Body = body
-
        var resp *http.Response
        if resp, err = this.Client.Do(req); err != nil {
+               log.Printf("[%v] Upload failed %v error: %v", requestId, url, err.Error())
                upload_status <- uploadStatus{err, url, 0, 0, ""}
-               body.Close()
                return
        }
 
@@ -126,17 +190,16 @@ func (this KeepClient) uploadToKeepServer(host string, hash string, body io.Read
        defer io.Copy(ioutil.Discard, resp.Body)
 
        respbody, err2 := ioutil.ReadAll(&io.LimitedReader{resp.Body, 4096})
+       response := strings.TrimSpace(string(respbody))
        if err2 != nil && err2 != io.EOF {
-               upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, string(respbody)}
-               return
-       }
-
-       locator := strings.TrimSpace(string(respbody))
-
-       if resp.StatusCode == http.StatusOK {
-               upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, locator}
+               log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, err2.Error(), response)
+               upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+       } else if resp.StatusCode == http.StatusOK {
+               log.Printf("[%v] Upload %v success", requestId, url)
+               upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
        } else {
-               upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, locator}
+               log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, resp.StatusCode, response)
+               upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
        }
 }
 
@@ -145,6 +208,10 @@ func (this KeepClient) putReplicas(
        tr *streamer.AsyncStream,
        expectedLength int64) (locator string, replicas int, err error) {
 
+       // Take the hash of locator and timestamp in order to identify this
+       // specific transaction in log statements.
+       requestId := fmt.Sprintf("%x", md5.Sum([]byte(locator+time.Now().String())))[0:8]
+
        // Calculate the ordering for uploading to servers
        sv := NewRootSorter(this.ServiceRoots(), hash).GetSortedRoots()
 
@@ -159,14 +226,14 @@ func (this KeepClient) putReplicas(
        defer close(upload_status)
 
        // Desired number of replicas
-
        remaining_replicas := this.Want_replicas
 
        for remaining_replicas > 0 {
                for active < remaining_replicas {
                        // Start some upload requests
                        if next_server < len(sv) {
-                               go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength)
+                               log.Printf("[%v] Begin upload %s to %s", requestId, hash, sv[next_server])
+                               go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength, requestId)
                                next_server += 1
                                active += 1
                        } else {
@@ -177,20 +244,18 @@ func (this KeepClient) putReplicas(
                                }
                        }
                }
+               log.Printf("[%v] Replicas remaining to write: %v active uploads: %v",
+                       requestId, remaining_replicas, active)
 
                // Now wait for something to happen.
                status := <-upload_status
+               active -= 1
+
                if status.statusCode == 200 {
                        // good news!
                        remaining_replicas -= status.replicas_stored
                        locator = status.response
-               } else {
-                       // writing to keep server failed for some reason
-                       log.Printf("Keep server put to %v failed with '%v'",
-                               status.url, status.err)
                }
-               active -= 1
-               log.Printf("Upload to %v status code: %v remaining replicas: %v active: %v", status.url, status.statusCode, remaining_replicas, active)
        }
 
        return locator, this.Want_replicas, nil
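
The request id above is the first 8 hex digits of md5(locator + wall-clock time): just enough to correlate the Begin/success/error log lines of a single putReplicas call. The same computation restated in Python, for illustration:

    import hashlib
    import time

    def make_request_id(locator):
        # First 8 hex digits of md5(locator + timestamp), as in putReplicas.
        digest = hashlib.md5((locator + str(time.time())).encode()).hexdigest()
        return digest[:8]
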
index 105d068d4b60377ef81bcd0f4af620da766bdb68..ab21552d2e16fec0c536ed9de107a630bbab0956 100644 (file)
@@ -1,5 +1,7 @@
+*.pyc
 /build/
 /dist/
-/*.egg
-/*.egg-info
+*.egg
+*.egg-info
 /tests/tmp
+.eggs
index 060ed95d959531917749584176c5f42b8c453f66..4cae20d597d3e230c1fbd88b5c15f881d776c03a 100644 (file)
@@ -87,6 +87,7 @@ class job_setup:
             return
         job_input = current_job()['script_parameters']['input']
         cr = CollectionReader(job_input)
+        cr.normalize()
         for s in cr.all_streams():
             for f in s.all_files():
                 if input_as_path:
index 0f49438b6cd0ed8a57c7f7e519eedde8d294909e..d530f58b03e70f2983280bf673c937df0653669f 100644 (file)
@@ -1,3 +1,4 @@
+import functools
 import logging
 import os
 import re
@@ -124,6 +125,7 @@ class CollectionReader(CollectionBase):
         else:
             raise errors.ArgumentError(
                 "Argument to CollectionReader must be a manifest or a collection UUID")
+        self._api_response = None
         self._streams = None
 
     def _populate_from_api_server(self):
@@ -138,10 +140,10 @@ class CollectionReader(CollectionBase):
             if self._api_client is None:
                 self._api_client = arvados.api('v1')
                 self._keep_client = None  # Make a new one with the new api.
-            c = self._api_client.collections().get(
+            self._api_response = self._api_client.collections().get(
                 uuid=self._manifest_locator).execute(
                 num_retries=self.num_retries)
-            self._manifest_text = c['manifest_text']
+            self._manifest_text = self._api_response['manifest_text']
             return None
         except Exception as e:
             return e
@@ -158,8 +160,6 @@ class CollectionReader(CollectionBase):
             return e
 
     def _populate(self):
-        if self._streams is not None:
-            return
         error_via_api = None
         error_via_keep = None
         should_try_keep = ((self._manifest_text is None) and
@@ -190,9 +190,27 @@ class CollectionReader(CollectionBase):
                          for sline in self._manifest_text.split("\n")
                          if sline]
 
-    def normalize(self):
-        self._populate()
+    def _populate_first(orig_func):
+        # Decorator for methods that read actual Collection data.
+        @functools.wraps(orig_func)
+        def wrapper(self, *args, **kwargs):
+            if self._streams is None:
+                self._populate()
+            return orig_func(self, *args, **kwargs)
+        return wrapper
+
+    @_populate_first
+    def api_response(self):
+        """api_response() -> dict or None
+
+        Returns information about this Collection fetched from the API server.
+        If the Collection exists in Keep but not the API server, currently
+        returns None.  Future versions may provide a synthetic response.
+        """
+        return self._api_response
 
+    @_populate_first
+    def normalize(self):
         # Rearrange streams
         streams = {}
         for s in self.all_streams():
@@ -213,6 +231,7 @@ class CollectionReader(CollectionBase):
             [StreamReader(stream, keep=self._my_keep()).manifest_text()
              for stream in self._streams])
 
+    @_populate_first
     def open(self, streampath, filename=None):
         """open(streampath[, filename]) -> file-like object
 
@@ -220,7 +239,6 @@ class CollectionReader(CollectionBase):
         single string or as two separate stream name and file name arguments.
         This method returns a file-like object to read that file.
         """
-        self._populate()
         if filename is None:
             streampath, filename = split(streampath)
         keep_client = self._my_keep()
@@ -238,8 +256,8 @@ class CollectionReader(CollectionBase):
             raise ValueError("file '{}' not found in Collection stream '{}'".
                              format(filename, streampath))
 
+    @_populate_first
     def all_streams(self):
-        self._populate()
         return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
                 for s in self._streams]
 
@@ -248,6 +266,7 @@ class CollectionReader(CollectionBase):
             for f in s.all_files():
                 yield f
 
+    @_populate_first
     def manifest_text(self, strip=False, normalize=False):
         if normalize:
             cr = CollectionReader(self.manifest_text())
@@ -256,7 +275,6 @@ class CollectionReader(CollectionBase):
         elif strip:
             return self.stripped_manifest()
         else:
-            self._populate()
             return self._manifest_text
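
With _populate_first in place, any data-reading method triggers a single lazy fetch, and the new api_response() exposes the raw collection record afterwards. A hedged usage sketch (the UUID is a placeholder and a configured API client is assumed):

    import arvados

    cr = arvados.CollectionReader('zzzzz-4zz18-12345abcde12345')
    record = cr.api_response()   # triggers _populate on first call
    if record is not None:       # None if loaded from Keep, not the API
        print(record['portable_data_hash'])
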
 
 
diff --git a/sdk/python/arvados/commands/ls.py b/sdk/python/arvados/commands/ls.py
new file mode 100755 (executable)
index 0000000..e87244d
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import argparse
+
+import arvados
+import arvados.commands._util as arv_cmd
+
+def parse_args(args):
+    parser = argparse.ArgumentParser(
+        description='List contents of a manifest',
+        parents=[arv_cmd.retry_opt])
+
+    parser.add_argument('locator', type=str,
+                        help="""Collection UUID or locator""")
+    parser.add_argument('-s', action='store_true',
+                        help="""List file sizes, in KiB.""")
+
+    return parser.parse_args(args)
+
+def size_formatter(coll_file):
+    return "{:>10}".format((coll_file.size() + 1023) / 1024)
+
+def name_formatter(coll_file):
+    return "{}/{}".format(coll_file.stream_name(), coll_file.name)
+
+def main(args, stdout, stderr, api_client=None):
+    args = parse_args(args)
+
+    if api_client is None:
+        api_client = arvados.api('v1')
+
+    try:
+        cr = arvados.CollectionReader(args.locator, api_client=api_client,
+                                      num_retries=args.retries)
+        cr.normalize()
+    except (arvados.errors.ArgumentError,
+            arvados.errors.NotFoundError) as error:
+        print("arv-ls: error fetching collection: {}".format(error),
+              file=stderr)
+        return 1
+
+    formatters = []
+    if args.s:
+        formatters.append(size_formatter)
+    formatters.append(name_formatter)
+
+    for f in cr.all_files():
+        print(*(info_func(f) for info_func in formatters), file=stdout)
+
+    return 0
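
Moving the command logic into an importable module with an injectable api_client is what makes the new tests below possible. Driving it directly looks like this (the locator is a placeholder):

    import sys

    from arvados.commands.ls import main

    # Equivalent to: arv-ls -s <locator>
    exit_code = main(['-s', 'zzzzz-4zz18-12345abcde12345'],
                     sys.stdout, sys.stderr)
    sys.exit(exit_code)
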
index 272fa84a430f4c8c54f5d8d33a75b13f7525a81e..2451416dae38da1932f22fb7c6599b8a82e55110 100755 (executable)
@@ -216,7 +216,7 @@ for s,f,outfilename in todo:
             sys.stderr.write("%s  %s/%s\n"
                              % (digestor.hexdigest(), s.name(), f.name()))
     except KeyboardInterrupt:
-        if outfile and outfile != '/dev/stdout':
+        if outfile and outfilename != '/dev/stdout':
             os.unlink(outfilename)
         break
 
index 382bfe8cd116e92f200b7c675bbde25fa2396db2..23b99f24ce4bef779d6ced8ab235b377c6c9a758 100755 (executable)
@@ -1,24 +1,7 @@
 #!/usr/bin/env python
 
-import argparse
+import sys
 
-import arvados
-import arvados.commands._util as arv_cmd
+from arvados.commands.ls import main
 
-parser = argparse.ArgumentParser(
-    description='List contents of a manifest',
-    parents=[arv_cmd.retry_opt])
-
-parser.add_argument('locator', type=str,
-                    help="Collection UUID or locator")
-parser.add_argument('-s', action='store_true', help="""List file sizes, in KiB.""")
-
-args = parser.parse_args()
-cr = arvados.CollectionReader(args.locator, num_retries=args.retries)
-
-for f in cr.all_files():
-    if args.s:
-        print "{:>10} {}".format((f.size() + 1023) / 1024,
-                                 f.stream_name() + "/" + f.name())
-    else:
-        print f.stream_name() + "/" + f.name()
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
index bc1fab9d60ac3c1ee833f5f2772e6a7330ee9434..754d89bdad70e120367de344799fe337a0b4e1ab 100644 (file)
@@ -5,23 +5,27 @@ import subprocess
 import time
 
 from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
 
 SETUP_DIR = os.path.dirname(__file__)
 README = os.path.join(SETUP_DIR, 'README.rst')
 
-cmd_opts = {'egg_info': {}}
-try:
-    git_tags = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct %h', SETUP_DIR],
-        stderr=open('/dev/null','w')
-        ).split()
-    assert len(git_tags) == 2
-except (AssertionError, OSError, subprocess.CalledProcessError):
-    pass
-else:
-    git_tags[0] = time.strftime('%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-    cmd_opts['egg_info']['tag_build'] = '.{}.{}'.format(*git_tags)
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
 
 
 setup(name='arvados-python-client',
@@ -55,5 +59,5 @@ setup(name='arvados-python-client',
       test_suite='tests',
       tests_require=['mock>=1.0', 'PyYAML'],
       zip_safe=False,
-      options=cmd_opts,
+      cmdclass={'egg_info': TagBuildWithCommit},
       )
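
TagBuildWithCommit appends a timestamp-plus-sha1 build tag derived from the last commit touching SETUP_DIR, but only when no tag was set explicitly. The tag it computes, reproduced standalone (assumes a git checkout):

    import subprocess
    import time

    ts, sha = subprocess.check_output(
        ['git', 'log', '--first-parent', '--max-count=1',
         '--format=format:%ct %h']).decode().split()
    tag = '.{}+{}'.format(
        time.strftime('%Y%m%d%H%M%S', time.gmtime(int(ts))), sha)
    # e.g. '.20150106134508+c24849e'
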
diff --git a/sdk/python/tests/test_arv_ls.py b/sdk/python/tests/test_arv_ls.py
new file mode 100644 (file)
index 0000000..90bbacf
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import hashlib
+import io
+import random
+
+import mock
+
+import arvados.errors as arv_error
+import arvados.commands.ls as arv_ls
+import run_test_server
+
+class ArvLsTestCase(run_test_server.TestCaseWithServers):
+    FAKE_UUID = 'zzzzz-4zz18-12345abcde12345'
+
+    def newline_join(self, seq):
+        return '\n'.join(seq) + '\n'
+
+    def random_blocks(self, *sizes):
+        return ' '.join('{:032x}+{:d}'.format(
+                  random.randint(0, (16 ** 32) - 1), size
+                ) for size in sizes)
+
+    def mock_api_for_manifest(self, manifest_lines, uuid=FAKE_UUID):
+        manifest_text = self.newline_join(manifest_lines)
+        pdh = '{}+{}'.format(hashlib.md5(manifest_text).hexdigest(),
+                             len(manifest_text))
+        coll_info = {'uuid': uuid,
+                     'portable_data_hash': pdh,
+                     'manifest_text': manifest_text}
+        api_client = mock.MagicMock(name='mock_api_client')
+        api_client.collections().get().execute.return_value = coll_info
+        return coll_info, api_client
+
+    def run_ls(self, args, api_client):
+        self.stdout = io.BytesIO()
+        self.stderr = io.BytesIO()
+        return arv_ls.main(args, self.stdout, self.stderr, api_client)
+
+    def test_plain_listing(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:3:one.txt 3:4:two.txt'.format(self.random_blocks(5, 2)),
+             './dir {} 1:5:sub.txt'.format(self.random_blocks(8))])
+        self.assertEqual(0, self.run_ls([collection['uuid']], api_client))
+        self.assertEqual(
+            self.newline_join(['./one.txt', './two.txt', './dir/sub.txt']),
+            self.stdout.getvalue())
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_size_listing(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:0:0.txt 0:1000:1.txt 1000:2000:2.txt'.format(
+                    self.random_blocks(3000))])
+        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
+        self.stdout.seek(0, 0)
+        for expected in range(3):
+            actual_size, actual_name = self.stdout.readline().split()
+            # But she seems much bigger to me...
+            self.assertEqual(str(expected), actual_size)
+            self.assertEqual('./{}.txt'.format(expected), actual_name)
+        self.assertEqual('', self.stdout.read(-1))
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_nonnormalized_manifest(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:1010:non.txt'.format(self.random_blocks(1010)),
+             '. {} 0:2020:non.txt'.format(self.random_blocks(2020))])
+        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
+        self.stdout.seek(0, 0)
+        self.assertEqual(['3', './non.txt'], self.stdout.readline().split())
+        self.assertEqual('', self.stdout.read(-1))
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_locator_failure(self):
+        api_client = mock.MagicMock(name='mock_api_client')
+        api_client.collections().get().execute.side_effect = (
+            arv_error.NotFoundError)
+        self.assertNotEqual(0, self.run_ls([self.FAKE_UUID], api_client))
+        self.assertNotEqual('', self.stderr.getvalue())
index 254a29f313eac8a898a18eea3a3f8b9348cd810f..c991154e7f669ff2e92dc80c3cafbf2a62309d86 100644 (file)
@@ -643,6 +643,20 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
                                           api_client=client)
         self.assertEqual('', reader.manifest_text())
 
+    def test_api_response(self):
+        client = self.api_client_mock()
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertEqual(self.DEFAULT_COLLECTION, reader.api_response())
+
+    def test_api_response_with_collection_from_keep(self):
+        client = self.api_client_mock()
+        self.mock_get_collection(client, 404, 'foo')
+        with tutil.mock_get_responses(self.DEFAULT_MANIFEST, 200):
+            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,
+                                              api_client=client)
+            api_response = reader.api_response()
+        self.assertIsNone(api_response)
+
     def check_open_file(self, coll_file, stream_name, file_name, file_size):
         self.assertFalse(coll_file.closed, "returned file is not open")
         self.assertEqual(stream_name, coll_file.stream_name())
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
new file mode 100644 (file)
index 0000000..3436a07
--- /dev/null
@@ -0,0 +1,43 @@
+import mock
+import os
+import unittest
+
+import arvados
+import arvados.collection
+
+class TestSDK(unittest.TestCase):
+
+    @mock.patch('arvados.api')
+    @mock.patch('arvados.current_task')
+    @mock.patch('arvados.current_job')
+    def test_one_task_per_input_file_normalize(self, mock_job, mock_task, mock_api):
+        # This manifest will be reduced from three lines to one when it is
+        # normalized.
+        nonnormalized_manifest = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        dummy_hash = 'ffffffffffffffffffffffffffffffff+0'
+
+        mock_job.return_value = {
+            'uuid': 'none',
+            'script_parameters': {
+                'input': dummy_hash
+            }
+        }
+        mock_task.return_value = {
+            'uuid': 'none',
+            'sequence': 0,
+        }
+        # mock the API client to return a collection with a nonnormalized manifest.
+        mock_api('v1').collections().get().execute.return_value = {
+            'uuid': 'zzzzz-4zz18-mockcollection0',
+            'portable_data_hash': dummy_hash,
+            'manifest_text': nonnormalized_manifest,
+        }
+
+        # Because one_task_per_input_file normalizes this collection,
+        # it should now create only one job task and not three.
+        arvados.job_setup.one_task_per_input_file(and_end_task=False)
+        mock_api('v1').job_tasks().create().execute.assert_called_once_with()
+
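
Why normalizing collapses the task count: normalization merges streams with the same name, and repeated entries for one file name become a single file spanning all of the blocks. A sketch using two of the manifest lines from the test above (not run here):

    import arvados

    manifest = (
        ". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n"
        ". 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n")
    cr = arvados.CollectionReader(manifest)
    cr.normalize()
    # Now a single '.' stream with one md5sum.txt covering both blocks,
    # so one_task_per_input_file() creates one task instead of two.
    print(cr.manifest_text())
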
index 996d3fc7da560cca5c163a6c3e9dee558d2359b9..a7da12263919f85a95c8dc713aa952d9b948fde5 100644 (file)
@@ -7,6 +7,7 @@ gem 'rails', '~> 3.2.0'
 
 group :test, :development do
   gem 'factory_girl_rails'
+  gem 'database_cleaner'
  # Note: "require: false" here tells bundler not to automatically
   # 'require' the packages during application startup. Installation is
   # still mandatory.
@@ -67,12 +68,11 @@ gem 'test_after_commit', :group => :test
 gem 'google-api-client', '~> 0.6.3'
 gem 'trollop'
 gem 'faye-websocket'
-gem 'database_cleaner'
 
 gem 'themes_for_rails'
 
 gem 'arvados', '>= 0.1.20140919104705'
-gem 'arvados-cli', '>= 0.1.20141014201516'
+gem 'arvados-cli', '>= 0.1.20141202211726'
 
 # pg_power lets us use partial indexes in schema.rb in Rails 3
 gem 'pg_power'
index ce79f854dea56e3006fe049ee52988c690839ded..c2b2351543525bea174b429e8981078023937614 100644 (file)
@@ -35,18 +35,18 @@ GEM
     addressable (2.3.6)
     andand (1.3.3)
     arel (3.0.3)
-    arvados (0.1.20140919104705)
+    arvados (0.1.20141114230720)
       activesupport (>= 3.2.13)
-      andand
-      google-api-client (~> 0.6.3)
-      json (>= 1.7.7)
+      andand (~> 1.3, >= 1.3.3)
+      google-api-client (~> 0.6.3, >= 0.6.3)
+      json (~> 1.7, >= 1.7.7)
       jwt (>= 0.1.5, < 1.0.0)
-    arvados-cli (0.1.20141014201516)
+    arvados-cli (0.1.20141209151444)
       activesupport (~> 3.2, >= 3.2.13)
       andand (~> 1.3, >= 1.3.3)
       arvados (~> 0.1, >= 0.1.0)
       curb (~> 0.8)
-      google-api-client (~> 0.6, >= 0.6.3)
+      google-api-client (~> 0.6.3, >= 0.6.3)
       json (~> 1.7, >= 1.7.7)
       jwt (>= 0.1.5, < 1.0.0)
       oj (~> 2.0, >= 2.0.3)
@@ -108,7 +108,7 @@ GEM
     json (1.8.1)
     jwt (0.1.13)
       multi_json (>= 1.5)
-    launchy (2.4.2)
+    launchy (2.4.3)
       addressable (~> 2.3)
     libv8 (3.16.14.3)
     mail (2.5.4)
@@ -130,7 +130,7 @@ GEM
       jwt (~> 0.1.4)
       multi_json (~> 1.0)
       rack (~> 1.2)
-    oj (2.10.2)
+    oj (2.11.1)
     omniauth (1.1.1)
       hashie (~> 1.2)
       rack
@@ -224,7 +224,7 @@ DEPENDENCIES
   acts_as_api
   andand
   arvados (>= 0.1.20140919104705)
-  arvados-cli (>= 0.1.20141014201516)
+  arvados-cli (>= 0.1.20141202211726)
   coffee-rails (~> 3.2.0)
   database_cleaner
   factory_girl_rails
diff --git a/services/api/app/assets/javascripts/api_client_authorizations.js.coffee b/services/api/app/assets/javascripts/api_client_authorizations.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/api_clients.js.coffee b/services/api/app/assets/javascripts/api_clients.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/application.js b/services/api/app/assets/javascripts/application.js
deleted file mode 100644 (file)
index 37c7bfc..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-// This is a manifest file that'll be compiled into including all the files listed below.
-// Add new JavaScript/Coffee code in separate files in this directory and they'll automatically
-// be included in the compiled file accessible from http://example.com/assets/application.js
-// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the
-// the compiled file.
-//
-//= require jquery
-//= require jquery_ujs
-//= require_tree .
diff --git a/services/api/app/assets/javascripts/authorized_keys.js.coffee b/services/api/app/assets/javascripts/authorized_keys.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/collections.js.coffee b/services/api/app/assets/javascripts/collections.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/commit_ancestors.js.coffee b/services/api/app/assets/javascripts/commit_ancestors.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/commits.js.coffee b/services/api/app/assets/javascripts/commits.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/groups.js.coffee b/services/api/app/assets/javascripts/groups.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/humans.js.coffee b/services/api/app/assets/javascripts/humans.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/job_tasks.js.coffee b/services/api/app/assets/javascripts/job_tasks.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/jobs.js.coffee b/services/api/app/assets/javascripts/jobs.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/keep_disks.js.coffee b/services/api/app/assets/javascripts/keep_disks.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/links.js.coffee b/services/api/app/assets/javascripts/links.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/logs.js.coffee b/services/api/app/assets/javascripts/logs.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/nodes.js b/services/api/app/assets/javascripts/nodes.js
deleted file mode 100644 (file)
index a734426..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// -*- mode: javascript; js-indent-level: 4; indent-tabs-mode: nil; -*-
-// Place all the behaviors and hooks related to the matching controller here.
-// All this logic will automatically be available in application.js.
-
-var loaded_nodes_js;
-$(function(){
-    if (loaded_nodes_js) return; loaded_nodes_js = true;
-
-    $('[data-showhide-selector]').on('click', function(e){
-        var x = $($(this).attr('data-showhide-selector'));
-        if (x.css('display') == 'none')
-            x.show();
-        else
-            x.hide();
-    });
-    $('[data-showhide-default]').hide();
-});
diff --git a/services/api/app/assets/javascripts/nodes.js.coffee b/services/api/app/assets/javascripts/nodes.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/pipeline_instances.js.coffee b/services/api/app/assets/javascripts/pipeline_instances.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/pipeline_templates.js.coffee b/services/api/app/assets/javascripts/pipeline_templates.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/repositories.js.coffee b/services/api/app/assets/javascripts/repositories.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/specimens.js.coffee b/services/api/app/assets/javascripts/specimens.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/traits.js.coffee b/services/api/app/assets/javascripts/traits.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
diff --git a/services/api/app/assets/javascripts/virtual_machines.js.coffee b/services/api/app/assets/javascripts/virtual_machines.js.coffee
deleted file mode 100644 (file)
index 7615679..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
index 4f0364f15af205df240f2471b80d015ebb4b8b1e..54d5adb439c37e9cf9371fccec4e77ecee56b002 100644 (file)
@@ -27,6 +27,7 @@ class ApplicationController < ActionController::Base
 
   ERROR_ACTIONS = [:render_error, :render_not_found]
 
+  before_filter :set_cors_headers
   before_filter :respond_with_json_by_default
   before_filter :remote_ip
   before_filter :load_read_auths
@@ -35,6 +36,7 @@ class ApplicationController < ActionController::Base
   before_filter :catch_redirect_hint
   before_filter(:find_object_by_uuid,
                 except: [:index, :create] + ERROR_ACTIONS)
+  before_filter :load_required_parameters
   before_filter :load_limit_offset_order_params, only: [:index, :contents]
   before_filter :load_where_param, only: [:index, :contents]
   before_filter :load_filters_param, only: [:index, :contents]
@@ -345,6 +347,13 @@ class ApplicationController < ActionController::Base
     end
   end
 
+  def set_cors_headers
+    response.headers['Access-Control-Allow-Origin'] = '*'
+    response.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, PUT, POST, DELETE'
+    response.headers['Access-Control-Allow-Headers'] = 'Authorization'
+    response.headers['Access-Control-Max-Age'] = '86486400'
+  end
+
   def respond_with_json_by_default
     html_index = request.accepts.index(Mime::HTML)
     if html_index.nil? or request.accepts[0...html_index].include?(Mime::JSON)
@@ -446,6 +455,40 @@ class ApplicationController < ActionController::Base
     end
   end
 
+  def load_required_parameters
+    (self.class.send "_#{params[:action]}_requires_parameters" rescue {}).
+      each do |key, info|
+      if info[:required] and not params.include?(key)
+        raise ArgumentError.new("#{key} parameter is required")
+      elsif info[:type] == 'boolean'
+        # Make sure params[key] is either true or false -- not a
+        # string, not nil, etc.
+        if not params.include?(key)
+          params[key] = info[:default]
+        elsif [false, 'false', '0', 0].include? params[key]
+          params[key] = false
+        elsif [true, 'true', '1', 1].include? params[key]
+          params[key] = true
+        else
+          raise TypeError.new("#{key} parameter must be a boolean, true or false")
+        end
+      end
+    end
+    true
+  end
+
+  def self._create_requires_parameters
+    {
+      ensure_unique_name: {
+        type: "boolean",
+        description: "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+        location: "query",
+        required: false,
+        default: false
+      }
+    }
+  end
+
   def self._index_requires_parameters
     {
       filters: { type: 'array', required: false },
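
load_required_parameters enforces the declared types, so a boolean query parameter arrives in the action as a real true/false, never a string. The coercion rule, restated in Python for clarity:

    def coerce_bool(value, default=False, present=True):
        # Mirrors the Rails filter: absent -> default; recognized
        # literals -> real booleans; anything else is an error.
        if not present:
            return default
        if value in (False, 'false', '0', 0):
            return False
        if value in (True, 'true', '1', 1):
            return True
        raise TypeError('parameter must be a boolean, true or false')
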
index 7bc207f0226a5f247e0b2c14aa5870ee994e4bba..54f09828f5ea29d9d17c09dc6ef7f8c85831ceaa 100644 (file)
@@ -54,7 +54,7 @@ class Arvados::V1::CollectionsController < ApplicationController
     when String
       if m = /[a-f0-9]{32}\+\d+/.match(sp)
         yield m[0], nil
-      elsif m = /[0-9a-z]{5}-4zz18-[0-9a-z]{15}/.match(sp)
+      elsif m = Collection.uuid_regex.match(sp)
         yield nil, m[0]
       end
     end
index 47018d4b10eb7ebc6ff27028b93de0e1aed8e84f..e8ccf2386c41343fc2b915ed86b7832d2eae94ac 100644 (file)
@@ -3,13 +3,13 @@ class Arvados::V1::KeepDisksController < ApplicationController
 
   def self._ping_requires_parameters
     {
-      uuid: false,
-      ping_secret: true,
-      node_uuid: false,
-      filesystem_uuid: false,
-      service_host: false,
-      service_port: true,
-      service_ssl_flag: true
+      uuid: {required: false},
+      ping_secret: {required: true},
+      node_uuid: {required: false},
+      filesystem_uuid: {required: false},
+      service_host: {required: false},
+      service_port: {required: true},
+      service_ssl_flag: {required: true}
     }
   end
 
index f2a04b3b29e79efdcd417d68a60e7ee40f921f06..efee982e74bcf7d62a8baac4561ee0da399dbd3e 100644 (file)
@@ -11,7 +11,7 @@ class Arvados::V1::NodesController < ApplicationController
   end
 
   def self._ping_requires_parameters
-    { ping_secret: true }
+    { ping_secret: {required: true} }
   end
 
   def ping
@@ -38,17 +38,16 @@ class Arvados::V1::NodesController < ApplicationController
   end
 
   def find_objects_for_index
-    if current_user.andand.is_admin || !current_user.andand.is_active
-      super
-    else
+    if !current_user.andand.is_admin && current_user.andand.is_active
       # active non-admin users can list nodes that are (or were
       # recently) working
       @objects = model_class.where('last_ping_at >= ?', Time.now - 1.hours)
     end
-    assigned_nodes = @objects.select(&:job_uuid)
-    assoc_jobs = readable_job_uuids(*assigned_nodes.map(&:job_uuid))
-    assigned_nodes.each do |node|
-      node.job_readable = assoc_jobs.include?(node.job_uuid)
+    super
+    job_uuids = @objects.map { |n| n[:job_uuid] }.compact
+    assoc_jobs = readable_job_uuids(job_uuids)
+    @objects.each do |node|
+      node.job_readable = assoc_jobs.include?(node[:job_uuid])
     end
   end
 
index c5b2bcf2f2d4a97f5bac9b18e6f0456707e59210..2f7af3c4287d74587b3db7a7fd65551e239afc7f 100644 (file)
@@ -258,13 +258,7 @@ class Arvados::V1::SchemaController < ApplicationController
               path: "#{k.to_s.underscore.pluralize}",
               httpMethod: "POST",
               description: "Create a new #{k.to_s}.",
-              parameters: {
-                ensure_unique_name: {
-                  type: "boolean",
-                  description: "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
-                  location: "query"
-                }
-              },
+              parameters: {},
               request: {
                 required: true,
                 properties: {
diff --git a/services/api/app/controllers/database_controller.rb b/services/api/app/controllers/database_controller.rb
new file mode 100644 (file)
index 0000000..04c0e79
--- /dev/null
@@ -0,0 +1,73 @@
+class DatabaseController < ApplicationController
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  before_filter :admin_required
+  def reset
+    raise ArvadosModel::PermissionDeniedError unless Rails.env == 'test'
+
+    # Sanity check: If someone has actually logged in here, this might
+    # not really be a throwaway database. Client test suites should
+    # use @example.com email addresses when creating user records, so
+    # we can tell they're not valuable.
+    user_uuids = User.
+      where('email is null or email not like ?', '%@example.com').
+      collect &:uuid
+    fixture_uuids =
+      YAML::load_file(File.expand_path('../../../test/fixtures/users.yml',
+                                       __FILE__)).
+      values.collect { |u| u['uuid'] }
+    unexpected_uuids = user_uuids - fixture_uuids
+    if unexpected_uuids.any?
+      logger.error("Running in test environment, but non-fixture users exist: " +
+                   "#{unexpected_uuids}")
+      raise ArvadosModel::PermissionDeniedError
+    end
+
+    require 'active_record/fixtures'
+
+    # What kinds of fixtures do we have?
+    fixturesets = Dir.glob(Rails.root.join('test', 'fixtures', '*.yml')).
+      collect { |yml| yml.match(/([^\/]*)\.yml$/)[1] }
+
+    table_names = '"' + ActiveRecord::Base.connection.tables.join('","') + '"'
+
+    attempts_left = 20
+    begin
+      ActiveRecord::Base.transaction do
+        # Avoid deadlock by locking all tables before doing anything
+        # drastic.
+        ActiveRecord::Base.connection.execute \
+        "LOCK TABLE #{table_names} IN ACCESS EXCLUSIVE MODE"
+
+        # Delete existing fixtures (and everything else) from fixture
+        # tables
+        fixturesets.each do |x|
+          x.classify.constantize.unscoped.delete_all
+        end
+
+        # create_fixtures() is a no-op for cached fixture sets, so
+        # uncache them all.
+        ActiveRecord::Fixtures.reset_cache
+        ActiveRecord::Fixtures.
+          create_fixtures(Rails.root.join('test', 'fixtures'), fixturesets)
+
+        # Dump cache of permissions etc.
+        Rails.cache.clear
+        ActiveRecord::Base.connection.clear_query_cache
+
+        # Reload database seeds
+        DatabaseSeeds.install
+      end
+    rescue ActiveRecord::StatementInvalid => e
+      if "#{e.inspect}" =~ /deadlock detected/i and (attempts_left -= 1) > 0
+        logger.info "Waiting for lock -- #{e.inspect}"
+        sleep 0.5
+        retry
+      end
+      raise
+    end
+
+    # Done.
+    render json: {success: true}
+  end
+end
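
The reset endpoint locks every table, deletes all rows, reloads fixtures, clears caches, and reinstalls the seeds, retrying up to 20 times on deadlock. A client test suite might call it like this (host and token are placeholders; the route only exists when Rails.env == 'test', see routes.rb below):

    import requests

    admin_api_token = 'xxxxxxxx'  # placeholder admin token

    resp = requests.post(
        'https://api.example/database/reset',
        headers={'Authorization': 'OAuth2 ' + admin_api_token})
    assert resp.json()['success'] is True
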
index d624ea8c35983dfe745610a195b40c6418a53434..9c66f018723fdfd05c363d744ee45b6fa068fa66 100644 (file)
@@ -3,7 +3,7 @@ class StaticController < ApplicationController
 
   skip_before_filter :find_object_by_uuid
   skip_before_filter :render_404_if_no_object
-  skip_before_filter :require_auth_scope, :only => [ :home, :login_failure ]
+  skip_before_filter :require_auth_scope, only: [:home, :empty, :login_failure]
 
   def home
     respond_to do |f|
@@ -20,4 +20,8 @@ class StaticController < ApplicationController
     end
   end
 
+  def empty
+    render text: "-"
+  end
+
 end
index 30ef63fd8f6e64815dce0c4b89d8c1f2153b6e26..256a67bcbb55aa426e405312fd3908e9dc1177dd 100644 (file)
@@ -1,6 +1,7 @@
 class UserSessionsController < ApplicationController
   before_filter :require_auth_scope, :only => [ :destroy ]
 
+  skip_before_filter :set_cors_headers
   skip_before_filter :find_object_by_uuid
   skip_before_filter :render_404_if_no_object
 
@@ -141,4 +142,8 @@ class UserSessionsController < ApplicationController
     callback_url += 'api_token=' + api_client_auth.api_token
     redirect_to callback_url
   end
+
+  def cross_origin_forbidden
+    send_error 'Forbidden', status: 403
+  end
 end
index 13ccd7033560318c2db2687928a08a4e5de0d44e..a170fb9b54368e3d8f77689604e2d1d5dd7ff301 100644 (file)
@@ -445,6 +445,10 @@ class ArvadosModel < ActiveRecord::Base
     "_____-#{uuid_prefix}-_______________"
   end
 
+  def self.uuid_regex
+    %r/[a-z0-9]{5}-#{uuid_prefix}-[a-z0-9]{15}/
+  end
+
   def ensure_valid_uuids
     specials = [system_user_uuid]
 
diff --git a/services/api/app/models/database_seeds.rb b/services/api/app/models/database_seeds.rb
new file mode 100644 (file)
index 0000000..bc68283
--- /dev/null
@@ -0,0 +1,11 @@
+class DatabaseSeeds
+  extend CurrentApiClient
+  def self.install
+    system_user
+    system_group
+    all_users_group
+    anonymous_group
+    anonymous_user
+    empty_collection
+  end
+end
index 6e01de9213b1d8329451602ff3ea8fdc996e5679..0444528b6bda671a4327ef39fff1918fabebf9d8 100644 (file)
@@ -15,6 +15,7 @@ class Job < ArvadosModel
   validate :find_docker_image_locator
   validate :validate_status
   validate :validate_state_change
+  validate :ensure_no_collection_uuids_in_script_params
   before_save :update_timestamps_when_state_changes
 
   has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
@@ -171,10 +172,12 @@ class Job < ArvadosModel
                                :arvados_sdk_version) do |git_search|
       commits = Commit.find_commit_range(current_user, "arvados",
                                          nil, git_search, nil)
-      if commits.andand.any?
-        [true, commits.first]
-      else
+      if commits.nil? or commits.empty?
         [false, "#{git_search} does not resolve to a commit"]
+      elsif not runtime_constraints["docker_image"]
+        [false, "cannot be specified without a Docker image constraint"]
+      else
+        [true, commits.first]
       end
     end
   end
@@ -308,6 +311,8 @@ class Job < ArvadosModel
     end
     self.running ||= false # Default to false instead of nil.
 
+    @need_crunch_dispatch_trigger = true
+
     true
   end
 
@@ -372,4 +377,34 @@ class Job < ArvadosModel
     end
     ok
   end
+
+  def ensure_no_collection_uuids_in_script_params
+    # recursive_hash_search searches recursively through hashes and
+    # arrays in 'thing' for string fields matching regular expression
+    # 'pattern'.  Returns true if pattern is found, false otherwise.
+    def recursive_hash_search thing, pattern
+      if thing.is_a? Hash
+        thing.each do |k, v|
+          return true if recursive_hash_search v, pattern
+        end
+      elsif thing.is_a? Array
+        thing.each do |k|
+          return true if recursive_hash_search k, pattern
+        end
+      elsif thing.is_a? String
+        return true if thing.match pattern
+      end
+      false
+    end
+
+    # Fail validation if any script_parameters field includes a string containing a
+    # collection uuid pattern.
+    if self.script_parameters_changed?
+      if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
+        self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
+        return false
+      end
+    end
+    true
+  end
 end
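
The validation walks script_parameters recursively and rejects any string that looks like a collection UUID, steering callers toward portable data hashes. The traversal, restated in Python:

    import re

    def recursive_search(thing, pattern):
        # Depth-first search through dicts, lists, and strings.
        if isinstance(thing, dict):
            return any(recursive_search(v, pattern) for v in thing.values())
        if isinstance(thing, list):
            return any(recursive_search(v, pattern) for v in thing)
        if isinstance(thing, str):
            return re.search(pattern, thing) is not None
        return False

    params = {'input': 'zzzzz-4zz18-12345abcde12345'}
    assert recursive_search(params, r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')
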
index ab8b1974bc13db3e70be6187fc7c284c4e8a6c1f..c38f6817dbc768f4b380c51b999d75c6526dff61 100644 (file)
@@ -137,6 +137,17 @@ class Node < ArvadosModel
 
   def dns_server_update
     if self.hostname_changed? or self.ip_address_changed?
+      if not self.ip_address.nil?
+        stale_conflicting_nodes = Node.where('id != ? and ip_address = ? and last_ping_at < ?', self.id, self.ip_address, 10.minutes.ago)
+        if not stale_conflicting_nodes.empty?
+          # One or more stale compute node records have the same IP address as the new node.
+          # Clear the ip_address field on the stale nodes.
+          stale_conflicting_nodes.each do |stale_node|
+            stale_node.ip_address = nil
+            stale_node.save!
+          end
+        end
+      end
       if self.hostname and self.ip_address
         self.class.dns_server_update(self.hostname, self.ip_address)
       end
@@ -166,10 +177,6 @@ class Node < ArvadosModel
       STDERR.puts "Unable to write #{hostfile}: #{e.message}"
       return
     end
-    #  f.puts "address=/#{hostname}/#{ip_address}"
-    #  f.puts "address=/#{hostname}.#{@@domain}/#{ip_address}" if @@domain
-    #  f.puts "ptr-record=#{ptr_domain},#{hostname}"
-    #end
     File.open(File.join(@@dns_server_conf_dir, 'restart.txt'), 'w') do |f|
       # this will trigger a dns server restart
       f.puts @@dns_server_reload_command
@@ -182,8 +189,7 @@ class Node < ArvadosModel
 
   # At startup, make sure all DNS entries exist.  Otherwise, slurmctld
   # will refuse to start.
-  if @@dns_server_conf_dir and @@dns_server_conf_template and
-      !File.exists? (File.join(@@dns_server_conf_dir, "#{hostname_for_slot(MAX_SLOTS-1)}.conf"))
+  if @@dns_server_conf_dir and @@dns_server_conf_template
     (0..MAX_SLOTS-1).each do |slot_number|
       hostname = hostname_for_slot(slot_number)
       hostfile = File.join @@dns_server_conf_dir, "#{hostname}.conf"
index b939d07bf056c00e93383499a47e1bfedadad2c7..a32ce39299228b324ec63a64be6e6f4a152af6b0 100644 (file)
@@ -399,34 +399,14 @@ class User < ArvadosModel
 
   # add the user to the 'All users' group
   def create_user_group_link
-    # Look up the "All users" group (we expect uuid *-*-fffffffffffffff).
-    group = Group.where(name: 'All users').select do |g|
-      g[:uuid].match /-f+$/
-    end.first
-
-    if not group
-      logger.warn "No 'All users' group with uuid '*-*-fffffffffffffff'."
-      raise "No 'All users' group with uuid '*-*-fffffffffffffff' is found"
-    else
-      logger.info { "\"All users\" group uuid: " + group[:uuid] }
-
-      group_perms = Link.where(tail_uuid: self.uuid,
-                              head_uuid: group[:uuid],
-                              link_class: 'permission',
-                              name: 'can_read')
-
-      if !group_perms.any?
-        group_perm = Link.create(tail_uuid: self.uuid,
-                                 head_uuid: group[:uuid],
-                                 link_class: 'permission',
-                                 name: 'can_read')
-        logger.info { "group permission: " + group_perm[:uuid] }
-      else
-        group_perm = group_perms.first
-      end
-
-      return group_perm
-    end
+    return (Link.where(tail_uuid: self.uuid,
+                       head_uuid: all_users_group[:uuid],
+                       link_class: 'permission',
+                       name: 'can_read').first or
+            Link.create(tail_uuid: self.uuid,
+                        head_uuid: all_users_group[:uuid],
+                        link_class: 'permission',
+                        name: 'can_read'))
   end
 
   # Give the special "System group" permission to manage this user and
index 4396418dfbc0a86e4ecac1cda5e2affcc17884c7..ed2c533f5662e008bc4d25f5ae4b29c7556b0a49 100644 (file)
@@ -15,7 +15,7 @@ development:
   active_record.auto_explain_threshold_in_seconds: 0.5
   assets.compress: false
   assets.debug: true
-  local_modified: <%= '-modified' if `git status -s` %>
+  local_modified: "<%= '-modified' if `git status -s` != '' %>"
 
 production:
   force_ssl: true
@@ -236,7 +236,7 @@ common:
   auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
 
   # source_version
-  source_version: "<%= `git log -n 1 --format=%h` %>"
+  source_version: "<%= `git log -n 1 --format=%h`.strip %>"
   local_modified: false
 
   # Default lifetime for ephemeral collections: 2 weeks.
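
Both config fixes address the same pitfall: backticks return the command's stdout including its trailing newline, so interpolating the raw result embeds a line break in the YAML value. For example:

```ruby
`git log -n 1 --format=%h`        # => "a32ce392\n" (note the newline)
`git log -n 1 --format=%h`.strip  # => "a32ce392"
```
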
index 705822a6a58eb507e2f42261364293e54494dc65..27fd67cece046268f449a8cc18ad35b30d128b44 100644 (file)
@@ -3,6 +3,14 @@ Server::Application.routes.draw do
 
   # See http://guides.rubyonrails.org/routing.html
 
+  # OPTIONS requests are not allowed at routes that use cookies.
+  ['/auth/*a', '/login', '/logout'].each do |nono|
+    match nono, :to => 'user_sessions#cross_origin_forbidden', :via => 'OPTIONS'
+  end
+  # OPTIONS at discovery and API paths get an empty response with CORS headers.
+  match '/discovery/v1/*a', :to => 'static#empty', :via => 'OPTIONS'
+  match '/arvados/v1/*a', :to => 'static#empty', :via => 'OPTIONS'
+
   namespace :arvados do
     namespace :v1 do
       resources :api_client_authorizations do
@@ -63,9 +71,15 @@ Server::Application.routes.draw do
     end
   end
 
+  if Rails.env == 'test'
+    post '/database/reset', to: 'database#reset'
+  end
+
   # omniauth
   match '/auth/:provider/callback', :to => 'user_sessions#create'
   match '/auth/failure', :to => 'user_sessions#failure'
+  # Requests not handled by the omniauth provider get a 403 with no CORS headers.
+  get '/auth/*a', :to => 'user_sessions#cross_origin_forbidden'
 
   # Custom logout
   match '/login', :to => 'user_sessions#login'
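
Browsers send an OPTIONS preflight before cross-origin API requests, so API and discovery paths answer it with an empty CORS-friendly stub while cookie-using routes refuse it outright. The integration test added later in this diff expects a `-` body and wildcard CORS headers; a hypothetical sketch of what the stub action could look like (the real `static#empty` is defined elsewhere in this commit and may differ):

```ruby
# Hypothetical sketch only; not the actual implementation.
class StaticController < ApplicationController
  def empty
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, POST, PUT, DELETE'
    response.headers['Access-Control-Allow-Headers'] = 'Authorization'
    render text: '-'
  end
end
```
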
index 6034c98232678d7c6624457e07730fdc0df89287..7643b18d85896af70786b348febd83f3b521a2d7 100644 (file)
@@ -2,13 +2,8 @@ class PipelineInstanceState < ActiveRecord::Migration
   include CurrentApiClient
 
   def up
-    if !column_exists?(:pipeline_instances, :state)
-      add_column :pipeline_instances, :state, :string
-    end
-
-    if !column_exists?(:pipeline_instances, :components_summary)
-      add_column :pipeline_instances, :components_summary, :text
-    end
+    add_column :pipeline_instances, :state, :string
+    add_column :pipeline_instances, :components_summary, :text
 
     PipelineInstance.reset_column_information
 
diff --git a/services/api/db/migrate/20141208164553_owner_uuid_index.rb b/services/api/db/migrate/20141208164553_owner_uuid_index.rb
new file mode 100644 (file)
index 0000000..0859d46
--- /dev/null
@@ -0,0 +1,20 @@
+class OwnerUuidIndex < ActiveRecord::Migration
+  def tables_with_owner_uuid
+    ActiveRecord::Base.connection.tables.select do |table|
+      columns = ActiveRecord::Base.connection.columns(table)
+      columns.collect(&:name).include? 'owner_uuid'
+    end
+  end
+
+  def up
+    tables_with_owner_uuid.each do |table|
+      add_index table.to_sym, :owner_uuid
+    end
+  end
+
+  def down
+    tables_with_owner_uuid.each do |table|
+      remove_index table.to_sym, :owner_uuid
+    end
+  end
+end
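
The migration discovers every table with an `owner_uuid` column at run time and adds or removes the index for each, so `up` and `down` stay symmetric even as tables are added. A quick sanity check from a Rails console after migrating, as a sketch:

```ruby
# Each table with an owner_uuid column should now report the index:
ActiveRecord::Base.connection.indexes('collections').
  map(&:columns).include?(['owner_uuid'])   # => true
```
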
index d397b91bfd516af3e325476d0eb12c3fecdb5302..384d2e2cf1f358663182cec396a7456f0cfd2f45 100644 (file)
@@ -2,11 +2,4 @@
 #
 # It is invoked by `rake db:seed` and `rake db:setup`.
 
-# These two methods would create these objects on demand
-# later anyway, but it's better form to create them up front.
-include CurrentApiClient
-system_user
-system_group
-anonymous_group
-anonymous_user
-empty_collection
+DatabaseSeeds.install
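
`seeds.rb` now delegates to `DatabaseSeeds.install`, whose definition is not shown in this hunk. Judging from the removed lines, it presumably wraps the same on-demand `CurrentApiClient` helpers; a hypothetical reconstruction:

```ruby
# Hypothetical sketch; the real DatabaseSeeds may differ.
module DatabaseSeeds
  extend CurrentApiClient
  def self.install
    system_user
    system_group
    anonymous_group
    anonymous_user
    empty_collection
  end
end
```
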
index 2eca2caf16abcfec8cde761b639f931e3dc8cb42..038973f9b8f02e44f8840cac5b3a0554b1373dcb 100644 (file)
@@ -1334,6 +1334,13 @@ CREATE INDEX index_api_clients_on_created_at ON api_clients USING btree (created
 CREATE INDEX index_api_clients_on_modified_at ON api_clients USING btree (modified_at);
 
 
+--
+-- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_clients_on_owner_uuid ON api_clients USING btree (owner_uuid);
+
+
 --
 -- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1348,6 +1355,13 @@ CREATE UNIQUE INDEX index_api_clients_on_uuid ON api_clients USING btree (uuid);
 CREATE INDEX index_authkeys_on_user_and_expires_at ON authorized_keys USING btree (authorized_user_uuid, expires_at);
 
 
+--
+-- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_authorized_keys_on_owner_uuid ON authorized_keys USING btree (owner_uuid);
+
+
 --
 -- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1369,6 +1383,13 @@ CREATE INDEX index_collections_on_created_at ON collections USING btree (created
 CREATE INDEX index_collections_on_modified_at ON collections USING btree (modified_at);
 
 
+--
+-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_collections_on_owner_uuid ON collections USING btree (owner_uuid);
+
+
 --
 -- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1411,6 +1432,13 @@ CREATE INDEX index_groups_on_group_class ON groups USING btree (group_class);
 CREATE INDEX index_groups_on_modified_at ON groups USING btree (modified_at);
 
 
+--
+-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_groups_on_owner_uuid ON groups USING btree (owner_uuid);
+
+
 --
 -- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1418,6 +1446,13 @@ CREATE INDEX index_groups_on_modified_at ON groups USING btree (modified_at);
 CREATE UNIQUE INDEX index_groups_on_uuid ON groups USING btree (uuid);
 
 
+--
+-- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_humans_on_owner_uuid ON humans USING btree (owner_uuid);
+
+
 --
 -- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1446,6 +1481,13 @@ CREATE INDEX index_job_tasks_on_job_uuid ON job_tasks USING btree (job_uuid);
 CREATE INDEX index_job_tasks_on_modified_at ON job_tasks USING btree (modified_at);
 
 
+--
+-- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_owner_uuid ON job_tasks USING btree (owner_uuid);
+
+
 --
 -- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1495,6 +1537,13 @@ CREATE INDEX index_jobs_on_modified_at ON jobs USING btree (modified_at);
 CREATE INDEX index_jobs_on_output ON jobs USING btree (output);
 
 
+--
+-- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_owner_uuid ON jobs USING btree (owner_uuid);
+
+
 --
 -- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1544,6 +1593,13 @@ CREATE INDEX index_keep_disks_on_last_ping_at ON keep_disks USING btree (last_pi
 CREATE INDEX index_keep_disks_on_node_uuid ON keep_disks USING btree (node_uuid);
 
 
+--
+-- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_disks_on_owner_uuid ON keep_disks USING btree (owner_uuid);
+
+
 --
 -- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1551,6 +1607,13 @@ CREATE INDEX index_keep_disks_on_node_uuid ON keep_disks USING btree (node_uuid)
 CREATE UNIQUE INDEX index_keep_disks_on_uuid ON keep_disks USING btree (uuid);
 
 
+--
+-- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_services_on_owner_uuid ON keep_services USING btree (owner_uuid);
+
+
 --
 -- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1579,6 +1642,13 @@ CREATE INDEX index_links_on_head_uuid ON links USING btree (head_uuid);
 CREATE INDEX index_links_on_modified_at ON links USING btree (modified_at);
 
 
+--
+-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_owner_uuid ON links USING btree (owner_uuid);
+
+
 --
 -- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1628,6 +1698,13 @@ CREATE INDEX index_logs_on_modified_at ON logs USING btree (modified_at);
 CREATE INDEX index_logs_on_object_uuid ON logs USING btree (object_uuid);
 
 
+--
+-- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_owner_uuid ON logs USING btree (owner_uuid);
+
+
 --
 -- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1663,6 +1740,13 @@ CREATE INDEX index_nodes_on_hostname ON nodes USING btree (hostname);
 CREATE INDEX index_nodes_on_modified_at ON nodes USING btree (modified_at);
 
 
+--
+-- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_nodes_on_owner_uuid ON nodes USING btree (owner_uuid);
+
+
 --
 -- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1691,6 +1775,13 @@ CREATE INDEX index_pipeline_instances_on_created_at ON pipeline_instances USING
 CREATE INDEX index_pipeline_instances_on_modified_at ON pipeline_instances USING btree (modified_at);
 
 
+--
+-- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_instances_on_owner_uuid ON pipeline_instances USING btree (owner_uuid);
+
+
 --
 -- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1712,6 +1803,13 @@ CREATE INDEX index_pipeline_templates_on_created_at ON pipeline_templates USING
 CREATE INDEX index_pipeline_templates_on_modified_at ON pipeline_templates USING btree (modified_at);
 
 
+--
+-- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_templates_on_owner_uuid ON pipeline_templates USING btree (owner_uuid);
+
+
 --
 -- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1726,6 +1824,13 @@ CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON pipeline_templates USING
 CREATE UNIQUE INDEX index_repositories_on_name ON repositories USING btree (name);
 
 
+--
+-- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_repositories_on_owner_uuid ON repositories USING btree (owner_uuid);
+
+
 --
 -- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1747,6 +1852,13 @@ CREATE INDEX index_specimens_on_created_at ON specimens USING btree (created_at)
 CREATE INDEX index_specimens_on_modified_at ON specimens USING btree (modified_at);
 
 
+--
+-- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_specimens_on_owner_uuid ON specimens USING btree (owner_uuid);
+
+
 --
 -- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1761,6 +1873,13 @@ CREATE UNIQUE INDEX index_specimens_on_uuid ON specimens USING btree (uuid);
 CREATE INDEX index_traits_on_name ON traits USING btree (name);
 
 
+--
+-- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_traits_on_owner_uuid ON traits USING btree (owner_uuid);
+
+
 --
 -- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1782,6 +1901,13 @@ CREATE INDEX index_users_on_created_at ON users USING btree (created_at);
 CREATE INDEX index_users_on_modified_at ON users USING btree (modified_at);
 
 
+--
+-- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_users_on_owner_uuid ON users USING btree (owner_uuid);
+
+
 --
 -- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1796,6 +1922,13 @@ CREATE UNIQUE INDEX index_users_on_uuid ON users USING btree (uuid);
 CREATE INDEX index_virtual_machines_on_hostname ON virtual_machines USING btree (hostname);
 
 
+--
+-- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_virtual_machines_on_owner_uuid ON virtual_machines USING btree (owner_uuid);
+
+
 --
 -- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2032,4 +2165,6 @@ INSERT INTO schema_migrations (version) VALUES ('20140918153705');
 
 INSERT INTO schema_migrations (version) VALUES ('20140924091559');
 
-INSERT INTO schema_migrations (version) VALUES ('20141111133038');
\ No newline at end of file
+INSERT INTO schema_migrations (version) VALUES ('20141111133038');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208164553');
\ No newline at end of file
index 4c26010d2a4a3c0e6c38ae7911c2c9eb84eb822f..9f78587eabe9de75e37f49268d508d832203ad46 100644 (file)
@@ -98,6 +98,28 @@ module CurrentApiClient
     $system_group
   end
 
+  def all_users_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     'fffffffffffffff'].join('-')
+  end
+
+  def all_users_group
+    if not $all_users_group
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          $all_users_group = Group.
+            where(uuid: all_users_group_uuid).first_or_create do |g|
+            g.update_attributes(name: "All users",
+                                description: "All users",
+                                group_class: "role")
+          end
+        end
+      end
+    end
+    $all_users_group
+  end
+
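
The helper creates the "All users" group on demand at a well-known UUID, so callers no longer need the old name-plus-`/-f+$/` lookup, and caches the result in a global. With the `zzzzz` prefix used by the test fixtures, for example:

```ruby
all_users_group_uuid  # => "zzzzz-j7d0g-fffffffffffffff"
```
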
   def act_as_system_user
     if block_given?
       act_as_user system_user do
index 1754fc0ae9b2de1bd7cc70a8d5ed5f1172f481f8..35671d65b287e495a76b2fc94b47cdf588983350 100644 (file)
@@ -1,3 +1,7 @@
+# If any threads raise an unhandled exception, make them all die.
+# We trust a supervisor like runit to restart the server in this case.
+Thread.abort_on_exception = true
+
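
With `Thread.abort_on_exception` set, an unhandled exception in any thread terminates the whole process instead of silently reaping just that thread; the comment's stated assumption is that a supervisor such as runit restarts the server. A standalone demonstration of the flag's effect:

```ruby
Thread.abort_on_exception = true
Thread.new { raise "worker crashed" }
sleep 1  # never reached: the exception propagates and kills the process
```
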
 require 'eventmachine'
 require 'oj'
 require 'faye/websocket'
@@ -112,7 +116,7 @@ class EventBus
 
         # Execute query and actually send the matching log rows
         count = 0
-        limit = 100
+        limit = 20
 
         logs.limit(limit).each do |l|
           ws.send(l.as_api_response.to_json)
@@ -141,14 +145,24 @@ class EventBus
       Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
       ws.send ({status: 500, message: 'error'}.to_json)
       ws.close
+      # These exceptions typically indicate serious server trouble:
+      # out of memory issues, database connection problems, etc.  Go ahead and
+      # crash; we expect that a supervisor service like runit will restart us.
+      raise
     end
   end
 
   # Handle inbound subscribe or unsubscribe message.
   def handle_message ws, event
     begin
-      # Parse event data as JSON
-      p = (Oj.load event.data).symbolize_keys
+      begin
+        # Parse event data as JSON
+        p = (Oj.load event.data).symbolize_keys
+        filter = Filter.new(p)
+      rescue Oj::Error => e
+        ws.send ({status: 400, message: "malformed request"}.to_json)
+        return
+      end
 
       if p[:method] == 'subscribe'
         # Handle subscribe event
@@ -162,7 +176,7 @@ class EventBus
         if ws.filters.length < MAX_FILTERS
           # Add a filter.  This gets the :filters field which is the same
           # format as used for regular index queries.
-          ws.filters << Filter.new(p)
+          ws.filters << filter
           ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
 
           # Send any pending events
@@ -185,8 +199,6 @@ class EventBus
       else
         ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
       end
-    rescue Oj::Error => e
-      ws.send ({status: 400, message: "malformed request"}.to_json)
     rescue => e
       Rails.logger.warn "Error handling message: #{$!}"
       Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
@@ -252,9 +264,6 @@ class EventBus
                   @channel.push payload.to_i
                 end
               end
-            rescue NoMemoryError
-              EventMachine::stop_event_loop
-              abort "Out of memory"
             ensure
               # Don't want the connection to still be listening once we return
               # it to the pool - could result in weird behavior for the next
diff --git a/services/api/lib/simulate_job_log.rb b/services/api/lib/simulate_job_log.rb
new file mode 100644 (file)
index 0000000..fc124c8
--- /dev/null
@@ -0,0 +1,49 @@
+module SimulateJobLog
+  def replay(filename, multiplier = 1, simulated_job_uuid = nil)
+    raise "Environment must be development or test" unless [ 'test', 'development' ].include? ENV['RAILS_ENV']
+
+    multiplier = multiplier.to_f
+    multiplier = 1.0 if multiplier <= 0
+
+    actual_start_time = Time.now
+    log_start_time = nil
+
+    act_as_system_user do
+      File.open(filename).each.with_index do |line, index|
+        cols = {}
+        cols[:timestamp], rest_of_line = line.split(' ', 2)
+        begin
+          cols[:timestamp] = Time.strptime( cols[:timestamp], "%Y-%m-%d_%H:%M:%S" )
+        rescue ArgumentError
+          if line =~ /^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (.*)/
+            # Wed Nov 19 07:12:39 2014
+            cols[:timestamp] = Time.strptime( $1, "%a %b %d %H:%M:%S %Y" )
+            rest_of_line = $2
+          else
+            STDERR.puts "Ignoring log line because of unknown time format: #{line}"
+          end
+        end
+        cols[:job_uuid], cols[:pid], cols[:task], cols[:event_type], cols[:message] = rest_of_line.split(' ', 5)
+        # Override job uuid with a simulated one if specified
+        cols[:job_uuid] = simulated_job_uuid || cols[:job_uuid]
+        # determine when we want to simulate this log being created, based on the time multiplier
+        log_start_time = cols[:timestamp] if log_start_time.nil?
+        log_time = cols[:timestamp]
+        actual_elapsed_time = Time.now - actual_start_time
+        log_elapsed_time = log_time - log_start_time
+        modified_elapsed_time = log_elapsed_time / multiplier
+        pause_time = modified_elapsed_time - actual_elapsed_time
+        sleep pause_time if pause_time > 0
+        # output log entry for debugging and create it in the current environment's database
+        puts "#{index} #{cols.to_yaml}\n"
+        Log.new({
+          event_at:    Time.zone.local_to_utc(cols[:timestamp]),
+          object_uuid: cols[:job_uuid],
+          event_type:  cols[:event_type],
+          properties:  { 'text' => line }
+        }).save!
+      end
+    end
+
+  end
+end
diff --git a/services/api/lib/tasks/replay_job_log.rake b/services/api/lib/tasks/replay_job_log.rake
new file mode 100644 (file)
index 0000000..14aa3be
--- /dev/null
@@ -0,0 +1,7 @@
+require 'simulate_job_log'
+desc 'Simulate job logging from a file. Three arguments: log filename, time multiplier (optional), simulated job uuid (optional). E.g. (use quotation marks if using spaces between args): rake "replay_job_log[log.txt, 2.0, qr1hi-8i9sb-nf3qk0xzwwz3lre]"'
+task :replay_job_log, [:filename, :multiplier, :uuid] => :environment do |t, args|
+  include SimulateJobLog
+  abort("No filename specified.") if args[:filename].blank?
+  replay( args[:filename], args[:multiplier].to_f, args[:uuid] )
+end
index ebd5165669f3d396bfa35e266a2e217ef4285ee5..ab4f70e60bd1115a87a47eeed64a544859bf49fd 100755 (executable)
@@ -44,19 +44,47 @@ require File.dirname(__FILE__) + '/../config/boot'
 require File.dirname(__FILE__) + '/../config/environment'
 require 'open3'
 
+class LogTime < Time
+  def to_s
+    self.utc.strftime "%Y-%m-%d_%H:%M:%S"
+  end
+end
+
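
`LogTime` normalizes dispatcher log timestamps to the same UTC format crunch job logs use (and which the new `simulate_job_log` parser expects). For example:

```ruby
LogTime.now.to_s  # => e.g. "2014-11-07_23:33:51"
```
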
 class Dispatcher
   include ApplicationHelper
 
+  def initialize
+    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
+    if @crunch_job_bin.empty?
+      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
+    end
+
+    @arvados_internal = Rails.configuration.git_internal_dir
+    if not File.exists? @arvados_internal
+      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
+      raise "No internal git repository available" unless ($? == 0)
+    end
+
+    @repo_root = Rails.configuration.git_repositories_dir
+    @authorizations = {}
+    @did_recently = {}
+    @fetched_commits = {}
+    @git_tags = {}
+    @node_state = {}
+    @pipe_auth_tokens = {}
+    @running = {}
+    @todo = []
+    @todo_pipelines = []
+  end
+
   def sysuser
     return act_as_system_user
   end
 
   def refresh_todo
-    @todo = []
     if $options[:jobs]
       @todo = Job.queue.select(&:repository)
     end
-    @todo_pipelines = []
     if $options[:pipelines]
       @todo_pipelines = PipelineInstance.queue
     end
@@ -108,7 +136,6 @@ class Dispatcher
 
   def update_node_status
     return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
-    @node_state ||= {}
     slurm_status.each_pair do |hostname, slurmdata|
       next if @node_state[hostname] == slurmdata
       begin
@@ -217,6 +244,103 @@ class Dispatcher
     end
   end
 
+  def stdout_s(cmd_a, opts={})
+    IO.popen(cmd_a, "r", opts) do |pipe|
+      return pipe.read.chomp
+    end
+  end
+
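
`stdout_s` replaces the old interpolated backtick calls: `IO.popen` receives the command as an argv array, so no shell is involved, arguments need no `shellescape`, and `opts` can carry spawn options such as stderr redirection. Usage sketches, in the same instance context as the methods below:

```ruby
stdout_s(["git", "--version"])     # e.g. "git version 1.9.1"
stdout_s(git_cmd("rev-list", "-n1", commit_hash), err: "/dev/null")
```
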
+  def git_cmd(*cmd_a)
+    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
+  end
+
+  def get_authorization(job)
+    if @authorizations[job.uuid] and
+        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
+      # We already made a token for this job, but we need a new one
+      # because modified_by_user_uuid has changed (the job will run
+      # as a different user).
+      @authorizations[job.uuid].update_attributes expires_at: Time.now
+      @authorizations[job.uuid] = nil
+    end
+    if not @authorizations[job.uuid]
+      auth = ApiClientAuthorization.
+        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
+            api_client_id: 0)
+      if not auth.save
+        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
+      else
+        @authorizations[job.uuid] = auth
+      end
+    end
+    @authorizations[job.uuid]
+  end
+
+  def get_commit(repo_name, commit_hash)
+    # @fetched_commits[V]==true if we know commit V exists in the
+    # arvados_internal git repository.
+    if !@fetched_commits[commit_hash]
+      src_repo = File.join(@repo_root, "#{repo_name}.git")
+      if not File.exists? src_repo
+        src_repo = File.join(@repo_root, repo_name, '.git')
+        if not File.exists? src_repo
+          fail_job job, "No #{repo_name}.git or #{repo_name}/.git at #{@repo_root}"
+          return nil
+        end
+      end
+
+      # check if the commit needs to be fetched or not
+      commit_rev = stdout_s(git_cmd("rev-list", "-n1", commit_hash),
+                            err: "/dev/null")
+      unless $? == 0 and commit_rev == commit_hash
+        # commit does not exist in internal repository, so import the source repository using git fetch-pack
+        cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
+        $stderr.puts "dispatch: #{cmd}"
+        $stderr.puts(stdout_s(cmd))
+        unless $? == 0
+          fail_job job, "git fetch-pack failed"
+          return nil
+        end
+      end
+      @fetched_commits[commit_hash] = true
+    end
+    @fetched_commits[commit_hash]
+  end
+
+  def tag_commit(commit_hash, tag_name)
+    # @git_tags[T]==V if we know commit V has been tagged T in the
+    # arvados_internal repository.
+    if not @git_tags[tag_name]
+      cmd = git_cmd("tag", tag_name, commit_hash)
+      $stderr.puts "dispatch: #{cmd}"
+      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
+      unless $? == 0
+        # git tag failed.  This may be because the tag already exists, so check for that.
+        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
+        if $? == 0
+          # We got a revision back
+          if tag_rev != commit_hash
+            # Uh oh, the tag doesn't point to the revision we were expecting.
+            # Someone has been monkeying with the job record and/or git.
+            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
+            return nil
+          end
+          # we're okay (fall through to setting @git_tags below)
+        else
+          # git rev-list failed for some reason.
+          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
+          return nil
+        end
+      end
+      # 'git tag' was successful, or there is an existing tag that points to the same revision.
+      @git_tags[tag_name] = commit_hash
+    elsif @git_tags[tag_name] != commit_hash
+      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
+      return nil
+    end
+    @git_tags[tag_name]
+  end
+
   def start_jobs
     @todo.each do |job|
       next if @running[job.uuid]
@@ -259,108 +383,19 @@ class Dispatcher
                          "GEM_PATH=#{ENV['GEM_PATH']}")
       end
 
-      @authorizations ||= {}
-      if @authorizations[job.uuid] and
-          @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
-        # We already made a token for this job, but we need a new one
-        # because modified_by_user_uuid has changed (the job will run
-        # as a different user).
-        @authorizations[job.uuid].update_attributes expires_at: Time.now
-        @authorizations[job.uuid] = nil
-      end
-      if not @authorizations[job.uuid]
-        auth = ApiClientAuthorization.
-          new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
-              api_client_id: 0)
-        if not auth.save
-          $stderr.puts "dispatch: auth.save failed"
-          next
-        end
-        @authorizations[job.uuid] = auth
-      end
-
-      crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
-      if crunch_job_bin == ''
-        raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
-      end
-
-      arvados_internal = Rails.configuration.git_internal_dir
-      if not File.exists? arvados_internal
-        $stderr.puts `mkdir -p #{arvados_internal.shellescape} && cd #{arvados_internal.shellescape} && git init --bare`
-      end
-
-      git = "git --git-dir=#{arvados_internal.shellescape}"
-
-      # @fetched_commits[V]==true if we know commit V exists in the
-      # arvados_internal git repository.
-      @fetched_commits ||= {}
-      if !@fetched_commits[job.script_version]
-
-        repo_root = Rails.configuration.git_repositories_dir
-        src_repo = File.join(repo_root, job.repository + '.git')
-        if not File.exists? src_repo
-          src_repo = File.join(repo_root, job.repository, '.git')
-          if not File.exists? src_repo
-            fail_job job, "No #{job.repository}.git or #{job.repository}/.git at #{repo_root}"
-            next
-          end
-        end
-
-        # check if the commit needs to be fetched or not
-        commit_rev = `#{git} rev-list -n1 #{job.script_version.shellescape} 2>/dev/null`.chomp
-        unless $? == 0 and commit_rev == job.script_version
-          # commit does not exist in internal repository, so import the source repository using git fetch-pack
-          cmd = "#{git} fetch-pack --no-progress --all #{src_repo.shellescape}"
-          $stderr.puts "dispatch: #{cmd}"
-          $stderr.puts `#{cmd}`
-          unless $? == 0
-            fail_job job, "git fetch-pack failed"
-            next
-          end
-        end
-        @fetched_commits[job.script_version] = true
-      end
-
-      # @job_tags[J]==V if we know commit V has been tagged J in the
-      # arvados_internal repository. (J is a job UUID, V is a commit
-      # sha1.)
-      @job_tags ||= {}
-      if not @job_tags[job.uuid]
-        cmd = "#{git} tag #{job.uuid.shellescape} #{job.script_version.shellescape} 2>/dev/null"
-        $stderr.puts "dispatch: #{cmd}"
-        $stderr.puts `#{cmd}`
-        unless $? == 0
-          # git tag failed.  This may be because the tag already exists, so check for that.
-          tag_rev = `#{git} rev-list -n1 #{job.uuid.shellescape}`.chomp
-          if $? == 0
-            # We got a revision back
-            if tag_rev != job.script_version
-              # Uh oh, the tag doesn't point to the revision we were expecting.
-              # Someone has been monkeying with the job record and/or git.
-              fail_job job, "Existing tag #{job.uuid} points to commit #{tag_rev} but expected commit #{job.script_version}"
-              next
-            end
-            # we're okay (fall through to setting @job_tags below)
-          else
-            # git rev-list failed for some reason.
-            fail_job job, "'git tag' for #{job.uuid} failed but did not find any existing tag using 'git rev-list'"
-            next
-          end
-        end
-        # 'git tag' was successful, or there is an existing tag that points to the same revision.
-        @job_tags[job.uuid] = job.script_version
-      elsif @job_tags[job.uuid] != job.script_version
-        fail_job job, "Existing tag #{job.uuid} points to commit #{@job_tags[job.uuid]} but this job uses commit #{job.script_version}"
-        next
+      ready = (get_authorization(job) and
+               get_commit(job.repository, job.script_version) and
+               tag_commit(job.script_version, job.uuid))
+      if ready and job.arvados_sdk_version
+        ready = (get_commit("arvados", job.arvados_sdk_version) and
+                 tag_commit(job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"))
       end
+      next unless ready
 
-      cmd_args << crunch_job_bin
-      cmd_args << '--job-api-token'
-      cmd_args << @authorizations[job.uuid].api_token
-      cmd_args << '--job'
-      cmd_args << job.uuid
-      cmd_args << '--git-dir'
-      cmd_args << arvados_internal
+      cmd_args += [@crunch_job_bin,
+                   '--job-api-token', @authorizations[job.uuid].api_token,
+                   '--job', job.uuid,
+                   '--git-dir', @arvados_internal]
 
       $stderr.puts "dispatch: #{cmd_args.join ' '}"
 
@@ -373,7 +408,7 @@ class Dispatcher
       end
 
       $stderr.puts "dispatch: job #{job.uuid}"
-      start_banner = "dispatch: child #{t.pid} start #{Time.now.ctime.to_s}"
+      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
       $stderr.puts start_banner
 
       @running[job.uuid] = {
@@ -458,7 +493,7 @@ class Dispatcher
         if j[:log_throttle_bytes_skipped] > 0
           message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
           $stderr.puts message
-          j[:stderr_buf_to_flush] << "#{Time.now.ctime.to_s} #{message}\n"
+          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
         end
 
         j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
@@ -526,7 +561,7 @@ class Dispatcher
           if rate_limit j, line
             $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
             $stderr.puts line
-            pub_msg = "#{Time.now.ctime.to_s} #{line.strip}\n"
+            pub_msg = "#{LogTime.now} #{line.strip}\n"
             j[:stderr_buf_to_flush] << pub_msg
           end
         end
@@ -652,8 +687,6 @@ class Dispatcher
 
   def run
     act_as_system_user
-    @running ||= {}
-    @pipe_auth_tokens ||= { }
     $stderr.puts "dispatch: ready"
     while !$signal[:term] or @running.size > 0
       read_pipes
@@ -688,7 +721,6 @@ class Dispatcher
   protected
 
   def did_recently(thing, min_interval)
-    @did_recently ||= {}
     if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
       @did_recently[thing] = Time.now
       false
index 54329b0e933b57bc430842cc05f34ecabf8e87e8..0b4d8747ea35bdd66b66182215d821058b917dfe 100644 (file)
@@ -49,6 +49,12 @@ project_viewer:
   api_token: projectviewertoken1234567890abcdefghijklmnopqrstuv
   expires_at: 2038-01-01 00:00:00
 
+project_viewer_trustedclient:
+  api_client: trusted_workbench
+  user: project_viewer
+  api_token: projectviewertrustedtoken1234567890abcdefghijklmno
+  expires_at: 2038-01-01 00:00:00
+
 subproject_admin:
   api_client: untrusted
   user: subproject_admin
index f4614492f202e74d4aba34114c25bb647eed8c70..f28606a09b445e21d39d13238113e69410a47346 100644 (file)
@@ -336,6 +336,36 @@ collection_in_fuse_project:
   manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
   name: "collection in FUSE project"
 
+collection_with_no_name_in_aproject:
+  uuid: zzzzz-4zz18-00000nonamecoll
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+collection_to_search_for_in_aproject:
+  uuid: zzzzz-4zz18-abcd6fx123409f7
+  portable_data_hash: 5bd9c1ad0bc8c7f34be170a7b7b39089+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". juku76584cc2f85cedef654fjyhtgimh+3 0:3:foo\n"
+  name: "zzzzz-4zz18-abcd6fx123409f7 used to search with any"
+
+upload_sandbox:
+  uuid: zzzzz-4zz18-js48y3ykkfdfjd3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-09 15:03:16
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-09 15:03:16
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  updated_at: 2014-12-09 15:03:16
+  manifest_text: ''
+  name: upload sandbox
+
 # Test Helper trims the rest of the file
 
 # Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
index 6349a8287e4d75c5e04b5851c4898c1e56358f0b..86815c04633f2ebac6d73cace66fb7f5e8306ee0 100644 (file)
@@ -47,8 +47,10 @@ empty_lonely_group:
 
 all_users:
   uuid: zzzzz-j7d0g-fffffffffffffff
-  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  owner_uuid: zzzzz-tpzed-000000000000000
   name: All users
+  description: All users
+  group_class: role
 
 testusergroup_admins:
   uuid: zzzzz-j7d0g-48foin4vonvc2at
@@ -137,6 +139,18 @@ group_for_sharing_tests:
   description: Users who can share objects with each other
   group_class: role
 
+empty_project:
+  uuid: zzzzz-j7d0g-9otoxmrksam74q6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-16 15:56:27.967534940 Z
+  modified_by_client_uuid: ~
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-16 15:56:27.967358199 Z
+  name: Empty project
+  description: ~
+  updated_at: 2014-12-16 15:56:27.967242142 Z
+  group_class: project
+
 project_with_10_collections:
   uuid: zzzzz-j7d0g-0010collections
   owner_uuid: zzzzz-tpzed-user1withloadab
index ebf455aa5778b49da9f688a3e3b65d85a5751ab6..c04aa47d2f34f779b8cdbaf3d0a196a499b00e27 100644 (file)
@@ -1,5 +1,21 @@
 # Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
 
+system_user:
+  uuid: zzzzz-tpzed-000000000000000
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-11-27 06:38:21.215463000 Z
+  modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-11-27 06:38:21.208036000 Z
+  email: root
+  first_name: root
+  last_name: ''
+  identity_url:
+  is_admin: true
+  prefs: {}
+  updated_at: 2014-11-27 06:38:21.207873000 Z
+  is_active: true
+
 admin:
   owner_uuid: zzzzz-tpzed-000000000000000
   uuid: zzzzz-tpzed-d9tiejq69daie8f
@@ -144,7 +160,10 @@ inactive_but_signed_user_agreement:
   identity_url: https://inactive-but-agreeable-user.openid.local
   is_active: false
   is_admin: false
-  prefs: {}
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
 
 anonymous:
   owner_uuid: zzzzz-tpzed-000000000000000
index 4144d0a922eb43ade4373cc2d64c54bd044a6504..3a4a244edd9cc1a57e89230c5bea17b97ba322bd 100644 (file)
@@ -46,4 +46,48 @@ class ApplicationControllerTest < ActionController::TestCase
     assert_response 422
     check_error_token
   end
+
+  ['foo', '', 'FALSE', 'TRUE', nil, [true], {a:true}, '"true"'].each do |bogus|
+    test "bogus boolean parameter #{bogus.inspect} returns error" do
+      @controller = Arvados::V1::GroupsController.new
+      authorize_with :active
+      post :create, {
+        group: {},
+        ensure_unique_name: bogus
+      }
+      assert_response 422
+      assert_match(/parameter must be a boolean/, json_response['errors'].first,
+                   'Helpful error message not found')
+    end
+  end
+
+  [[true, [true, 'true', 1, '1']],
+   [false, [false, 'false', 0, '0']]].each do |bool, boolparams|
+    boolparams.each do |boolparam|
+      # Ensure boolparam is acceptable as a boolean
+      test "boolean parameter #{boolparam.inspect} acceptable" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {},
+          ensure_unique_name: boolparam
+        }
+        assert_response :success
+      end
+
+      # Ensure boolparam is acceptable as the _intended_ boolean
+      test "boolean parameter #{boolparam.inspect} accepted as #{bool.inspect}" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {
+            name: groups(:aproject).name,
+            owner_uuid: groups(:aproject).owner_uuid
+          },
+          ensure_unique_name: boolparam
+        }
+        assert_response (bool ? :success : 422)
+      end
+    end
+  end
 end
index 0862176050068a787b5c4f7b216005f5ab109447..269474a5f7f58872b90ba1a1f393276d46fd3cc1 100644 (file)
@@ -347,12 +347,12 @@ EOS
   test "search collections with 'any' operator" do
     authorize_with :active
     get :index, {
-      where: { any: ['contains', '7f9102c395f4ffc5e3'] }
+      where: { any: ['contains', 'd0bc8c7f34be170a7b7b'] }
     }
     assert_response :success
     found = assigns(:objects).collect(&:portable_data_hash)
-    assert_equal 2, found.count
-    assert_equal true, !!found.index('1f4b0bc7583c2a7f9102c395f4ffc5e3+45')
+    assert_equal 1, found.count
+    assert_equal true, !!found.index('5bd9c1ad0bc8c7f34be170a7b7b39089+45')
   end
 
   [false, true].each do |permit_unsigned|
index a3eebf3ed23111e28a51ded2763396b6ec5d428f..9b66851d7e0dc8885876b0b7b179c7e063782ed8 100644 (file)
@@ -90,24 +90,26 @@ class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
     assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
   end
 
-  test "do not reuse job because find_or_create=false" do
-    post :create, {
-      job: {
-        script: "hash",
-        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
-        repository: "foo",
-        script_parameters: {
-          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
-          an_integer: '1'
-        }
-      },
-      find_or_create: false
-    }
-    assert_response :success
-    assert_not_nil assigns(:object)
-    new_job = JSON.parse(@response.body)
-    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
-    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  [false, "false"].each do |whichfalse|
+    test "do not reuse job because find_or_create=#{whichfalse.inspect}" do
+      post :create, {
+        job: {
+          script: "hash",
+          script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+          repository: "foo",
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+            an_integer: '1'
+          }
+        },
+        find_or_create: whichfalse
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      new_job = JSON.parse(@response.body)
+      assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+      assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+    end
   end
 
   test "do not reuse job because output is not readable by user" do
@@ -671,7 +673,10 @@ class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
   test "can't reuse job with older Arvados SDK version" do
     params = {
       script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
-      runtime_constraints: {"arvados_sdk_version" => "master"},
+      runtime_constraints: {
+        "arvados_sdk_version" => "master",
+        "docker_image" => links(:docker_image_collection_tag).name,
+      },
     }
     check_new_job_created_from(job: params)
   end
index ea7b5b765a9ba25a26e0ec5899b6f8d0640492dc..82067b293b76939eed312b77df922a6aac3b4560 100644 (file)
@@ -2,12 +2,14 @@ require 'test_helper'
 
 class Arvados::V1::KeepDisksControllerTest < ActionController::TestCase
 
+  def default_ping_opts
+    {ping_secret: '', service_ssl_flag: false, service_port: 1234}
+  end
+
   test "add keep disk with admin token" do
     authorize_with :admin
-    post :ping, {
-      ping_secret: '',          # required by discovery doc, but ignored
-      filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f'
-    }
+    post :ping, default_ping_opts.
+      merge(filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f')
     assert_response :success
     assert_not_nil assigns(:object)
     new_keep_disk = JSON.parse(@response.body)
@@ -17,30 +19,27 @@ class Arvados::V1::KeepDisksControllerTest < ActionController::TestCase
   end
 
   [
-    {ping_secret: ''},
-    {ping_secret: '', filesystem_uuid: ''},
+    {},
+    {filesystem_uuid: ''},
   ].each do |opts|
-    test "add keep disk with no filesystem_uuid #{opts}" do
+    test "add keep disk with[out] filesystem_uuid #{opts}" do
       authorize_with :admin
-      post :ping, opts
+      post :ping, default_ping_opts.merge(opts)
       assert_response :success
       assert_not_nil JSON.parse(@response.body)['uuid']
     end
   end
 
   test "refuse to add keep disk without admin token" do
-    post :ping, {
-      ping_secret: '',
-    }
+    post :ping, default_ping_opts
     assert_response 404
   end
 
   test "ping keep disk" do
-    post :ping, {
-      id: keep_disks(:nonfull).uuid,
-      ping_secret: keep_disks(:nonfull).ping_secret,
-      filesystem_uuid: keep_disks(:nonfull).filesystem_uuid
-    }
+    post :ping, default_ping_opts.
+      merge(id: keep_disks(:nonfull).uuid,
+            ping_secret: keep_disks(:nonfull).ping_secret,
+            filesystem_uuid: keep_disks(:nonfull).filesystem_uuid)
     assert_response :success
     assert_not_nil assigns(:object)
     keep_disk = JSON.parse(@response.body)
index 7515e49c8f9e81a5cbf3d7ce270d749fc3ec8041..9bf1b0bab1ccdf37f7e5fe4ec63192ef5f475b76 100644 (file)
@@ -137,7 +137,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
   end
 
   test "filter links with 'is_a' operator with more than one" do
@@ -148,7 +148,10 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-(tpzed|j7d0g)-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f|
+                                 f.tail_uuid.match User.uuid_regex or
+                                 f.tail_uuid.match Group.uuid_regex
+                               }).count
   end
 
   test "filter links with 'is_a' operator with bogus type" do
@@ -169,7 +172,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.head_uuid.match /.....-4zz18-.............../}).count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match Collection.uuid_regex}).count
   end
 
   test "test can still use where tail_kind" do
@@ -180,7 +183,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
   end
 
   test "test can still use where head_kind" do
@@ -191,7 +194,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.head_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
   end
 
   test "test can still use filter tail_kind" do
@@ -202,7 +205,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
   end
 
   test "test can still use filter head_kind" do
@@ -213,7 +216,7 @@ class Arvados::V1::LinksControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.head_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
   end
 
   test "head_kind matches head_uuid" do
index f3826ca8c733a32b20497d301f2baa58769d2671..475e7d6672f372c5827644a8ddd43748f95e80f1 100644 (file)
@@ -29,7 +29,7 @@ class Arvados::V1::LogsControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.object_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
     l = JSON.parse(@response.body)
     assert_equal 'arvados#user', l['items'][0]['object_kind']
   end
@@ -42,7 +42,7 @@ class Arvados::V1::LogsControllerTest < ActionController::TestCase
     assert_response :success
     found = assigns(:objects)
     assert_not_equal 0, found.count
-    assert_equal found.count, (found.select { |f| f.object_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
   end
 
 end
index d74450587dc222524a73c7a604a59643bdd6e826..7ea231eecb9f28a35d8ccf67db2727f33446ff84 100644 (file)
@@ -108,6 +108,14 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase
     assert_nil(json_response["job"], "spectator can see node's assigned job")
   end
 
+  [:admin, :spectator].each do |user|
+    test "select param does not break node list for #{user}" do
+      authorize_with user
+      get :index, {select: ['domain']}
+      assert_response :success
+    end
+  end
+
   test "admin can associate a job with a node" do
     changed_node = nodes(:idle)
     assigned_job = jobs(:queued)
diff --git a/services/api/test/functional/database_controller_test.rb b/services/api/test/functional/database_controller_test.rb
new file mode 100644 (file)
index 0000000..4bda0d0
--- /dev/null
@@ -0,0 +1,47 @@
+require 'test_helper'
+
+class DatabaseControllerTest < ActionController::TestCase
+  include CurrentApiClient
+
+  test "reset fails with non-admin token" do
+    authorize_with :active
+    post :reset
+    assert_response 403
+  end
+
+  test "route not found when not in test mode" do
+    authorize_with :admin
+    env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      assert_raises ActionController::RoutingError do
+        post :reset
+      end
+    ensure
+      Rails.env = env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails when a non-test-fixture user exists" do
+    act_as_system_user do
+      User.create!(uuid: 'abcde-tpzed-123451234512345', email: 'bar@example.net')
+    end
+    authorize_with :admin
+    post :reset
+    assert_response 403
+  end
+
+  test "reset succeeds with admin token" do
+    new_uuid = nil
+    act_as_system_user do
+      new_uuid = Specimen.create.uuid
+    end
+    assert_not_empty Specimen.where(uuid: new_uuid)
+    authorize_with :admin
+    post :reset
+    assert_response 200
+    assert_empty Specimen.where(uuid: new_uuid)
+  end
+end
diff --git a/services/api/test/integration/cross_origin_test.rb b/services/api/test/integration/cross_origin_test.rb
new file mode 100644 (file)
index 0000000..ebe7ce7
--- /dev/null
@@ -0,0 +1,76 @@
+require 'test_helper'
+
+class CrossOriginTest < ActionDispatch::IntegrationTest
+  def options *args
+    # Rails doesn't support OPTIONS the same way as GET, POST, etc.
+    reset! unless integration_session
+    integration_session.__send__(:process, :options, *args).tap do
+      copy_session_variables!
+    end
+  end
+
+  %w(/login /logout /auth/example/callback /auth/joshid).each do |path|
+    test "OPTIONS requests are refused at #{path}" do
+      options path, {}, {}
+      assert_no_cors_headers
+    end
+
+    test "CORS headers do not exist at GET #{path}" do
+      get path, {}, {}
+      assert_no_cors_headers
+    end
+  end
+
+  %w(/discovery/v1/apis/arvados/v1/rest).each do |path|
+    test "CORS headers are set at GET #{path}" do
+      get path, {}, {}
+      assert_response :success
+      assert_cors_headers
+    end
+  end
+
+  ['/arvados/v1/collections',
+   '/arvados/v1/users',
+   '/arvados/v1/api_client_authorizations'].each do |path|
+    test "CORS headers are set and body is stub at OPTIONS #{path}" do
+      options path, {}, {}
+      assert_response :success
+      assert_cors_headers
+      assert_equal '-', response.body
+    end
+
+    test "CORS headers are set at authenticated GET #{path}" do
+      get path, {}, auth(:active_trustedclient)
+      assert_response :success
+      assert_cors_headers
+    end
+
+    # CORS headers are OK only if cookies are *not* used to determine
+    # whether a transaction is allowed. The following is a (far from
+    # perfect) test that the usual Rails cookie->session mechanism
+    # does not grant access to any resources.
+    ['GET', 'POST'].each do |method|
+      test "Session does not work at #{method} #{path}" do
+        send method.downcase, path, {format: 'json'}, {user_id: 1}
+        assert_response 401
+        assert_cors_headers
+      end
+    end
+  end
+
+  protected
+  def assert_cors_headers
+    assert_equal '*', response.headers['Access-Control-Allow-Origin']
+    allowed = response.headers['Access-Control-Allow-Methods'].split(', ')
+    %w(GET HEAD POST PUT DELETE).each do |m|
+      assert_includes allowed, m, "A-C-A-Methods should include #{m}"
+    end
+    assert_equal 'Authorization', response.headers['Access-Control-Allow-Headers']
+  end
+
+  def assert_no_cors_headers
+    response.headers.keys.each do |h|
+      assert_no_match /^Access-Control-/i, h
+    end
+  end
+end
diff --git a/services/api/test/integration/database_reset_test.rb b/services/api/test/integration/database_reset_test.rb
new file mode 100644 (file)
index 0000000..58f2abf
--- /dev/null
@@ -0,0 +1,75 @@
+require 'test_helper'
+
+class DatabaseResetTest < ActionDispatch::IntegrationTest
+  self.use_transactional_fixtures = false
+
+  test "reset fails when Rails.env != 'test'" do
+    rails_env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      post '/database/reset', {}, auth(:admin)
+      assert_response 404
+    ensure
+      Rails.env = rails_env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails with non-admin token" do
+    post '/database/reset', {}, auth(:active)
+    assert_response 403
+  end
+
+  test "database reset doesn't break basic CRUD operations" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    put('/arvados/v1/specimens/'+new_uuid,
+        {specimen: '{"properties":{}}'}, active_auth)
+    assert_response :success
+
+    delete '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+  end
+
+  test "roll back database change" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    old_uuid = specimens(:owned_by_active_user).uuid
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    delete '/arvados/v1/specimens/' + old_uuid, {}, active_auth
+    assert_response :success
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    # Reset to fixtures.
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    # New specimen should disappear. Old specimen should reappear.
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+    get '/arvados/v1/specimens/'+old_uuid, {}, active_auth
+    assert_response :success
+  end
+end
index b2ef13e1d3bdc2c69932729a0fa0b382ef357547..984f81fe51ea796f4a4c83064743ce4247221773 100644 (file)
@@ -19,7 +19,7 @@ class ErrorsTest < ActionDispatch::IntegrationTest
       # Generally, new routes should appear under /arvados/v1/. If
       # they appear elsewhere, that might have been caused by default
       # rails generator behavior that we don't want.
-      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|discovery\/.*|static\/.*|themes\/.*)(\(\.:format\))?$/,
+      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|database\/reset|discovery\/.*|static\/.*|themes\/.*)(\(\.:format\))?$/,
                    route.path.spec.to_s,
                    "Unexpected new route: #{route.path.spec}")
     end
diff --git a/services/api/test/job_logs/crunchstatshort.log b/services/api/test/job_logs/crunchstatshort.log
new file mode 100644 (file)
index 0000000..7b39318
--- /dev/null
@@ -0,0 +1 @@
+2014-11-07_23:33:51 qr1hi-8i9sb-nf3qk0xzwwz3lre 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys
index d59b44926fa88a857ce10b34eaca1b4a5d3cc769..216dd2d02d9ebaf3ab7b9fcd4345ad05bd374c9f 100644 (file)
@@ -49,6 +49,10 @@ class ActiveSupport::TestCase
     Thread.current[:api_client_uuid] = nil
     Thread.current[:api_client] = nil
     Thread.current[:user] = nil
+    restore_configuration
+  end
+
+  def restore_configuration
     # Restore configuration settings changed during tests
     $application_config.each do |k,v|
       if k.match /^[^.]*$/
index e47aa3b8921fc067a8ea5d24c0b7f59eaa231853..8c7576478912199371360b43154e3318480f49dc 100644 (file)
@@ -86,4 +86,27 @@ class ArvadosModelTest < ActiveSupport::TestCase
                    properties: {'foo' => 'bar'}.with_indifferent_access)
     end
   end
+
+  [['uuid', {unique: true}],
+   ['owner_uuid', {}]].each do |the_column, requires|
+    test "unique index on all models with #{the_column}" do
+      checked = 0
+      ActiveRecord::Base.connection.tables.each do |table|
+        columns = ActiveRecord::Base.connection.columns(table)
+
+        next unless columns.collect(&:name).include? the_column
+
+        indexes = ActiveRecord::Base.connection.indexes(table).reject do |index|
+          requires.map do |key, val|
+            index.send(key) == val
+          end.include? false
+        end
+        assert_includes indexes.collect(&:columns), [the_column], 'no index'
+        checked += 1
+      end
+      # Sanity check: make sure we didn't just systematically miss everything.
+      assert_operator(10, :<, checked,
+                      "Only #{checked} tables have a #{the_column}?!")
+    end
+  end
 end
index 65ca9357dc849966a5f87bee8188aaa1254852ca..24bc2600103b2589c578bad481f05e0b0b309867 100644 (file)
@@ -189,7 +189,6 @@ class JobTest < ActiveSupport::TestCase
   ].each do |parameters|
     test "verify job status #{parameters}" do
       job = Job.create! job_attrs
-      assert job.valid?, job.errors.full_messages.to_s
       assert_equal 'Queued', job.state, "job.state"
 
       parameters.each do |parameter|
@@ -280,11 +279,9 @@ class JobTest < ActiveSupport::TestCase
 
   test "verify job queue position" do
     job1 = Job.create! job_attrs
-    assert job1.valid?, job1.errors.full_messages.to_s
     assert_equal 'Queued', job1.state, "Incorrect job state for newly created job1"
 
     job2 = Job.create! job_attrs
-    assert job2.valid?, job2.errors.full_messages.to_s
     assert_equal 'Queued', job2.state, "Incorrect job state for newly created job2"
 
     assert_not_nil job1.queue_position, "Expected non-nil queue position for job1"
@@ -296,7 +293,10 @@ class JobTest < ActiveSupport::TestCase
   SDK_TAGGED = "00634b2b8a492d6f121e3cf1d6587b821136a9a7"
 
   def sdk_constraint(version)
-    {runtime_constraints: {"arvados_sdk_version" => version}}
+    {runtime_constraints: {
+        "arvados_sdk_version" => version,
+        "docker_image" => links(:docker_image_collection_tag).name,
+      }}
   end
 
   def check_job_sdk_version(expected)
@@ -347,6 +347,23 @@ class JobTest < ActiveSupport::TestCase
     assert_nil(job.arvados_sdk_version)
   end
 
+  test "job with SDK constraint, without Docker image is invalid" do
+    sdk_attrs = sdk_constraint("master")
+    sdk_attrs[:runtime_constraints].delete("docker_image")
+    job = Job.create(job_attrs(sdk_attrs))
+    refute(job.valid?, "Job valid with SDK version, without Docker image")
+    sdk_errors = job.errors.messages[:arvados_sdk_version] || []
+    refute_empty(sdk_errors.grep(/\bDocker\b/),
+                 "no Job SDK errors mention that Docker is required")
+  end
+
+  test "invalid to clear Docker image constraint when SDK constraint exists" do
+    job = Job.create!(job_attrs(sdk_constraint("master")))
+    job.runtime_constraints.delete("docker_image")
+    refute(job.valid?,
+           "Job with SDK constraint valid after clearing Docker image")
+  end
+
   test "can't create job with SDK version assigned directly" do
     check_creation_prohibited(arvados_sdk_version: SDK_MASTER)
   end
@@ -354,4 +371,33 @@ class JobTest < ActiveSupport::TestCase
   test "can't modify job to assign SDK version directly" do
     check_modification_prohibited(arvados_sdk_version: SDK_MASTER)
   end
+
+  test "job validation fails when collection uuid found in script_parameters" do
+    bad_params = {
+      script_parameters: {
+        'input' => {
+          'param1' => 'the collection uuid zzzzz-4zz18-012345678901234'
+        }
+      }
+    }
+    assert_raises(ActiveRecord::RecordInvalid,
+                  "created job with a collection uuid in script_parameters") do
+      job = Job.create!(job_attrs(bad_params))
+    end
+  end
+
+  test "job validation succeeds when no collection uuid in script_parameters" do
+    good_params = {
+      script_parameters: {
+        'arg1' => 'foo',
+        'arg2' => [ 'bar', 'baz' ],
+        'arg3' => {
+          'a' => 1,
+          'b' => [2, 3, 4],
+        }
+      }
+    }
+    job = Job.create!(job_attrs(good_params))
+    assert job.valid?
+  end
 end
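
In client terms, the validation above means job submissions must not embed collection uuids in script_parameters; referencing data by portable data hash presumably remains acceptable, since only the uuid pattern is rejected. A sketch with placeholder values:

    # Rejected by the new validation: a collection uuid in the parameters.
    bad_job = {'script_parameters': {
        'input': 'zzzzz-4zz18-012345678901234'}}

    # Fine: plain values, lists, and nested dicts.
    good_job = {'script_parameters': {
        'arg1': 'foo', 'arg2': ['bar', 'baz']}}

    # ...and, presumably, portable data hashes (placeholder value).
    pdh_job = {'script_parameters': {
        'input': 'acbd18db4cc2f85cedef654fccc4a4d8+3'}}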
index 9154c827ca64c4a7f8389168e004e3d8078caf7f..80ad6b334392e0cb4c5cfda414acf7d84d880cda 100644 (file)
@@ -5,7 +5,6 @@
 import os
 import sys
 import llfuse
-from llfuse import FUSEError
 import errno
 import stat
 import threading
@@ -20,6 +19,8 @@ import time
 import _strptime
 import calendar
 import threading
+import itertools
+
 from arvados.util import portable_data_hash_pattern, uuid_pattern, collection_uuid_pattern, group_uuid_pattern, user_uuid_pattern, link_uuid_pattern
 
 _logger = logging.getLogger('arvados.arvados_fuse')
@@ -303,18 +304,14 @@ class CollectionDirectory(Directory):
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
-    def new_collection(self, new_collection_object):
+    def new_collection(self, new_collection_object, coll_reader):
         self.collection_object = new_collection_object
 
         if self.collection_object_file is not None:
             self.collection_object_file.update(self.collection_object)
 
         self.clear()
-        collection = arvados.CollectionReader(
-            self.collection_object["manifest_text"], self.api,
-            self.api.localkeep(), num_retries=self.num_retries)
-        collection.normalize()
-        for s in collection.all_streams():
+        for s in coll_reader.all_streams():
             cwd = self
             for part in s.name().split('/'):
                 if part != '' and part != '.':
@@ -331,33 +328,36 @@ class CollectionDirectory(Directory):
                 return True
 
             with llfuse.lock_released:
-                new_collection_object = self.api.collections().get(
-                    uuid=self.collection_locator
-                    ).execute(num_retries=self.num_retries)
+                coll_reader = arvados.CollectionReader(
+                    self.collection_locator, self.api, self.api.localkeep(),
+                    num_retries=self.num_retries)
+                new_collection_object = coll_reader.api_response() or {}
+                # If the Collection only exists in Keep, there will be no API
+                # response.  Fill in the fields we need.
+                if 'uuid' not in new_collection_object:
+                    new_collection_object['uuid'] = self.collection_locator
                 if "portable_data_hash" not in new_collection_object:
                     new_collection_object["portable_data_hash"] = new_collection_object["uuid"]
+                if 'manifest_text' not in new_collection_object:
+                    new_collection_object['manifest_text'] = coll_reader.manifest_text()
+                coll_reader.normalize()
             # end with llfuse.lock_released, re-acquire lock
 
             if self.collection_object is None or self.collection_object["portable_data_hash"] != new_collection_object["portable_data_hash"]:
-                self.new_collection(new_collection_object)
+                self.new_collection(new_collection_object, coll_reader)
 
             self.fresh()
             return True
-        except apiclient.errors.HttpError as e:
-            if e.resp.status == 404:
-                _logger.warn("arv-mount %s: not found", self.collection_locator)
-            else:
-                _logger.error("arv-mount %s: error", self.collection_locator)
-                _logger.exception(detail)
+        except apiclient.errors.NotFoundError:
+            _logger.exception("arv-mount %s: error", self.collection_locator)
         except arvados.errors.ArgumentError as detail:
             _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
             if self.collection_object is not None and "manifest_text" in self.collection_object:
                 _logger.warning("arv-mount manifest_text is: %s", self.collection_object["manifest_text"])
-        except Exception as detail:
-            _logger.error("arv-mount %s: error", self.collection_locator)
+        except Exception:
+            _logger.exception("arv-mount %s: error", self.collection_locator)
             if self.collection_object is not None and "manifest_text" in self.collection_object:
                 _logger.error("arv-mount manifest_text is: %s", self.collection_object["manifest_text"])
-            _logger.exception(detail)
         return False
 
     def __getitem__(self, item):
@@ -391,18 +391,8 @@ class MagicDirectory(Directory):
     to readdir().
     '''
 
-    def __init__(self, parent_inode, inodes, api, num_retries):
-        super(MagicDirectory, self).__init__(parent_inode)
-        self.inodes = inodes
-        self.api = api
-        self.num_retries = num_retries
-        # Have to defer creating readme_file because at this point we don't
-        # yet have an inode assigned.
-        self.readme_file = None
-
-    def create_readme(self):
-        if self.readme_file is None:
-            text = '''This directory provides access to Arvados collections as subdirectories listed
+    README_TEXT = '''
+This directory provides access to Arvados collections as subdirectories listed
 by uuid (in the form 'zzzzz-4zz18-1234567890abcde') or portable data hash (in
 the form '1234567890abcdefghijklmnopqrstuv+123').
 
@@ -410,13 +400,27 @@ Note that this directory will appear empty until you attempt to access a
 specific collection subdirectory (such as trying to 'cd' into it), at which
 point the collection will actually be looked up on the server and the directory
 will appear if it exists.
-'''
-            self.readme_file = self.inodes.add_entry(StringFile(self.inode, text, time.time()))
-            self._entries["README"] = self.readme_file
+'''.lstrip()
 
-    def __contains__(self, k):
-        self.create_readme()
+    def __init__(self, parent_inode, inodes, api, num_retries):
+        super(MagicDirectory, self).__init__(parent_inode)
+        self.inodes = inodes
+        self.api = api
+        self.num_retries = num_retries
+
+    def __setattr__(self, name, value):
+        super(MagicDirectory, self).__setattr__(name, value)
+        # When we're assigned an inode, add a README.
+        if ((name == 'inode') and (self.inode is not None) and
+              (not self._entries)):
+            self._entries['README'] = self.inodes.add_entry(
+                StringFile(self.inode, self.README_TEXT, time.time()))
+            # If we're the root directory, add an identical by_id subdirectory.
+            if self.inode == llfuse.ROOT_INODE:
+                self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
+                        self.inode, self.inodes, self.api, self.num_retries))
 
+    def __contains__(self, k):
         if k in self._entries:
             return True
 
@@ -435,10 +439,6 @@ will appear if it exists.
             _logger.debug('arv-mount exception keep %s', e)
             return False
 
-    def items(self):
-        self.create_readme()
-        return self._entries.items()
-
     def __getitem__(self, item):
         if item in self:
             return self._entries[item]
@@ -454,8 +454,8 @@ class RecursiveInvalidateDirectory(Directory):
             super(RecursiveInvalidateDirectory, self).invalidate()
             for a in self._entries:
                 self._entries[a].invalidate()
-        except Exception as e:
-            _logger.exception(e)
+        except Exception:
+            _logger.exception("arv-mount: error during recursive invalidate")
         finally:
             if self.inode == llfuse.ROOT_INODE:
                 llfuse.lock.release()
@@ -673,8 +673,8 @@ class SharedDirectory(Directory):
                        lambda i: i[0],
                        lambda a, i: a.uuid == i[1]['uuid'],
                        lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time))
-        except Exception as e:
-            _logger.exception(e)
+        except Exception:
+            _logger.exception("arv-mount shared directory error")
 
 
 class FileHandle(object):
@@ -692,7 +692,7 @@ class Inodes(object):
 
     def __init__(self):
         self._entries = {}
-        self._counter = llfuse.ROOT_INODE
+        self._counter = itertools.count(llfuse.ROOT_INODE)
 
     def __getitem__(self, item):
         return self._entries[item]
@@ -710,9 +710,8 @@ class Inodes(object):
         return k in self._entries
 
     def add_entry(self, entry):
-        entry.inode = self._counter
+        entry.inode = next(self._counter)
         self._entries[entry.inode] = entry
-        self._counter += 1
         return entry
 
     def del_entry(self, entry):
@@ -839,8 +838,8 @@ class Operations(llfuse.Operations):
         except arvados.errors.NotFoundError as e:
             _logger.warning("Block not found: " + str(e))
             raise llfuse.FUSEError(errno.EIO)
-        except Exception as e:
-            _logger.exception(e)
+        except Exception:
+            _logger.exception("arv-mount read error")
             raise llfuse.FUSEError(errno.EIO)
 
     def release(self, fh):
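
Taken together, the MagicDirectory changes give a mounted root the following behavior (an illustrative sketch; the mount point and the portable data hash are placeholders):

    import os

    mnt = '/mnt/arv'   # wherever arv-mount was started
    print(os.listdir(mnt))   # ['README', 'by_id'] before any lookup
    pdh = '1234567890abcdefghijklmnopqrstuv+123'
    # Accessing a subdirectory triggers the on-demand lookup; a Keep-only
    # collection also works now, because CollectionReader fills in the
    # missing API fields itself.
    print(os.listdir(os.path.join(mnt, 'by_id', pdh)))
    print(os.listdir(os.path.join(mnt, 'by_id')))  # now includes pdh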
index c6f9424fddd77f048a98314707788516a3a0f5ba..43b563aa1db312d5f9990d776fe866bfba2e3f82 100644 (file)
@@ -5,22 +5,27 @@ import subprocess
 import time
 
 from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
 
 SETUP_DIR = os.path.dirname(__file__)
 README = os.path.join(SETUP_DIR, 'README.rst')
 
-cmd_opts = {'egg_info': {}}
-try:
-    git_tags = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct %h', SETUP_DIR],
-        stderr=open('/dev/null','w')).split()
-    assert len(git_tags) == 2
-except (AssertionError, OSError, subprocess.CalledProcessError):
-    pass
-else:
-    git_tags[0] = time.strftime('%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-    cmd_opts['egg_info']['tag_build'] = '.{}.{}'.format(*git_tags)
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
 
 
 setup(name='arvados_fuse',
@@ -37,12 +42,12 @@ setup(name='arvados_fuse',
         'bin/arv-mount'
         ],
       install_requires=[
-        'arvados-python-client>=0.1.20141103223015.68dae83',
+        'arvados-python-client>=0.1.20141203150737.277b3c7',
         'llfuse',
         'python-daemon'
         ],
       test_suite='tests',
       tests_require=['PyYAML'],
       zip_safe=False,
-      options=cmd_opts,
+      cmdclass={'egg_info': TagBuildWithCommit},
       )
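
The tag computed by TagBuildWithCommit can be reproduced by hand; this standalone sketch mirrors the logic above and can be run in any git checkout:

    import subprocess
    import time

    ts, sha = subprocess.check_output(
        ['git', 'log', '--first-parent', '--max-count=1',
         '--format=format:%ct %h']).split()
    print('.{}+{}'.format(
        time.strftime('%Y%m%d%H%M%S', time.gmtime(int(ts))), sha))
    # prints e.g. '.20141203150737+277b3c7' -- compare the version pin above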
index 092fed514b434fb20addcaa0be3bdfd5fa6c0278..84dceee13764ea6d1ab2327b27f44f1973af3ce7 100644 (file)
@@ -21,6 +21,15 @@ class MountTestBase(unittest.TestCase):
         run_test_server.authorize_with("admin")
         self.api = api = fuse.SafeApi(arvados.config)
 
+    def make_mount(self, root_class, *root_args):
+        operations = fuse.Operations(os.getuid(), os.getgid())
+        operations.inodes.add_entry(root_class(
+                llfuse.ROOT_INODE, operations.inodes, self.api, 0, *root_args))
+        llfuse.init(operations, self.mounttmp, [])
+        threading.Thread(None, llfuse.main).start()
+        # wait until the driver is finished initializing
+        operations.initlock.wait()
+
     def tearDown(self):
         run_test_server.stop()
 
@@ -36,6 +45,12 @@ class MountTestBase(unittest.TestCase):
         os.rmdir(self.mounttmp)
         shutil.rmtree(self.keeptmp)
 
+    def assertDirContents(self, subdir, expect_content):
+        path = self.mounttmp
+        if subdir:
+            path = os.path.join(path, subdir)
+        self.assertEqual(sorted(expect_content), sorted(os.listdir(path)))
+
 
 class FuseMountTest(MountTestBase):
     def setUp(self):
@@ -80,25 +95,9 @@ class FuseMountTest(MountTestBase):
         self.testcollection = cw.finish()
         self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
 
-    def assertDirContents(self, subdir, expect_content):
-        path = self.mounttmp
-        if subdir:
-            path = os.path.join(path, subdir)
-        self.assertEqual(sorted(expect_content), sorted(os.listdir(path)))
-
     def runTest(self):
-        # Create the request handler
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, self.testcollection))
+        self.make_mount(fuse.CollectionDirectory, self.testcollection)
 
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
-
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
-
-        # now check some stuff
         self.assertDirContents(None, ['thing1.txt', 'thing2.txt',
                                       'edgecases', 'dir1', 'dir2'])
         self.assertDirContents('dir1', ['thing3.txt', 'thing4.txt'])
@@ -123,6 +122,24 @@ class FuseMountTest(MountTestBase):
                 self.assertEqual(v, f.read())
 
 
+class FuseNoAPITest(MountTestBase):
+    def setUp(self):
+        super(FuseNoAPITest, self).setUp()
+        keep = arvados.keep.KeepClient(local_store=self.keeptmp)
+        self.file_data = "API-free text\n"
+        self.file_loc = keep.put(self.file_data)
+        self.coll_loc = keep.put(". {} 0:{}:api-free.txt\n".format(
+                self.file_loc, len(self.file_data)))
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+        self.assertDirContents(self.coll_loc, ['api-free.txt'])
+        with open(os.path.join(
+                self.mounttmp, self.coll_loc, 'api-free.txt')) as keep_file:
+            actual = keep_file.read(-1)
+        self.assertEqual(self.file_data, actual)
+
+
 class FuseMagicTest(MountTestBase):
     def setUp(self):
         super(FuseMagicTest, self).setUp()
@@ -136,31 +153,22 @@ class FuseMagicTest(MountTestBase):
         self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
 
     def runTest(self):
-        # Create the request handler
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.MagicDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0))
-
-        self.mounttmp = tempfile.mkdtemp()
-
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
-
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
-
-        # now check some stuff
-        d1 = os.listdir(self.mounttmp)
-        d1.sort()
-        self.assertEqual(['README'], d1)
-
-        d2 = os.listdir(os.path.join(self.mounttmp, self.testcollection))
-        d2.sort()
-        self.assertEqual(['thing1.txt'], d2)
-
-        d3 = os.listdir(self.mounttmp)
-        d3.sort()
-        self.assertEqual([self.testcollection, 'README'], d3)
+        self.make_mount(fuse.MagicDirectory)
+
+        mount_ls = os.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or
+                             arvados.util.uuid_pattern.match(fn)
+                             for fn in mount_ls),
+                         "new FUSE MagicDirectory lists Collection")
+        self.assertDirContents(self.testcollection, ['thing1.txt'])
+        self.assertDirContents(os.path.join('by_id', self.testcollection),
+                               ['thing1.txt'])
+        mount_ls = os.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertIn(self.testcollection, mount_ls)
+        self.assertIn(self.testcollection,
+                      os.listdir(os.path.join(self.mounttmp, 'by_id')))
 
         files = {}
         files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
@@ -172,15 +180,7 @@ class FuseMagicTest(MountTestBase):
 
 class FuseTagsTest(MountTestBase):
     def runTest(self):
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0))
-
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
-
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
+        self.make_mount(fuse.TagsDirectory)
 
         d1 = os.listdir(self.mounttmp)
         d1.sort()
@@ -196,7 +196,14 @@ class FuseTagsTest(MountTestBase):
 
 
 class FuseTagsUpdateTest(MountTestBase):
-    def runRealTest(self):
+    def tag_collection(self, coll_uuid, tag_name):
+        return self.api.links().create(
+            body={'link': {'head_uuid': coll_uuid,
+                           'link_class': 'tag',
+                           'name': tag_name,
+        }}).execute()
+
+    def runTest(self):
         operations = fuse.Operations(os.getuid(), os.getgid())
         e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, poll_time=1))
 
@@ -206,59 +213,28 @@ class FuseTagsUpdateTest(MountTestBase):
 
         # wait until the driver is finished initializing
         operations.initlock.wait()
+        self.assertIn('foo_tag', os.listdir(self.mounttmp))
 
-        d1 = os.listdir(self.mounttmp)
-        d1.sort()
-        self.assertEqual(['foo_tag'], d1)
-
-        self.api.links().create(body={'link': {
-            'head_uuid': 'fa7aeb5140e2848d39b416daeef4ffc5+45',
-            'link_class': 'tag',
-            'name': 'bar_tag'
-        }}).execute()
-
+        bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']
+        self.tag_collection(bar_uuid, 'fuse_test_tag')
         time.sleep(1)
+        self.assertIn('fuse_test_tag', os.listdir(self.mounttmp))
+        self.assertDirContents('fuse_test_tag', [bar_uuid])
 
-        d2 = os.listdir(self.mounttmp)
-        d2.sort()
-        self.assertEqual(['bar_tag', 'foo_tag'], d2)
-
-        d3 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
-        d3.sort()
-        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], d3)
-
-        l = self.api.links().create(body={'link': {
-            'head_uuid': 'ea10d51bcf88862dbcc36eb292017dfd+45',
-            'link_class': 'tag',
-            'name': 'bar_tag'
-        }}).execute()
-
+        baz_uuid = run_test_server.fixture('collections')['baz_file']['uuid']
+        l = self.tag_collection(baz_uuid, 'fuse_test_tag')
         time.sleep(1)
-
-        d4 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
-        d4.sort()
-        self.assertEqual(['ea10d51bcf88862dbcc36eb292017dfd+45', 'fa7aeb5140e2848d39b416daeef4ffc5+45'], d4)
+        self.assertDirContents('fuse_test_tag', [bar_uuid, baz_uuid])
 
         self.api.links().delete(uuid=l['uuid']).execute()
-
         time.sleep(1)
-
-        d5 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
-        d5.sort()
-        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], d5)
+        self.assertDirContents('fuse_test_tag', [bar_uuid])
 
 
 class FuseSharedTest(MountTestBase):
     def runTest(self):
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.SharedDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, self.api.users().current().execute()['uuid']))
-
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
-
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
+        self.make_mount(fuse.SharedDirectory,
+                        self.api.users().current().execute()['uuid'])
 
         # shared_dirs is a list of the directories exposed
         # by fuse.SharedDirectory (i.e. any object visible
@@ -300,26 +276,16 @@ class FuseSharedTest(MountTestBase):
 
 class FuseHomeTest(MountTestBase):
     def runTest(self):
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.ProjectDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, self.api.users().current().execute()))
-
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
-
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
+        self.make_mount(fuse.ProjectDirectory,
+                        self.api.users().current().execute())
 
         d1 = os.listdir(self.mounttmp)
-        d1.sort()
         self.assertIn('Unrestricted public data', d1)
 
         d2 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data'))
-        d2.sort()
         self.assertEqual(['GNU General Public License, version 3'], d2)
 
         d3 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data', 'GNU General Public License, version 3'))
-        d3.sort()
         self.assertEqual(["GNU_General_Public_License,_version_3.pdf"], d3)
 
 
index de4ccafc28dd90f6ddbab4690bcc02976dcfa4af..ea14c6ca48e89e59c5bc414dedfaf87e1ac0f782 100644 (file)
@@ -1,12 +1,13 @@
 package main
 
 import (
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "flag"
        "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "github.com/gorilla/mux"
        "io"
+       "io/ioutil"
        "log"
        "net"
        "net/http"
@@ -29,6 +30,7 @@ func main() {
                no_get           bool
                no_put           bool
                default_replicas int
+               timeout          int64
                pidfile          string
        )
 
@@ -60,6 +62,12 @@ func main() {
                2,
                "Default number of replicas to write if not specified by the client.")
 
+       flagset.Int64Var(
+               &timeout,
+               "timeout",
+               15,
+               "Timeout on requests to internal Keep services (default 15 seconds)")
+
        flagset.StringVar(
                &pidfile,
                "pid",
@@ -90,6 +98,8 @@ func main() {
 
        kc.Want_replicas = default_replicas
 
+       kc.Client.Timeout = time.Duration(timeout) * time.Second
+
        listener, err = net.Listen("tcp", listen)
        if err != nil {
                log.Fatalf("Could not listen on %v", listen)
@@ -104,7 +114,6 @@ func main() {
                s := <-sig
                log.Println("caught signal:", s)
                listener.Close()
-               listener = nil
        }(term)
        signal.Notify(term, syscall.SIGTERM)
        signal.Notify(term, syscall.SIGINT)
@@ -222,6 +231,8 @@ type PutBlockHandler struct {
 
 type InvalidPathHandler struct{}
 
+type OptionsHandler struct{}
+
 // MakeRESTRouter
 //     Returns a mux.Router that passes GET and PUT requests to the
 //     appropriate handlers.
@@ -244,6 +255,9 @@ func MakeRESTRouter(
        if enable_put {
                rest.Handle(`/{hash:[0-9a-f]{32}}+{hints}`, PutBlockHandler{kc, t}).Methods("PUT")
                rest.Handle(`/{hash:[0-9a-f]{32}}`, PutBlockHandler{kc, t}).Methods("PUT")
+               rest.Handle(`/`, PutBlockHandler{kc, t}).Methods("POST")
+               rest.Handle(`/{any}`, OptionsHandler{}).Methods("OPTIONS")
+               rest.Handle(`/`, OptionsHandler{}).Methods("OPTIONS")
        }
 
        rest.NotFoundHandler = InvalidPathHandler{}
@@ -251,12 +265,25 @@ func MakeRESTRouter(
        return rest
 }
 
+func SetCorsHeaders(resp http.ResponseWriter) {
+       resp.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, POST, PUT, OPTIONS")
+       resp.Header().Set("Access-Control-Allow-Origin", "*")
+       resp.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+       resp.Header().Set("Access-Control-Max-Age", "86486400")
+}
+
 func (this InvalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        log.Printf("%s: %s %s unroutable", GetRemoteAddress(req), req.Method, req.URL.Path)
        http.Error(resp, "Bad request", http.StatusBadRequest)
 }
 
+func (this OptionsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       log.Printf("%s: %s %s", GetRemoteAddress(req), req.Method, req.URL.Path)
+       SetCorsHeaders(resp)
+}
+
 func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       SetCorsHeaders(resp)
 
        kc := *this.KeepClient
 
@@ -265,7 +292,7 @@ func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
 
        locator := keepclient.MakeLocator2(hash, hints)
 
-       log.Printf("%s: %s %s", GetRemoteAddress(req), req.Method, hash)
+       log.Printf("%s: %s %s begin", GetRemoteAddress(req), req.Method, hash)
 
        var pass bool
        var tok string
@@ -290,36 +317,48 @@ func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
                blocklen, _, err = kc.AuthorizedAsk(hash, locator.Signature, locator.Timestamp)
        }
 
-       if blocklen > 0 {
+       if blocklen > -1 {
                resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+       } else {
+               log.Printf("%s: %s %s Keep server did not return Content-Length",
+                       GetRemoteAddress(req), req.Method, hash)
        }
 
+       var status = 0
        switch err {
        case nil:
+               status = http.StatusOK
                if reader != nil {
                        n, err2 := io.Copy(resp, reader)
-                       if n != blocklen {
-                               log.Printf("%s: %s %s mismatched return %v with Content-Length %v error %v", GetRemoteAddress(req), req.Method, hash, n, blocklen, err2)
+                       if blocklen > -1 && n != blocklen {
+                               log.Printf("%s: %s %s %v %v mismatched copy size expected Content-Length: %v",
+                                       GetRemoteAddress(req), req.Method, hash, status, n, blocklen)
                        } else if err2 == nil {
-                               log.Printf("%s: %s %s success returned %v bytes", GetRemoteAddress(req), req.Method, hash, n)
+                               log.Printf("%s: %s %s %v %v",
+                                       GetRemoteAddress(req), req.Method, hash, status, n)
                        } else {
-                               log.Printf("%s: %s %s returned %v bytes error %v", GetRemoteAddress(req), req.Method, hash, n, err.Error())
+                               log.Printf("%s: %s %s %v %v copy error: %v",
+                                       GetRemoteAddress(req), req.Method, hash, status, n, err2.Error())
                        }
                } else {
-                       log.Printf("%s: %s %s success", GetRemoteAddress(req), req.Method, hash)
+                       log.Printf("%s: %s %s %v 0", GetRemoteAddress(req), req.Method, hash, status)
                }
        case keepclient.BlockNotFound:
+               status = http.StatusNotFound
                http.Error(resp, "Not found", http.StatusNotFound)
        default:
+               status = http.StatusBadGateway
                http.Error(resp, err.Error(), http.StatusBadGateway)
        }
 
        if err != nil {
-               log.Printf("%s: %s %s error %s", GetRemoteAddress(req), req.Method, hash, err.Error())
+               log.Printf("%s: %s %s %v error: %v",
+                       GetRemoteAddress(req), req.Method, hash, status, err.Error())
        }
 }
 
 func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       SetCorsHeaders(resp)
 
        kc := *this.KeepClient
 
@@ -339,7 +378,7 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
 
        log.Printf("%s: %s %s Content-Length %v", GetRemoteAddress(req), req.Method, hash, contentLength)
 
-       if contentLength < 1 {
+       if contentLength < 0 {
                http.Error(resp, "Must include Content-Length header", http.StatusLengthRequired)
                return
        }
@@ -371,12 +410,25 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
        }
 
        // Now try to put the block through
-       hash, replicas, err := kc.PutHR(hash, req.Body, contentLength)
+       var replicas int
+       var put_err error
+       if hash == "" {
+               if bytes, err := ioutil.ReadAll(req.Body); err != nil {
+                       msg := fmt.Sprintf("Error reading request body: %s", err)
+                       log.Print(msg)
+                       http.Error(resp, msg, http.StatusInternalServerError)
+                       return
+               } else {
+                       hash, replicas, put_err = kc.PutB(bytes)
+               }
+       } else {
+               hash, replicas, put_err = kc.PutHR(hash, req.Body, contentLength)
+       }
 
        // Tell the client how many successful PUTs we accomplished
        resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", replicas))
 
-       switch err {
+       switch put_err {
        case nil:
                // Default will return http.StatusOK
                log.Printf("%s: %s %s finished, stored %v replicas (desired %v)", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas)
@@ -400,15 +452,15 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
                                log.Printf("%s: wrote %v bytes to response body and got error %v", n, err2.Error())
                        }
                } else {
-                       http.Error(resp, "", http.StatusServiceUnavailable)
+                       http.Error(resp, put_err.Error(), http.StatusServiceUnavailable)
                }
 
        default:
-               http.Error(resp, err.Error(), http.StatusBadGateway)
+               http.Error(resp, put_err.Error(), http.StatusBadGateway)
        }
 
-       if err != nil {
-               log.Printf("%s: %s %s stored %v replicas (desired %v) got error %v", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas, err.Error())
+       if put_err != nil {
+               log.Printf("%s: %s %s stored %v replicas (desired %v) got error %v", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas, put_err.Error())
        }
 
 }
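
From a client's perspective, the new keepproxy behavior looks roughly like this. A sketch only: the proxy URL and token are placeholders, and `requests` stands in for a browser's XHR machinery.

    import requests

    proxy = 'http://localhost:25100'
    token = 'placeholder-api-token'

    # CORS preflight: OPTIONS on any path answers 200 with an empty body
    # and the Access-Control-* headers set by SetCorsHeaders above.
    r = requests.options(proxy + '/d41d8cd98f00b204e9800998ecf8427e')
    assert r.headers['Access-Control-Allow-Origin'] == '*'

    # POST / with raw data and no hash in the URL: the proxy reads the
    # body, computes the MD5 itself, and returns the resulting locator.
    r = requests.post(proxy + '/', data='qux', headers={
        'Authorization': 'OAuth2 ' + token,
        'Content-Type': 'application/octet-stream'})
    print(r.text)   # '<md5-of-qux>+3'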
index 88ac8a6a1dc1ec3c4e6cbb055ed40faaa9c63cc7..8acf43abd4eba5094d89e4443a0d6a067139aa80 100644 (file)
@@ -1,11 +1,11 @@
 package main
 
 import (
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "crypto/md5"
        "crypto/tls"
        "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
        . "gopkg.in/check.v1"
        "io"
        "io/ioutil"
@@ -14,6 +14,7 @@ import (
        "net/url"
        "os"
        "os/exec"
+       "strings"
        "testing"
        "time"
 )
@@ -38,7 +39,9 @@ func pythonDir() string {
 // avoids a race condition where we hit a "connection refused" error
 // because we start testing the proxy too soon.
 func waitForListener() {
-       const (ms = 5)
+       const (
+               ms = 5
+       )
        for i := 0; listener == nil && i < 1000; i += ms {
                time.Sleep(ms * time.Millisecond)
        }
@@ -137,6 +140,7 @@ func runProxy(c *C, args []string, token string, port int) keepclient.KeepClient
        os.Args = append(args, fmt.Sprintf("-listen=:%v", port))
        os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
 
+       listener = nil
        go main()
        time.Sleep(100 * time.Millisecond)
 
@@ -148,7 +152,7 @@ func runProxy(c *C, args []string, token string, port int) keepclient.KeepClient
        c.Assert(err, Equals, nil)
        c.Check(kc.Using_proxy, Equals, true)
        c.Check(len(kc.ServiceRoots()), Equals, 1)
-       for _, root := range(kc.ServiceRoots()) {
+       for _, root := range kc.ServiceRoots() {
                c.Check(root, Equals, fmt.Sprintf("http://localhost:%v", port))
        }
        os.Setenv("ARVADOS_KEEP_PROXY", "")
@@ -161,6 +165,7 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
 
        os.Args = []string{"keepproxy", "-listen=:29950"}
        os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       listener = nil
        go main()
        time.Sleep(100 * time.Millisecond)
 
@@ -218,11 +223,30 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
                log.Print("Get")
        }
 
+       {
+               var rep int
+               var err error
+               hash2, rep, err = kc.PutB([]byte(""))
+               c.Check(hash2, Matches, `^d41d8cd98f00b204e9800998ecf8427e\+0(\+.+)?$`)
+               c.Check(rep, Equals, 2)
+               c.Check(err, Equals, nil)
+               log.Print("PutB zero block")
+       }
+
+       {
+               reader, blocklen, _, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e")
+               c.Assert(err, Equals, nil)
+               all, err := ioutil.ReadAll(reader)
+               c.Check(err, Equals, nil)
+               c.Check(all, DeepEquals, []byte(""))
+               c.Check(blocklen, Equals, int64(0))
+               log.Print("Get zero block")
+       }
+
        log.Print("TestPutAndGet done")
 }
 
 func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
-       log.Print("TestPutAndGet start")
+       log.Print("TestPutAskGetForbidden start")
 
        kc := runProxy(c, []string{"keepproxy"}, "123abc", 29951)
        waitForListener()
@@ -260,7 +284,7 @@ func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
                log.Print("Get")
        }
 
-       log.Print("TestPutAndGetForbidden done")
+       log.Print("TestPutAskGetForbidden done")
 }
 
 func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
@@ -320,3 +344,56 @@ func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
 
        log.Print("TestPutDisabled done")
 }
+
+func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
+       runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29954)
+       waitForListener()
+       defer closeListener()
+
+       {
+               client := http.Client{}
+               req, err := http.NewRequest("OPTIONS",
+                       fmt.Sprintf("http://localhost:29954/%x+3",
+                               md5.Sum([]byte("foo"))),
+                       nil)
+               c.Assert(err, Equals, nil)
+               req.Header.Add("Access-Control-Request-Method", "PUT")
+               req.Header.Add("Access-Control-Request-Headers", "Authorization, X-Keep-Desired-Replicas")
+               resp, err := client.Do(req)
+               c.Check(err, Equals, nil)
+               c.Check(resp.StatusCode, Equals, 200)
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Check(err, Equals, nil)
+               c.Check(string(body), Equals, "")
+               c.Check(resp.Header.Get("Access-Control-Allow-Methods"), Equals, "GET, HEAD, POST, PUT, OPTIONS")
+               c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+       }
+
+       {
+               resp, err := http.Get(
+                       fmt.Sprintf("http://localhost:29954/%x+3",
+                               md5.Sum([]byte("foo"))))
+               c.Check(err, Equals, nil)
+               c.Check(resp.Header.Get("Access-Control-Allow-Headers"), Equals, "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+               c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+       }
+}
+
+func (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {
+       runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29955)
+       waitForListener()
+       defer closeListener()
+
+       {
+               client := http.Client{}
+               req, err := http.NewRequest("POST",
+                       "http://localhost:29955/",
+                       strings.NewReader("qux"))
+               req.Header.Add("Authorization", "OAuth2 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+               req.Header.Add("Content-Type", "application/octet-stream")
+               resp, err := client.Do(req)
+               c.Check(err, Equals, nil)
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Check(err, Equals, nil)
+               c.Check(string(body), Equals,
+                       fmt.Sprintf("%x+%d", md5.Sum([]byte("qux")), 3))
+       }
+}
index a954d2b0385d82dfe3e33303898cb297630e25a7..05b410c97dd28c74cdb5e4639c15572497975a89 100644 (file)
@@ -81,10 +81,11 @@ func TestGetHandler(t *testing.T) {
                "Unauthenticated request, unsigned locator",
                string(TEST_BLOCK),
                response)
-       received_xbs := response.Header().Get("X-Block-Size")
-       expected_xbs := fmt.Sprintf("%d", len(TEST_BLOCK))
-       if received_xbs != expected_xbs {
-               t.Errorf("expected X-Block-Size %s, got %s", expected_xbs, received_xbs)
+
+       received_cl := response.Header().Get("Content-Length")
+       expected_cl := fmt.Sprintf("%d", len(TEST_BLOCK))
+       if received_cl != expected_cl {
+               t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
        }
 
        // ----------------
@@ -102,10 +103,11 @@ func TestGetHandler(t *testing.T) {
                "Authenticated request, signed locator", http.StatusOK, response)
        ExpectBody(t,
                "Authenticated request, signed locator", string(TEST_BLOCK), response)
-       received_xbs = response.Header().Get("X-Block-Size")
-       expected_xbs = fmt.Sprintf("%d", len(TEST_BLOCK))
-       if received_xbs != expected_xbs {
-               t.Errorf("expected X-Block-Size %s, got %s", expected_xbs, received_xbs)
+
+       received_cl = response.Header().Get("Content-Length")
+       expected_cl = fmt.Sprintf("%d", len(TEST_BLOCK))
+       if received_cl != expected_cl {
+               t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
        }
 
        // Authenticated request, unsigned locator
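
The switch from X-Block-Size to the standard Content-Length header also simplifies clients. A quick consistency check against a keepstore server might look like this (a sketch: host, token, and locator are placeholders, and a signed locator may be required depending on server configuration):

    import requests

    keepstore = 'http://keep0.example:25107'
    locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'   # md5('foo') + size
    r = requests.get('%s/%s' % (keepstore, locator),
                     headers={'Authorization': 'OAuth2 placeholder-token'})
    assert int(r.headers['Content-Length']) == len(r.content)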
index bd1ca67bfc26643190b2e94a0169bc58f2030c88..c7559a1bee313783372cfa9e88f918618459dae5 100644 (file)
@@ -175,7 +175,7 @@ func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
                return
        }
 
-       resp.Header().Set("X-Block-Size", fmt.Sprintf("%d", len(block)))
+       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(block)))
 
        _, err = resp.Write(block)
 
index fd4e23467d63d8e91cece66d8e5cd46be7c35cfb..e30df876322ab9c09acdbab110f851ea0a954e20 100644 (file)
@@ -7,12 +7,14 @@ import (
        "github.com/gorilla/mux"
        "log"
        "net/http"
+       "strings"
 )
 
 type LoggingResponseWriter struct {
        Status int
        Length int
        http.ResponseWriter
+       ResponseBody string
 }
 
 func (loggingWriter *LoggingResponseWriter) WriteHeader(code int) {
@@ -22,6 +24,9 @@ func (loggingWriter *LoggingResponseWriter) WriteHeader(code int) {
 
 func (loggingWriter *LoggingResponseWriter) Write(data []byte) (int, error) {
        loggingWriter.Length += len(data)
+       if loggingWriter.Status >= 400 {
+               loggingWriter.ResponseBody += string(data)
+       }
        return loggingWriter.ResponseWriter.Write(data)
 }
 
@@ -35,7 +40,12 @@ func MakeLoggingRESTRouter() *LoggingRESTRouter {
 }
 
 func (loggingRouter *LoggingRESTRouter) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-       loggingWriter := LoggingResponseWriter{200, 0, resp}
+       loggingWriter := LoggingResponseWriter{200, 0, resp, ""}
        loggingRouter.router.ServeHTTP(&loggingWriter, req)
-       log.Printf("[%s] %s %s %d %d", req.RemoteAddr, req.Method, req.URL.Path[1:], loggingWriter.Status, loggingWriter.Length)
+       statusText := "OK"
+       if loggingWriter.Status >= 400 {
+               statusText = strings.Replace(loggingWriter.ResponseBody, "\n", "", -1)
+       }
+       log.Printf("[%s] %s %s %d %d \"%s\"", req.RemoteAddr, req.Method, req.URL.Path[1:], loggingWriter.Status, loggingWriter.Length, statusText)
+
 }
deleted file mode 100644 (file)
index d6bc7fefd17b560b91e44b1fb806838ecd913cb0..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1,5 +0,0 @@
-*.pyc
-*.egg
-*.egg-info
-build/
-dist/
new file mode 120000 (symlink)
index 0000000000000000000000000000000000000000..ed3b3622f2486f6bd6ff8c3f486ef80fbb045db0
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
index 46a103eb02985a7ba9e24af464e96e0cf6dd5a7b..6319f4bbfc5cece52832842cfda05d9ae9253005 100644 (file)
@@ -30,12 +30,10 @@ class RemotePollLoopActor(actor_class):
     response to subscribers.  It takes care of error handling, and retrying
     requests with exponential backoff.
 
-    To use this actor, define CLIENT_ERRORS and the _send_request method.
-    If you also define an _item_key method, this class will support
-    subscribing to a specific item by key in responses.
+    To use this actor, define the _send_request method.  If you also
+    define an _item_key method, this class will support subscribing to
+    a specific item by key in responses.
     """
-    CLIENT_ERRORS = ()
-
     def __init__(self, client, timer_actor, poll_wait=60, max_poll_wait=180):
         super(RemotePollLoopActor, self).__init__()
         self._client = client
@@ -87,6 +85,9 @@ class RemotePollLoopActor(actor_class):
         return "{} got error: {} - waiting {} seconds".format(
             self.log_prefix, error, self.poll_wait)
 
+    def is_common_error(self, exception):
+        return False
+
     def poll(self, scheduled_start=None):
         self._logger.debug("%s sending poll", self.log_prefix)
         start_time = time.time()
@@ -96,7 +97,7 @@ class RemotePollLoopActor(actor_class):
             response = self._send_request()
         except Exception as error:
             errmsg = self._got_error(error)
-            if isinstance(error, self.CLIENT_ERRORS):
+            if self.is_common_error(error):
                 self._logger.warning(errmsg)
             else:
                 self._logger.exception(errmsg)
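
A minimal subclass satisfying the revised contract might look like the sketch below. The class and key names are hypothetical; only _send_request is required, while _item_key and is_common_error are optional hooks (ARVADOS_ERRORS here is the tuple defined in the node manager's config module).

    class NodeListPoller(RemotePollLoopActor):
        def _send_request(self):
            # Called on every poll; the response is sent to subscribers.
            return self._client.nodes().list().execute()

        def _item_key(self, node):
            # Optional: lets callers subscribe to a single item by key.
            return node['uuid']

        def is_common_error(self, exception):
            # Optional: expected, transient failures get a warning line
            # instead of a full traceback.
            return isinstance(exception, ARVADOS_ERRORS)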
index ae0a65b9317e86f8d53afa00ba7b9483f047c4aa..48e8dcf45888de232ba9004c290023e6bf5999b7 100644 (file)
@@ -19,32 +19,38 @@ class ComputeNodeStateChangeBase(config.actor_class):
     This base class takes care of retrying changes and notifying
     subscribers when the change is finished.
     """
-    def __init__(self, logger_name, timer_actor, retry_wait, max_retry_wait):
+    def __init__(self, logger_name, cloud_client, timer_actor,
+                 retry_wait, max_retry_wait):
         super(ComputeNodeStateChangeBase, self).__init__()
         self._later = self.actor_ref.proxy()
-        self._timer = timer_actor
         self._logger = logging.getLogger(logger_name)
+        self._cloud = cloud_client
+        self._timer = timer_actor
         self.min_retry_wait = retry_wait
         self.max_retry_wait = max_retry_wait
         self.retry_wait = retry_wait
         self.subscribers = set()
 
     @staticmethod
-    def _retry(errors):
+    def _retry(errors=()):
         """Retry decorator for an actor method that makes remote requests.
 
         Use this function to decorate an actor method, and pass in a
         tuple of exceptions to catch.  This decorator will schedule
         retries of that method with exponential backoff if the
-        original method raises any of the given errors.
+        original method raises a known cloud driver error, or any of the
+        given exception types.
         """
         def decorator(orig_func):
             @functools.wraps(orig_func)
-            def wrapper(self, *args, **kwargs):
+            def retry_wrapper(self, *args, **kwargs):
                 start_time = time.time()
                 try:
                     orig_func(self, *args, **kwargs)
-                except errors as error:
+                except Exception as error:
+                    if not (isinstance(error, errors) or
+                            self._cloud.is_cloud_exception(error)):
+                        raise
                     self._logger.warning(
                         "Client error: %s - waiting %s seconds",
                         error, self.retry_wait)
@@ -56,7 +62,7 @@ class ComputeNodeStateChangeBase(config.actor_class):
                                           self.max_retry_wait)
                 else:
                     self.retry_wait = self.min_retry_wait
-            return wrapper
+            return retry_wrapper
         return decorator
 
     def _finished(self):
@@ -86,9 +92,9 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
                  cloud_size, arvados_node=None,
                  retry_wait=1, max_retry_wait=180):
         super(ComputeNodeSetupActor, self).__init__(
-            'arvnodeman.nodeup', timer_actor, retry_wait, max_retry_wait)
+            'arvnodeman.nodeup', cloud_client, timer_actor,
+            retry_wait, max_retry_wait)
         self._arvados = arvados_client
-        self._cloud = cloud_client
         self.cloud_size = cloud_size
         self.arvados_node = None
         self.cloud_node = None
@@ -97,12 +103,12 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
         else:
             self._later.prepare_arvados_node(arvados_node)
 
-    @ComputeNodeStateChangeBase._retry(config.ARVADOS_ERRORS)
+    @ComputeNodeStateChangeBase._retry()
     def create_arvados_node(self):
         self.arvados_node = self._arvados.nodes().create(body={}).execute()
         self._later.create_cloud_node()
 
-    @ComputeNodeStateChangeBase._retry(config.ARVADOS_ERRORS)
+    @ComputeNodeStateChangeBase._retry()
     def prepare_arvados_node(self, node):
         self.arvados_node = self._arvados.nodes().update(
             uuid=node['uuid'],
@@ -116,13 +122,19 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
             ).execute()
         self._later.create_cloud_node()
 
-    @ComputeNodeStateChangeBase._retry(config.CLOUD_ERRORS)
+    @ComputeNodeStateChangeBase._retry()
     def create_cloud_node(self):
         self._logger.info("Creating cloud node with size %s.",
                           self.cloud_size.name)
         self.cloud_node = self._cloud.create_node(self.cloud_size,
                                                   self.arvados_node)
         self._logger.info("Cloud node %s created.", self.cloud_node.id)
+        self._later.post_create()
+
+    @ComputeNodeStateChangeBase._retry()
+    def post_create(self):
+        self._cloud.post_create_node(self.cloud_node)
+        self._logger.info("%s post-create work done.", self.cloud_node.id)
         self._finished()
 
     def stop_if_no_cloud_node(self):
@@ -136,12 +148,18 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
     This actor simply destroys a cloud node, retrying as needed.
     """
     def __init__(self, timer_actor, cloud_client, node_monitor,
-                 retry_wait=1, max_retry_wait=180):
+                 cancellable=True, retry_wait=1, max_retry_wait=180):
+        # If a ShutdownActor is cancellable, it will ask the
+        # ComputeNodeMonitorActor if it's still eligible before taking each
+        # action, and stop the shutdown process if the node is no longer
+        # eligible.  Normal shutdowns based on job demand should be
+        # cancellable; shutdowns based on node misbehavior should not.
         super(ComputeNodeShutdownActor, self).__init__(
-            'arvnodeman.nodedown', timer_actor, retry_wait, max_retry_wait)
-        self._cloud = cloud_client
+            'arvnodeman.nodedown', cloud_client, timer_actor,
+            retry_wait, max_retry_wait)
         self._monitor = node_monitor.proxy()
         self.cloud_node = self._monitor.cloud_node.get()
+        self.cancellable = cancellable
         self.success = None
 
     def on_start(self):
@@ -153,8 +171,9 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
 
     def _stop_if_window_closed(orig_func):
         @functools.wraps(orig_func)
-        def wrapper(self, *args, **kwargs):
-            if not self._monitor.shutdown_eligible().get():
+        def stop_wrapper(self, *args, **kwargs):
+            if (self.cancellable and
+                  (not self._monitor.shutdown_eligible().get())):
                 self._logger.info(
                     "Cloud node %s shutdown cancelled - no longer eligible.",
                     self.cloud_node.id)
@@ -162,10 +181,10 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
                 return None
             else:
                 return orig_func(self, *args, **kwargs)
-        return wrapper
+        return stop_wrapper
 
     @_stop_if_window_closed
-    @ComputeNodeStateChangeBase._retry(config.CLOUD_ERRORS)
+    @ComputeNodeStateChangeBase._retry()
     def shutdown_node(self):
         if self._cloud.destroy_node(self.cloud_node):
             self._logger.info("Cloud node %s shut down.", self.cloud_node.id)
@@ -203,14 +222,14 @@ class ComputeNodeUpdateActor(config.actor_class):
 
     def _throttle_errors(orig_func):
         @functools.wraps(orig_func)
-        def wrapper(self, *args, **kwargs):
+        def throttle_wrapper(self, *args, **kwargs):
             throttle_time = self.next_request_time - time.time()
             if throttle_time > 0:
                 time.sleep(throttle_time)
             self.next_request_time = time.time()
             try:
                 result = orig_func(self, *args, **kwargs)
-            except config.CLOUD_ERRORS:
+            except Exception as error:
                 self.error_streak += 1
                 self.next_request_time += min(2 ** self.error_streak,
                                               self.max_retry_wait)
@@ -218,7 +237,7 @@ class ComputeNodeUpdateActor(config.actor_class):
             else:
                 self.error_streak = 0
                 return result
-        return wrapper
+        return throttle_wrapper
 
     @_throttle_errors
     def sync_node(self, cloud_node, arvados_node):
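
Given the new flag, a caller picks cancellability per shutdown. A sketch with illustrative actor-startup arguments (pykka's Actor.start passes them through to __init__):

    # Demand-driven shutdown: may be abandoned if the node becomes
    # eligible for work again.
    ComputeNodeShutdownActor.start(
        timer_actor, cloud_client, node_monitor, cancellable=True)

    # Shutting down a misbehaving node: runs to completion regardless
    # of eligibility.
    ComputeNodeShutdownActor.start(
        timer_actor, cloud_client, node_monitor, cancellable=False)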
index 27397e5d5015f7eca875520238066b2625149396..6eaa8b937b979939c584f9b31927442aa4461d18 100644 (file)
@@ -10,6 +10,8 @@ from . import \
 from . import ComputeNodeShutdownActor as ShutdownActorBase
 
 class ComputeNodeShutdownActor(ShutdownActorBase):
+    SLURM_END_STATES = frozenset(['down\n', 'down*\n', 'drain\n', 'fail\n'])
+
     def on_start(self):
         arv_node = self._monitor.arvados_node.get()
         if arv_node is None:
@@ -42,7 +44,7 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
     def await_slurm_drain(self):
         output = subprocess.check_output(
             ['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
-        if output == 'drain\n':
+        if output in self.SLURM_END_STATES:
             self._later.shutdown_node()
         else:
             self._timer.schedule(time.time() + 10,
index a20cfde37146a46f05438817aef14ac307565491..3a0c2063b426ccd759ad4dd8323847d51f72bce4 100644 (file)
@@ -2,6 +2,10 @@
 
 from __future__ import absolute_import, print_function
 
+import libcloud.common.types as cloud_types
+
+from ...config import NETWORK_ERRORS
+
 class BaseComputeNodeDriver(object):
     """Abstract base class for compute node drivers.
 
@@ -15,6 +19,8 @@ class BaseComputeNodeDriver(object):
     creation kwargs with information about the specific Arvados node
     record), sync_node, and node_start_time.
     """
+    CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError,)
+
     def __init__(self, auth_kwargs, list_kwargs, create_kwargs, driver_class):
         self.real = driver_class(**auth_kwargs)
         self.list_kwargs = list_kwargs
@@ -52,6 +58,12 @@ class BaseComputeNodeDriver(object):
         kwargs['size'] = size
         return self.real.create_node(**kwargs)
 
+    def post_create_node(self, cloud_node):
+        # ComputeNodeSetupActor calls this method after the cloud node is
+        # created.  Any setup tasks that need to happen afterward (e.g.,
+        # tagging) should be done in this method.
+        pass
+
     def sync_node(self, cloud_node, arvados_node):
         # When a compute node first pings the API server, the API server
         # will automatically assign some attributes on the corresponding
@@ -62,3 +74,11 @@ class BaseComputeNodeDriver(object):
     @classmethod
     def node_start_time(cls, node):
         raise NotImplementedError("BaseComputeNodeDriver.node_start_time")
+
+    @classmethod
+    def is_cloud_exception(cls, exception):
+        # libcloud compute drivers typically raise bare Exceptions to
+        # represent API errors.  Return True for any exception that is
+        # exactly an Exception, or a better-known higher-level exception.
+        return (isinstance(exception, cls.CLOUD_ERRORS) or
+                getattr(exception, '__class__', None) is Exception)
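
The point of the exact-class check is to treat libcloud's untyped failures as retryable cloud errors while still letting genuine code bugs (ValueError, KeyError, and other Exception subclasses) propagate. A self-contained sketch of the same classification, using a stand-in for LibcloudError:

    import ssl

    class FakeLibcloudError(Exception):
        pass  # stand-in for libcloud.common.types.LibcloudError

    CLOUD_ERRORS = (IOError, ssl.SSLError, FakeLibcloudError)

    def is_cloud_exception(exception):
        return (isinstance(exception, CLOUD_ERRORS) or
                getattr(exception, '__class__', None) is Exception)

    assert is_cloud_exception(Exception("bare libcloud API error"))
    assert is_cloud_exception(IOError("connection reset"))
    assert not is_cloud_exception(ValueError("code bug"))
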
index c0992f7b9635e2c47edd8e67cf5b887ba5692718..255a948a6c3aa0ee2c17ae1581685d282c341813 100644 (file)
@@ -79,8 +79,7 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         return 'auth', key
 
     def arvados_create_kwargs(self, arvados_node):
-        result = {'ex_metadata': self.tags.copy(),
-                  'name': arvados_node_fqdn(arvados_node)}
+        result = {'name': arvados_node_fqdn(arvados_node)}
         ping_secret = arvados_node['info'].get('ping_secret')
         if ping_secret is not None:
             ping_url = ('https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.
@@ -89,11 +88,12 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             result['ex_userdata'] = ping_url
         return result
 
+    def post_create_node(self, cloud_node):
+        self.real.ex_create_tags(cloud_node, self.tags)
+
     def sync_node(self, cloud_node, arvados_node):
-        metadata = self.arvados_create_kwargs(arvados_node)
-        tags = metadata['ex_metadata']
-        tags['Name'] = metadata['name']
-        self.real.ex_create_tags(cloud_node, tags)
+        self.real.ex_create_tags(cloud_node,
+                                 {'Name': arvados_node_fqdn(arvados_node)})
 
     @classmethod
     def node_start_time(cls, node):
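
The EC2 driver now tags in two phases: static tags from the configuration are applied once in post_create_node, and sync_node only refreshes the Name tag as the Arvados hostname changes. Judging from test_post_create_node_tags_from_list_kwargs later in this patch, the static tags come from 'tag_'-prefixed list_kwargs entries; a sketch of that split (the variable names here are illustrative):

    list_kwargs = {'tag_cluster': 'zzzzz', 'tag_owner': 'ops',
                   'instance-state-name': 'running'}
    # Entries prefixed 'tag_' become node tags; the rest stay search filters.
    tags = {key[4:]: value for key, value in list_kwargs.items()
            if key.startswith('tag_')}
    filters = {key: value for key, value in list_kwargs.items()
               if not key.startswith('tag_')}
    assert tags == {'cluster': 'zzzzz', 'owner': 'ops'}
    assert filters == {'instance-state-name': 'running'}
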
index 079e623c559e477ec37b0435a60f3574cdbefacb..b7ec1fc80d9a0211867b7d06e5dd8ffb272f1ff4 100644 (file)
@@ -6,10 +6,10 @@ import ConfigParser
 import importlib
 import logging
 import ssl
+import sys
 
 import arvados
 import httplib2
-import libcloud.common.types as cloud_types
 import pykka
 from apiclient import errors as apierror
 
@@ -18,7 +18,6 @@ from apiclient import errors as apierror
 # it's low-level, but unlikely to catch code bugs.
 NETWORK_ERRORS = (IOError, ssl.SSLError)
 ARVADOS_ERRORS = NETWORK_ERRORS + (apierror.Error,)
-CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError,)
 
 actor_class = pykka.ThreadingActor
 
@@ -42,6 +41,7 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
                        'poll_time': '60',
                        'max_poll_time': '300',
                        'poll_stale_after': '600',
+                       'boot_fail_after': str(sys.maxint),
                        'node_stale_after': str(60 * 60 * 2)},
             'Logging': {'file': '/dev/stderr',
                         'level': 'WARNING'},
index 9f22568faafbb1c45d9625816fb1c0027226882a..0e480786da369d3f844e8d0111835c389c5d03b4 100644 (file)
@@ -26,14 +26,16 @@ class _BaseNodeTracker(object):
         self.nodes = {}
         self.orphans = {}
 
-    def __getitem__(self, key):
-        return self.nodes[key]
-
-    def __len__(self):
-        return len(self.nodes)
+    # Proxy the methods listed below to self.nodes.
+    def _proxy_method(name):
+        method = getattr(dict, name)
+        @functools.wraps(method, ('__name__', '__doc__'))
+        def wrapper(self, *args, **kwargs):
+            return method(self.nodes, *args, **kwargs)
+        return wrapper
 
-    def get(self, key, default=None):
-        return self.nodes.get(key, default)
+    for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
+        locals()[_method_name] = _proxy_method(_method_name)
 
     def record_key(self, record):
         return self.item_key(getattr(record, self.RECORD_ATTR))
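
Rather than hand-writing each passthrough, the tracker now generates them at class-definition time. Assigning into locals() inside a class body works in CPython because the body executes in an ordinary namespace dict; a runnable sketch of just that mechanism:

    import functools

    class NodeTracker(object):
        def __init__(self):
            self.nodes = {}

        # Build a thin wrapper that forwards one dict method to self.nodes.
        def _proxy_method(name):
            method = getattr(dict, name)
            @functools.wraps(method, ('__name__', '__doc__'))
            def wrapper(self, *args, **kwargs):
                return method(self.nodes, *args, **kwargs)
            return wrapper

        # Runs while the class body executes: each name becomes a method.
        for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
            locals()[_method_name] = _proxy_method(_method_name)

    tracker = NodeTracker()
    tracker.nodes['a'] = 1
    assert 'a' in tracker and len(tracker) == 1 and tracker['a'] == 1
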
@@ -95,8 +97,10 @@ class NodeManagerDaemonActor(actor_class):
     def __init__(self, server_wishlist_actor, arvados_nodes_actor,
                  cloud_nodes_actor, cloud_update_actor, timer_actor,
                  arvados_factory, cloud_factory,
-                 shutdown_windows, min_nodes, max_nodes,
-                 poll_stale_after=600, node_stale_after=7200,
+                 shutdown_windows, min_size, min_nodes, max_nodes,
+                 poll_stale_after=600,
+                 boot_fail_after=1800,
+                 node_stale_after=7200,
                  node_setup_class=dispatch.ComputeNodeSetupActor,
                  node_shutdown_class=dispatch.ComputeNodeShutdownActor,
                  node_actor_class=dispatch.ComputeNodeMonitorActor):
@@ -112,9 +116,11 @@ class NodeManagerDaemonActor(actor_class):
         self._logger = logging.getLogger('arvnodeman.daemon')
         self._later = self.actor_ref.proxy()
         self.shutdown_windows = shutdown_windows
+        self.min_cloud_size = min_size
         self.min_nodes = min_nodes
         self.max_nodes = max_nodes
         self.poll_stale_after = poll_stale_after
+        self.boot_fail_after = boot_fail_after
         self.node_stale_after = node_stale_after
         self.last_polls = {}
         for poll_name in ['server_wishlist', 'arvados_nodes', 'cloud_nodes']:
@@ -174,6 +180,7 @@ class NodeManagerDaemonActor(actor_class):
                     break
         for key, record in self.cloud_nodes.orphans.iteritems():
             record.actor.stop()
+            record.cloud_node = None
             self.shutdowns.pop(key, None)
 
     def update_arvados_nodes(self, nodelist):
@@ -201,9 +208,12 @@ class NodeManagerDaemonActor(actor_class):
 
     def _nodes_wanted(self):
         up_count = self._nodes_up()
+        under_min = self.min_nodes - up_count
         over_max = up_count - self.max_nodes
         if over_max >= 0:
             return -over_max
+        elif under_min > 0:
+            return under_min
         else:
             up_count -= len(self.shutdowns) + self._nodes_busy()
             return len(self.last_wishlist) - up_count
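
The new under_min branch gives min_nodes priority over the wishlist: the daemon boots enough nodes to reach the floor even with an empty job queue, and only sizes against demand once the floor is met. The arithmetic, pulled out into a plain function for illustration:

    def nodes_wanted(up_count, busy, shutting_down, wishlist_len,
                     min_nodes, max_nodes):
        under_min = min_nodes - up_count
        over_max = up_count - max_nodes
        if over_max >= 0:
            return -over_max      # above the cap: negative means shut down
        elif under_min > 0:
            return under_min      # below the floor: boot up to min_nodes
        else:
            up_count -= shutting_down + busy
            return wishlist_len - up_count

    assert nodes_wanted(0, 0, 0, 0, min_nodes=2, max_nodes=8) == 2
    assert nodes_wanted(9, 0, 0, 0, min_nodes=0, max_nodes=8) == -1
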
@@ -248,7 +258,10 @@ class NodeManagerDaemonActor(actor_class):
         if nodes_wanted < 1:
             return None
         arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
-        cloud_size = self.last_wishlist[nodes_wanted - 1]
+        try:
+            cloud_size = self.last_wishlist[self._nodes_up()]
+        except IndexError:
+            cloud_size = self.min_cloud_size
         self._logger.info("Want %s more nodes.  Booting a %s node.",
                           nodes_wanted, cloud_size.name)
         new_setup = self._node_setup.start(
@@ -269,15 +282,15 @@ class NodeManagerDaemonActor(actor_class):
         return pykka.get_all([getattr(actor, name) for name in attr_names])
 
     def node_up(self, setup_proxy):
-        cloud_node, arvados_node = self._get_actor_attrs(
-            setup_proxy, 'cloud_node', 'arvados_node')
+        cloud_node = setup_proxy.cloud_node.get()
         del self.booting[setup_proxy.actor_ref.actor_urn]
         setup_proxy.stop()
         record = self.cloud_nodes.get(cloud_node.id)
         if record is None:
             record = self._new_node(cloud_node)
             self.booted[cloud_node.id] = record
-        self._pair_nodes(record, arvados_node)
+        self._timer.schedule(time.time() + self.boot_fail_after,
+                             self._later.shutdown_unpaired_node, cloud_node.id)
 
     @_check_poll_freshness
     def stop_booting_node(self):
@@ -292,19 +305,31 @@ class NodeManagerDaemonActor(actor_class):
                     self._later.stop_booting_node()
                 break
 
-    @_check_poll_freshness
-    def node_can_shutdown(self, node_actor):
-        if self._nodes_excess() < 1:
-            return None
+    def _begin_node_shutdown(self, node_actor, cancellable):
         cloud_node_id = node_actor.cloud_node.get().id
         if cloud_node_id in self.shutdowns:
             return None
         shutdown = self._node_shutdown.start(
             timer_actor=self._timer, cloud_client=self._new_cloud(),
-            node_monitor=node_actor.actor_ref).proxy()
+            node_monitor=node_actor.actor_ref, cancellable=cancellable).proxy()
         self.shutdowns[cloud_node_id] = shutdown
         shutdown.subscribe(self._later.node_finished_shutdown)
 
+    @_check_poll_freshness
+    def node_can_shutdown(self, node_actor):
+        if self._nodes_excess() > 0:
+            self._begin_node_shutdown(node_actor, cancellable=True)
+
+    def shutdown_unpaired_node(self, cloud_node_id):
+        for record_dict in [self.cloud_nodes, self.booted]:
+            if cloud_node_id in record_dict:
+                record = record_dict[cloud_node_id]
+                break
+        else:
+            return None
+        if record.arvados_node is None:
+            self._begin_node_shutdown(record.actor, cancellable=False)
+
     def node_finished_shutdown(self, shutdown_actor):
         success, cloud_node = self._get_actor_attrs(shutdown_actor, 'success',
                                                     'cloud_node')
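
node_up no longer pairs the node itself; it arms a one-shot timer, and shutdown_unpaired_node later checks whether pairing ever happened, requesting an uncancellable shutdown if not. A reduced sketch of that watchdog wiring, assuming only that the timer exposes schedule(when, callback, *args) like the real TimedCallBackActor (or MockTimer in the tests):

    import time

    class BootWatchdog(object):
        def __init__(self, timer, boot_fail_after=1800):
            self.timer = timer
            self.boot_fail_after = boot_fail_after
            self.paired = set()

        def node_up(self, cloud_node_id):
            # Arm the check; if the node pairs first, it becomes a no-op.
            self.timer.schedule(time.time() + self.boot_fail_after,
                                self.check_unpaired, cloud_node_id)

        def pair(self, cloud_node_id):
            self.paired.add(cloud_node_id)

        def check_unpaired(self, cloud_node_id):
            if cloud_node_id not in self.paired:
                print("boot failure: shutting down %s" % cloud_node_id)
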
index 239934f52911782730840d3d1aa5042866345451..06f66b71c244f5662fb0c9871f7c92b0603f1617 100644 (file)
@@ -38,11 +38,10 @@ class ServerCalculator(object):
             return True
 
 
-    def __init__(self, server_list, min_nodes=0, max_nodes=None):
+    def __init__(self, server_list, max_nodes=None):
         self.cloud_sizes = [self.CloudSizeWrapper(s, **kws)
                             for s, kws in server_list]
         self.cloud_sizes.sort(key=lambda s: s.price)
-        self.min_nodes = min_nodes
         self.max_nodes = max_nodes or float('inf')
         self.logger = logging.getLogger('arvnodeman.jobqueue')
         self.logged_jobs = set()
@@ -79,15 +78,11 @@ class ServerCalculator(object):
             elif (want_count <= self.max_nodes):
                 servers.extend([cloud_size.real] * max(1, want_count))
         self.logged_jobs.intersection_update(seen_jobs)
-
-        # Make sure the server queue has at least enough entries to
-        # satisfy min_nodes.
-        node_shortfall = self.min_nodes - len(servers)
-        if node_shortfall > 0:
-            basic_node = self.cloud_size_for_constraints({})
-            servers.extend([basic_node.real] * node_shortfall)
         return servers
 
+    def cheapest_size(self):
+        return self.cloud_sizes[0]
+
 
 class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
     """Actor to generate server wishlists from the job queue.
index 9f5e1627eaa1b1d808c87190dc1c40c2c15f5d3b..880158234da668ca212604252a070b8db30cddbd 100644 (file)
@@ -57,25 +57,22 @@ def setup_logging(path, level, **sublevels):
         sublogger = logging.getLogger(logger_name)
         sublogger.setLevel(sublevel)
 
-def launch_pollers(config):
-    cloud_client = config.new_cloud_client()
-    arvados_client = config.new_arvados_client()
-    cloud_size_list = config.node_sizes(cloud_client.list_sizes())
+def build_server_calculator(config):
+    cloud_size_list = config.node_sizes(config.new_cloud_client().list_sizes())
     if not cloud_size_list:
         abort("No valid node sizes configured")
+    return ServerCalculator(cloud_size_list,
+                            config.getint('Daemon', 'max_nodes'))
 
-    server_calculator = ServerCalculator(
-        cloud_size_list,
-        config.getint('Daemon', 'min_nodes'),
-        config.getint('Daemon', 'max_nodes'))
+def launch_pollers(config, server_calculator):
     poll_time = config.getint('Daemon', 'poll_time')
     max_poll_time = config.getint('Daemon', 'max_poll_time')
 
     timer = TimedCallBackActor.start(poll_time / 10.0).proxy()
     cloud_node_poller = CloudNodeListMonitorActor.start(
-        cloud_client, timer, poll_time, max_poll_time).proxy()
+        config.new_cloud_client(), timer, poll_time, max_poll_time).proxy()
     arvados_node_poller = ArvadosNodeListMonitorActor.start(
-        arvados_client, timer, poll_time, max_poll_time).proxy()
+        config.new_arvados_client(), timer, poll_time, max_poll_time).proxy()
     job_queue_poller = JobQueueMonitorActor.start(
         config.new_arvados_client(), timer, server_calculator,
         poll_time, max_poll_time).proxy()
@@ -108,17 +105,20 @@ def main(args=None):
     setup_logging(config.get('Logging', 'file'), **config.log_levels())
     node_setup, node_shutdown, node_update, node_monitor = \
         config.dispatch_classes()
+    server_calculator = build_server_calculator(config)
     timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
-        launch_pollers(config)
+        launch_pollers(config, server_calculator)
     cloud_node_updater = node_update.start(config.new_cloud_client).proxy()
     node_daemon = NodeManagerDaemonActor.start(
         job_queue_poller, arvados_node_poller, cloud_node_poller,
         cloud_node_updater, timer,
         config.new_arvados_client, config.new_cloud_client,
         config.shutdown_windows(),
+        server_calculator.cheapest_size(),
         config.getint('Daemon', 'min_nodes'),
         config.getint('Daemon', 'max_nodes'),
         config.getint('Daemon', 'poll_stale_after'),
+        config.getint('Daemon', 'boot_fail_after'),
         config.getint('Daemon', 'node_stale_after'),
         node_setup, node_shutdown, node_monitor).proxy()
 
index 7ddfb7ca33e8b97f8132117c66789529415d8b90..83dd93f077bfb504a8f41b6f0addd93c717ac7da 100644 (file)
@@ -11,10 +11,11 @@ class ArvadosNodeListMonitorActor(clientactor.RemotePollLoopActor):
     This actor regularly polls the list of Arvados node records, and
     sends it to subscribers.
     """
-
-    CLIENT_ERRORS = config.ARVADOS_ERRORS
     LOGGER_NAME = 'arvnodeman.arvados_nodes'
 
+    def is_common_error(self, exception):
+        return isinstance(exception, config.ARVADOS_ERRORS)
+
     def _item_key(self, node):
         return node['uuid']
 
@@ -28,10 +29,11 @@ class CloudNodeListMonitorActor(clientactor.RemotePollLoopActor):
     This actor regularly polls the cloud to get a list of running compute
     nodes, and sends it to subscribers.
     """
-
-    CLIENT_ERRORS = config.CLOUD_ERRORS
     LOGGER_NAME = 'arvnodeman.cloud_nodes'
 
+    def is_common_error(self, exception):
+        return self._client.is_cloud_exception(exception)
+
     def _item_key(self, node):
         return node.id
 
index 0f9cacad55601b88316e7513155ab3790900559c..024ed2b59b3089676b520aec5212caa1caa470ba 100644 (file)
@@ -27,6 +27,12 @@ max_poll_time = 300
 # information is too outdated.
 poll_stale_after = 600
 
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
 # "Node stale time" affects two related behaviors.
 # 1. If a compute node has been running for at least this long, but it
 # isn't paired with an Arvados node, do not shut it down, but leave it alone.
index 44a35f592a47dd9ea418583ba1a121218d4c29ac..5fc42941152464d909dd9b7b789bf51b8b9de035 100644 (file)
@@ -5,19 +5,27 @@ import subprocess
 import time
 
 from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
 
 SETUP_DIR = os.path.dirname(__file__) or "."
-cmd_opts = {'egg_info': {}}
-try:
-    git_tags = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct %h', SETUP_DIR]).split()
-    assert len(git_tags) == 2
-except (AssertionError, OSError, subprocess.CalledProcessError):
-    pass
-else:
-    git_tags[0] = time.strftime('%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-    cmd_opts['egg_info']['tag_build'] = '.{}.{}'.format(*git_tags)
+
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
+
 
 setup(name='arvados-node-manager',
       version='0.1',
@@ -38,5 +46,5 @@ setup(name='arvados-node-manager',
       test_suite='tests',
       tests_require=['mock>=1.0'],
       zip_safe=False,
-      options=cmd_opts,
+      cmdclass={'egg_info': TagBuildWithCommit},
       )
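
Moving version tagging into an egg_info subclass defers the git lookup to build time, and because tags() only fills tag_build when it is unset, an explicit "egg_info -b" (or a tag baked into a source package) still wins. The resulting tag looks like this, assuming Python 2 and a git checkout:

    import subprocess
    import time

    stamp, commit = subprocess.check_output(
        ['git', 'log', '--first-parent', '--max-count=1',
         '--format=format:%ct %h', '.']).split()
    tag_build = '.{}+{}'.format(
        time.strftime('%Y%m%d%H%M%S', time.gmtime(int(stamp))), commit)
    print(tag_build)  # e.g. '.20150106134508+1a2b3c4'
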
index 7f6988dbe9df21dce1797698156f5a6e6390e517..a1dfde30e1c90eeaeeb04e2542bd53866d4eff5f 100644 (file)
@@ -14,7 +14,7 @@ import arvnodeman.computenode.dispatch as dispatch
 from . import testutil
 
 class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
-    def make_mocks(self, arvados_effect=None, cloud_effect=None):
+    def make_mocks(self, arvados_effect=None):
         if arvados_effect is None:
             arvados_effect = [testutil.arvados_node_mock()]
         self.arvados_effect = arvados_effect
@@ -48,14 +48,33 @@ class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
         self.assertEqual(self.cloud_client.create_node(),
                          self.setup_actor.cloud_node.get(self.TIMEOUT))
 
-    def test_failed_calls_retried(self):
+    def test_failed_arvados_calls_retried(self):
         self.make_mocks([
                 arverror.ApiError(httplib2.Response({'status': '500'}), ""),
                 testutil.arvados_node_mock(),
                 ])
         self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'arvados_node')
+
+    def test_failed_cloud_calls_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            Exception("test cloud creation error"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
         self.wait_for_assignment(self.setup_actor, 'cloud_node')
 
+    def test_failed_post_create_retried(self):
+        self.make_mocks()
+        self.cloud_client.post_create_node.side_effect = [
+            Exception("test cloud post-create error"), None]
+        self.make_actor()
+        done = self.FUTURE_CLASS()
+        self.setup_actor.subscribe(done.set)
+        done.get(self.TIMEOUT)
+        self.assertEqual(2, self.cloud_client.post_create_node.call_count)
+
     def test_stop_when_no_cloud_node(self):
         self.make_mocks(
             arverror.ApiError(httplib2.Response({'status': '500'}), ""))
@@ -106,14 +125,14 @@ class ComputeNodeShutdownActorMixin(testutil.ActorTestMixin):
         self.cloud_node = cloud_node
         self.arvados_node = arvados_node
 
-    def make_actor(self):
+    def make_actor(self, cancellable=True):
         if not hasattr(self, 'timer'):
             self.make_mocks()
         monitor_actor = dispatch.ComputeNodeMonitorActor.start(
             self.cloud_node, time.time(), self.shutdowns, self.timer,
             self.updates, self.arvados_node)
         self.shutdown_actor = self.ACTOR_CLASS.start(
-            self.timer, self.cloud_client, monitor_actor).proxy()
+            self.timer, self.cloud_client, monitor_actor, cancellable).proxy()
         self.monitor_actor = monitor_actor.proxy()
 
     def check_success_flag(self, expected, allow_msg_count=1):
@@ -126,6 +145,15 @@ class ComputeNodeShutdownActorMixin(testutil.ActorTestMixin):
         else:
             self.fail("success flag {} is not {}".format(last_flag, expected))
 
+    def test_uncancellable_shutdown(self, *mocks):
+        self.make_mocks(shutdown_open=False)
+        self.cloud_client.destroy_node.return_value = False
+        self.make_actor(cancellable=False)
+        self.check_success_flag(None, 0)
+        self.shutdowns._set_state(True, 600)
+        self.cloud_client.destroy_node.return_value = True
+        self.check_success_flag(True)
+
 
 class ComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
                                        unittest.TestCase):
index ccac8b2449b1c54abbda22e4323256ec3de6834a..93cc60d4e8c93bb2d124b1bbe05ca722c3736541 100644 (file)
@@ -22,21 +22,31 @@ class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
         for s in args:
             self.assertIn(s, slurm_cmd)
 
-    def check_success_after_reset(self, proc_mock):
+    def check_success_after_reset(self, proc_mock, end_state='drain\n'):
         self.make_mocks(arvados_node=testutil.arvados_node_mock(63))
         self.make_actor()
         self.check_success_flag(None, 0)
         self.check_success_flag(None, 0)
         # Order is critical here: if the mock gets called when no return value
         # or side effect is set, we may invoke a real subprocess.
-        proc_mock.return_value = 'drain\n'
+        proc_mock.return_value = end_state
         proc_mock.side_effect = None
         self.check_success_flag(True, 3)
         self.check_slurm_got_args(proc_mock, 'compute63')
 
-    def test_wait_for_drained_state(self, proc_mock):
-        proc_mock.return_value = 'drng\n'
-        self.check_success_after_reset(proc_mock)
+    def make_wait_state_test(start_state='drng\n', end_state='drain\n'):
+        def test(self, proc_mock):
+            proc_mock.return_value = start_state
+            self.check_success_after_reset(proc_mock, end_state)
+        return test
+
+    for wait_state in ['alloc\n', 'drng\n', 'idle*\n']:
+        locals()['test_wait_while_' + wait_state.strip()
+                 ] = make_wait_state_test(start_state=wait_state)
+
+    for end_state in ['down\n', 'down*\n', 'drain\n', 'fail\n']:
+        locals()['test_wait_until_' + end_state.strip()
+                 ] = make_wait_state_test(end_state=end_state)
 
     def test_retry_failed_slurm_calls(self, proc_mock):
         proc_mock.side_effect = subprocess.CalledProcessError(1, ["mock"])
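
The wait-state tests use the same class-body trick as the node tracker: a factory runs at definition time and drops one named test method per SLURM state into locals(). The mechanism in isolation, with an illustrative assertion:

    import unittest

    class WaitStateTestCase(unittest.TestCase):
        def make_state_test(state):
            # Each returned function closes over one state string.
            def test(self):
                self.assertTrue(state.endswith('\n'))
            return test

        for _state in ['down\n', 'drain\n', 'fail\n']:
            locals()['test_state_' + _state.strip()] = make_state_test(_state)
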
index fde103e10e606f68ca5e0b3ba262f0a350e6df64..fae63a5663d82035b43d82288de82ade2788f99b 100644 (file)
@@ -2,9 +2,11 @@
 
 from __future__ import absolute_import, print_function
 
+import ssl
 import time
 import unittest
 
+import libcloud.common.types as cloud_types
 import mock
 
 import arvnodeman.computenode.driver.ec2 as ec2
@@ -55,30 +57,41 @@ class EC2ComputeNodeDriverTestCase(unittest.TestCase):
                       create_method.call_args[1].get('ex_userdata',
                                                      'arg missing'))
 
-    def test_tags_created_from_arvados_node(self):
+    def test_hostname_from_arvados_node(self):
         arv_node = testutil.arvados_node_mock(8)
-        cloud_node = testutil.cloud_node_mock(8)
-        driver = self.new_driver(list_kwargs={'tag:list': 'test'})
-        self.assertEqual({'ex_metadata': {'list': 'test'},
-                          'name': 'compute8.zzzzz.arvadosapi.com'},
-                         driver.arvados_create_kwargs(arv_node))
+        driver = self.new_driver()
+        self.assertEqual('compute8.zzzzz.arvadosapi.com',
+                         driver.arvados_create_kwargs(arv_node)['name'])
 
-    def test_tags_set_default_hostname_from_new_arvados_node(self):
+    def test_default_hostname_from_new_arvados_node(self):
         arv_node = testutil.arvados_node_mock(hostname=None)
         driver = self.new_driver()
-        actual = driver.arvados_create_kwargs(arv_node)
         self.assertEqual('dynamic.compute.zzzzz.arvadosapi.com',
-                         actual['name'])
+                         driver.arvados_create_kwargs(arv_node)['name'])
+
+    def check_node_tagged(self, cloud_node, expected_tags):
+        tag_mock = self.driver_mock().ex_create_tags
+        self.assertTrue(tag_mock.called)
+        self.assertIs(cloud_node, tag_mock.call_args[0][0])
+        self.assertEqual(expected_tags, tag_mock.call_args[0][1])
+
+    def test_post_create_node_tags_from_list_kwargs(self):
+        expect_tags = {'key1': 'test value 1', 'key2': 'test value 2'}
+        list_kwargs = {('tag_' + key): value
+                       for key, value in expect_tags.iteritems()}
+        list_kwargs['instance-state-name'] = 'running'
+        cloud_node = testutil.cloud_node_mock()
+        driver = self.new_driver(list_kwargs=list_kwargs)
+        driver.post_create_node(cloud_node)
+        self.check_node_tagged(cloud_node, expect_tags)
 
     def test_sync_node(self):
         arv_node = testutil.arvados_node_mock(1)
         cloud_node = testutil.cloud_node_mock(2)
         driver = self.new_driver()
         driver.sync_node(cloud_node, arv_node)
-        tag_mock = self.driver_mock().ex_create_tags
-        self.assertTrue(tag_mock.called)
-        self.assertEqual('compute1.zzzzz.arvadosapi.com',
-                         tag_mock.call_args[0][1].get('Name', 'no name'))
+        self.check_node_tagged(cloud_node,
+                               {'Name': 'compute1.zzzzz.arvadosapi.com'})
 
     def test_node_create_time(self):
         refsecs = int(time.time())
@@ -87,3 +100,16 @@ class EC2ComputeNodeDriverTestCase(unittest.TestCase):
         node.extra = {'launch_time': time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
                                                    reftuple)}
         self.assertEqual(refsecs, ec2.ComputeNodeDriver.node_start_time(node))
+
+    def test_cloud_exceptions(self):
+        for error in [Exception("test exception"),
+                      IOError("test exception"),
+                      ssl.SSLError("test exception"),
+                      cloud_types.LibcloudError("test exception")]:
+            self.assertTrue(ec2.ComputeNodeDriver.is_cloud_exception(error),
+                            "{} not flagged as cloud exception".format(error))
+
+    def test_noncloud_exceptions(self):
+        self.assertFalse(
+            ec2.ComputeNodeDriver.is_cloud_exception(ValueError("test error")),
+            "ValueError flagged as cloud exception")
index 31a682ffd8b682be156ea71f563da39cc707ba3e..96fcde9524b910b56b45a2aea01bd3bdf06066cf 100644 (file)
@@ -14,22 +14,30 @@ from . import testutil
 
 class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                                      unittest.TestCase):
+    def new_setup_proxy(self):
+        # Make sure that every time the daemon starts a setup actor,
+        # it gets a new mock object back.
+        self.last_setup = mock.MagicMock(name='setup_proxy_mock')
+        return self.last_setup
+
     def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
-                    min_nodes=0, max_nodes=8):
+                    min_size=testutil.MockSize(1), min_nodes=0, max_nodes=8):
         for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
             setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
         self.arv_factory = mock.MagicMock(name='arvados_mock')
         self.cloud_factory = mock.MagicMock(name='cloud_mock')
         self.cloud_factory().node_start_time.return_value = time.time()
         self.cloud_updates = mock.MagicMock(name='updates_mock')
-        self.timer = testutil.MockTimer()
+        self.timer = testutil.MockTimer(deliver_immediately=False)
         self.node_setup = mock.MagicMock(name='setup_mock')
+        self.node_setup.start().proxy.side_effect = self.new_setup_proxy
+        self.node_setup.reset_mock()
         self.node_shutdown = mock.MagicMock(name='shutdown_mock')
         self.daemon = nmdaemon.NodeManagerDaemonActor.start(
             self.server_wishlist_poller, self.arvados_nodes_poller,
             self.cloud_nodes_poller, self.cloud_updates, self.timer,
             self.arv_factory, self.cloud_factory,
-            [54, 5, 1], min_nodes, max_nodes, 600, 3600,
+            [54, 5, 1], min_size, min_nodes, max_nodes, 600, 1800, 3600,
             self.node_setup, self.node_shutdown).proxy()
         if cloud_nodes is not None:
             self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
@@ -44,21 +52,29 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
     def alive_monitor_count(self):
         return sum(1 for actor in self.monitor_list() if actor.is_alive())
 
+    def assertShutdownCancellable(self, expected=True):
+        self.assertTrue(self.node_shutdown.start.called)
+        self.assertIs(expected,
+                      self.node_shutdown.start.call_args[1]['cancellable'],
+                      "ComputeNodeShutdownActor incorrectly cancellable")
+
     def test_easy_node_creation(self):
         size = testutil.MockSize(1)
         self.make_daemon(want_sizes=[size])
         self.stop_proxy(self.daemon)
         self.assertTrue(self.node_setup.start.called)
 
+    def check_monitors_arvados_nodes(self, *arv_nodes):
+        pairings = [monitor.proxy().arvados_node
+                    for monitor in self.monitor_list() if monitor.is_alive()]
+        self.assertItemsEqual(arv_nodes, pykka.get_all(pairings, self.TIMEOUT))
+
     def test_node_pairing(self):
         cloud_node = testutil.cloud_node_mock(1)
         arv_node = testutil.arvados_node_mock(1)
         self.make_daemon([cloud_node], [arv_node])
         self.stop_proxy(self.daemon)
-        self.assertEqual(1, self.alive_monitor_count())
-        self.assertIs(
-            self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
-            arv_node)
+        self.check_monitors_arvados_nodes(arv_node)
 
     def test_node_pairing_after_arvados_update(self):
         cloud_node = testutil.cloud_node_mock(2)
@@ -67,21 +83,23 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         arv_node = testutil.arvados_node_mock(2)
         self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
-        self.assertEqual(1, self.alive_monitor_count())
-        self.assertIs(
-            self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
-            arv_node)
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_arvados_node_un_and_re_paired(self):
+        arv_node = testutil.arvados_node_mock(3)
+        self.make_daemon([testutil.cloud_node_mock(3)], [arv_node])
+        self.check_monitors_arvados_nodes(arv_node)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.assertEqual(0, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([testutil.cloud_node_mock(3)])
+        self.stop_proxy(self.daemon)
+        self.check_monitors_arvados_nodes(arv_node)
 
     def test_old_arvados_node_not_double_assigned(self):
         arv_node = testutil.arvados_node_mock(3, age=9000)
         size = testutil.MockSize(3)
         self.make_daemon(arvados_nodes=[arv_node])
-        setup_ref = self.node_setup.start().proxy().actor_ref
-        setup_ref.actor_urn = 0
-        self.node_setup.start.reset_mock()
         self.daemon.update_server_wishlist([size]).get(self.TIMEOUT)
-        self.daemon.max_nodes.get(self.TIMEOUT)
-        setup_ref.actor_urn += 1
         self.daemon.update_server_wishlist([size, size]).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
         used_nodes = [call[1].get('arvados_node')
@@ -114,6 +132,26 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.stop_proxy(self.daemon)
         self.assertTrue(self.node_setup.start.called)
 
+    def test_boot_new_node_below_min_nodes(self):
+        min_size = testutil.MockSize(1)
+        wish_size = testutil.MockSize(3)
+        self.make_daemon([], [], None, min_size=min_size, min_nodes=2)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual([wish_size, min_size],
+                         [call[1].get('cloud_size')
+                          for call in self.node_setup.start.call_args_list])
+
+    def test_no_new_node_when_ge_min_nodes_busy(self):
+        cloud_nodes = [testutil.cloud_node_mock(n) for n in range(1, 4)]
+        arv_nodes = [testutil.arvados_node_mock(n, job_uuid=True)
+                     for n in range(1, 4)]
+        self.make_daemon(cloud_nodes, arv_nodes, [], min_nodes=2)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(0, self.node_setup.start.call_count)
+
     def test_no_new_node_when_max_nodes_busy(self):
         self.make_daemon([testutil.cloud_node_mock(3)],
                          [testutil.arvados_node_mock(3, job_uuid=True)],
@@ -122,14 +160,6 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.stop_proxy(self.daemon)
         self.assertFalse(self.node_setup.start.called)
 
-    def mock_setup_actor(self, cloud_node, arv_node):
-        setup = self.node_setup.start().proxy()
-        self.node_setup.reset_mock()
-        setup.actor_urn = cloud_node.id
-        setup.cloud_node.get.return_value = cloud_node
-        setup.arvados_node.get.return_value = arv_node
-        return setup
-
     def start_node_boot(self, cloud_node=None, arv_node=None, id_num=1):
         if cloud_node is None:
             cloud_node = testutil.cloud_node_mock(id_num)
@@ -138,7 +168,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.make_daemon(want_sizes=[testutil.MockSize(id_num)])
         self.daemon.max_nodes.get(self.TIMEOUT)
         self.assertEqual(1, self.node_setup.start.call_count)
-        return self.mock_setup_actor(cloud_node, arv_node)
+        self.last_setup.cloud_node.get.return_value = cloud_node
+        self.last_setup.arvados_node.get.return_value = arv_node
+        return self.last_setup
 
     def test_no_duplication_when_booting_node_listed_fast(self):
         # Test that we don't start two ComputeNodeMonitorActors when
@@ -173,8 +205,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.daemon.update_server_wishlist(
             [testutil.MockSize(1)]).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
-        self.assertFalse(self.node_setup.start.called,
-                         "daemon did not count booted node toward wishlist")
+        self.assertEqual(1, self.node_setup.start.call_count)
 
     def test_booted_node_can_shutdown(self):
         setup = self.start_node_boot()
@@ -195,8 +226,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         monitor = self.monitor_list()[0].proxy()
         self.daemon.update_server_wishlist([])
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
-        self.assertTrue(self.node_shutdown.start.called,
-                        "daemon did not shut down booted node on offer")
+        self.assertShutdownCancellable(True)
         shutdown = self.node_shutdown.start().proxy()
         shutdown.cloud_node.get.return_value = cloud_node
         self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
@@ -210,12 +240,42 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.assertTrue(self.node_setup.start.called,
                         "second node not started after booted node stopped")
 
+    def test_booted_node_shut_down_when_never_listed(self):
+        setup = self.start_node_boot()
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.assertFalse(self.node_shutdown.start.called)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertShutdownCancellable(False)
+
+    def test_booted_node_shut_down_when_never_paired(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        setup = self.start_node_boot(cloud_node)
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertShutdownCancellable(False)
+
+    def test_node_that_pairs_not_considered_failed_boot(self):
+        cloud_node = testutil.cloud_node_mock(3)
+        arv_node = testutil.arvados_node_mock(3)
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
     def test_booting_nodes_shut_down(self):
         self.make_daemon(want_sizes=[testutil.MockSize(1)])
         self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
-        self.assertTrue(
-            self.node_setup.start().proxy().stop_if_no_cloud_node.called)
+        self.assertTrue(self.last_setup.stop_if_no_cloud_node.called)
 
     def test_shutdown_declined_at_wishlist_capacity(self):
         cloud_node = testutil.cloud_node_mock(1)
@@ -317,6 +377,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.assertTrue(new_node.stop_if_no_cloud_node.called)
         self.daemon.node_up(new_node).get(self.TIMEOUT)
         self.assertTrue(new_node.stop.called)
+        self.timer.deliver()
         self.assertTrue(
             self.daemon.actor_ref.actor_stopped.wait(self.TIMEOUT))
 
@@ -325,5 +386,6 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.make_daemon(want_sizes=[size])
         self.daemon.shutdown().get(self.TIMEOUT)
         self.daemon.update_server_wishlist([size] * 2).get(self.TIMEOUT)
+        self.timer.deliver()
         self.stop_proxy(self.daemon)
         self.assertEqual(1, self.node_setup.start.call_count)
index ae5bf1e90872371671a784a44c8e40f62954487e..4c97aed8b109a576843105bad7cf7f62b14426ce 100644 (file)
@@ -48,29 +48,15 @@ class ServerCalculatorTestCase(unittest.TestCase):
                                   {'min_scratch_mb_per_node': 200})
         self.assertEqual(6, len(servlist))
 
-    def test_server_calc_min_nodes_0_jobs(self):
-        servcalc = self.make_calculator([1], min_nodes=3, max_nodes=9)
-        servlist = self.calculate(servcalc, {})
-        self.assertEqual(3, len(servlist))
-
-    def test_server_calc_min_nodes_1_job(self):
-        servcalc = self.make_calculator([1], min_nodes=3, max_nodes=9)
-        servlist = self.calculate(servcalc, {'min_nodes': 1})
-        self.assertEqual(3, len(servlist))
-
-    def test_server_calc_more_jobs_than_min_nodes(self):
-        servcalc = self.make_calculator([1], min_nodes=2, max_nodes=9)
-        servlist = self.calculate(servcalc,
-                                  {'min_nodes': 1},
-                                  {'min_nodes': 1},
-                                  {'min_nodes': 1})
-        self.assertEqual(3, len(servlist))
-
     def test_job_requesting_max_nodes_accepted(self):
         servcalc = self.make_calculator([1], max_nodes=4)
         servlist = self.calculate(servcalc, {'min_nodes': 4})
         self.assertEqual(4, len(servlist))
 
+    def test_cheapest_size(self):
+        servcalc = self.make_calculator([2, 4, 1, 3])
+        self.assertEqual(testutil.MockSize(1), servcalc.cheapest_size())
+
 
 class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
                                    unittest.TestCase):
index 56f22c8e08fc66f69c90422e30bb116b372dfbf6..30808ac73816e9056d6ee8c91025305e7570520e 100644 (file)
@@ -2,6 +2,7 @@
 
 from __future__ import absolute_import, print_function
 
+import threading
 import time
 
 import mock
@@ -62,8 +63,23 @@ class MockSize(object):
 
 
 class MockTimer(object):
+    def __init__(self, deliver_immediately=True):
+        self.deliver_immediately = deliver_immediately
+        self.messages = []
+        self.lock = threading.Lock()
+
+    def deliver(self):
+        with self.lock:
+            to_deliver = self.messages
+            self.messages = []
+        for callback, args, kwargs in to_deliver:
+            callback(*args, **kwargs)
+
     def schedule(self, want_time, callback, *args, **kwargs):
-        return callback(*args, **kwargs)
+        with self.lock:
+            self.messages.append((callback, args, kwargs))
+        if self.deliver_immediately:
+            self.deliver()
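
With deliver_immediately=False, scheduled callbacks queue up until the test explicitly calls deliver(); this is how the daemon tests above make boot_fail_after elapse at a controlled point instead of firing as soon as it is scheduled. Typical usage:

    timer = MockTimer(deliver_immediately=False)
    fired = []
    timer.schedule(0, fired.append, 'boot-fail check')
    assert fired == []                   # held until the test is ready
    timer.deliver()
    assert fired == ['boot-fail check']
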
 
 
 class ActorTestMixin(object):