9701: Merge branch '9463-change-arvput-use-collection-class' into 9701-collection...
author    Lucas Di Pentima <lucas@curoverse.com>
          Mon, 3 Oct 2016 19:44:31 +0000 (16:44 -0300)
committer Lucas Di Pentima <lucas@curoverse.com>
          Mon, 3 Oct 2016 19:44:31 +0000 (16:44 -0300)
437 files changed:
README.md
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/infinite_scroll.js
apps/workbench/app/assets/javascripts/pipeline_instances.js
apps/workbench/app/assets/javascripts/work_unit_log.js [new file with mode: 0644]
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/collections_controller.rb
apps/workbench/app/controllers/container_requests_controller.rb
apps/workbench/app/controllers/containers_controller.rb
apps/workbench/app/controllers/jobs_controller.rb
apps/workbench/app/controllers/projects_controller.rb
apps/workbench/app/controllers/work_unit_templates_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/work_units_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/workflows_controller.rb [new file with mode: 0644]
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/models/arvados_api_client.rb
apps/workbench/app/models/container_request.rb
apps/workbench/app/models/container_work_unit.rb
apps/workbench/app/models/job.rb
apps/workbench/app/models/pipeline_instance.rb
apps/workbench/app/models/pipeline_instance_work_unit.rb
apps/workbench/app/models/proxy_work_unit.rb
apps/workbench/app/models/work_unit.rb
apps/workbench/app/models/workflow.rb [new file with mode: 0644]
apps/workbench/app/views/application/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_delete_object_button.html.erb
apps/workbench/app/views/application/_name_and_description.html.erb
apps/workbench/app/views/application/_object_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_object_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_files.html.erb
apps/workbench/app/views/collections/show.html.erb
apps/workbench/app/views/container_requests/_name_and_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_inputs.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_status.html.erb
apps/workbench/app/views/containers/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/containers/_show_status.html.erb
apps/workbench/app/views/jobs/_show_status.html.erb
apps/workbench/app/views/pipeline_instances/_show_components.html.erb
apps/workbench/app/views/projects/_show_dashboard.html.erb
apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb [deleted file]
apps/workbench/app/views/projects/_show_pipeline_templates.html.erb
apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/show.html.erb
apps/workbench/app/views/users/profile.html.erb
apps/workbench/app/views/work_unit/_show_component.html.erb [deleted file]
apps/workbench/app/views/work_unit/_show_status.html.erb [deleted file]
apps/workbench/app/views/work_units/_component_detail.html.erb [moved from apps/workbench/app/views/work_unit/_component_detail.html.erb with 97% similarity]
apps/workbench/app/views/work_units/_progress.html.erb [moved from apps/workbench/app/views/work_unit/_progress.html.erb with 100% similarity]
apps/workbench/app/views/work_units/_show_all_processes.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_child.html.erb [moved from apps/workbench/app/views/work_unit/_show_child.html.erb with 87% similarity]
apps/workbench/app/views/work_units/_show_component.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_output.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_outputs.html.erb [moved from apps/workbench/app/views/work_unit/_show_outputs.html.erb with 100% similarity]
apps/workbench/app/views/work_units/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/workflows/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/config/application.default.yml
apps/workbench/config/initializers/lograge.rb [new file with mode: 0644]
apps/workbench/config/initializers/time_format.rb [new file with mode: 0644]
apps/workbench/config/load_config.rb
apps/workbench/config/routes.rb
apps/workbench/test/controllers/application_controller_test.rb
apps/workbench/test/controllers/collections_controller_test.rb
apps/workbench/test/controllers/container_requests_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/containers_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/controllers/work_units_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/workflows_controller_test.rb [new file with mode: 0644]
apps/workbench/test/helpers/fake_websocket_helper.rb [new file with mode: 0644]
apps/workbench/test/integration/anonymous_access_test.rb
apps/workbench/test/integration/application_layout_test.rb
apps/workbench/test/integration/container_requests_test.rb [new file with mode: 0644]
apps/workbench/test/integration/download_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/integration/projects_test.rb
apps/workbench/test/integration/user_profile_test.rb
apps/workbench/test/integration/websockets_test.rb
apps/workbench/test/integration/work_units_test.rb [new file with mode: 0644]
apps/workbench/test/integration_helper.rb
apps/workbench/test/integration_performance/collections_perf_test.rb
apps/workbench/test/support/fake_websocket.js [new file with mode: 0644]
apps/workbench/test/test_helper.rb
apps/workbench/test/unit/work_unit_test.rb
build/README [new file with mode: 0644]
build/go-python-package-scripts/postinst [new file with mode: 0755]
build/go-python-package-scripts/prerm [new file with mode: 0755]
build/package-build-dockerfiles/Makefile
build/package-build-dockerfiles/centos6/Dockerfile
build/package-build-dockerfiles/centos7/Dockerfile
build/package-build-dockerfiles/debian7/Dockerfile
build/package-build-dockerfiles/debian8/Dockerfile
build/package-build-dockerfiles/ubuntu1204/Dockerfile
build/package-build-dockerfiles/ubuntu1404/Dockerfile
build/package-test-dockerfiles/centos6/Dockerfile
build/run-build-docker-images.sh
build/run-build-docker-jobs-image.sh
build/run-build-packages.sh
build/run-library.sh
build/run-tests.sh
crunch_scripts/cwl-runner
doc/_config.yml
doc/_includes/_container_runtime_constraints.liquid [new file with mode: 0644]
doc/_includes/_install_compute_docker.liquid [new file with mode: 0644]
doc/_includes/_install_compute_fuse.liquid [new file with mode: 0644]
doc/_includes/_install_docker_cleaner.liquid [new file with mode: 0644]
doc/_includes/_mount_types.liquid [new file with mode: 0644]
doc/_includes/_pipeline_deprecation_notice.liquid [new file with mode: 0644]
doc/api/methods/container_requests.html.textile.liquid [new file with mode: 0644]
doc/api/methods/containers.html.textile.liquid [new file with mode: 0644]
doc/api/methods/groups.html.textile.liquid
doc/api/methods/workflows.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Container.html.textile.liquid [new file with mode: 0644]
doc/api/schema/ContainerRequest.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Workflow.html.textile.liquid [new file with mode: 0644]
doc/install/create-standard-objects.html.textile.liquid
doc/install/crunch2-slurm/install-compute-node.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-dispatch.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-test.html.textile.liquid [new file with mode: 0644]
doc/install/install-compute-node.html.textile.liquid
doc/install/install-keepstore.html.textile.liquid
doc/sdk/cli/install.html.textile.liquid
doc/sdk/java/index.html.textile.liquid
doc/sdk/python/sdk-python.html.textile.liquid
doc/user/cwl/bwa-mem/bwa-mem-input-local.yml [new file with mode: 0755]
doc/user/cwl/bwa-mem/bwa-mem-input.yml [new file with mode: 0755]
doc/user/cwl/bwa-mem/bwa-mem-template.yml [new file with mode: 0644]
doc/user/cwl/bwa-mem/bwa-mem.cwl [new file with mode: 0755]
doc/user/cwl/cwl-runner.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/cwl-style.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/vm-login-with-webshell.html.textile.liquid
doc/user/topics/arv-copy.html.textile.liquid
doc/user/topics/crunch-tools-overview.html.textile.liquid
doc/user/topics/running-pipeline-command-line.html.textile.liquid
doc/user/topics/tutorial-parallel.html.textile.liquid
doc/user/tutorials/running-external-program.html.textile.liquid
doc/user/tutorials/tutorial-firstscript.html.textile.liquid
doc/user/tutorials/tutorial-submit-job.html.textile.liquid
docker/.gitignore [deleted file]
docker/README.md [deleted file]
docker/api/.gitolite.rc [deleted file]
docker/api/Dockerfile [deleted file]
docker/api/apache2_foreground.sh [deleted file]
docker/api/apache2_vhost.in [deleted file]
docker/api/application.yml.in [deleted file]
docker/api/apt.arvados.org.list [deleted file]
docker/api/arvados-clients.yml.in [deleted file]
docker/api/config_databases.sh.in [deleted file]
docker/api/crunch-dispatch-run.sh [deleted file]
docker/api/database.yml.in [deleted file]
docker/api/keep_proxy.json [deleted file]
docker/api/keep_server_0.json.in [deleted file]
docker/api/keep_server_1.json.in [deleted file]
docker/api/munge.key [deleted file]
docker/api/munge.sh [deleted file]
docker/api/omniauth.rb.in [deleted file]
docker/api/passenger.sh [deleted file]
docker/api/setup-gitolite.sh.in [deleted file]
docker/api/setup.sh.in [deleted file]
docker/api/slurm.conf.in [deleted file]
docker/api/superuser_token.in [deleted file]
docker/api/supervisor.conf [deleted file]
docker/arv-web/Dockerfile [deleted file]
docker/arv-web/apache2_foreground.sh [deleted file]
docker/arv-web/apache2_vhost [deleted file]
docker/arvdock [deleted file]
docker/base/Dockerfile [deleted file]
docker/base/apt.arvados.org.list [deleted file]
docker/bcbio-nextgen/Dockerfile [deleted file]
docker/build.sh [deleted file]
docker/build_tools/Makefile [deleted file]
docker/build_tools/build.rb [deleted file]
docker/build_tools/config.rb [deleted file]
docker/compute/Dockerfile [deleted file]
docker/compute/fuse.conf [deleted file]
docker/compute/munge.sh [deleted file]
docker/compute/setup.sh.in [deleted file]
docker/compute/supervisor.conf [deleted file]
docker/compute/wrapdocker [deleted file]
docker/config.yml.example [deleted file]
docker/doc/Dockerfile [deleted file]
docker/doc/apache2_foreground.sh [deleted file]
docker/doc/apache2_vhost.in [deleted file]
docker/install_sdk.sh [deleted file]
docker/java-bwa-samtools/Dockerfile [deleted file]
docker/jobs/Dockerfile
docker/keep/Dockerfile [deleted file]
docker/keep/keep_signing_secret.in [deleted file]
docker/keep/run-keep.in [deleted file]
docker/keepproxy/Dockerfile [deleted file]
docker/keepproxy/run-keepproxy.in [deleted file]
docker/mkimage-debootstrap.sh [deleted file]
docker/passenger/Dockerfile [deleted file]
docker/postgresql/Dockerfile [deleted file]
docker/postgresql/pg_hba.conf [deleted file]
docker/postgresql/postgresql.conf [deleted file]
docker/shell/Dockerfile [deleted file]
docker/shell/fuse.conf [deleted file]
docker/shell/setup.sh.in [deleted file]
docker/shell/superuser_token.in [deleted file]
docker/shell/supervisor.conf [deleted file]
docker/slurm/Dockerfile [deleted file]
docker/slurm/munge.key [deleted file]
docker/slurm/slurm.conf.in [deleted file]
docker/slurm/supervisor.conf [deleted file]
docker/sso/Dockerfile [deleted file]
docker/sso/apache2_foreground.sh [deleted file]
docker/sso/apache2_vhost.in [deleted file]
docker/sso/application.yml.in [deleted file]
docker/sso/database.yml.in [deleted file]
docker/sso/seeds.rb.in [deleted file]
docker/workbench/.gitignore [deleted file]
docker/workbench/Dockerfile [deleted file]
docker/workbench/apache2_foreground.sh [deleted file]
docker/workbench/apache2_vhost.in [deleted file]
docker/workbench/application.yml.in [deleted file]
docker/workbench/production.rb.in [deleted file]
docker/workbench/secret_token.rb.in [deleted file]
docker/workbench/workbench_rails_env.in [deleted file]
lib/crunchstat/crunchstat.go [new file with mode: 0644]
lib/crunchstat/crunchstat_test.go [new file with mode: 0644]
sdk/cli/bin/crunch-job
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arv-cwl-schema.yml [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/arvjob.py
sdk/cwl/arvados_cwl/arvtool.py
sdk/cwl/arvados_cwl/arvworkflow.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/fsaccess.py
sdk/cwl/arvados_cwl/pathmapper.py
sdk/cwl/arvados_cwl/perf.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/test_with_arvbox.sh
sdk/cwl/tests/arvados-tests.sh [new file with mode: 0755]
sdk/cwl/tests/arvados-tests.yml [new file with mode: 0644]
sdk/cwl/tests/dir-job.yml [new file with mode: 0644]
sdk/cwl/tests/keep-dir-test-input.cwl [new file with mode: 0644]
sdk/cwl/tests/runner.sh [new file with mode: 0755]
sdk/cwl/tests/submit_test_job.json
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_job.py
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/testdir/a [new file with mode: 0644]
sdk/cwl/tests/testdir/b [new file with mode: 0644]
sdk/cwl/tests/testdir/c/d [new file with mode: 0644]
sdk/cwl/tests/tool/submit_tool.cwl
sdk/cwl/tests/wf/expect_packed.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/inputs_test.cwl
sdk/cwl/tests/wf/scatter2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/scatter2_subwf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_wf.cwl
sdk/go/arvados/client.go
sdk/go/arvados/client_test.go
sdk/go/arvados/container.go
sdk/go/arvados/duration.go
sdk/go/arvados/keep_service.go
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadosclient/pool.go
sdk/go/arvadostest/fixtures.go
sdk/go/arvadostest/run_servers.go
sdk/go/config/load.go [new file with mode: 0644]
sdk/go/crunchrunner/crunchrunner.go
sdk/go/crunchrunner/crunchrunner_test.go
sdk/go/dispatch/dispatch.go
sdk/go/keepclient/collectionreader.go
sdk/go/keepclient/collectionreader_test.go
sdk/go/keepclient/discover.go
sdk/go/keepclient/discover_test.go
sdk/go/keepclient/keepclient.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
sdk/go/logger/logger.go
sdk/go/util/util.go
sdk/java/ArvadosSDKJavaExample.java
sdk/java/ArvadosSDKJavaExampleWithPrompt.java
sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java
sdk/java/src/test/resources/first_pipeline.json
sdk/pam/setup.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/keepdocker.py
sdk/python/arvados/commands/run.py
sdk/python/arvados/keep.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_keep_client.py
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/container_requests_controller.rb
services/api/app/controllers/arvados/v1/containers_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/arvados/v1/jobs_controller.rb
services/api/app/controllers/arvados/v1/workflows_controller.rb [new file with mode: 0644]
services/api/app/models/api_client_authorization.rb
services/api/app/models/arvados_model.rb
services/api/app/models/collection.rb
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/app/models/job.rb
services/api/app/models/log.rb
services/api/app/models/user.rb
services/api/app/models/workflow.rb [new file with mode: 0644]
services/api/config/application.default.yml
services/api/config/initializers/load_config.rb
services/api/config/initializers/lograge.rb [new file with mode: 0644]
services/api/config/routes.rb
services/api/db/migrate/20160808151559_create_workflows.rb [new file with mode: 0644]
services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb [new file with mode: 0644]
services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb [new file with mode: 0644]
services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/eventbus.rb
services/api/lib/simulate_job_log.rb
services/api/lib/tasks/delete_old_container_logs.rake [new file with mode: 0644]
services/api/lib/tasks/delete_old_job_logs.rake
services/api/test/factories/api_client.rb
services/api/test/factories/api_client_authorization.rb
services/api/test/factories/user.rb
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/containers.yml
services/api/test/fixtures/groups.yml
services/api/test/fixtures/jobs.yml
services/api/test/fixtures/logs.yml
services/api/test/fixtures/pipeline_instances.yml
services/api/test/fixtures/workflows.yml [new file with mode: 0644]
services/api/test/functional/arvados/v1/containers_controller_test.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
services/api/test/functional/arvados/v1/jobs_controller_test.rb
services/api/test/integration/collections_api_test.rb
services/api/test/integration/permissions_test.rb
services/api/test/integration/websocket_test.rb
services/api/test/tasks/delete_old_container_logs_test.rb [new file with mode: 0644]
services/api/test/test_helper.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/container_test.rb
services/api/test/unit/job_test.rb
services/api/test/unit/log_test.rb
services/api/test/unit/permission_test.rb
services/api/test/unit/repository_test.rb
services/api/test/unit/user_test.rb
services/api/test/unit/workflow_test.rb [new file with mode: 0644]
services/api/test/websocket_runner.rb
services/arv-git-httpd/arv-git-httpd.service [new file with mode: 0644]
services/arv-git-httpd/auth_handler.go
services/arv-git-httpd/git_handler.go
services/arv-git-httpd/git_handler_test.go
services/arv-git-httpd/gitolite_test.go
services/arv-git-httpd/integration_test.go
services/arv-git-httpd/main.go
services/arv-git-httpd/server.go
services/arv-git-httpd/usage.go [new file with mode: 0644]
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-local/crunch-dispatch-local_test.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.service [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-dispatch-slurm/script.go [new file with mode: 0644]
services/crunch-dispatch-slurm/script_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/usage.go [new file with mode: 0644]
services/crunch-run/cgroup.go [new file with mode: 0644]
services/crunch-run/cgroup_test.go [new file with mode: 0644]
services/crunch-run/crunchrun.go
services/crunch-run/crunchrun_test.go
services/crunch-run/logging.go
services/crunch-run/logging_test.go
services/crunchstat/crunchstat.go
services/crunchstat/crunchstat_test.go
services/datamanager/collection/collection.go
services/datamanager/collection/collection_test.go
services/datamanager/datamanager.go
services/datamanager/datamanager_test.go
services/datamanager/keep/keep.go
services/datamanager/keep/keep_test.go
services/datamanager/summary/pull_list_test.go
services/datamanager/summary/summary_test.go
services/datamanager/summary/trash_list.go
services/datamanager/summary/trash_list_test.go
services/dockercleaner/MANIFEST.in
services/dockercleaner/arvados-docker-cleaner.service [new file with mode: 0644]
services/dockercleaner/arvados_docker/cleaner.py
services/dockercleaner/setup.py
services/dockercleaner/tests/test_cleaner.py
services/fuse/arvados_fuse/__init__.py
services/fuse/arvados_fuse/command.py
services/fuse/arvados_fuse/fresh.py
services/fuse/arvados_fuse/fusedir.py
services/fuse/arvados_fuse/fusefile.py
services/fuse/tests/integration_test.py
services/fuse/tests/mount_test_base.py
services/fuse/tests/test_cache.py
services/fuse/tests/test_inodes.py
services/fuse/tests/test_mount.py
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keep-balance/integration_test.go
services/keep-balance/keep-balance.service [new file with mode: 0644]
services/keep-balance/main.go
services/keep-balance/main_test.go
services/keep-balance/usage.go
services/keep-web/anonymous.go [deleted file]
services/keep-web/doc.go
services/keep-web/handler.go
services/keep-web/handler_test.go
services/keep-web/keep-web.service [new file with mode: 0644]
services/keep-web/main.go
services/keep-web/server.go
services/keep-web/server_test.go
services/keep-web/usage.go [new file with mode: 0644]
services/keepproxy/keepproxy.go
services/keepproxy/keepproxy.service [new file with mode: 0644]
services/keepproxy/keepproxy_test.go
services/keepproxy/usage.go [new file with mode: 0644]
services/keepstore/azure_blob_volume.go
services/keepstore/handlers.go
services/keepstore/keepstore.go
services/keepstore/pull_worker.go
services/keepstore/pull_worker_integration_test.go
services/keepstore/pull_worker_test.go
services/keepstore/s3_volume.go
services/keepstore/s3_volume_test.go
services/keepstore/trash_worker.go
services/keepstore/trash_worker_test.go
services/keepstore/volume_generic_test.go
services/keepstore/volume_unix.go
services/keepstore/work_queue_test.go
services/nodemanager/arvnodeman/computenode/driver/__init__.py
services/nodemanager/tests/test_computenode_dispatch.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.demo
tools/arvbox/lib/arvbox/docker/Dockerfile.dev

diff --git a/README.md b/README.md
index 629c2f070ee89c46f5cfca164617e0ca17b182fe..cf09171b024716b4a57a412746c7d1a9decbb87e 100644 (file)
--- a/README.md
+++ b/README.md
@@ -58,7 +58,7 @@ contributors to Arvados.
 
 ## Development
 
-[![Build Status](https://ci.curoverse.com/buildStatus/icon?job=arvados-api-server)](https://ci.curoverse.com/job/arvados-api-server/)
+[![Build Status](https://ci.curoverse.com/buildStatus/icon?job=run-tests)](https://ci.curoverse.com/job/run-tests/)
 
 The Arvados public bug tracker is located at https://dev.arvados.org/projects/arvados/issues
 
diff --git a/apps/workbench/Gemfile b/apps/workbench/Gemfile
index e35cc83ece303aec8b84c4e43e9a15f0258ba7d4..20d64d17a16dc87b2a076c13b2ad6c356b0a041a 100644 (file)
--- a/apps/workbench/Gemfile
+++ b/apps/workbench/Gemfile
@@ -93,3 +93,8 @@ gem "deep_merge", :require => 'deep_merge/rails_compat'
 
 gem 'morrisjs-rails'
 gem 'raphael-rails'
+
+gem 'lograge'
+gem 'logstash-event'
+
+gem 'safe_yaml'
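The lograge and logstash-event gems added here back the new apps/workbench/config/initializers/lograge.rb listed above; its contents are not part of this excerpt. A minimal sketch of what such an initializer typically looks like, assuming the standard lograge API and the ArvadosWorkbench application module name:

    # Hypothetical sketch only -- not the actual initializer from this commit.
    ArvadosWorkbench::Application.configure do
      config.lograge.enabled = true
      # Lograge::Formatters::Logstash ships with lograge and uses logstash-event.
      config.lograge.formatter = Lograge::Formatters::Logstash.new
      config.lograge.custom_options = lambda do |event|
        # e.g. thread the per-request id (set in ApplicationController below)
        # into every log line.
        { request_id: Thread.current[:current_request_id] }
      end
    end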
diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
index 2618e47cbf606dbf59ab7ef63d2833e60553c020..a8431a7dfd373d0357053df0e06df901498ca0ca 100644 (file)
--- a/apps/workbench/Gemfile.lock
+++ b/apps/workbench/Gemfile.lock
@@ -142,6 +142,11 @@ GEM
     logging (2.1.0)
       little-plugger (~> 1.1)
       multi_json (~> 1.10)
+    lograge (0.3.6)
+      actionpack (>= 3)
+      activesupport (>= 3)
+      railties (>= 3)
+    logstash-event (1.2.02)
     mail (2.6.3)
       mime-types (>= 1.16, < 3)
     memoist (0.14.0)
@@ -211,6 +216,7 @@ GEM
     rubyzip (1.1.7)
     rvm-capistrano (1.5.5)
       capistrano (~> 2.15.4)
+    safe_yaml (1.0.4)
     sass (3.4.9)
     sass-rails (5.0.1)
       railties (>= 4.0.0, < 5.0)
@@ -284,6 +290,8 @@ DEPENDENCIES
   jquery-rails
   less
   less-rails
+  lograge
+  logstash-event
   minitest (>= 5.0.0)
   mocha
   morrisjs-rails
@@ -299,6 +307,7 @@ DEPENDENCIES
   ruby-debug-passenger
   ruby-prof
   rvm-capistrano
+  safe_yaml
   sass
   sass-rails
   selenium-webdriver
@@ -309,3 +318,6 @@ DEPENDENCIES
   therubyracer
   uglifier (>= 1.0.3)
   wiselinks
+
+BUNDLED WITH
+   1.12.1
diff --git a/apps/workbench/app/assets/javascripts/infinite_scroll.js b/apps/workbench/app/assets/javascripts/infinite_scroll.js
index 047858c5a0e3a9811408de40442f24605994868d..b89ac817cba242b6e25decae674420217e46c604 100644 (file)
--- a/apps/workbench/app/assets/javascripts/infinite_scroll.js
+++ b/apps/workbench/app/assets/javascripts/infinite_scroll.js
@@ -130,6 +130,7 @@ function maybe_load_more_content(event) {
                 $container.find(".spinner").detach();
                 $container.append(data.content);
                 $container.attr('data-infinite-content-href', data.next_page_href);
+                ping_all_scrollers();
             });
      }
 }
@@ -151,7 +152,8 @@ function mergeInfiniteContentParams($container) {
     // For example, filterable.js writes filters in
     // infiniteContentParamsFilterable ("search for text foo")
     // without worrying about clobbering the filters set up by the
-    // tab pane ("only show jobs and pipelines in this tab").
+    // tab pane ("only show container requests and pipeline instances
+    // in this tab").
     $.each($container.data(), function(datakey, datavalue) {
         // Note: We attach these data to DOM elements using
         // <element data-foo-bar="baz">. We store/retrieve them
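For reference, maybe_load_more_content above consumes a JSON response carrying `content` (HTML rows to append) and `next_page_href` (the URL of the following page, produced server-side by the next_page_filters helpers added below). A sketch of the payload shape, with hypothetical values:

    { content: '<div class="row">...</div>',
      next_page_href: "/projects/zzzzz-j7d0g-xxxxxxxxxxxxxxx?partial=contents_rows&filters=..." }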
diff --git a/apps/workbench/app/assets/javascripts/pipeline_instances.js b/apps/workbench/app/assets/javascripts/pipeline_instances.js
index 8bb25c13c080138641e6e8ed1a22124a585c44ae..ec43747a08e0e05f50611d4866582254812e4718 100644 (file)
--- a/apps/workbench/app/assets/javascripts/pipeline_instances.js
+++ b/apps/workbench/app/assets/javascripts/pipeline_instances.js
@@ -101,26 +101,6 @@ $(document).on('ready ajax:success', function() {
     });
 });
 
-$(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(event, eventData){
-    if (this != event.target) {
-        // Not interested in events sent to child nodes.
-        return;
-    }
-    var wasatbottom = ($(this).scrollTop() + $(this).height() >= this.scrollHeight);
-
-    if (eventData.event_type == "stderr" || eventData.event_type == "stdout") {
-        if( eventData.prepend ) {
-            $(this).prepend(eventData.properties.text);
-        } else {
-            $(this).append(eventData.properties.text);
-        }
-    }
-
-    if (wasatbottom) {
-        this.scrollTop = this.scrollHeight;
-    }
-});
-
 // Set up all events for the pipeline instances compare button.
 (function() {
     var compare_form = '#compare';
diff --git a/apps/workbench/app/assets/javascripts/work_unit_log.js b/apps/workbench/app/assets/javascripts/work_unit_log.js
new file mode 100644 (file)
index 0000000..d81637e
--- /dev/null
@@ -0,0 +1,68 @@
+$(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(event, eventData){
+    var wasatbottom, txt;
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+
+    if (!('properties' in eventData)) {
+        return;
+    }
+
+    txt = '';
+    if ('text' in eventData.properties &&
+       eventData.properties.text.length > 0) {
+        txt += eventData.properties.text;
+        if (txt.slice(txt.length-1) != "\n") {
+            txt += "\n";
+        }
+    }
+    if (eventData.event_type == 'update' &&
+        eventData.object_uuid.indexOf("-dz642-") == 5 &&
+        'old_attributes' in eventData.properties &&
+        'new_attributes' in eventData.properties) {
+        // Container update
+        if (eventData.properties.old_attributes.state != eventData.properties.new_attributes.state) {
+            var stamp = eventData.event_at + " ";
+            switch(eventData.properties.new_attributes.state) {
+            case "Queued":
+                txt += stamp + "Container "+eventData.object_uuid+" was returned to the queue\n";
+                break;
+            case "Locked":
+                txt += stamp + "Container "+eventData.object_uuid+" was taken from the queue by a dispatch process\n";
+                break;
+            case "Running":
+                txt += stamp + "Container "+eventData.object_uuid+" started\n";
+                break;
+            case "Complete":
+                var outcome = eventData.properties.new_attributes.exit_code === 0 ? "success" : "failure";
+                txt += stamp + "Container "+eventData.object_uuid+" finished with exit code " +
+                    eventData.properties.new_attributes.exit_code +
+                    " ("+outcome+")\n";
+                break;
+            case "Cancelled":
+                txt += stamp + "Container "+eventData.object_uuid+" was cancelled\n";
+                break;
+            default:
+                // Unknown state -- unexpected, might as well log it.
+                txt += stamp + "Container "+eventData.object_uuid+" changed state to " +
+                    eventData.properties.new_attributes.state + "\n";
+                break;
+            }
+        }
+    }
+
+    if (txt == '') {
+        return;
+    }
+
+    wasatbottom = ($(this).scrollTop() + $(this).height() >= this.scrollHeight);
+    if (eventData.prepend) {
+        $(this).prepend(txt);
+    } else {
+        $(this).append(txt);
+    }
+    if (wasatbottom) {
+        this.scrollTop = this.scrollHeight;
+    }
+});
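This handler reacts to two kinds of events: plain log text (properties.text) and container `update` events, recognized by the "-dz642-" infix starting at index 5 of the object UUID. A sketch of a state-change event in the shape the code expects (values hypothetical):

    {
      event_type: "update",
      event_at: "2016-10-03T19:44:31Z",
      object_uuid: "zzzzz-dz642-xxxxxxxxxxxxxxx",  # "-dz642-" starts at index 5
      properties: {
        old_attributes: { state: "Running" },
        new_attributes: { state: "Complete", exit_code: 0 }
      }
    }

Such an event would append a line like "2016-10-03T19:44:31Z Container zzzzz-dz642-xxxxxxxxxxxxxxx finished with exit code 0 (success)".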
diff --git a/apps/workbench/app/controllers/application_controller.rb b/apps/workbench/app/controllers/application_controller.rb
index 0ed629403c41fb3b10ca545af37200515f711fee..0da59561da077f4b79ec24c24af71820beb5e499 100644 (file)
--- a/apps/workbench/app/controllers/application_controller.rb
+++ b/apps/workbench/app/controllers/application_controller.rb
@@ -7,6 +7,7 @@ class ApplicationController < ActionController::Base
 
   ERROR_ACTIONS = [:render_error, :render_not_found]
 
+  prepend_before_filter :set_current_request_id, except: ERROR_ACTIONS
   around_filter :thread_clear
   around_filter :set_thread_api_token
   # Methods that don't require login should
@@ -115,7 +116,7 @@ class ApplicationController < ActionController::Base
   # Column names should always be qualified by a table name and a direction is optional, defaulting to asc
   # (e.g. "collections.name" or "collections.name desc").
   # If a column name is specified, that table will be sorted by that column.
-  # If there are objects from different models that will be shown (such as in Jobs and Pipelines tab),
+  # If there are objects from different models that will be shown (such as in Pipelines and processes tab),
   # then a sort column name can optionally be specified for each model, passed as an comma-separated list (e.g. "jobs.script, pipeline_instances.name")
   # Currently only one sort column name and direction can be specified for each model.
   def load_filters_and_paging_params
@@ -241,6 +242,28 @@ class ApplicationController < ActionController::Base
     end
   end
 
+  helper_method :next_page_filters
+  def next_page_filters nextpage_operator
+    next_page_filters = @filters.reject do |attr, op, val|
+      (attr == 'created_at' and op == nextpage_operator) or
+      (attr == 'uuid' and op == 'not in')
+    end
+
+    if @objects.any?
+      last_created_at = @objects.last.created_at
+
+      last_uuids = []
+      @objects.each do |obj|
+        last_uuids << obj.uuid if obj.created_at.eql?(last_created_at)
+      end
+
+      next_page_filters += [['created_at', nextpage_operator, last_created_at]]
+      next_page_filters += [['uuid', 'not in', last_uuids]]
+    end
+
+    next_page_filters
+  end
+
   def show
     if !@object
       return render_not_found("object not found")
@@ -1190,4 +1213,10 @@ class ApplicationController < ActionController::Base
   def wiselinks_layout
     'body'
   end
+
+  def set_current_request_id
+    # Request ID format: '<timestamp>-<9_digits_random_number>'
+    current_request_id = "#{Time.new.to_i}-#{sprintf('%09d', rand(0..10**9-1))}"
+    Thread.current[:current_request_id] = current_request_id
+  end
 end
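next_page_filters (exposed to views via helper_method) implements keyset-style pagination: it strips any previous created_at/uuid boundary filters, then re-adds boundaries taken from the last row of the current page; the extra `uuid not in` clause excludes already-shown rows whose created_at ties with the boundary. A worked sketch with hypothetical values:

    # The current page ends with two objects created at the same instant:
    #   last_created_at = "2016-10-03T19:44:31Z"
    #   last_uuids = ["zzzzz-xvhdp-000000000000001", "zzzzz-xvhdp-000000000000002"]
    # With nextpage_operator '<=', the filters for the next request become:
    #   [..., ['created_at', '<=', "2016-10-03T19:44:31Z"],
    #         ['uuid', 'not in', last_uuids]]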
diff --git a/apps/workbench/app/controllers/collections_controller.rb b/apps/workbench/app/controllers/collections_controller.rb
index 7a002427cfe97b0418b6f8cda2a671dd0148c10f..20b227c3c7277d491c74b96c0f5de7bc415c0f4c 100644 (file)
--- a/apps/workbench/app/controllers/collections_controller.rb
+++ b/apps/workbench/app/controllers/collections_controller.rb
@@ -253,13 +253,6 @@ class CollectionsController < ApplicationController
         @permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
           .where(head_uuid: @object.uuid, link_class: 'permission',
                  name: 'can_read').results
-        @logs = Log.limit(RELATION_LIMIT).order("created_at DESC")
-          .select(%w(uuid event_type object_uuid event_at summary))
-          .where(object_uuid: @object.uuid).results
-        @is_persistent = Link.limit(1)
-          .where(head_uuid: @object.uuid, tail_uuid: current_user.uuid,
-                 link_class: 'resources', name: 'wants')
-          .results.any?
         @search_sharing = search_scopes
 
         if params["tab_pane"] == "Used_by"
diff --git a/apps/workbench/app/controllers/container_requests_controller.rb b/apps/workbench/app/controllers/container_requests_controller.rb
index 4a32cd8171c53ffa64d17a1e4640abb7ca837bf6..b67d100887c838917ee4a2fc6ba8ac2871893cd3 100644 (file)
--- a/apps/workbench/app/controllers/container_requests_controller.rb
+++ b/apps/workbench/app/controllers/container_requests_controller.rb
@@ -1,6 +1,15 @@
 class ContainerRequestsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   def show_pane_list
-    %w(Status Log Advanced)
+    panes = %w(Status Log Advanced)
+    if @object.andand.state == 'Uncommitted'
+      panes = %w(Inputs) + panes - %w(Log)
+    end
+    panes
   end
 
   def cancel
@@ -11,4 +20,43 @@ class ContainerRequestsController < ApplicationController
       redirect_to @object
     end
   end
+
+  def update
+    @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+    input_obj = @updates[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content]
+    if input_obj
+      workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content]
+      get_cwl_inputs(workflow).each do |input_schema|
+        if not input_obj.include? cwl_shortname(input_schema[:id])
+          next
+        end
+        required, primary_type, param_id = cwl_input_info(input_schema)
+        if input_obj[param_id] == ""
+          input_obj[param_id] = nil
+        elsif primary_type == "boolean"
+          input_obj[param_id] = input_obj[param_id] == "true"
+        elsif ["int", "long"].include? primary_type
+          input_obj[param_id] = input_obj[param_id].to_i
+        elsif ["float", "double"].include? primary_type
+          input_obj[param_id] = input_obj[param_id].to_f
+        elsif ["File", "Directory"].include? primary_type
+          re = CollectionsHelper.match_uuid_with_optional_filepath(input_obj[param_id])
+          if re
+            c = Collection.find(re[1])
+            input_obj[param_id] = {"class" => primary_type,
+                                   "location" => "keep:#{c.portable_data_hash}#{re[4]}",
+                                   "arv:collection" => input_obj[param_id]}
+          end
+        end
+      end
+    end
+    params[:merge] = true
+    begin
+      super
+    rescue => e
+      flash[:error] = e.to_s
+      show
+    end
+  end
+
 end
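The new update action coerces the string values posted by the Inputs form back to the types declared in the workflow's CWL input schema before delegating to the standard update path. The mapping, restated from the branches above (values hypothetical):

    # ""                -> nil          (any type; empty string means unset)
    # "true" / "false"  -> true / false (boolean)
    # "42"              -> 42           (int, long)
    # "3.14"            -> 3.14         (float, double)
    # "<uuid>/file.txt" -> {"class" => "File",
    #                       "location" => "keep:<portable_data_hash>/file.txt",
    #                       "arv:collection" => "<uuid>/file.txt"}   (File, Directory)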
diff --git a/apps/workbench/app/controllers/containers_controller.rb b/apps/workbench/app/controllers/containers_controller.rb
index 86582dff4fe85ce5073f9f3a8e8851680028b9f0..1df2c3acb0f5bcba19562c57b8f794c641375c88 100644 (file)
--- a/apps/workbench/app/controllers/containers_controller.rb
+++ b/apps/workbench/app/controllers/containers_controller.rb
@@ -1,4 +1,9 @@
 class ContainersController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   def show_pane_list
     %w(Status Log Advanced)
   end
diff --git a/apps/workbench/app/controllers/jobs_controller.rb b/apps/workbench/app/controllers/jobs_controller.rb
index 398417734c71c34f2aaac71fbf700eaf4d5f50d1..f18a79d646c4a0a1dc774e52f0c2d4da1c8f9346 100644 (file)
--- a/apps/workbench/app/controllers/jobs_controller.rb
+++ b/apps/workbench/app/controllers/jobs_controller.rb
@@ -61,14 +61,9 @@ class JobsController < ApplicationController
   end
 
   def logs
-    @logs = Log.select(%w(event_type object_uuid event_at properties))
-               .order('event_at DESC')
-               .filter([["event_type",  "=", "stderr"],
-                        ["object_uuid", "in", [@object.uuid]]])
-               .limit(500)
-               .results
-               .to_a
-               .map{ |e| e.serializable_hash.merge({ 'prepend' => true }) }
+    @logs = @object.
+      stderr_log_query(Rails.configuration.running_job_log_records_to_fetch).
+      map { |e| e.serializable_hash.merge({ 'prepend' => true }) }
     respond_to do |format|
       format.json { render json: @logs }
     end
diff --git a/apps/workbench/app/controllers/projects_controller.rb b/apps/workbench/app/controllers/projects_controller.rb
index e49ed1fab65f38b6631c0298f8ba508feacd9087..16212a8d0ad489b381aa3619d69d72443905cfcb 100644 (file)
--- a/apps/workbench/app/controllers/projects_controller.rb
+++ b/apps/workbench/app/controllers/projects_controller.rb
@@ -63,13 +63,13 @@ class ProjectsController < ApplicationController
       }
     pane_list <<
       {
-        :name => 'Jobs_and_pipelines',
-        :filters => [%w(uuid is_a) + [%w(arvados#job arvados#pipelineInstance)]]
+        :name => 'Pipelines_and_processes',
+        :filters => [%w(uuid is_a) + [%w(arvados#containerRequest arvados#pipelineInstance)]]
       }
     pane_list <<
       {
         :name => 'Pipeline_templates',
-        :filters => [%w(uuid is_a arvados#pipelineTemplate)]
+        :filters => [%w(uuid is_a) + [%w(arvados#pipelineTemplate arvados#workflow)]]
       }
     pane_list <<
       {
@@ -213,9 +213,13 @@ class ProjectsController < ApplicationController
       @name_link_for = {}
       kind_filters.each do |attr,op,val|
         (val.is_a?(Array) ? val : [val]).each do |type|
+          filters = @filters - kind_filters + [['uuid', 'is_a', type]]
+          if type == 'arvados#containerRequest'
+            filters = filters + [['container_requests.requesting_container_uuid', '=', nil]]
+          end
           objects = @object.contents(order: @order,
                                      limit: @limit,
-                                     filters: (@filters - kind_filters + [['uuid', 'is_a', type]]),
+                                     filters: filters,
                                     )
           objects.each do |object|
             @name_link_for[object.andand.uuid] = objects.links_for(object, 'name').first
@@ -226,23 +230,9 @@ class ProjectsController < ApplicationController
       @objects = @objects.to_a.sort_by(&:created_at)
       @objects.reverse! if nextpage_operator == '<='
       @objects = @objects[0..@limit-1]
-      @next_page_filters = @filters.reject do |attr,op,val|
-        (attr == 'created_at' and op == nextpage_operator) or
-          (attr == 'uuid' and op == 'not in')
-      end
 
       if @objects.any?
-        last_created_at = @objects.last.created_at
-
-        last_uuids = []
-        @objects.each do |obj|
-          last_uuids << obj.uuid if obj.created_at.eql?(last_created_at)
-        end
-
-        @next_page_filters += [['created_at',
-                                nextpage_operator,
-                                last_created_at]]
-        @next_page_filters += [['uuid', 'not in', last_uuids]]
+        @next_page_filters = next_page_filters(nextpage_operator)
         @next_page_href = url_for(partial: :contents_rows,
                                   limit: @limit,
                                   filters: @next_page_filters.to_json)
diff --git a/apps/workbench/app/controllers/work_unit_templates_controller.rb b/apps/workbench/app/controllers/work_unit_templates_controller.rb
new file mode 100644 (file)
index 0000000..6b5f114
--- /dev/null
@@ -0,0 +1,30 @@
+class WorkUnitTemplatesController < ApplicationController
+  def find_objects_for_index
+    return if !params[:partial]
+
+    @limit = 40
+    @filters = @filters || []
+
+    # get next page of pipeline_templates
+    filters = @filters + [["uuid", "is_a", ["arvados#pipelineTemplate"]]]
+    pipelines = PipelineTemplate.limit(@limit).order(["created_at desc"]).filter(filters)
+
+    # get next page of workflows
+    filters = @filters + [["uuid", "is_a", ["arvados#workflow"]]]
+    workflows = Workflow.limit(@limit).order(["created_at desc"]).filter(filters)
+
+    @objects = (pipelines.to_a + workflows.to_a).sort_by(&:created_at).reverse.first(@limit)
+
+    if @objects.any?
+      @next_page_filters = next_page_filters('<=')
+      @next_page_href = url_for(partial: :choose_rows,
+                                filters: @next_page_filters.to_json)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+end
diff --git a/apps/workbench/app/controllers/work_units_controller.rb b/apps/workbench/app/controllers/work_units_controller.rb
new file mode 100644 (file)
index 0000000..6ed25dd
--- /dev/null
@@ -0,0 +1,114 @@
+class WorkUnitsController < ApplicationController
+  def find_objects_for_index
+    # If this is not the index-rows partial display, just return.
+    # The /index request will be invoked again to render the
+    # partial; at that point the objects found here will be used.
+    return if !params[:partial]
+
+    @limit = 20
+    @filters = @filters || []
+
+    # get next page of pipeline_instances
+    filters = @filters + [["uuid", "is_a", ["arvados#pipelineInstance"]]]
+    pipelines = PipelineInstance.limit(@limit).order(["created_at desc"]).filter(filters)
+
+    # get next page of jobs
+    filters = @filters + [["uuid", "is_a", ["arvados#job"]]]
+    jobs = Job.limit(@limit).order(["created_at desc"]).filter(filters)
+
+    # get next page of container_requests
+    filters = @filters + [["uuid", "is_a", ["arvados#containerRequest"]]]
+    crs = ContainerRequest.limit(@limit).order(["created_at desc"]).filter(filters)
+    @objects = (jobs.to_a + pipelines.to_a + crs.to_a).sort_by(&:created_at).reverse.first(@limit)
+
+    if @objects.any?
+      @next_page_filters = next_page_filters('<=')
+      @next_page_href = url_for(partial: :all_processes_rows,
+                                filters: @next_page_filters.to_json)
+      preload_links_for_objects(@objects.to_a)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+
+  def create
+    template_uuid = params['work_unit']['template_uuid']
+
+    attrs = {}
+    rc = resource_class_for_uuid(template_uuid)
+    if rc == PipelineTemplate
+      model_class = PipelineInstance
+      attrs['pipeline_template_uuid'] = template_uuid
+    elsif rc == Workflow
+      # workflow json
+      workflow = Workflow.find? template_uuid
+      if workflow.definition
+        begin
+          wf_json = YAML::load(workflow.definition)
+        rescue => e
+          logger.error "Error converting definition yaml to json: #{e.message}"
+          raise ArgumentError, "Error converting definition yaml to json: #{e.message}"
+        end
+      end
+
+      model_class = ContainerRequest
+
+      attrs['name'] = "#{workflow['name']} container" if workflow['name'].present?
+      attrs['properties'] = {'template_uuid' => template_uuid}
+      attrs['priority'] = 1
+      attrs['state'] = "Uncommitted"
+
+      # required
+      attrs['command'] = ["arvados-cwl-runner", "--local", "--api=containers", "/var/lib/cwl/workflow.json#main", "/var/lib/cwl/cwl.input.json"]
+      attrs['container_image'] = "arvados/jobs"
+      attrs['cwd'] = "/var/spool/cwl"
+      attrs['output_path'] = "/var/spool/cwl"
+
+      # mounts
+      mounts = {
+        "/var/lib/cwl/cwl.input.json" => {
+          "kind" => "json",
+          "content" => {}
+        },
+        "stdout" => {
+          "kind" => "file",
+          "path" => "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl" => {
+          "kind" => "collection",
+          "writable" => true
+        }
+      }
+      if wf_json
+        mounts["/var/lib/cwl/workflow.json"] = {
+          "kind" => "json",
+          "content" => wf_json
+        }
+      end
+      attrs['mounts'] = mounts
+
+      # runtime constraints
+      runtime_constraints = {
+        "vcpus" => 1,
+        "ram" => 256000000,
+        "API" => true
+      }
+      attrs['runtime_constraints'] = runtime_constraints
+    else
+      raise ArgumentError, "Unsupported template uuid: #{template_uuid}"
+    end
+
+    attrs['owner_uuid'] = params['work_unit']['owner_uuid']
+    @object ||= model_class.new attrs
+
+    if @object.save
+      redirect_to @object
+    else
+      render_error status: 422
+    end
+  end
+end
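create dispatches on the template's class: a PipelineTemplate uuid yields a new PipelineInstance, while a Workflow uuid yields an Uncommitted ContainerRequest preconfigured to run arvados-cwl-runner with the workflow definition mounted at /var/lib/cwl/workflow.json. A hypothetical request that would exercise the workflow branch (uuids illustrative):

    # POST /work_units with:
    params = {
      'work_unit' => {
        'template_uuid' => 'zzzzz-7fd4e-xxxxxxxxxxxxxxx',  # a Workflow uuid
        'owner_uuid'    => 'zzzzz-j7d0g-xxxxxxxxxxxxxxx'   # destination project
      }
    }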
diff --git a/apps/workbench/app/controllers/workflows_controller.rb b/apps/workbench/app/controllers/workflows_controller.rb
new file mode 100644 (file)
index 0000000..a3ba7d6
--- /dev/null
@@ -0,0 +1,6 @@
+class WorkflowsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+end
diff --git a/apps/workbench/app/helpers/application_helper.rb b/apps/workbench/app/helpers/application_helper.rb
index a37ecda7041c99ff820b59bd7874d5e8f5b7e9e4..b5df9f38a8c3c5da421476d47621ee96f3ca612e 100644 (file)
--- a/apps/workbench/app/helpers/application_helper.rb
+++ b/apps/workbench/app/helpers/application_helper.rb
@@ -418,6 +418,168 @@ module ApplicationHelper
     lt
   end
 
+  def get_cwl_inputs(workflow)
+    if workflow[:inputs]
+      return workflow[:inputs]
+    else
+      workflow[:"$graph"].each do |tool|
+        if tool[:id] == "#main"
+          return tool[:inputs]
+        end
+      end
+    end
+  end
+
+  def cwl_shortname(id)
+    if id[0] == "#"
+      id = id[1..-1]
+    end
+    return id.split("/")[-1]
+  end
+
+  def cwl_input_info(input_schema)
+    required = !(input_schema[:type].include? "null")
+    if input_schema[:type].is_a? Array
+      primary_type = input_schema[:type].select { |n| n != "null" }[0]
+    elsif input_schema[:type].is_a? String
+      primary_type = input_schema[:type]
+    elsif input_schema[:type].is_a? Hash
+      primary_type = input_schema[:type]
+    end
+    param_id = cwl_shortname(input_schema[:id])
+    return required, primary_type, param_id
+  end
+
+  def cwl_input_value(object, input_schema, set_attr_path)
+    dn = ""
+    attrvalue = object
+    set_attr_path.each do |a|
+      dn += "[#{a}]"
+      attrvalue = attrvalue[a.to_sym]
+    end
+    return dn, attrvalue
+  end
+
+  def cwl_inputs_required(object, inputs_schema, set_attr_path)
+    r = 0
+    inputs_schema.each do |input|
+      required, primary_type, param_id = cwl_input_info(input)
+      dn, attrvalue = cwl_input_value(object, input, set_attr_path + [param_id])
+      r += 1 if required and attrvalue.nil?
+    end
+    r
+  end
+
+  def render_cwl_input(object, input_schema, set_attr_path, htmloptions={})
+    required, primary_type, param_id = cwl_input_info(input_schema)
+
+    dn, attrvalue = cwl_input_value(object, input_schema, set_attr_path + [param_id])
+    attrvalue = if attrvalue.nil? then "" else attrvalue end
+
+    id = "#{object.uuid}-#{param_id}"
+
+    opt_empty_selection = if required then [] else [{value: "", text: ""}] end
+
+    if ["Directory", "File"].include? primary_type
+      chooser_title = "Choose a #{primary_type == 'Directory' ? 'dataset' : 'file'}:"
+      selection_param = object.class.to_s.underscore + dn
+      if attrvalue.is_a? Hash
+        display_value = attrvalue[:"arv:collection"] || attrvalue[:location]
+        re = CollectionsHelper.match_uuid_with_optional_filepath(display_value)
+        if re
+          if re[4]
+            display_value = "#{Collection.find(re[1]).name} / #{re[4][1..-1]}"
+          else
+            display_value = Collection.find(re[1]).name
+          end
+        end
+      end
+      modal_path = choose_collections_path \
+      ({ title: chooser_title,
+         filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
+         action_name: 'OK',
+         action_href: container_request_path(id: object.uuid),
+         action_method: 'patch',
+         preconfigured_search_str: "",
+         action_data: {
+           merge: true,
+           use_preview_selection: primary_type == 'File' ? true : nil,
+           selection_param: selection_param,
+           success: 'page-refresh'
+         }.to_json,
+        })
+
+      return content_tag('div', :class => 'input-group') do
+        html = text_field_tag(dn, display_value,
+                              :class =>
+                              "form-control #{'required' if required}")
+        html + content_tag('span', :class => 'input-group-btn') do
+          link_to('Choose',
+                  modal_path,
+                  { :class => "btn btn-primary",
+                    :remote => true,
+                    :method => 'get',
+                  })
+        end
+      end
+    elsif "boolean" == primary_type
+      return link_to attrvalue.to_s, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => "select",
+                     "data-source" => (opt_empty_selection + [{value: "true", text: "true"}, {value: "false", text: "false"}]).to_json,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue.to_s,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                   }.merge(htmloptions)
+    elsif primary_type.is_a? Hash and primary_type[:type] == "enum"
+      return link_to attrvalue, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => "select",
+                     "data-source" => (opt_empty_selection + primary_type[:symbols].map {|i| {:value => i, :text => i} }).to_json,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                   }.merge(htmloptions)
+    elsif primary_type.is_a? String
+      if ["int", "long"].include? primary_type
+        datatype = "number"
+      else
+        datatype = "text"
+      end
+
+      return link_to attrvalue, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => datatype,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                     }.merge(htmloptions)
+    else
+      return "Unable to render editing control for parameter type #{primary_type}"
+    end
+  end
+
   def render_arvados_object_list_start(list, button_text, button_href,
                                        params={}, *rest, &block)
     show_max = params.delete(:show_max) || 3
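cwl_input_info reduces a CWL input schema to the triple (required, primary_type, param_id): an input is optional when "null" appears in its type list, primary_type is the first non-null type entry, and param_id is the last path segment of the id. For example (hypothetical schemas):

    cwl_input_info({type: ["null", "File"], id: "#main/reference"})
    # => [false, "File", "reference"]
    cwl_input_info({type: "int", id: "#main/threads"})
    # => [true, "int", "threads"]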
diff --git a/apps/workbench/app/models/arvados_api_client.rb b/apps/workbench/app/models/arvados_api_client.rb
index 13d4a24c69cc5f7e687c47c0e95ed715ab9f5fa2..eb93dfcfaa76d133635f8c55d429b1c00233bb37 100644 (file)
--- a/apps/workbench/app/models/arvados_api_client.rb
+++ b/apps/workbench/app/models/arvados_api_client.rb
@@ -85,6 +85,9 @@ class ArvadosApiClient
     if not @api_client
       @client_mtx.synchronize do
         @api_client = HTTPClient.new
+        @api_client.ssl_config.timeout = Rails.configuration.api_client_connect_timeout
+        @api_client.connect_timeout = Rails.configuration.api_client_connect_timeout
+        @api_client.receive_timeout = Rails.configuration.api_client_receive_timeout
         if Rails.configuration.arvados_insecure_https
           @api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
         else
@@ -114,6 +117,7 @@ class ArvadosApiClient
                            Thread.current[:reader_tokens] ||
                            []) +
                           [Rails.configuration.anonymous_user_token]).to_json,
+      'current_request_id' => (Thread.current[:current_request_id] || ''),
     }
     if !data.nil?
       data.each do |k,v|
diff --git a/apps/workbench/app/models/container_request.rb b/apps/workbench/app/models/container_request.rb
index 62d8bff042c16dec335f746ff6f0991e5e37250e..0148de51f7459a678d49547fe4f24a10e6bc27e9 100644 (file)
--- a/apps/workbench/app/models/container_request.rb
+++ b/apps/workbench/app/models/container_request.rb
@@ -7,6 +7,10 @@ class ContainerRequest < ArvadosBase
     [ 'description' ]
   end
 
+  def self.goes_in_projects?
+    true
+  end
+
   def work_unit(label=nil)
     ContainerWorkUnit.new(self, label)
   end
diff --git a/apps/workbench/app/models/container_work_unit.rb b/apps/workbench/app/models/container_work_unit.rb
index 037a6e53eebceaa2e45ea964126a65c33afe1a17..b6e72dc526538d022166d52ff0a342373bb585cc 100644 (file)
--- a/apps/workbench/app/models/container_work_unit.rb
+++ b/apps/workbench/app/models/container_work_unit.rb
@@ -46,13 +46,17 @@ class ContainerWorkUnit < ProxyWorkUnit
   end
 
   def can_cancel?
-    @proxied.is_a?(ContainerRequest) && state_label.in?(["Queued", "Locked", "Running"]) && priority > 0
+    @proxied.is_a?(ContainerRequest) && @proxied.state == "Committed" && @proxied.priority > 0 && @proxied.editable?
   end
 
   def container_uuid
     get(:container_uuid)
   end
 
+  def priority
+    @proxied.priority
+  end
+
   # For the following properties, use value from the @container if exists
   # This applies to a ContainerRequest with container_uuid
 
@@ -75,7 +79,15 @@ class ContainerWorkUnit < ProxyWorkUnit
   end
 
   def state_label
-    get_combined(:state)
+    ec = exit_code
+    return "Failed" if (ec && ec != 0)
+    state = get_combined(:state)
+    return "Ready" if ((priority == 0) and (["Queued", "Locked"].include?(state)))
+    state
+  end
+
+  def exit_code
+    get_combined(:exit_code)
   end
 
   def docker_image
@@ -86,10 +98,6 @@ class ContainerWorkUnit < ProxyWorkUnit
     get_combined(:runtime_constraints)
   end
 
-  def priority
-    get_combined(:priority)
-  end
-
   def log_collection
     get_combined(:log)
   end
@@ -124,7 +132,25 @@ class ContainerWorkUnit < ProxyWorkUnit
     get_combined(:output_path)
   end
 
-  # End combined propeties
+  def log_object_uuids
+    [get(:uuid, @container), get(:uuid, @proxied)].compact
+  end
+
+  def render_log
+    collection = Collection.find(log_collection) rescue nil
+    if collection
+      return {log: collection, partial: 'collections/show_files', locals: {object: collection, no_checkboxes: true}}
+    end
+  end
+
+  def template_uuid
+    properties = get(:properties)
+    if properties
+      properties[:template_uuid]
+    end
+  end
+
+  # End combined properties
 
   protected
   def get_combined key
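The reworked state_label derives a display state from the combined request/container attributes, in priority order:

    # exit_code present and non-zero                 -> "Failed"
    # priority == 0 and state "Queued" or "Locked"   -> "Ready"
    # anything else                                  -> the combined state as-is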
diff --git a/apps/workbench/app/models/job.rb b/apps/workbench/app/models/job.rb
index 73f1f63be4c7d5dcb5fa33e390d722c87b16e0b0..bf202c4eaaadffbd92f1a44d1160c3bd8c51572e 100644 (file)
--- a/apps/workbench/app/models/job.rb
+++ b/apps/workbench/app/models/job.rb
@@ -43,8 +43,7 @@ class Job < ArvadosBase
   end
 
   def stderr_log_query(limit=nil)
-    query = Log.where(event_type: "stderr", object_uuid: self.uuid)
-               .order("id DESC")
+    query = Log.where(object_uuid: self.uuid).order("created_at DESC")
     query = query.limit(limit) if limit
     query
   end
diff --git a/apps/workbench/app/models/pipeline_instance.rb b/apps/workbench/app/models/pipeline_instance.rb
index b51f07c40b36e0324b09e14c81dbf632dbcd7a68..62bbc5431937e6a4b89826a3f03e2cda5c37ff27 100644 (file)
--- a/apps/workbench/app/models/pipeline_instance.rb
+++ b/apps/workbench/app/models/pipeline_instance.rb
@@ -13,7 +13,7 @@ class PipelineInstance < ArvadosBase
       template = if lookup and lookup[self.pipeline_template_uuid]
                    lookup[self.pipeline_template_uuid]
                  else
-                   PipelineTemplate.where(uuid: self.pipeline_template_uuid).first
+                   PipelineTemplate.find?(self.pipeline_template_uuid) if self.pipeline_template_uuid
                  end
       if template
         template.name
diff --git a/apps/workbench/app/models/pipeline_instance_work_unit.rb b/apps/workbench/app/models/pipeline_instance_work_unit.rb
index 889fa1a7f3cccecf86c53f5cd837ad4f34cb7ca2..dd5685ac3d8082d5a5836896afa310b416e728f2 100644 (file)
--- a/apps/workbench/app/models/pipeline_instance_work_unit.rb
+++ b/apps/workbench/app/models/pipeline_instance_work_unit.rb
@@ -51,4 +51,8 @@ class PipelineInstanceWorkUnit < ProxyWorkUnit
   def title
     "pipeline"
   end
+
+  def template_uuid
+    get(:pipeline_template_uuid)
+  end
 end
diff --git a/apps/workbench/app/models/proxy_work_unit.rb b/apps/workbench/app/models/proxy_work_unit.rb
index f672c8c64cea79c46e7dc99b8f1e37a3ce09c3a6..11ec0ee196326d6a5c7d06cf0f0455a11fc9b167 100644 (file)
--- a/apps/workbench/app/models/proxy_work_unit.rb
+++ b/apps/workbench/app/models/proxy_work_unit.rb
@@ -23,6 +23,10 @@ class ProxyWorkUnit < WorkUnit
     get(:modified_by_user_uuid)
   end
 
+  def owner_uuid
+    get(:owner_uuid)
+  end
+
   def created_at
     t = get(:created_at)
     t = Time.parse(t) if (t.is_a? String)
@@ -51,6 +55,8 @@ class ProxyWorkUnit < WorkUnit
     state = get(:state)
     if ["Running", "RunningOnServer", "RunningOnClient"].include? state
       "Running"
+    elsif state == 'New'
+      "Not started"
     else
       state
     end
@@ -322,6 +328,19 @@ class ProxyWorkUnit < WorkUnit
     resp
   end
 
+  def log_object_uuids
+    [uuid]
+  end
+
+  def live_log_lines(limit)
+    Log.where(object_uuid: log_object_uuids).
+      order("created_at DESC").
+      limit(limit).
+      select { |log| log.properties[:text].is_a? String }.
+      reverse.
+      flat_map { |log| log.properties[:text].split("\n") }
+  end
+
   protected
 
   def get key, obj=@proxied
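live_log_lines feeds the live log pane: it fetches the newest `limit` Log rows for the unit's log_object_uuids, keeps only rows whose properties[:text] is a string, restores chronological order, and splits the text into individual lines. Usage sketch (values hypothetical):

    lines = work_unit.live_log_lines(2000)
    # => ["step 1 started", "step 1 done", ...]  (oldest first)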
diff --git a/apps/workbench/app/models/work_unit.rb b/apps/workbench/app/models/work_unit.rb
index 1c2d02fa96edd5e2c021f5f8d5041930b397afe3..924e067815718fc0fddf52a836c99cd2d6ffd94b 100644 (file)
--- a/apps/workbench/app/models/work_unit.rb
+++ b/apps/workbench/app/models/work_unit.rb
@@ -17,6 +17,10 @@ class WorkUnit
     # returns uuid of the user who modified this work unit most recently
   end
 
+  def owner_uuid
+    # returns uuid of the owner of this work unit
+  end
+
   def created_at
     # returns created_at timestamp
   end
@@ -37,6 +41,10 @@ class WorkUnit
     # returns a string representing state of the work unit
   end
 
+  def exit_code
+    # returns the work unit's execution exit code
+  end
+
   def state_bootstrap_class
     # returns a class like "danger", "success", or "warning" that a view can use directly to make a display class
   end
@@ -179,4 +187,20 @@ class WorkUnit
   def container_uuid
     # container_uuid of a container_request
   end
+
+  def log_object_uuids
+    # returns the uuids of the objects whose live log entries should be shown
+  end
+
+  def live_log_lines(limit)
+    # returns up to `limit` recent log lines for this work unit from the logs table
+  end
+
+  def render_log
+    # returns the log collection plus the partial and locals needed to render it
+  end
+
+  def template_uuid
+    # return the uuid of this work unit's template, if one exists
+  end
 end
diff --git a/apps/workbench/app/models/workflow.rb b/apps/workbench/app/models/workflow.rb
new file mode 100644 (file)
index 0000000..553f141
--- /dev/null
@@ -0,0 +1,5 @@
+class Workflow < ArvadosBase
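+  # Workflows are listed among the contents of the project that owns them.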
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/views/application/_choose_rows.html.erb b/apps/workbench/app/views/application/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..9b96b47
--- /dev/null
@@ -0,0 +1,8 @@
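+<%# One filterable, selectable row per object, with a preview link for the chooser dialog. %>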
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= url_for object %>?tab_pane=chooser_preview">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw fa-gear"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
index 6ece8606a839a1e974386cffdcb081cc217faadf..744839c2d863c3dfa6abf2f12f7c2ab0123c07bc 100644 (file)
@@ -1,5 +1,5 @@
 <% if object.deletable? %>
-  <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "Really delete #{object.class_for_display.downcase} '#{object.friendly_link_name}'?"}) do %>
+  <%= link_to({controller: object.class.table_name, action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "Really delete #{object.class_for_display.downcase} '#{object.friendly_link_name}'?"}) do %>
     <i class="glyphicon glyphicon-trash"></i>
   <% end %>
 <% end %>
index 68a201f19542d71b1fa78e662b3ddcea4b0603fe..78b6f8b135f9e1424bc00c46262a47da71c03b9f 100644 (file)
@@ -1,11 +1,2 @@
-<% if @object.respond_to? :name %>
-  <h2>
-    <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => "New #{controller.model_class.to_s.underscore.gsub("_"," ")}" } %>
-  </h2>
-<% end %>
-
-<% if @object.respond_to? :description %>
-  <div class="arv-description-as-subtitle">
-    <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
-  </div>
-<% end %>
+<%= render partial: 'object_name' %>
+<%= render partial: 'object_description' %>
diff --git a/apps/workbench/app/views/application/_object_description.html.erb b/apps/workbench/app/views/application/_object_description.html.erb
new file mode 100644 (file)
index 0000000..7260940
--- /dev/null
@@ -0,0 +1,5 @@
+<% if @object.respond_to? :description %>
+  <div class="arv-description-as-subtitle">
+    <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/application/_object_name.html.erb b/apps/workbench/app/views/application/_object_name.html.erb
new file mode 100644 (file)
index 0000000..b303853
--- /dev/null
@@ -0,0 +1,5 @@
+<% if @object.respond_to? :name %>
+  <h2>
+    <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => "New #{controller.model_class.to_s.underscore.gsub("_"," ")}" } %>
+  </h2>
+<% end %>
index e3c79f143b0de8fdf6d50dcc591ef05af6fef7b8..a21a514c47de1c51ec184dbe1531d5785c2b68d9 100644 (file)
@@ -9,6 +9,8 @@
   end
 %>
 
+<%# Fall back to @object when this partial is rendered without an `object` local. %>
+<% object = @object unless object %>
+
 <div class="selection-action-container" style="padding-left: <%=padding_left%>">
   <% if Collection.creatable? and (!defined? no_checkboxes or !no_checkboxes) %>
     <div class="row">
@@ -19,7 +21,7 @@
             <li><%= link_to "Create new collection with selected files", '#',
                     method: :post,
                     'data-href' => combine_selected_path(
-                      action_data: {current_project_uuid: @object.owner_uuid}.to_json
+                      action_data: {current_project_uuid: object.owner_uuid}.to_json
                     ),
                     'data-selection-param-name' => 'selection[]',
                     'data-selection-action' => 'combine-collections',
@@ -39,7 +41,7 @@
     <p/>
   <% end %>
 
-  <% file_tree = @object.andand.files_tree %>
+  <% file_tree = object.andand.files_tree %>
   <% if file_tree.nil? or file_tree.empty? %>
     <p>This collection is empty.</p>
   <% else %>
@@ -59,8 +61,8 @@
         <ul class="collection_files">
       <% else %>
         <% link_params = {controller: 'collections', action: 'show_file',
-                          uuid: @object.portable_data_hash, file: file_path, size: size} %>
-         <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=@object.uuid%>/<%=file_path%>">
+                          uuid: object.portable_data_hash, file: file_path, size: size} %>
+         <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=object.uuid%>/<%=file_path%>">
           <div class="collection_files_buttons pull-right">
             <%= raw(human_readable_bytes_html(size)) %>
             <%= link_to(raw('<i class="fa fa-search"></i>'),
 
           <div class="collection_files_name">
             <% if (!defined? no_checkboxes or !no_checkboxes) and current_user %>
-            <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
+            <%= check_box_tag 'uuids[]', "#{object.uuid}/#{file_path}", false, {
                   :class => "persistent-selection",
                   :friendly_type => "File",
-                  :friendly_name => "#{@object.uuid}/#{file_path}",
+                  :friendly_name => "#{object.uuid}/#{file_path}",
                   :href => url_for(controller: 'collections', action: 'show_file',
-                                   uuid: @object.portable_data_hash, file: file_path),
+                                   uuid: object.portable_data_hash, file: file_path),
                   :title => "Include #{file_path} in your selections",
-                  :id => "#{@object.uuid}_file_#{index}",
+                  :id => "#{object.uuid}_file_#{index}",
                 } %>
             <span>&nbsp;</span>
             <% end %>
         <% if CollectionsHelper::is_image(filename) %>
             <i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
           <div class="collection_files_inline">
-            <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
+            <%= link_to(image_tag("#{url_for object}/#{file_path}"),
                         link_params.merge(disposition: 'inline'),
                         {title: file_path}) %>
           </div>
          </div>
         <% else %>
-            <i class="fa fa-fw fa-file" href="<%=@object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
+            <i class="fa fa-fw fa-file" href="<%=object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
          </div>
         <% end %>
         </li>
index c6bad7d3aa3d865d576c98f9bb4b360aaafe0882..f0af963c2a270ee89b2eaa160223f0640c902088 100644 (file)
@@ -1,5 +1,5 @@
 <div class="row row-fill-height">
-  <div class="col-md-6">
+  <div class="col-md-7">
     <div class="panel panel-info">
       <div class="panel-heading">
         <h3 class="panel-title">
       </div>
     </div>
   </div>
-  <div class="col-md-3">
-    <div class="panel panel-default">
-      <div class="panel-heading">
-        <h3 class="panel-title">
-          Activity
-        </h3>
-      </div>
-      <div class="panel-body smaller-text">
-        <!--
-        <input type="text" class="form-control" placeholder="Search"/>
-        -->
-        <div style="height:0.5em;"></div>
-        <% name_or_object = @name_link.andand.uuid ? @name_link : @object %>
-        <% if name_or_object.created_at and not @logs.andand.any? %>
-          <p>
-            Created: <%= name_or_object.created_at.to_s(:long) if name_or_object.created_at %>
-          </p>
-          <p>
-            Last modified: <%= name_or_object.modified_at.to_s(:long) if name_or_object.modified_at %> by <%= link_to_if_arvados_object name_or_object.modified_by_user_uuid, friendly_name: true %>
-          </p>
-        <% else %>
-          <%= render_arvados_object_list_start(@logs, 'Show all activity',
-                logs_path(filters: [['object_uuid','=',name_or_object.uuid]].to_json)) do |log| %>
-          <p>
-          <%= time_ago_in_words(log.event_at) rescue 'unknown time' %> ago: <%= log.summary %>
-            <% if log.object_uuid %>
-            <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
-            <% end %>
-          </p>
-          <% end %>
-        <% end %>
-      </div>
-    </div>
-  </div>
   <% if current_user %>
-  <div class="col-md-3">
+  <div class="col-md-5">
     <div class="panel panel-default">
       <div class="panel-heading">
         <h3 class="panel-title">
     </div>
   </div>
   <% else %>
-  <div class="col-md-3">
+  <div class="col-md-5">
     <div class="panel panel-default">
       <div class="panel-heading">
         <h3 class="panel-title">
diff --git a/apps/workbench/app/views/container_requests/_name_and_description.html.erb b/apps/workbench/app/views/container_requests/_name_and_description.html.erb
new file mode 100644 (file)
index 0000000..f409519
--- /dev/null
@@ -0,0 +1,21 @@
+<%
+  wu = @object.work_unit
+  template_uuid = wu.template_uuid
+  template = Workflow.find?(template_uuid) if template_uuid
+  div_class = "col-sm-12"
+  div_class = "col-sm-6" if template
+%>
+
+<div class="<%=div_class%>">
+  <%= render partial: 'object_name' %>
+  <%= render partial: 'object_description' %>
+</div>
+
+<% if template %>
+  <div class="alert alert-info <%=div_class%>">
+     This container request was created from the workflow <%= link_to_if_arvados_object template, friendly_name: true %><br />
+     <% if template.modified_at && (template.modified_at > @object.created_at) %>
+        Note: This workflow has been modified since this container request was created.
+     <% end %>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/container_requests/_show_inputs.html.erb b/apps/workbench/app/views/container_requests/_show_inputs.html.erb
new file mode 100644 (file)
index 0000000..a6c4bff
--- /dev/null
@@ -0,0 +1,41 @@
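+<%# Count the CWL workflow inputs that still need a value before this request can be run. %>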
+<% n_inputs = cwl_inputs_required(@object, get_cwl_inputs(@object.mounts[:"/var/lib/cwl/workflow.json"][:content]), [:mounts, :"/var/lib/cwl/cwl.input.json", :content]) %>
+
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+  <div class="form-group">
+    <% workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content] %>
+    <% inputs = get_cwl_inputs(workflow) %>
+    <% inputs.each do |input| %>
+      <label for="#input-<%= cwl_shortname(input[:id]) %>">
+        <%= input[:label] || cwl_shortname(input[:id]) %>
+      </label>
+      <div>
+        <p class="form-control-static">
+          <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+        </p>
+      </div>
+      <p class="help-block">
+        <%= input[:doc] %>
+      </p>
+    <% end %>
+  </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+  <p><i>This workflow does not need any further inputs specified.  Click the "Run" button at the bottom of the page to start the workflow.</i></p>
+<% else %>
+  <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the workflow.</i></p>
+<% end %>
+
+<% if @object.editable? %>
+  <%= content_for :pi_input_form %>
+  <%= link_to(url_for('container_request[state]' => 'Committed'),
+        class: 'btn btn-primary run-pipeline-button',
+        method: :patch
+        ) do %>
+    Run <i class="fa fa-fw fa-play"></i>
+  <% end %>
+<% end %>
+
+<%= render_unreadable_inputs_present %>
diff --git a/apps/workbench/app/views/container_requests/_show_log.html.erb b/apps/workbench/app/views/container_requests/_show_log.html.erb
new file mode 100644 (file)
index 0000000..f623fd6
--- /dev/null
@@ -0,0 +1 @@
+<%= render(partial: 'work_units/show_log', locals: {obj: @object, name: @object[:name] || 'this container'}) %>
index d6d8c67ecc1f21e4faea88d589cddc4ac7a8508d..fc3f7be506e818b5d3cfb7dbab0ae9004f69a809 100644 (file)
@@ -1 +1 @@
-<%= render(partial: 'work_unit/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
diff --git a/apps/workbench/app/views/containers/_show_log.html.erb b/apps/workbench/app/views/containers/_show_log.html.erb
new file mode 100644 (file)
index 0000000..f623fd6
--- /dev/null
@@ -0,0 +1 @@
+<%= render(partial: 'work_units/show_log', locals: {obj: @object, name: @object[:name] || 'this container'}) %>
index 00a55926136172135087e835c64f01a183ebbc22..b6a23719345ad4712eb2a345f55690a6840471d1 100644 (file)
@@ -1,4 +1,4 @@
-<%= render(partial: 'work_unit/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
 
 <div class="panel panel-default">
   <div class="panel-heading">
index 6b1ea03c2ba63ee0dfcaf9d183076b57519df64d..bb5444f238fb89df9eb477d8d8642c1dbbfaf240 100644 (file)
@@ -1,4 +1,4 @@
-<%= render(partial: 'work_unit/show_status', locals: {current_obj: @object, name: @object[:name] || 'this job'}) %>
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this job'}) %>
 
 <div class="panel panel-default">
   <div class="panel-heading">
index 4196558b3c07570c2d0e84b059cba20145b4fa41..b79759f989181564dc380e4dce55bdd5c27f292d 100644 (file)
@@ -9,7 +9,7 @@
        data-object-uuids="<%= @object.uuid %> <%= job_uuids.join(' ') %>"
        ></div>
 
-  <%= render partial: 'work_unit/show_component', locals: {wu: @object.work_unit(@object.name)} %>
+  <%= render partial: 'work_units/show_component', locals: {wu: @object.work_unit(@object.name)} %>
 
 <% else %>
   <%# state is either New or Ready %>
index 6dfa1bc880ed13ee04cf3b0a5d0c57aad5eb1d4c..c02577fcfd28ca3e9a2e552a14af22e12a183620 100644 (file)
@@ -7,19 +7,19 @@
             <span class="pull-right recent-processes-actions">
               <span>
                 <%= link_to(
-                choose_pipeline_templates_path(
-                  title: 'Choose a pipeline to run:',
+                choose_work_unit_templates_path(
+                  title: 'Choose a pipeline or workflow to run:',
                   action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
-                  action_href: pipeline_instances_path,
+                  action_href: work_units_path,
                   action_method: 'post',
-                  action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+                  action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
                 { class: "btn btn-primary btn-xs", remote: true }) do %>
                   <i class="fa fa-fw fa-gear"></i> Run a pipeline...
                 <% end %>
               </span>
               <span>
-                  <%= link_to pipeline_instances_path, class: 'btn btn-default btn-xs' do %>
-                    All pipelines <i class="fa fa-fw fa-arrow-circle-right"></i>
+                  <%= link_to all_processes_path, class: 'btn btn-default btn-xs' do %>
+                    All processes <i class="fa fa-fw fa-arrow-circle-right"></i>
                   <% end %>
               </span>
             </span>
                     Active for <%= render_runtime(wu_time, false) %>
                   <% end %>
 
-                  <span class="pull-right text-overflow-ellipsis" style="max-width: 100%">
-                    <% outputs = wu.outputs %>
-                    <% if outputs.size == 0 %>
-                      No output.
-                    <% elsif outputs.size == 1 %>
-                      <i class="fa fa-fw fa-archive"></i> <%= link_to_if_arvados_object outputs[0], friendly_name: true %>
-                    <% else %>
-                      <%= render partial: 'work_unit/show_outputs', locals: {id: wu.uuid, outputs: outputs, align:"pull-right"} %>
-                    <% end %>
-                  </span>
+                  <%= render partial: 'work_units/show_output', locals: {wu: wu, align: 'pull-right', include_icon: true} %>
                 </div>
               </div>
 
             </div>
             <% else %>
             <div class="dashboard-panel-info-row row-<%=wu.uuid%>">
-              <div class="clearfix">
-                <%= link_to_if_arvados_object p, {friendly_name: true} %>
-                <div class="pull-right" style="width: 40%">
+              <div class="row">
+                <div class="col-md-6">
+                  <%= link_to_if_arvados_object p, {friendly_name: true} %>
+                </div>
+                <% if wu.is_running? %>
+                <div class="col-md-6">
                   <div class="progress" style="margin-bottom: 0px">
                     <% wu.progress %>
                   </div>
                 </div>
+                <% else %>
+                <div class="col-md-2">
+                  <span class="label label-<%=wu.state_bootstrap_class%>"><%=wu.state_label%></span>
+                </div>
+                <% end %>
               </div>
 
               <%
                 <% end %>
               </span>
             <% end %>
-            <span>
-              <%= link_to jobs_path, class: 'btn btn-default btn-xs' do %>
-                All jobs <i class="fa fa-fw fa-arrow-circle-right"></i>
-              <% end %>
-            </span>
           </span>
         </div>
         <div class="panel-body compute-node-summary-pane">
diff --git a/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb b/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb
deleted file mode 100644 (file)
index 3637ef4..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-<%= render_pane 'tab_contents', to_string: true, locals: {
-        limit: 50,
-           filters: [['uuid', 'is_a', ["arvados#job", "arvados#pipelineInstance"]]],
-           sortable_columns: { 'name' => 'jobs.script, pipeline_instances.name', 'description' => 'jobs.description, pipeline_instances.description' }
-    }.merge(local_assigns) %>
index 402ce26f5911e59243bab999916fac11be29d008..d51e1a39c74413fe219d6f1d7bf64c2fec022364 100644 (file)
@@ -1,4 +1,5 @@
 <%= render_pane 'tab_contents', to_string: true, locals: {
-    filters: [['uuid', 'is_a', ["arvados#pipelineTemplate"]]],
-       sortable_columns: { 'name' => 'pipeline_templates.name', 'description' => 'pipeline_templates.description' }
+    limit: 50,
+    filters: [['uuid', 'is_a', ["arvados#pipelineTemplate", "arvados#workflow"]]],
+       sortable_columns: { 'name' => 'pipeline_templates.name, workflows.name', 'description' => 'pipeline_templates.description, workflows.description' }
     }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb b/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb
new file mode 100644 (file)
index 0000000..1ee3070
--- /dev/null
@@ -0,0 +1,5 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+      limit: 50,
+      filters: [['uuid', 'is_a', ["arvados#containerRequest", "arvados#pipelineInstance"]]],
+      sortable_columns: { 'name' => 'container_requests.name, pipeline_instances.name', 'description' => 'container_requests.description, pipeline_instances.description' }
+    }.merge(local_assigns) %>
index 6033a3491051d657bfb470eb351f2df710edb90c..e52d826cf60da778f9b343d1d44578c6cc5b0c7f 100644 (file)
       </ul>
     </div>
     <%= link_to(
-          choose_pipeline_templates_path(
-            title: 'Choose a pipeline to run:',
+          choose_work_unit_templates_path(
+            title: 'Choose a pipeline or workflow to run:',
             action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
-            action_href: pipeline_instances_path,
+            action_href: work_units_path,
             action_method: 'post',
-            action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
-          { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline in this project" }) do %>
+            action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline or workflow in this project" }) do %>
       <i class="fa fa-fw fa-gear"></i> Run a pipeline...
     <% end %>
     <%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: :post, title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
index 4fefa821caafb88be0a7647112514f98fa7c55f8..af7a71dc725e4d4abb9c6eea5b034816b6699015 100644 (file)
@@ -71,7 +71,7 @@
                     </label>
                     <% if entry['type'] == 'select' %>
                       <div class="col-sm-8">
-                        <select class="form-control" name="user[prefs][:profile][:<%=entry['key']%>]">
+                        <select class="form-control" name="user[prefs][profile][<%=entry['key']%>]">
                           <% entry['options'].each do |option| %>
                             <option value="<%=option%>" <%='selected' if option==value%>><%=option%></option>
                           <% end %>
@@ -79,7 +79,7 @@
                       </div>
                     <% else %>
                       <div class="col-sm-8">
-                        <input type="text" class="form-control" name="user[prefs][:profile][:<%=entry['key']%>]" placeholder="<%=entry['form_field_description']%>" value="<%=value%>" ></input>
+                        <input type="text" class="form-control" name="user[prefs][profile][<%=entry['key']%>]" placeholder="<%=entry['form_field_description']%>" value="<%=value%>" ></input>
                       </div>
                     <% end %>
                   </div>
@@ -89,7 +89,7 @@
               <%# If the user has other prefs, we need to preserve them %>
               <% current_user.prefs.each do |key, value| %>
                 <% if key != :profile %>
-                  <input type="hidden" name="user[prefs][:<%=key%>]" value="<%=value.to_json%>">
+                  <input type="hidden" name="user[prefs][<%=key%>]" value="<%=value.to_json%>">
                 <% end %>
               <% end %>
 
diff --git a/apps/workbench/app/views/work_unit/_show_component.html.erb b/apps/workbench/app/views/work_unit/_show_component.html.erb
deleted file mode 100644 (file)
index da3e5a7..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-<%# Work unit status %>
-
-<div class="container-fluid>
-  <div class="row-fluid">
-    <%# Need additional handling for main object display  %>
-    <% if @object.uuid == wu.uuid %>
-    <div class="container-fluid">
-      <div class="pull-right">
-        <div class="container-fluid">
-          <div class="row-fulid pipeline-instance-spacing">
-            <div class="col-md-7">
-            <% if wu.is_running? and wu.child_summary_str %>
-                <%= wu.child_summary_str %>
-            <% end %>
-            </div>
-            <div class="col-md-3">
-              <%= render partial: 'work_unit/progress', locals: {wu: wu} %>
-            </div>
-            <div class="col-md-1">
-              <% if wu.can_cancel? and @object.editable? %>
-                  <%= form_tag "#{wu.uri}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
-                    <%= hidden_field_tag :return_to, url_for(@object) %>
-                    <%= button_tag "Cancel", {class: 'btn btn-xs btn-danger', id: "cancel-obj-button"} %>
-                  <% end %>
-              <% end %>
-            </div>
-          </div>
-        </div>
-      </div>
-    </div>
-    <% end %>
-
-    <div class="col-md-10" >
-      <% if wu.is_paused? %>
-        <p>
-          This <%= wu.title %> is paused. Children that are already running
-          will continue to run, but no new processes will be submitted.
-        </p>
-      <% end %>
-
-      <%= raw(wu.show_runtime) %>
-    </div>
-  </div>
-
-<p>
-  <%= render(partial: 'work_unit/component_detail', locals: {current_obj: wu}) %>
-</p>
-
-<%# Work unit children %>
-
-<%
-  uuids = wu.children.collect {|c| c.uuid}.compact
-  if uuids.any?
-    resource_class = resource_class_for_uuid(uuids.first, friendly_name: true)
-    preload_objects_for_dataclass resource_class, uuids
-  end
-
-  collections = wu.children.collect {|j| j.outputs}.compact
-  collections = collections.flatten.uniq
-  collections.concat wu.children.collect {|j| j.docker_image}.uniq.compact
-  collections_pdhs = collections.select {|x| !(m = CollectionsHelper.match(x)).nil?}.uniq.compact
-  collections_uuids = collections - collections_pdhs
-  preload_collections_for_objects collections_uuids if collections_uuids.any?
-  preload_for_pdhs collections_pdhs if collections_pdhs.any?
-%>
-
-<% if wu.has_unreadable_children %>
-  <%= render(partial: "pipeline_instances/show_components_json",
-             locals: {error_name: "Unreadable components", backtrace: nil, wu: wu}) %>
-<% else %>
-  <% @descendent_count = 0 if !@descendent_count %>
-  <% wu.children.each do |c| %>
-    <% @descendent_count += 1 %>
-    <%= render(partial: 'work_unit/show_child', locals: {current_obj: c, i: @descendent_count, expanded: false}) %>
-  <% end %>
-<% end %>
diff --git a/apps/workbench/app/views/work_unit/_show_status.html.erb b/apps/workbench/app/views/work_unit/_show_status.html.erb
deleted file mode 100644 (file)
index 0c1e80e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-<div class="arv-log-refresh-control"
-     data-load-throttle="15000"
-     ></div>
-<%=
-   render(partial: 'work_unit/show_component', locals: {wu: current_obj.work_unit(name)})
-%>
similarity index 97%
rename from apps/workbench/app/views/work_unit/_component_detail.html.erb
rename to apps/workbench/app/views/work_units/_component_detail.html.erb
index ba9d3cee7b12b66f9f87a4a93f42373bfe861bf0..e15cc443a93ca9062b4edb8bc99554ca42e48c2b 100644 (file)
@@ -27,7 +27,7 @@
                       <% if val.size == 1 %>
                         <%= link_to_arvados_object_if_readable(val[0], 'Output data not available', friendly_name: true) %>
                       <% else %>
-                        <%= render partial: 'work_unit/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
+                        <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
                       <% end %>
                     <% else %>
                       <%= val %>
diff --git a/apps/workbench/app/views/work_units/_show_all_processes.html.erb b/apps/workbench/app/views/work_units/_show_all_processes.html.erb
new file mode 100644 (file)
index 0000000..ea17843
--- /dev/null
@@ -0,0 +1,51 @@
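+<%# All-processes index: a search box plus an infinitely scrolled table of work unit rows. %>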
+<div class="container">
+  <div class="row">
+    <div class="pull-right">
+      <input type="text" class="form-control filterable-control recent-all-processes-filterable-control"
+             placeholder="Search all processes"
+             data-filterable-target="#all-processes-scroll"
+             value="<%= params[:search] %>">
+    </div>
+  </div>
+  <div>
+    <div>
+      <table class="table table-condensed table-fixedlayout arv-recent-all-processes">
+        <colgroup>
+          <col width="25%" />
+          <col width="10%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="5%" />
+        </colgroup>
+
+        <thead>
+          <tr class="contain-align-left">
+            <th>
+              Process
+            </th>
+            <th>
+              Status
+            </th>
+            <th>
+              Owner
+            </th>
+            <th>
+              Created at
+            </th>
+            <th>
+              Output
+            </th>
+            <th>
+            </th>
+          </tr>
+        </thead>
+
+        <tbody data-infinite-scroller="#all-processes-scroll" id="all-processes-scroll"
+               data-infinite-content-href="<%= url_for partial: :all_processes_rows %>" >
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb b/apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb
new file mode 100644 (file)
index 0000000..0652a9d
--- /dev/null
@@ -0,0 +1,23 @@
+<% @objects.each do |obj| %>
+  <% wu = obj.work_unit %>
+  <tr data-object-uuid="<%= wu.uuid %>" >
+    <td>
+      <%= link_to_if_arvados_object obj, friendly_name: true %>
+    </td>
+    <td>
+      <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
+    </td>
+    <td>
+      <%= link_to_if_arvados_object wu.owner_uuid, friendly_name: true %>
+    </td>
+    <td>
+      <%= render_localized_date(wu.created_at) %>
+    </td>
+    <td>
+      <%= render partial: 'work_units/show_output', locals: {wu: wu, align: ''} %>
+    </td>
+    <td>
+      <%= render partial: 'delete_object_button', locals: {object:obj} %>
+    </td>
+  </tr>
+<% end %>
similarity index 87%
rename from apps/workbench/app/views/work_unit/_show_child.html.erb
rename to apps/workbench/app/views/work_units/_show_child.html.erb
index a9c8d2f2268c632c4a1de4b811092fb7bc607265..acf19fd6b4cedc06553c2f9b4cd25d087bcebe65 100644 (file)
@@ -1,7 +1,6 @@
 <div class="panel panel-default">
   <div class="panel-heading">
-    <div class="container-fluid">
-      <div class="row-fluid">
+      <div class="row">
         <div class="col-md-2" style="word-break:break-all;">
           <h4 class="panel-title">
             <a data-toggle="collapse" href="#collapse<%= i %>">
@@ -11,7 +10,7 @@
         </div>
 
         <div class="col-md-2 pipeline-instance-spacing">
-          <%= render partial: 'work_unit/progress', locals: {wu: current_obj} %>
+          <%= render partial: 'work_units/progress', locals: {wu: current_obj} %>
         </div>
 
         <% if not current_obj %>
@@ -48,7 +47,7 @@
                 <% if outputs.size == 1 %>
                   <%= link_to_arvados_object_if_readable(outputs[0], 'Output data not available', link_text: "Output of #{current_obj.label}") %>
                 <% else %>
-                  <%= render partial: 'work_unit/show_outputs', locals: {id: current_obj.uuid, outputs: outputs, align:"pull-right"} %>
+                  <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: outputs, align:"pull-right"} %>
                 <% end %>
               <% else %>
                 No output.
           </div>
         <% end %>
       </div>
-    </div>
   </div>
 
   <div id="collapse<%= i %>" class="panel-collapse collapse <%= if expanded then 'in' end %>">
     <div class="panel-body">
-      <%= render partial: 'work_unit/show_component', locals: {wu: current_obj} %>
+      <%= render partial: 'work_units/show_component', locals: {wu: current_obj} %>
     </div>
   </div>
 </div>
diff --git a/apps/workbench/app/views/work_units/_show_component.html.erb b/apps/workbench/app/views/work_units/_show_component.html.erb
new file mode 100644 (file)
index 0000000..89233cf
--- /dev/null
@@ -0,0 +1,66 @@
+<%# Work unit status %>
+
+<div class="row">
+  <div class="col-md-4">
+    <% if wu.is_paused? %>
+      <p>
+        This <%= wu.title %> is paused. Children that are already running
+        will continue to run, but no new processes will be submitted.
+      </p>
+    <% end %>
+
+    <%= raw(wu.show_runtime) %>
+  </div>
+  <%# Need additional handling for main object display  %>
+  <% if @object.uuid == wu.uuid %>
+    <div class="col-md-3">
+      <% if wu.is_running? and wu.child_summary_str %>
+        <%= wu.child_summary_str %>
+      <% end %>
+    </div>
+    <div class="col-md-3">
+      <%= render partial: 'work_units/progress', locals: {wu: wu} %>
+    </div>
+    <div class="col-md-2">
+      <% if wu.can_cancel? and @object.editable? %>
+        <%= form_tag "#{wu.uri}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
+          <%= hidden_field_tag :return_to, url_for(@object) %>
+          <%= button_tag "Cancel", {class: 'btn btn-xs btn-danger', id: "cancel-obj-button"} %>
+        <% end %>
+      <% end %>
+    </div>
+  <% end %>
+</div>
+
+<p>
+  <%= render(partial: 'work_units/component_detail', locals: {current_obj: wu}) %>
+</p>
+
+<%# Work unit children %>
+
+<%
+  uuids = wu.children.collect {|c| c.uuid}.compact
+  if uuids.any?
+    resource_class = resource_class_for_uuid(uuids.first, friendly_name: true)
+    preload_objects_for_dataclass resource_class, uuids
+  end
+
+  collections = wu.children.collect {|j| j.outputs}.compact
+  collections = collections.flatten.uniq
+  collections.concat wu.children.collect {|j| j.docker_image}.uniq.compact
+  collections_pdhs = collections.select {|x| !(m = CollectionsHelper.match(x)).nil?}.uniq.compact
+  collections_uuids = collections - collections_pdhs
+  preload_collections_for_objects collections_uuids if collections_uuids.any?
+  preload_for_pdhs collections_pdhs if collections_pdhs.any?
+%>
+
+<% if wu.has_unreadable_children %>
+  <%= render(partial: "pipeline_instances/show_components_json",
+             locals: {error_name: "Unreadable components", backtrace: nil, wu: wu}) %>
+<% else %>
+  <% @descendent_count = 0 if !@descendent_count %>
+  <% wu.children.each do |c| %>
+    <% @descendent_count += 1 %>
+    <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: @descendent_count, expanded: false}) %>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_log.html.erb b/apps/workbench/app/views/work_units/_show_log.html.erb
new file mode 100644 (file)
index 0000000..323c9e8
--- /dev/null
@@ -0,0 +1,28 @@
+<% wu = obj.work_unit(name) %>
+
+<% render_log = wu.render_log %>
+<% if render_log %>
+  <div>
+    <% log_url = url_for render_log[:log] %>
+    <p> <a href="<%= log_url %>">Download the log</a> </p>
+    <%= render(partial: render_log[:partial], locals: render_log[:locals]) %>
+  </div>
+<% end %>
+
+<% live_log_lines = wu.live_log_lines(Rails.configuration.running_job_log_records_to_fetch).join("\n") %>
+<% if !render_log or (live_log_lines.size > 0) %>
+<%# Still running, or recently finished and logs are still available from logs table %>
+<%# Show recent logs in terminal window %>
+<h4>Recent logs</h4>
+<div id="event_log_div"
+     class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
+     data-object-uuids="<%= wu.log_object_uuids.join(' ') %>"
+  ><%= live_log_lines %>
+</div>
+
+<%# Applying a long throttle suppresses the auto-refresh of this
+    partial that would normally be triggered by arv-log-event. %>
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_output.html.erb b/apps/workbench/app/views/work_units/_show_output.html.erb
new file mode 100644 (file)
index 0000000..83dabd1
--- /dev/null
@@ -0,0 +1,13 @@
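+<%# A work unit's outputs: a direct link for a single output, the show_outputs dropdown for several. %>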
+<span class="<%=align%> text-overflow-ellipsis" style="max-width: 100%">
+  <% outputs = wu.outputs %>
+  <% if outputs.size == 0 %>
+    No output
+  <% elsif outputs.size == 1 %>
+    <% if defined?(include_icon) && include_icon %>
+      <i class="fa fa-fw fa-archive"></i>
+    <% end %>
+    <%= link_to_if_arvados_object outputs[0], friendly_name: true %>
+  <% else %>
+    <%= render partial: 'work_units/show_outputs', locals: {id: wu.uuid, outputs: outputs, align:align} %>
+  <% end %>
+</span>
diff --git a/apps/workbench/app/views/work_units/_show_status.html.erb b/apps/workbench/app/views/work_units/_show_status.html.erb
new file mode 100644 (file)
index 0000000..4b629c8
--- /dev/null
@@ -0,0 +1,6 @@
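+<%# Refresh the status pane on arv-log-event, at most once every 15 seconds. %>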
+<div class="arv-log-refresh-control"
+     data-load-throttle="15000"
+     ></div>
+<%=
+   render(partial: 'work_units/show_component', locals: {wu: current_obj.work_unit(name)})
+%>
diff --git a/apps/workbench/app/views/work_units/index.html.erb b/apps/workbench/app/views/work_units/index.html.erb
new file mode 100644 (file)
index 0000000..b6e978d
--- /dev/null
@@ -0,0 +1 @@
+<%= render partial: 'work_units/show_all_processes' %>
diff --git a/apps/workbench/app/views/workflows/_show_chooser_preview.html.erb b/apps/workbench/app/views/workflows/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..395dda9
--- /dev/null
@@ -0,0 +1,3 @@
+<div class="col-sm-11 col-sm-push-1 arv-description-in-table">
+  <%= (@object.description if @object.description.present?) || 'No description' %>
+</div>
index 5400debbfdaf55e1f64c004adf70f98ca4037cb1..74d317bdf0ef3c537b0d10101568f377acc86395 100644 (file)
@@ -219,6 +219,10 @@ common:
   # Ask Arvados API server to compress its response payloads.
   api_response_compression: true
 
+  # Timeouts for API requests.
+  api_client_connect_timeout: 120
+  api_client_receive_timeout: 300
+
   # ShellInABox service endpoint URL for a given VM.  If false, do not
   # offer web shell logins.
   #
diff --git a/apps/workbench/config/initializers/lograge.rb b/apps/workbench/config/initializers/lograge.rb
new file mode 100644 (file)
index 0000000..fa19667
--- /dev/null
@@ -0,0 +1,16 @@
+ArvadosWorkbench::Application.configure do
+  config.lograge.enabled = true
+  config.lograge.formatter = Lograge::Formatters::Logstash.new
+  config.lograge.custom_options = lambda do |event|
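+    # Attach the current request id to each request's logged parameters,
+    # truncating the serialized params so one log line stays a manageable size.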
+    exceptions = %w(controller action format id)
+    params = {current_request_id: Thread.current[:current_request_id]}.
+             merge(event.payload[:params].except(*exceptions))
+    params_s = Oj.dump(params)
+    Thread.current[:current_request_id] = nil # Clear for next request
+    if params_s.length > 1000
+      { params_truncated: params_s[0..1000] + "[...]" }
+    else
+      { params: params }
+    end
+  end
+end
diff --git a/apps/workbench/config/initializers/time_format.rb b/apps/workbench/config/initializers/time_format.rb
new file mode 100644 (file)
index 0000000..d476781
--- /dev/null
@@ -0,0 +1,5 @@
+class ActiveSupport::TimeWithZone
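+  # Serialize timestamps in a fixed ISO 8601 format with nanosecond
+  # precision; the zone is expected to be UTC and is written as a literal "Z".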
+  def as_json *args
+    strftime "%Y-%m-%dT%H:%M:%S.%NZ"
+  end
+end
index f14c3ca8456b3b252574027f3b4ba53ad501ff85..95f6edc95267a04e9dbb97fd6d56799575f9ea83 100644 (file)
@@ -7,7 +7,7 @@ $application_config = {}
   path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
   if File.exists? path
     yaml = ERB.new(IO.read path).result(binding)
-    confs = YAML.load(yaml)
+    confs = YAML.load(yaml, deserialize_symbols: true)
     $application_config.merge!(confs['common'] || {})
     $application_config.merge!(confs[::Rails.env.to_s] || {})
   end
index 41614846791c8c091ad8e10ba492a15d7c666e5b..7f7854864190318171750cb498a0dc7d941156c9 100644 (file)
@@ -13,6 +13,9 @@ ArvadosWorkbench::Application.routes.draw do
   get "report_issue_popup" => 'actions#report_issue_popup', :as => :report_issue_popup
   post "report_issue" => 'actions#report_issue', :as => :report_issue
   get "star" => 'actions#star', :as => :star
+  get "all_processes" => 'work_units#index', :as => :all_processes
+  get "choose_work_unit_templates" => 'work_unit_templates#choose', :as => :choose_work_unit_templates
+  resources :work_units
   resources :nodes
   resources :humans
   resources :traits
@@ -98,6 +101,8 @@ ArvadosWorkbench::Application.routes.draw do
     get 'choose', :on => :collection
   end
 
+  resources :workflows
+
   post 'actions' => 'actions#post'
   get 'actions' => 'actions#show'
   get 'websockets' => 'websocket#index'
index ef2a989427948a693c1257e7466f09e523c43c7b..2554ec3ae0c37442d5936ca7423041ea26e5d583 100644 (file)
@@ -334,6 +334,30 @@ class ApplicationControllerTest < ActionController::TestCase
     assert_response 404
   end
 
+  test "requesting to the API server includes client_session_id param" do
+    got_query = nil
+    stub_api_calls
+    stub_api_client.stubs(:post).with do |url, query, opts={}|
+      got_query = query
+      true
+    end.returns fake_api_response('{}', 200, {})
+
+    Rails.configuration.anonymous_user_token =
+      api_fixture("api_client_authorizations", "anonymous", "api_token")
+    @controller = ProjectsController.new
+    test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
+    get(:show, {id: test_uuid})
+
+    assert_includes got_query, 'current_request_id'
+    assert_match /\d{10}-\d{9}/, got_query['current_request_id']
+  end
+
+  test "current_request_id is nil after a request" do
+    @controller = NodesController.new
+    get(:index, {}, session_for(:active))
+    assert_nil Thread.current[:current_request_id]
+  end
+
   [".navbar .login-menu a",
    ".navbar .login-menu .dropdown-menu a"
   ].each do |css_selector|
index 45aab3c8575dd94f801536a25cfe6dbb18f2f43c..1bf967ccfd8c1e8e2da60a10db81b7e241d70fa9 100644 (file)
@@ -112,13 +112,6 @@ class CollectionsControllerTest < ActionController::TestCase
                     "controller did not find logger job")
   end
 
-  test "viewing a collection fetches logs about it" do
-    show_collection(:foo_file, :active)
-    assert_includes(assigns(:logs).map(&:uuid),
-                    api_fixture('logs')['system_adds_foo_file']['uuid'],
-                    "controller did not find related log")
-  end
-
   test "sharing auths available to admin" do
     show_collection("collection_owned_by_active", "admin_trustedclient")
     assert_not_nil assigns(:search_sharing)
diff --git a/apps/workbench/test/controllers/container_requests_controller_test.rb b/apps/workbench/test/controllers/container_requests_controller_test.rb
new file mode 100644 (file)
index 0000000..8dbbbd0
--- /dev/null
@@ -0,0 +1,32 @@
+require 'test_helper'
+
+class ContainerRequestsControllerTest < ActionController::TestCase
+  test "visit completed container request log tab" do
+    use_token 'active'
+
+    cr = api_fixture('container_requests')['completed']
+    container_uuid = cr['container_uuid']
+    container = Container.find(container_uuid)
+
+    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
+    assert_select "a", {:href=>"#{container['log']}/baz"}
+    assert_not_includes @response.body, '<div id="event_log_div"'
+  end
+
+  test "visit running container request log tab" do
+    use_token 'active'
+
+    cr = api_fixture('container_requests')['running']
+    container_uuid = cr['container_uuid']
+    container = Container.find(container_uuid)
+
+    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_includes @response.body, '<div id="event_log_div"'
+    assert_select 'Download the log', false
+  end
+end
diff --git a/apps/workbench/test/controllers/containers_controller_test.rb b/apps/workbench/test/controllers/containers_controller_test.rb
new file mode 100644 (file)
index 0000000..ce37239
--- /dev/null
@@ -0,0 +1,15 @@
+require 'test_helper'
+
+class ContainersControllerTest < ActionController::TestCase
+  test "visit container log" do
+    use_token 'active'
+
+    container = api_fixture('containers')['completed']
+
+    get :show, {id: container['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
+    assert_select "a", {:href=>"#{container['log']}/baz"}
+  end
+end
index 58914a84ac87b5b0949f07d634a826226a2b64af..d31d6e3458a94f629bc21329ba3fa5db1b79061e 100644 (file)
@@ -387,14 +387,80 @@ class ProjectsControllerTest < ActionController::TestCase
   end
 
   [
-    ["jobs", "/jobs"],
-    ["pipelines", "/pipeline_instances"],
-    ["collections", "/collections"],
-  ].each do |target,path|
-    test "test dashboard button all #{target}" do
-      get :index, {}, session_for(:active)
-      assert_includes @response.body, "href=\"#{path}\""
-      assert_includes @response.body, "All #{target}"
+    [:admin, true],
+    [:active, false],
+  ].each do |user, expect_all_nodes|
+    test "in dashboard other index page links as #{user}" do
+      get :index, {}, session_for(user)
+
+      [["processes", "/all_processes"],
+       ["collections", "/collections"],
+      ].each do |target, path|
+        assert_includes @response.body, "href=\"#{path}\""
+        assert_includes @response.body, "All #{target}"
+      end
+
+      if expect_all_nodes
+        assert_includes @response.body, "href=\"/nodes\""
+        assert_includes @response.body, "All nodes"
+      else
+        assert_not_includes @response.body, "href=\"/nodes\""
+        assert_not_includes @response.body, "All nodes"
+      end
+    end
+  end
+
+  test "dashboard should show the correct status for processes" do
+    get :index, {}, session_for(:active)
+    assert_select 'div.panel-body.recent-processes' do
+      [
+        {
+          fixture: 'container_requests',
+          state: 'completed',
+          selectors: [['div.progress', false],
+                      ['span.label.label-success', true, 'Complete']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'uncommitted',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Uncommitted']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'queued',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Queued']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'running',
+          selectors: [['div.progress', true]]
+        },
+        {
+          fixture: 'pipeline_instances',
+          state: 'new_pipeline',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Not started']]
+        },
+        {
+          fixture: 'pipeline_instances',
+          state: 'pipeline_in_running_state',
+          selectors: [['div.progress', true]]
+        },
+      ].each do |c|
+        uuid = api_fixture(c[:fixture])[c[:state]]['uuid']
+        assert_select "div.dashboard-panel-info-row.row-#{uuid}" do
+          if c.include? :selectors
+            c[:selectors].each do |selector, should_show, label|
+              assert_select selector, should_show, "UUID #{uuid} should #{should_show ? '' : 'not '}show '#{selector}'"
+              if should_show and not label.nil?
+                assert_select selector, label, "UUID #{uuid} state label should show #{label}"
+              end
+            end
+          end
+        end
+      end
     end
   end
 
@@ -421,7 +487,7 @@ class ProjectsControllerTest < ActionController::TestCase
 
   [
     ["active", 5, ["aproject", "asubproject"], "anonymously_accessible_project"],
-    ["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_jobs"],
+    ["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_crs"],
     ["admin", 5, ["anonymously_accessible_project", "subproject_in_anonymous_accessible_project"], "aproject"],
   ].each do |user, page_size, tree_segment, unexpected|
     test "build my projects tree for #{user} user and verify #{unexpected} is omitted" do
diff --git a/apps/workbench/test/controllers/work_units_controller_test.rb b/apps/workbench/test/controllers/work_units_controller_test.rb
new file mode 100644 (file)
index 0000000..12e0271
--- /dev/null
@@ -0,0 +1,68 @@
+require 'test_helper'
+
+class WorkUnitsControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls.
+  # Save some time by skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  [
+    ['foo', 10, 25,
+      ['/pipeline_instances/zzzzz-d1hrv-1xfj6xkicf2muk2',
+       '/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-grx15v5mjnsyxk7'],
+      ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+    ['pipeline_with_tagged_collection_input', 1, 1,
+      ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3'],
+      ['/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-pshmckwoma9plh7',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+    ['no_such_match', 0, 0,
+      [],
+      ['/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-pshmckwoma9plh7',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+  ].each do |search_filter, expected_min, expected_max, expected, not_expected|
+    test "all_processes page for search filter '#{search_filter}'" do
+      work_units_index(filters: [['any','@@', search_filter]])
+      assert_response :success
+
+      # Verify that the expected number of processes is found
+      found_count = json_response['content'].scan('<tr').count
+      if expected_min == expected_max
+        assert_equal(expected_min, found_count,
+          "Did not find the expected number of items. Expected #{expected_min} and found #{found_count}")
+      else
+        assert_operator(found_count, :>=, expected_min,
+          "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+        assert_operator(found_count, :<=, expected_max,
+          "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+      end
+
+      # verify that all expected uuid links are found
+      expected.each do |link|
+        assert_match /href="#{link}"/, json_response['content']
+      end
+
+      # verify that none of the not_expected uuid links are found
+      not_expected.each do |link|
+        assert_no_match /href="#{link}"/, json_response['content']
+      end
+    end
+  end
+
+  def work_units_index params
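+    # Request the all_processes rows partial as JSON, encoding array and hash
+    # parameters the way the controller expects them.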
+    params = {
+      partial: :all_processes_rows,
+      format: :json,
+    }.merge(params)
+    encoded_params = Hash[params.map { |k,v|
+                            [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
+                          }]
+    get :index, encoded_params, session_for(:active)
+  end
+end
diff --git a/apps/workbench/test/controllers/workflows_controller_test.rb b/apps/workbench/test/controllers/workflows_controller_test.rb
new file mode 100644 (file)
index 0000000..14db731
--- /dev/null
@@ -0,0 +1,9 @@
+require 'test_helper'
+
+class WorkflowsControllerTest < ActionController::TestCase
+  test "index" do
+    get :index, {}, session_for(:active)
+    assert_response :success
+    assert_includes @response.body, 'Valid workflow with no definition yaml'
+  end
+end
diff --git a/apps/workbench/test/helpers/fake_websocket_helper.rb b/apps/workbench/test/helpers/fake_websocket_helper.rb
new file mode 100644 (file)
index 0000000..91d2575
--- /dev/null
@@ -0,0 +1,18 @@
+module FakeWebsocketHelper
+  def use_fake_websocket_driver
+    Capybara.current_driver = :poltergeist_with_fake_websocket
+  end
+
+  def fake_websocket_event(logdata)
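+    # Build a log event with sensible defaults and hand it straight to the
+    # page's websocket onmessage handler, bypassing any real server.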
+    stamp = Time.now.utc.in_time_zone.as_json
+    defaults = {
+      owner_uuid: api_fixture('users')['system_user']['uuid'],
+      event_at: stamp,
+      created_at: stamp,
+      updated_at: stamp,
+    }
+    event = {data: Oj.dump(defaults.merge(logdata), mode: :compat)}
+    script = '$(window).data("arv-websocket").onmessage('+Oj.dump(event, mode: :compat)+');'
+    page.evaluate_script(script)
+  end
+end
index d58a0315ee54595c2e3d4d10e09c4a822d2e93ae..aae8c418962098c302f9945ab0f7e350607cf03f 100644 (file)
@@ -68,7 +68,7 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
 
     assert_selector 'a', text: 'Description'
     assert_selector 'a', text: 'Data collections'
-    assert_selector 'a', text: 'Jobs and pipelines'
+    assert_selector 'a', text: 'Pipelines and processes'
     assert_selector 'a', text: 'Pipeline templates'
     assert_selector 'a', text: 'Subprojects'
     assert_selector 'a', text: 'Advanced'
@@ -123,39 +123,35 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
   end
 
   [
-    'running_job',
-    'completed_job',
+    'running anonymously accessible cr',
     'pipelineInstance'
-  ].each do |type|
-    test "anonymous user accesses jobs and pipelines tab in shared project and clicks on #{type}" do
+  ].each do |proc|
+    test "anonymous user accesses pipelines and processes tab in shared project and clicks on '#{proc}'" do
       visit PUBLIC_PROJECT
       click_link 'Data collections'
       assert_text 'GNU General Public License'
 
-      click_link 'Jobs and pipelines'
+      click_link 'Pipelines and processes'
       assert_text 'Pipeline in publicly accessible project'
 
-      # click on the specified job
-      if type.include? 'job'
-        verify_job_row type
-      else
+      if proc.include? 'pipeline'
         verify_pipeline_instance_row
+      else
+        verify_container_request_row proc
       end
     end
   end
 
-  def verify_job_row look_for
+  def verify_container_request_row look_for
     within first('tr', text: look_for) do
       click_link 'Show'
     end
     assert_text 'Public Projects Unrestricted public data'
-    assert_text 'script_version'
+    assert_text 'command'
 
     assert_text 'zzzzz-tpzed-xurymjxw79nv3jz' # modified by user
     assert_no_selector 'a', text: 'zzzzz-tpzed-xurymjxw79nv3jz'
-    assert_no_selector 'a', text: 'Move job'
     assert_no_selector 'button', text: 'Cancel'
-    assert_no_selector 'button', text: 'Re-run job'
   end
 
   def verify_pipeline_instance_row
@@ -171,24 +167,40 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
     assert_no_selector 'a', text: 'Re-run options'
   end
 
-  test "anonymous user accesses pipeline templates tab in shared project" do
-    visit PUBLIC_PROJECT
-    click_link 'Data collections'
-    assert_text 'GNU General Public License'
+  [
+    'pipelineTemplate',
+    'workflow'
+  ].each do |type|
+    test "anonymous user accesses pipeline templates tab in shared project and click on #{type}" do
+      visit PUBLIC_PROJECT
+      click_link 'Data collections'
+      assert_text 'GNU General Public License'
 
-    assert_selector 'a', text: 'Pipeline templates'
+      assert_selector 'a', text: 'Pipeline templates'
 
-    click_link 'Pipeline templates'
-    assert_text 'Pipeline template in publicly accessible project'
+      click_link 'Pipeline templates'
+      assert_text 'Pipeline template in publicly accessible project'
+      assert_text 'Workflow with input specifications'
 
-    within first('tr[data-kind="arvados#pipelineTemplate"]') do
-      click_link 'Show'
-    end
+      if type == 'pipelineTemplate'
+        within first('tr[data-kind="arvados#pipelineTemplate"]') do
+          click_link 'Show'
+        end
 
-    # in template page
-    assert_text 'Public Projects Unrestricted public data'
-    assert_text 'script version'
-    assert_no_selector 'a', text: 'Run this pipeline'
+        # in template page
+        assert_text 'Public Projects Unrestricted public data'
+        assert_text 'script version'
+        assert_no_selector 'a', text: 'Run this pipeline'
+      else
+        within first('tr[data-kind="arvados#workflow"]') do
+          click_link 'Show'
+        end
+
+        # in workflow page
+        assert_text 'Public Projects Unrestricted public data'
+        assert_text 'this workflow has inputs specified'
+      end
+    end
   end
 
   test "anonymous user accesses subprojects tab in shared project" do
index 61905f31b2e21b9735755ab4715263f1e93ab6bc..b0a842e8c3da816bd33b531bbb0c6bc9d92b70b0 100644 (file)
@@ -252,12 +252,12 @@ class ApplicationLayoutTest < ActionDispatch::IntegrationTest
       assert_text 'Recent pipelines and processes' # seeing dashboard now
       within('.recent-processes-actions') do
         assert page.has_link?('Run a pipeline')
-        assert page.has_link?('All pipelines')
+        assert page.has_link?('All processes')
       end
 
       within('.recent-processes') do
-        assert_text 'running_with_job'
-        within('.row-zzzzz-d1hrv-runningpipeline') do
+        assert_text 'pipeline_with_job'
+        within('.row-zzzzz-d1hrv-1yfj6xkidf2muk3') do
           assert_text 'foo'
         end
 
@@ -268,7 +268,7 @@ class ApplicationLayoutTest < ActionDispatch::IntegrationTest
 
         assert_text 'completed container request'
         within('.row-zzzzz-xvhdp-cr4completedctr')do
-          assert page.has_link? 'foo_file'
+          assert page.has_link? '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
         end
       end
 
@@ -278,7 +278,6 @@ class ApplicationLayoutTest < ActionDispatch::IntegrationTest
         else
           assert page.has_no_link?('All nodes')
         end
-        assert page.has_link? 'All jobs'
       end
 
       within('.compute-node-summary-pane') do
@@ -287,53 +286,4 @@ class ApplicationLayoutTest < ActionDispatch::IntegrationTest
       end
     end
   end
-
-  [
-    ['jobs', 'running_job_with_components', true],
-    ['pipeline_instances', 'components_is_jobspec', false],
-    ['containers', 'running', false],
-    ['container_requests', 'running', true],
-  ].each do |type, fixture, cancelable|
-    test "cancel button for #{type}/#{fixture}" do
-      if cancelable
-        need_selenium 'to cancel'
-      end
-
-      obj = api_fixture(type)[fixture]
-      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
-
-      assert_text 'created_at'
-      if cancelable
-        assert page.has_button?('Cancel'), 'No Cancel button'
-        click_button 'Cancel'
-        wait_for_ajax
-        assert page.has_no_button?('Cancel'), 'Cancel button not expected after clicking'
-      else
-        assert page.has_no_button?('Cancel'), 'Cancel button not expected'
-      end
-    end
-  end
-
-  [
-    ['jobs', 'running_job_with_components'],
-    ['pipeline_instances', 'has_component_with_completed_jobs'],
-    ['container_requests', 'running'],
-    ['container_requests', 'completed'],
-  ].each do |type, fixture|
-    test "edit description for #{type}/#{fixture}" do
-      obj = api_fixture(type)[fixture]
-      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
-
-      within('.arv-description-as-subtitle') do
-        find('.fa-pencil').click
-        find('.editable-input textarea').set('*Textile description for object*')
-        find('.editable-submit').click
-      end
-      wait_for_ajax
-
-      # verify description
-      assert page.has_no_text? '*Textile description for object*'
-      assert page.has_text? 'Textile description for object'
-    end
-  end
 end
diff --git a/apps/workbench/test/integration/container_requests_test.rb b/apps/workbench/test/integration/container_requests_test.rb
new file mode 100644 (file)
index 0000000..df6584e
--- /dev/null
@@ -0,0 +1,99 @@
+require 'integration_helper'
+
+class ContainerRequestsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  [
+    ['ex_string', 'abc'],
+    ['ex_string_opt', 'abc'],
+    ['ex_int', 12],
+    ['ex_int_opt', 12],
+    ['ex_long', 12],
+    ['ex_double', '12.34', 12.34],
+    ['ex_float', '12.34', 12.34],
+  ].each do |input_id, input_value, expected_value|
+    test "set input #{input_id} with #{input_value}" do
+      request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+      visit page_with_token("active", "/container_requests/#{request_uuid}")
+      selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][#{input_id}]']"
+      find(selector).click
+      find(".editable-input input").set(input_value)
+      find("#editable-submit").click
+      assert_no_selector(".editable-popup")
+      assert_selector(selector, text: expected_value || input_value)
+    end
+  end
+
+  test "select value for boolean input" do
+    request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_boolean]']"
+    find(selector).click
+    within(".editable-input") do
+      select "true"
+    end
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(selector, text: "true")
+  end
+
+  test "select value for enum typed input" do
+    request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_enum]']"
+    find(selector).click
+    within(".editable-input") do
+      select "b"    # second value
+    end
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(selector, text: "b")
+  end
+
+  [
+    'directory_type',
+    'file_type',
+  ].each do |type|
+    test "select value for #{type} input" do
+      request_uuid = api_fixture("container_requests", "uncommitted-with-directory-input", "uuid")
+      visit page_with_token("active", "/container_requests/#{request_uuid}")
+      assert_text 'Provide a value for the following parameter'
+      click_link 'Choose'
+      within('.modal-dialog') do
+        wait_for_ajax
+        collection = api_fixture('collections', 'collection_with_one_property', 'uuid')
+        find("div[data-object-uuid=#{collection}]").click
+        if type == 'file_type'
+          wait_for_ajax
+          find('.preview-selectable', text: 'bar').click
+        end
+        find('button', text: 'OK').click
+      end
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+      assert_text 'This workflow does not need any further inputs'
+      click_link "Run"
+      wait_for_ajax
+      assert_text 'This container is queued'
+    end
+  end
+
+  test "Run button enabled once all required inputs are provided" do
+    request_uuid = api_fixture("container_requests", "uncommitted-with-required-and-optional-inputs", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    assert_text 'Provide a value for the following parameter'
+
+    page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][int_required]']"
+    find(selector).click
+    find(".editable-input input").set(2016)
+    find("#editable-submit").click
+
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+    click_link "Run"
+    wait_for_ajax
+    assert_text 'This container is queued'
+  end
+end
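The editable-field tests above repeat the same click/set/submit sequence against the cwl.input.json mount. If more input types are added, the duplication could be factored out; a sketch, with a hypothetical helper name that is not part of this commit:

    # Hypothetical helper: edit one CWL input through the x-editable
    # popup and wait for the popup to close, returning the selector.
    def set_cwl_input(input_id, value)
      selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][#{input_id}]']"
      find(selector).click
      find(".editable-input input").set(value)
      find("#editable-submit").click
      assert_no_selector(".editable-popup")
      selector
    end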
index 8a16fb8a66b547ae704cc2791f06f330a5268bc9..e980b2ffb98c900c7428df19b379d53a48f08ca1 100644 (file)
@@ -65,13 +65,13 @@ class DownloadTest < ActionDispatch::IntegrationTest
     within "#collection_files" do
       find('[title~=Download]').click
     end
-    wait_for_download 'w a z', 'w a z'
+    wait_for_download 'w a z', 'w a z', timeout: 20
   end
 
-  def wait_for_download filename, expect_data
+  def wait_for_download filename, expect_data, timeout: 3
     data = nil
     tries = 0
-    while tries < 20
+    while tries < timeout*10 && data != expect_data
       sleep 0.1
       tries += 1
       data = File.read(DownloadHelper.path.join filename) rescue nil
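With the timeout now a keyword argument, slow cases opt in per call while everything else keeps the 3-second default; for example (file names and data here are illustrative):

    wait_for_download 'foo.txt', "expected body\n"            # default 3s
    wait_for_download 'big.tar', expected_tar, timeout: 20    # known-slow case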
index 2ab8beb294ab8f2ae99e1c6866d2ad7efbcb0822..3d8cbf0b630ee9ddcd7db17ccc4398c645db2b0c 100644 (file)
@@ -82,7 +82,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       wait_for_ajax
     end
 
-    click_link 'Jobs and pipelines'
+    click_link 'Pipelines and processes'
     find('tr[data-kind="arvados#pipelineInstance"]', text: '(none)').
       find('a', text: 'Show').
       click
index 01e84b1c0219d19551122356006f7081b0d42629..e5877aca6d1e88824b2575ba571eda21de403ee3 100644 (file)
@@ -514,23 +514,23 @@ class ProjectsTest < ActionDispatch::IntegrationTest
 
   [
     ['project_with_10_pipelines', 10, 0],
-    ['project_with_2_pipelines_and_60_jobs', 2, 60],
+    ['project_with_2_pipelines_and_60_crs', 2, 60],
     ['project_with_25_pipelines', 25, 0],
-  ].each do |project_name, num_pipelines, num_jobs|
-    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_jobs} jobs" do
-      item_list_parameter = "Jobs_and_pipelines"
+  ].each do |project_name, num_pipelines, num_crs|
+    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_crs} container requests" do
+      item_list_parameter = "Pipelines_and_processes"
       scroll_setup project_name,
-                   num_pipelines + num_jobs,
+                   num_pipelines + num_crs,
                    item_list_parameter
       # check the general scrolling and the pipelines
       scroll_items_check num_pipelines,
                          "pipeline_",
                          item_list_parameter,
                          'tr[data-kind="arvados#pipelineInstance"]'
-      # Check job count separately
-      jobs_found = page.all('tr[data-kind="arvados#job"]')
-      found_job_count = jobs_found.count
-      assert_equal num_jobs, found_job_count, 'Did not find expected number of jobs'
+      # Check container request count separately
+      crs_found = page.all('tr[data-kind="arvados#containerRequest"]')
+      found_cr_count = crs_found.count
+      assert_equal num_crs, found_cr_count, 'Did not find expected number of container requests'
     end
   end
 
@@ -618,8 +618,8 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       assert_no_selector 'li.disabled', text: 'Copy selected'
     end
 
-    # Go to Jobs and pipelines tab and assert none selected
-    click_link 'Jobs and pipelines'
+    # Go to Pipelines and processes tab and assert none selected
+    click_link 'Pipelines and processes'
     wait_for_ajax
 
     # Since this is the first visit to this tab, all selection options should be disabled
@@ -737,4 +737,27 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       assert_no_selector 'li', text: 'Unrestricted public data'
     end
   end
+
+  [
+    ['Two Part Pipeline Template', 'part-one', 'Provide a value for the following'],
+    ['Workflow with input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+  ].each do |template_name, preview_txt, process_txt|
+    test "run a process using template #{template_name} in a project" do
+      project = api_fixture('groups')['aproject']
+      visit page_with_token 'active', '/projects/' + project['uuid']
+
+      find('.btn', text: 'Run a pipeline').click
+
+      # in the chooser, verify preview and click Next button
+      within('.modal-dialog') do
+        find('.selectable', text: template_name).click
+        assert_text preview_txt
+        find('.btn', text: 'Next: choose inputs').click
+      end
+
+      # in the process page now
+      assert_text process_txt
+      assert_text project['name']
+    end
+  end
 end
index a98fa3542d859be996225e082db21b38c2a6ece2..247d5ed3558c6968b3fc3c2e83e4f2ede3313b33 100644 (file)
@@ -14,22 +14,21 @@ class UserProfileTest < ActionDispatch::IntegrationTest
     profile_config = Rails.configuration.user_profile_form_fields
 
     if !user
-      assert page.has_text?('Please log in'), 'Not found text - Please log in'
+      assert_text('Please log in')
     elsif user['is_active']
       if profile_config && !has_profile
-        assert page.has_text?('Save profile'), 'No text - Save profile'
+        assert_text('Save profile')
         add_profile user
       else
-        assert page.has_text?('Recent pipelines and processes'), 'Not found text - Recent pipelines and processes'
-        assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+        assert_text('Recent pipelines and processes')
+        assert_no_text('Save profile')
       end
     elsif invited
-      assert page.has_text?('Please check the box below to indicate that you have read and accepted the user agreement'),
-        'Not found text - Please check the box below . . .'
-      assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+      assert_text('Please check the box below to indicate that you have read and accepted the user agreement')
+      assert_no_text('Save profile')
     else
-      assert page.has_text?('Your account is inactive'), 'Not found text - Your account is inactive'
-      assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+      assert_text('Your account is inactive')
+      assert_no_text('Save profile')
     end
 
     # If the user has not already seen getting_started modal, it will be shown on first visit.
@@ -47,25 +46,25 @@ class UserProfileTest < ActionDispatch::IntegrationTest
         assert page.has_link?('Log in'), 'Not found link - Log in'
       else
         # my account menu
-        assert(page.has_link?("notifications-menu"), 'no user menu')
+        assert_selector("#notifications-menu")
         page.find("#notifications-menu").click
         within('.dropdown-menu') do
           if user['is_active']
-            assert page.has_no_link?('Not active'), 'Found link - Not active'
-            assert page.has_no_link?('Sign agreements'), 'Found link - Sign agreements'
+            assert_no_selector('a', text: 'Not active')
+            assert_no_selector('a', text: 'Sign agreements')
 
-            assert page.has_link?('Virtual machines'), 'No link - Virtual machines'
-            assert page.has_link?('Repositories'), 'No link - Repositories'
-            assert page.has_link?('Current token'), 'No link - Current token'
-            assert page.has_link?('SSH keys'), 'No link - SSH Keys'
+            assert_selector('a', text: 'Virtual machines')
+            assert_selector('a', text: 'Repositories')
+            assert_selector('a', text: 'Current token')
+            assert_selector('a', text: 'SSH keys')
 
             if profile_config
-              assert page.has_link?('Manage profile'), 'No link - Manage profile'
+              assert_selector('a', text: 'Manage profile')
             else
-              assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+              assert_no_selector('a', text: 'Manage profile')
             end
           end
-          assert page.has_link?('Log out'), 'No link - Log out'
+          assert_selector('a', text: 'Log out')
         end
       end
     end
@@ -73,45 +72,49 @@ class UserProfileTest < ActionDispatch::IntegrationTest
 
   # Check manage profile page and add missing profile to the user
   def add_profile user
-    assert page.has_no_text?('My projects'), 'Found text - My projects'
-    assert page.has_no_text?('Projects shared with me'), 'Found text - Projects shared with me'
+    assert_no_text('My projects')
+    assert_no_text('Projects shared with me')
 
-    assert page.has_text?('Profile'), 'No text - Profile'
-    assert page.has_text?('First Name'), 'No text - First Name'
-    assert page.has_text?('Last Name'), 'No text - Last Name'
-    assert page.has_text?('Identity URL'), 'No text - Identity URL'
-    assert page.has_text?('E-mail'), 'No text - E-mail'
-    assert page.has_text?(user['email']), 'No text - user email'
+    assert_text('Profile')
+    assert_text('First Name')
+    assert_text('Last Name')
+    assert_text('Identity URL')
+    assert_text('E-mail')
+    assert_text(user['email'])
 
     # Using the default profile which has message and one required field
 
     # Save profile without filling in the required field. Expect to be back in this profile page again
     click_button "Save profile"
-    assert page.has_text?('Profile'), 'No text - Profile'
-    assert page.has_text?('First Name'), 'No text - First Name'
-    assert page.has_text?('Last Name'), 'No text - Last Name'
-    assert page.has_text?('Save profile'), 'No text - Save profile'
+    assert_text('Profile')
+    assert_text('First Name')
+    assert_text('Last Name')
+    assert_text('Save profile')
 
     # This time fill in required field and then save. Expect to go to requested page after that.
     profile_message = Rails.configuration.user_profile_form_message
     required_field_title = ''
     required_field_key = ''
     profile_config = Rails.configuration.user_profile_form_fields
-    profile_config.andand.each do |entry|
+    profile_config.each do |entry|
       if entry['required']
         required_field_key = entry['key']
         required_field_title = entry['form_field_title']
+        break
       end
     end
 
     assert page.has_text? profile_message.gsub(/<.*?>/,'')
-    assert page.has_text?(required_field_title), 'No text - configured required field title'
+    assert_text(required_field_title)
 
-    page.find_field('user[prefs][:profile][:'+required_field_key+']').set 'value to fill required field'
+    page.find_field('user[prefs][profile]['+required_field_key+']').set 'value to fill required field'
 
     click_button "Save profile"
     # profile saved and in profile page now with success
-    assert page.has_text?('Thank you for filling in your profile'), 'No text - Thank you for filling'
+    assert_text('Thank you for filling in your profile')
+    assert_selector('input' +
+                    '[name="user[prefs][profile]['+required_field_key+']"]' +
+                    '[value="value to fill required field"]')
     if user['prefs']['getting_started_shown']
       click_link 'Back to work!'
     else
@@ -119,48 +122,37 @@ class UserProfileTest < ActionDispatch::IntegrationTest
     end
 
     # profile saved and in home page now
-    assert page.has_text?('Recent pipelines and processes'), 'No text - Recent pipelines and processes'
+    assert_text('Recent pipelines and processes')
   end
 
   [
-    [nil, nil, false, false],
-    ['inactive', api_fixture('users')['inactive'], true, false],
-    ['inactive_uninvited', api_fixture('users')['inactive_uninvited'], false, false],
-    ['active', api_fixture('users')['active'], true, true],
-    ['admin', api_fixture('users')['admin'], true, true],
-    ['active_no_prefs', api_fixture('users')['active_no_prefs'], true, false],
-    ['active_no_prefs_profile_no_getting_started_shown',
-      api_fixture('users')['active_no_prefs_profile_no_getting_started_shown'], true, false],
-    ['active_no_prefs_profile_with_getting_started_shown',
-      api_fixture('users')['active_no_prefs_profile_with_getting_started_shown'], true, false],
-  ].each do |token, user, invited, has_profile|
-
-    test "visit home page when profile is configured for user #{token}" do
-      # Our test config enabled profile by default. So, no need to update config
-      Rails.configuration.enable_getting_started_popup = true
-
-      if !token
-        visit ('/')
-      else
-        visit page_with_token(token)
-      end
-
-      verify_homepage_with_profile user, invited, has_profile
-    end
+    [nil, false, false],
+    ['inactive', true, false],
+    ['inactive_uninvited', false, false],
+    ['active', true, true],
+    ['admin', true, true],
+    ['active_no_prefs', true, false],
+    ['active_no_prefs_profile_no_getting_started_shown', true, false],
+    ['active_no_prefs_profile_with_getting_started_shown', true, false],
+  ].each do |token, invited, has_profile|
+    [true, false].each do |profile_required|
+      test "visit #{token} home page when profile is #{'not ' if !profile_required}configured" do
+        if !profile_required
+          Rails.configuration.user_profile_form_fields = false
+        end
+        # When a profile is required, no config change is needed: the test
+        # config enables the profile form by default.
+        Rails.configuration.enable_getting_started_popup = true
 
-    test "visit home page when profile not configured for user #{token}" do
-      Rails.configuration.user_profile_form_fields = false
-      Rails.configuration.enable_getting_started_popup = true
+        if token
+          visit page_with_token(token)
+        else
+          visit '/'
+        end
 
-      if !token
-        visit ('/')
-      else
-        visit page_with_token(token)
+        user = token && api_fixture('users')[token]
+        verify_homepage_with_profile user, invited, has_profile
       end
-
-      verify_homepage_with_profile user, invited, has_profile
     end
-
   end
-
 end
index 655ad92c94d1d18f23988990c59cb832a817fb9e..38917f05ae3ad60213afaaae9c2196df706022b7 100644 (file)
@@ -3,64 +3,55 @@ require 'integration_helper'
 class WebsocketTest < ActionDispatch::IntegrationTest
   setup do
     need_selenium "to make websockets work"
+    @dispatch_client = ArvadosApiClient.new
+  end
+
+  def dispatch_log(body)
+    use_token :dispatch1 do
+      @dispatch_client.api('logs', '', log: body)
+    end
   end
 
   test "test page" do
-    visit(page_with_token("admin", "/websockets"))
+    visit(page_with_token("active", "/websockets"))
     fill_in("websocket-message-content", :with => "Stuff")
     click_button("Send")
     assert_text '"status":400'
   end
 
-  test "test live logging" do
-    visit(page_with_token("admin", "/pipeline_instances/zzzzz-d1hrv-9fm8l10i9z2kqc6"))
-    click_link("Log")
-    assert_no_text '123 hello'
-
-    api = ArvadosApiClient.new
-
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
-    api.api("logs", "", {log: {
-                object_uuid: "zzzzz-d1hrv-9fm8l10i9z2kqc6",
-                event_type: "stderr",
-                properties: {"text" => "123 hello"}}})
-    assert_text '123 hello'
-  end
-
+  [
+   ['pipeline_instances', 'pipeline_in_running_state', api_fixture('jobs')['running']],
+   ['jobs', 'running'],
+   ['containers', 'running'],
+   ['container_requests', 'running', api_fixture('containers')['running']],
+  ].each do |controller, view_fixture_name, log_target_fixture|
+    view_fixture = api_fixture(controller)[view_fixture_name]
+    log_target_fixture ||= view_fixture
 
-  [["pipeline_instances", api_fixture("pipeline_instances")['pipeline_with_newer_template']['uuid']],
-   ["jobs", api_fixture("jobs")['running']['uuid']]].each do |c|
-    test "test live logging scrolling #{c[0]}" do
+    test "test live logging and scrolling for #{controller}" do
 
-      controller = c[0]
-      uuid = c[1]
-
-      visit(page_with_token("admin", "/#{controller}/#{uuid}"))
-      click_link("Log")
+      visit(page_with_token("active", "/#{controller}/#{view_fixture['uuid']}\#Log"))
       assert_no_text '123 hello'
 
-      api = ArvadosApiClient.new
-
       text = ""
       (1..1000).each do |i|
         text << "#{i} hello\n"
       end
 
-      Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
-      api.api("logs", "", {log: {
-                  object_uuid: uuid,
-                  event_type: "stderr",
-                  properties: {"text" => text}}})
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "stderr",
+                   properties: {"text" => text})
       assert_text '1000 hello'
 
       # First test that when we're already at the bottom of the page, it scrolls down
       # when a new line is added.
       old_top = page.evaluate_script("$('#event_log_div').scrollTop()")
 
-      api.api("logs", "", {log: {
-                  object_uuid: uuid,
-                  event_type: "stderr",
-                  properties: {"text" => "1001 hello\n"}}})
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "dispatch",
+                   properties: {"text" => "1001 hello\n"})
       assert_text '1001 hello'
 
       # Check that new value of scrollTop is greater than the old one
@@ -71,10 +62,10 @@ class WebsocketTest < ActionDispatch::IntegrationTest
       page.execute_script "$('#event_log_div').scrollTop(30)"
       assert_equal 30, page.evaluate_script("$('#event_log_div').scrollTop()")
 
-      api.api("logs", "", {log: {
-                  object_uuid: uuid,
-                  event_type: "stderr",
-                  properties: {"text" => "1002 hello\n"}}})
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "stdout",
+                   properties: {"text" => "1002 hello\n"})
       assert_text '1002 hello'
 
       # Check that we haven't changed scroll position
@@ -83,26 +74,26 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   end
 
   test "pipeline instance arv-refresh-on-log-event" do
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
     # Do something and check that the pane reloads.
-    p = PipelineInstance.create({state: "RunningOnServer",
-                                  components: {
-                                    c1: {
-                                      script: "test_hash.py",
-                                      script_version: "1de84a854e2b440dc53bf42f8548afa4c17da332"
-                                    }
-                                  }
-                                })
-
-    visit(page_with_token("admin", "/pipeline_instances/#{p.uuid}"))
+    p = use_token :active do
+      PipelineInstance.create(state: "RunningOnServer",
+                              components: {
+                                c1: {
+                                  script: "test_hash.py",
+                                  script_version: "1de84a854e2b440dc53bf42f8548afa4c17da332"
+                                }
+                              })
+    end
+    visit(page_with_token("active", "/pipeline_instances/#{p.uuid}"))
 
     assert_text 'Active'
     assert page.has_link? 'Pause'
     assert_no_text 'Complete'
     assert page.has_no_link? 'Re-run with latest'
 
-    p.state = "Complete"
-    p.save!
+    use_token :dispatch1 do
+      p.update_attributes!(state: 'Complete')
+    end
 
     assert_no_text 'Active'
     assert page.has_no_link? 'Pause'
@@ -111,35 +102,34 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   end
 
   test "job arv-refresh-on-log-event" do
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
     # Do something and check that the pane reloads.
-    p = Job.where(uuid: api_fixture('jobs')['running_will_be_completed']['uuid']).results.first
-
-    visit(page_with_token("admin", "/jobs/#{p.uuid}"))
+    uuid = api_fixture('jobs')['running_will_be_completed']['uuid']
+    visit(page_with_token("active", "/jobs/#{uuid}"))
 
     assert_no_text 'complete'
     assert_no_text 'Re-run job'
 
-    p.state = "Complete"
-    p.save!
+    use_token :dispatch1 do
+      Job.find(uuid).update_attributes!(state: 'Complete')
+    end
 
     assert_text 'complete'
     assert_text 'Re-run job'
   end
 
   test "dashboard arv-refresh-on-log-event" do
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
-
-    visit(page_with_token("admin", "/"))
+    visit(page_with_token("active", "/"))
 
     assert_no_text 'test dashboard arv-refresh-on-log-event'
 
     # Do something and check that the pane reloads.
-    p = PipelineInstance.create({state: "RunningOnServer",
-                                  name: "test dashboard arv-refresh-on-log-event",
-                                  components: {
-                                  }
-                                })
+    use_token :active do
+      p = PipelineInstance.create({state: "RunningOnServer",
+                                    name: "test dashboard arv-refresh-on-log-event",
+                                    components: {
+                                    }
+                                  })
+    end
 
     assert_text 'test dashboard arv-refresh-on-log-event'
   end
@@ -175,13 +165,10 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     text = "2014-11-07_23:33:51 #{uuid} 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys"
 
     assert_triggers_dom_event 'arv-log-event' do
-      use_token :active do
-        api = ArvadosApiClient.new
-        api.api("logs", "", {log: {
-                    object_uuid: uuid,
-                    event_type: "stderr",
-                    properties: {"text" => text}}})
-      end
+      dispatch_log(owner_uuid: api_fixture('jobs')['running']['owner_uuid'],
+                   object_uuid: uuid,
+                   event_type: "stderr",
+                   properties: {"text" => text})
     end
 
     # Graph should have appeared (even if it hadn't above). It's
@@ -213,65 +200,56 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   end
 
   test "test running job with just a few previous log records" do
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
-    job = Job.where(uuid: api_fixture("jobs")['running']['uuid']).results.first
-    visit page_with_token("admin", "/jobs/#{job.uuid}")
-
-    api = ArvadosApiClient.new
+    job = api_fixture("jobs")['running']
 
     # Create just one old log record
-    api.api("logs", "", {log: {
-                object_uuid: job.uuid,
-                event_type: "stderr",
-                properties: {"text" => "Historic log message"}}})
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Historic log message"})
 
-    click_link("Log")
+    visit page_with_token("active", "/jobs/#{job['uuid']}\#Log")
 
     # Expect "all" historic log records because we have less than
     # default Rails.configuration.running_job_log_records_to_fetch count
     assert_text 'Historic log message'
 
     # Create new log record and expect it to show up in log tab
-    api.api("logs", "", {log: {
-                object_uuid: job.uuid,
-                event_type: "stderr",
-                properties: {"text" => "Log message after subscription"}}})
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Log message after subscription"})
     assert_text 'Log message after subscription'
   end
 
   test "test running job with too many previous log records" do
-    Rails.configuration.running_job_log_records_to_fetch = 5
-
-    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
-    job = Job.where(uuid: api_fixture("jobs")['running']['uuid']).results.first
-
-    visit page_with_token("admin", "/jobs/#{job.uuid}")
-
-    api = ArvadosApiClient.new
-
-    # Create Rails.configuration.running_job_log_records_to_fetch + 1 log records
-    (0..Rails.configuration.running_job_log_records_to_fetch).each do |count|
-      api.api("logs", "", {log: {
-                object_uuid: job.uuid,
-                event_type: "stderr",
-                properties: {"text" => "Old log message #{count}"}}})
+    max = 5
+    Rails.configuration.running_job_log_records_to_fetch = max
+    job = api_fixture("jobs")['running']
+
+    # Create max+1 log records
+    (0..max).each do |count|
+      dispatch_log(owner_uuid: job['owner_uuid'],
+                   object_uuid: job['uuid'],
+                   event_type: "stderr",
+                   properties: {"text" => "Old log message #{count}"})
     end
 
-    # Go to log tab, which results in subscribing to websockets
-    click_link("Log")
+    visit page_with_token("active", "/jobs/#{job['uuid']}\#Log")
 
     # Expect all but the first historic log records,
     # because that was one too many than fetch count.
-    (1..Rails.configuration.running_job_log_records_to_fetch).each do |count|
+    (1..max).each do |count|
       assert_text "Old log message #{count}"
     end
     assert_no_text 'Old log message 0'
 
     # Create one more log record after subscription
-    api.api("logs", "", {log: {
-                object_uuid: job.uuid,
-                event_type: "stderr",
-                properties: {"text" => "Life goes on!"}}})
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Life goes on!"})
+
     # Expect it to show up in log tab
     assert_text 'Life goes on!'
   end
diff --git a/apps/workbench/test/integration/work_units_test.rb b/apps/workbench/test/integration/work_units_test.rb
new file mode 100644 (file)
index 0000000..eded53e
--- /dev/null
@@ -0,0 +1,208 @@
+require 'helpers/fake_websocket_helper'
+require 'integration_helper'
+
+class WorkUnitsTest < ActionDispatch::IntegrationTest
+  include FakeWebsocketHelper
+
+  setup do
+    need_javascript
+  end
+
+  test "scroll all_processes page" do
+    expected_min, expected_max, expected, not_expected = [
+      25, 100,
+      ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3',
+       '/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-grx15v5mjnsyxk7',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2',
+       '/container_requests/zzzzz-xvhdp-cr4requestercn2'],
+      ['/pipeline_instances/zzzzz-d1hrv-scarxiyajtshq3l',
+       '/container_requests/zzzzz-xvhdp-oneof60crs00001']
+    ]
+
+    visit page_with_token('active', "/all_processes")
+
+    page_scrolls = expected_max/20 + 2
+    within('.arv-recent-all-processes') do
+      (0..page_scrolls).each do
+        page.driver.scroll_to 0, 999000
+        begin
+          wait_for_ajax
+        rescue
+        end
+      end
+    end
+
+    # Verify that the expected number of processes is found
+    found_items = page.all('tr[data-object-uuid]')
+    found_count = found_items.count
+    if expected_min == expected_max
+      assert_equal(expected_min, found_count,
+        "Did not find the expected number of items. Expected #{expected_min} and found #{found_count}")
+      assert page.has_no_text? 'request failed'
+    else
+      assert_operator(found_count, :>=, expected_min,
+        "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+      assert_operator(found_count, :<=, expected_max,
+        "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+    end
+
+    # Verify that all expected uuid links are found
+    expected.each do |link|
+      assert_selector "a[href=\"#{link}\"]"
+    end
+
+    # Verify that none of the not_expected uuid links are found
+    not_expected.each do |link|
+      assert_no_selector "a[href=\"#{link}\"]"
+    end
+  end
+
+  [
+    ['jobs', 'running_job_with_components', true],
+    ['pipeline_instances', 'components_is_jobspec', false],
+    ['containers', 'running', false],
+    ['container_requests', 'running', true],
+  ].each do |type, fixture, cancelable|
+    test "cancel button for #{type}/#{fixture}" do
+      if cancelable
+        need_selenium 'to cancel'
+      end
+
+      obj = api_fixture(type)[fixture]
+      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
+
+      assert_text 'created_at'
+      if cancelable
+        assert_selector 'button', text: 'Cancel'
+        click_button 'Cancel'
+        wait_for_ajax
+      end
+      assert_no_selector 'button', text: 'Cancel'
+    end
+  end
+
+  [
+    ['jobs', 'running_job_with_components'],
+    ['pipeline_instances', 'has_component_with_completed_jobs'],
+    ['container_requests', 'running'],
+    ['container_requests', 'completed'],
+  ].each do |type, fixture|
+    test "edit description for #{type}/#{fixture}" do
+      obj = api_fixture(type)[fixture]
+      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
+
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('*Textile description for object*')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+
+      # verify description
+      assert page.has_no_text? '*Textile description for object*'
+      assert page.has_text? 'Textile description for object'
+    end
+  end
+
+  [
+    ['Two Part Pipeline Template', 'part-one', 'Provide a value for the following'],
+    ['Workflow with input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+  ].each do |template_name, preview_txt, process_txt|
+    test "run a process using template #{template_name} from dashboard" do
+      visit page_with_token('admin')
+      assert_text 'Recent pipelines and processes' # seeing dashboard now
+
+      within('.recent-processes-actions') do
+        assert page.has_link?('All processes')
+        find('a', text: 'Run a pipeline').click
+      end
+
+      # in the chooser, verify preview and click Next button
+      within('.modal-dialog') do
+        find('.selectable', text: template_name).click
+        assert_text preview_txt
+        find('.btn', text: 'Next: choose inputs').click
+      end
+
+      # in the process page now
+      assert_text process_txt
+      assert_selector 'a', text: template_name
+    end
+  end
+
+  test 'display container state changes in Container Request live log' do
+    use_fake_websocket_driver
+    c = api_fixture('containers')['queued']
+    cr = api_fixture('container_requests')['queued']
+    visit page_with_token('active', '/container_requests/'+cr['uuid'])
+    click_link('Log')
+
+    # The attrs of the "terminal window" text div in the log tab
+    # indicates which objects' events are worth displaying. Events
+    # that arrive too early (before that div exists) are not
+    # shown. For the user's sake, these early logs should also be
+    # retrieved and shown one way or another -- but in this particular
+    # test, we are only interested in logs that arrive by
+    # websocket. Therefore, to avoid races, we wait for the log tab to
+    # display before sending any events.
+    assert_text 'Recent logs'
+
+    [[{
+        event_type: 'dispatch',
+        properties: {
+          text: "dispatch logged a fake message\n",
+        },
+      }, "dispatch logged"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Locked'},
+          new_attributes: {state: 'Queued'},
+        },
+      }, "Container #{c['uuid']} was returned to the queue"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Queued'},
+          new_attributes: {state: 'Locked'},
+        },
+      }, "Container #{c['uuid']} was taken from the queue by a dispatch process"],
+     [{
+        event_type: 'crunch-run',
+        properties: {
+          text: "according to fake crunch-run,\nsome setup stuff happened on the compute node\n",
+        },
+      }, "setup stuff happened"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Locked'},
+          new_attributes: {state: 'Running'},
+        },
+      }, "Container #{c['uuid']} started"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Running'},
+          new_attributes: {state: 'Complete', exit_code: 1},
+        },
+      }, "Container #{c['uuid']} finished with exit code 1 (failure)"],
+     # It's unrealistic for state to change again once it's Complete,
+     # but the logging code doesn't care, so we do it to keep the test
+     # simple.
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Running'},
+          new_attributes: {state: 'Cancelled'},
+        },
+      }, "Container #{c['uuid']} was cancelled"],
+    ].each do |send_event, expect_log_text|
+      assert_no_text(expect_log_text)
+      fake_websocket_event(send_event.merge(object_uuid: c['uuid']))
+      assert_text(expect_log_text)
+    end
+  end
+end
index 785912d3242e303825fce26975fa38b314ac1e88..f0c811aac20d5fb28720783ee97adc7428624fbd 100644 (file)
@@ -5,10 +5,20 @@ require 'uri'
 require 'yaml'
 
 def available_port for_what
-  Addrinfo.tcp("0.0.0.0", 0).listen do |srv|
-    port = srv.connect_address.ip_port
-    STDERR.puts "Using port #{port} for #{for_what}"
-    return port
+  begin
+    Addrinfo.tcp("0.0.0.0", 0).listen do |srv|
+      port = srv.connect_address.ip_port
+      # Selenium needs an additional locking port, check if it's available
+      # and retry if necessary.
+      if for_what == 'selenium'
+        locking_port = port - 1
+        Addrinfo.tcp("0.0.0.0", locking_port).listen.close
+      end
+      STDERR.puts "Using port #{port} for #{for_what}"
+      return port
+    end
+  rescue Errno::EADDRINUSE, Errno::EACCES
+    retry
   end
 end
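A hypothetical call site, to show the intent: the caller binds the returned port immediately, so only a small race window remains, and for selenium the port below the returned one is verified free as well:

    selenium_port = available_port('selenium')   # also checks selenium_port - 1
    api_port      = available_port('API server')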
 
@@ -34,6 +44,11 @@ Capybara.register_driver :poltergeist_debug do |app|
   Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(inspector: true)
 end
 
+Capybara.register_driver :poltergeist_with_fake_websocket do |app|
+  js = File.expand_path '../support/fake_websocket.js', __FILE__
+  Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(extensions: [js])
+end
+
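A test opts in to the stubbed socket by switching drivers, which is presumably what FakeWebsocketHelper#use_fake_websocket_driver does:

    Capybara.current_driver = :poltergeist_with_fake_websocket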
 Capybara.register_driver :poltergeist_without_file_api do |app|
   js = File.expand_path '../support/remove_file_api.js', __FILE__
   Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(extensions: [js])
index 3adece15c53b8f9fc134fda2e53d1d8d2ae02083..8b51b3fa7de791a23e129005ad51de01c597b769 100644 (file)
@@ -42,7 +42,7 @@ class CollectionsPerfTest < ActionDispatch::IntegrationTest
       visit page_with_token('active', "/collections/#{new_collection.uuid}")
       Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
 
-      assert_text new_collection.uuid
+      assert_selector "input[value=\"#{new_collection.uuid}\"]"
       assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
     end
   end
@@ -62,7 +62,7 @@ class CollectionsPerfTest < ActionDispatch::IntegrationTest
       visit page_with_token('active', "/collections/#{new_collection.uuid}")
       Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
 
-      assert_text new_collection.uuid
+      assert_selector "input[value=\"#{new_collection.uuid}\"]"
       assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
 
       # edit description
diff --git a/apps/workbench/test/support/fake_websocket.js b/apps/workbench/test/support/fake_websocket.js
new file mode 100644 (file)
index 0000000..b10f10f
--- /dev/null
@@ -0,0 +1,15 @@
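+// Stub WebSocket for Poltergeist tests: no connection is ever opened.
+// onopen fires on the next tick, and tests deliver incoming messages by
+// invoking the socket's onmessage handler directly via
+// page.evaluate_script (see FakeWebsocketHelper#fake_websocket_event).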
+var sockets = [];
+window.WebSocket = function(url) {
+    sockets.push(this);
+    window.setTimeout(function() {
+        sockets.map(function(s) {
+            s.onopen();
+        });
+        sockets.splice(0);
+    }, 1);
+}
+
+window.WebSocket.prototype.send = function(msg) {
+    // Uncomment for debugging:
+    // console.log("fake WebSocket: send: "+msg);
+}
index 78ef2d21f1a15a106c000710c440984b5a210b16..72b5a89e58a0c8b0c7cd1576d65f92741772963f 100644 (file)
@@ -31,15 +31,17 @@ class ActiveSupport::TestCase
   # Note: You'll currently still have to declare fixtures explicitly
   # in integration tests -- they do not yet inherit this setting
   fixtures :all
-  def use_token token_name
-    was = Thread.current[:arvados_api_token]
+  def use_token(token_name)
+    user_was = Thread.current[:user]
+    token_was = Thread.current[:arvados_api_token]
     auth = api_fixture('api_client_authorizations')[token_name.to_s]
     Thread.current[:arvados_api_token] = auth['api_token']
     if block_given?
       begin
         yield
       ensure
-        Thread.current[:arvados_api_token] = was
+        Thread.current[:user] = user_was
+        Thread.current[:arvados_api_token] = token_was
       end
     end
   end
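Because the ensure clause now restores both the cached user and the token, the block form nests safely; a sketch:

    use_token :active do
      use_token :admin do
        # API calls here run with the admin fixture's token
      end
      # token and Thread.current[:user] are restored to :active here
    end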
@@ -81,7 +83,7 @@ module ApiFixtureLoader
         file = file[0, trim_index] if trim_index
         YAML.load(file)
       end
-      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key] }
+      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key].deep_dup }
     end
   end
   def api_fixture(name, *keys)
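The deep_dup matters because the parsed fixture files are memoized in a class variable, so a test that mutated a returned hash used to poison every later lookup. A sketch of the now-safe pattern, assuming the 'running' job fixture's state is Running:

    job = api_fixture('jobs', 'running')   # fresh deep copy on every call
    job['state'] = 'Complete'              # mutates only this copy
    assert_equal 'Running', api_fixture('jobs', 'running')['state']
    # Note: the zero-key form api_fixture('jobs') still returns the shared
    # top-level hash, so pass at least one key before mutating.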
index c737982fc664db1e9dc5a9408383584c23bad251..550f2188c1a98a6762871abde0c438a9fb505d84 100644 (file)
@@ -12,6 +12,10 @@ class WorkUnitTest < ActiveSupport::TestCase
     [PipelineInstance, 'pipeline_with_tagged_collection_input', "pwu", 1, "Ready", nil, 0.0],
     [Container, 'requester', 'cwu', 1, "Complete", true, 1.0],
     [ContainerRequest, 'cr_for_requester', 'cwu', 1, "Complete", true, 1.0],
+    [ContainerRequest, 'queued', 'cwu', 0, "Queued", nil, 0.0],   # priority 1
+    [ContainerRequest, 'canceled_with_queued_container', 'cwu', 0, "Ready", nil, 0.0],
+    [ContainerRequest, 'canceled_with_locked_container', 'cwu', 0, "Ready", nil, 0.0],
+    [ContainerRequest, 'canceled_with_running_container', 'cwu', 0, "Running", nil, 0.0],
   ].each do |type, fixture, label, num_children, state, success, progress|
     test "children of #{fixture}" do
       use_token 'active'
@@ -35,6 +39,19 @@ class WorkUnitTest < ActiveSupport::TestCase
     end
   end
 
+  [
+    ['cr_for_failed', 'Failed', 33],
+    ['completed', 'Complete', 0],
+  ].each do |cr_fixture, state, exit_code|
+    test "Completed ContainerRequest state = #{state} with exit_code = #{exit_code}" do
+      use_token 'active'
+      obj = find_fixture(ContainerRequest, cr_fixture)
+      wu = obj.work_unit
+      assert_equal state, wu.state_label
+      assert_equal exit_code, wu.exit_code
+    end
+  end
+
   [
     [Job, 'running_job_with_components', 1, 1, nil],
     [Job, 'queued', nil, nil, 1],
@@ -91,4 +108,18 @@ class WorkUnitTest < ActiveSupport::TestCase
       end
     end
   end
+
+  test 'can_cancel?' do
+    use_token 'active' do
+      assert find_fixture(Job, 'running').work_unit.can_cancel?
+      refute find_fixture(Container, 'running').work_unit.can_cancel?
+      assert find_fixture(ContainerRequest, 'running').work_unit.can_cancel?
+    end
+    use_token 'spectator' do
+      refute find_fixture(ContainerRequest, 'running_anonymous_accessible').work_unit.can_cancel?
+    end
+    use_token 'admin' do
+      assert find_fixture(ContainerRequest, 'running_anonymous_accessible').work_unit.can_cancel?
+    end
+  end
 end
diff --git a/build/README b/build/README
new file mode 100644 (file)
index 0000000..4182544
--- /dev/null
@@ -0,0 +1,30 @@
+Scripts in this directory:
+
+run-tests.sh                             Run unit and integration test suite.
+
+run-build-test-packages-one-target.sh    Entry point, wraps
+                                         run-build-packages-one-target.sh to
+                                         perform package building and testing
+                                         inside Docker.
+
+run-build-packages-one-target.sh         Build packages for one target inside Docker.
+
+run-build-packages-all-targets.sh        Run run-build-packages-one-target.sh
+                                         for every target.
+
+run-build-packages.sh                    Actually build the packages. Intended
+                                         to run inside a Docker container with
+                                         a proper build environment.
+
+run-build-packages-sso.sh                Build single-sign-on server packages.
+
+run-build-packages-python-and-ruby.sh    Build Python and Ruby packages suitable
+                                         for upload to PyPI and RubyGems.
+
+run-build-docker-images.sh               Build arvbox Docker images.
+
+run-build-docker-jobs-image.sh           Build arvados/jobs Docker image.
+
+run-library.sh                           A library of functions shared by the
+                                         various scripts in this directory.
\ No newline at end of file
diff --git a/build/go-python-package-scripts/postinst b/build/go-python-package-scripts/postinst
new file mode 100755 (executable)
index 0000000..051c8bd
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+set -e
+
+# NOTE: This package name detection will only work on Debian.
+# If this postinst script ever starts doing work on Red Hat,
+# we'll need to adapt this code accordingly.
+script="$(basename "${0}")"
+pkg="${script%.postinst}"
+systemd_unit="${pkg}.service"
+
+case "${1}" in
+    configure)
+        if [ -d /lib/systemd/system ]
+        then
+            # Python packages put all data files in /usr, so we copy
+            # them to /lib at install time.
+            py_unit="/usr/share/doc/${pkg}/${pkg}.service"
+            if [ -e "${py_unit}" ]
+            then
+                cp "${py_unit}" /lib/systemd/system/
+            fi
+        fi
+
+        if [ -e /run/systemd/system ]; then
+            eval "$(systemctl -p UnitFileState show "${systemd_unit}")"
+            case "${UnitFileState}" in
+                disabled)
+                    # Failing to enable or start the service is not a
+                    # package error, so don't let errors here
+                    # propagate up.
+                    systemctl enable "${systemd_unit}" || true
+                    systemctl start "${systemd_unit}" || true
+                    ;;
+                enabled)
+                    systemctl daemon-reload || true
+                    systemctl reload-or-try-restart "${systemd_unit}" || true
+                    ;;
+            esac
+        fi
+        ;;
+esac
diff --git a/build/go-python-package-scripts/prerm b/build/go-python-package-scripts/prerm
new file mode 100755 (executable)
index 0000000..c6ec18c
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+# NOTE: This package name detection will only work on Debian.
+# If this prerm script ever starts doing work on Red Hat,
+# we'll need to adapt this code accordingly.
+script="$(basename "${0}")"
+pkg="${script%.prerm}"
+systemd_unit="${pkg}.service"
+
+case "${1}" in
+    remove)
+        if [ -e /run/systemd/system ]; then
+            systemctl stop "${systemd_unit}" || true
+            systemctl disable "${systemd_unit}" || true
+        fi
+
+        # Unit files from Python packages get installed by postinst, so
+        # we have to remove them explicitly here.
+        py_unit="/usr/share/doc/${pkg}/${pkg}.service"
+        if [ -e "${py_unit}" ]
+        then
+            rm "/lib/systemd/system/${pkg}.service" || true
+        fi
+        ;;
+esac
index 2180b871f7700d0a918f6557269177b2d2ff8217..18694ed1844af6cb2fa87dd5f12b863df6402e7c 100644 (file)
@@ -24,7 +24,7 @@ ubuntu1404/generated: common-generated-all
        test -d ubuntu1404/generated || mkdir ubuntu1404/generated
        cp -rlt ubuntu1404/generated common-generated/*
 
-GOTARBALL=go1.6.2.linux-amd64.tar.gz
+GOTARBALL=go1.7.1.linux-amd64.tar.gz
 
 common-generated-all: common-generated/$(GOTARBALL)
 
index 570dde162c466d3dbdf105316af242e0f00f3710..8ea81f45bd0ceaa17d08a9a7ff90719072b2ff08 100644 (file)
@@ -5,7 +5,7 @@ MAINTAINER Brett Smith <brett@curoverse.com>
 RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install RVM
@@ -26,7 +26,8 @@ RUN scl enable python33 "easy_install-3.3 pip" && scl enable python27 "easy_inst
 RUN ln -s /usr/lib64/liblzma.so.0 /usr/lib64/lzma.so.5
 
 RUN cd /tmp && \
-    curl -OL 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' && \
+    (curl -OLf 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' || \
+     curl -OLf 'http://repoforge.eecs.wsu.edu/redhat/el6/en/x86_64/rpmforge/RPMS/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm') && \
     rpm -ivh rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm && \
     sed -i 's/enabled = 0/enabled = 1/' /etc/yum.repos.d/rpmforge.repo
 
index 311aaa26638ad005efd6116020c0a1c690acf9e4..4fcd640cbbd9859e23ef4b8cd3a7c7dedef2c2f8 100644 (file)
@@ -5,7 +5,7 @@ MAINTAINER Brett Smith <brett@curoverse.com>
 RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install RVM
index ddad5426046f19de1e1c7821b8eb8031184df7e5..7632c944224149e178a13132f8b7365181b343bf 100644 (file)
@@ -13,7 +13,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 ENV WORKSPACE /arvados
index 80f06a224bb4516ce483b39a39829916985014f7..977cd2462cac25ca46c649d0102015bbe43ebc9d 100644 (file)
@@ -13,7 +13,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 ENV WORKSPACE /arvados
index 2f628b0d1f91db8a14fa15bc99e6f7cb9eb30756..b0dd9065a0bf7b892d615c9a9be2f595b1c18163 100644 (file)
@@ -13,7 +13,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 ENV WORKSPACE /arvados
index b9c003ac796613c631dcb070a6fc84a6257e7228..91c5e5b46eb85aab20ae3cf40dd2c5f2518c66e9 100644 (file)
@@ -13,7 +13,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.6.2.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.7.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 ENV WORKSPACE /arvados
index 874820a941793d28f60cb026c93840b39607863c..d38507a2416bbfb38cdb735339b7d5c9a297bcd3 100644 (file)
@@ -14,7 +14,8 @@ RUN touch /var/lib/rpm/* && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 RUN cd /tmp && \
-    curl -OL 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' && \
+    (curl -OLf 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' || \
+     curl -OLf 'http://repoforge.eecs.wsu.edu/redhat/el6/en/x86_64/rpmforge/RPMS/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm') && \
     rpm -ivh rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm && \
     sed -i 's/enabled = 0/enabled = 1/' /etc/yum.repos.d/rpmforge.repo
 
index 0a5841dae23b6e7479437fe5413324199726597e..d03169457e7d3a1a33a535e50deed79b3f398828 100755 (executable)
@@ -125,7 +125,7 @@ timer_reset
 # clean up the docker build environment
 cd "$WORKSPACE"
 
-tools/arvbox/bin/arvbox build dev
+tools/arvbox/bin/arvbox rebuild localdemo
 ECODE=$?
 
 if [[ "$ECODE" != "0" ]]; then
@@ -133,7 +133,7 @@ if [[ "$ECODE" != "0" ]]; then
     EXITCODE=$(($EXITCODE + $ECODE))
 fi
 
-tools/arvbox/bin/arvbox build localdemo
+tools/arvbox/bin/arvbox build dev
 
 ECODE=$?
 
@@ -151,8 +151,8 @@ timer_reset
 if [[ "$ECODE" != "0" ]]; then
     title "upload arvados images SKIPPED because build failed"
 else
-    if [[ $upload == true ]]; then 
-        ## 20150526 nico -- *sometimes* dockerhub needs re-login 
+    if [[ $upload == true ]]; then
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login
         ## even though credentials are already in .dockercfg
         docker login -u arvados
 
index 15f788163ed604ea4592daae668c3dbe9e888923..22f6f54288741d2ed4723e9260234b469fb24591 100755 (executable)
@@ -118,17 +118,11 @@ timer_reset
 
 # clean up the docker build environment
 cd "$WORKSPACE"
-cd docker
-rm -f jobs-image
-rm -f config.yml
-
-# Get test config.yml file
-cp $HOME/docker/config.yml .
-
+cd docker/jobs
 if [[ ! -z "$tags" ]]; then
-  COMMIT=${tags/,*/} ./build.sh jobs-image
+    docker build --build-arg COMMIT=${tags/,*/} -t arvados/jobs .
 else
-  ./build.sh jobs-image
+    docker build -t arvados/jobs .
 fi
 
 ECODE=$?
@@ -147,8 +141,8 @@ timer_reset
 if [[ "$ECODE" != "0" ]]; then
     title "upload arvados images SKIPPED because build failed"
 else
-    if [[ $upload == true ]]; then 
-        ## 20150526 nico -- *sometimes* dockerhub needs re-login 
+    if [[ $upload == true ]]; then
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login
         ## even though credentials are already in .dockercfg
         docker login -u arvados
 
index 759af048a4d58a29806fbceb1f60da62c4a539df..53dcec4c4f3f109490f3896ae353585b19926094 100755 (executable)
@@ -100,30 +100,33 @@ case "$TARGET" in
         FORMAT=deb
         PYTHON_BACKPORTS=(python-gflags==2.0 google-api-python-client==1.4.2 \
             oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
-            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            rsa uritemplate httplib2 ws4py pykka six  \
             ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
             'pycurl<7.21.5' contextlib2 pyyaml 'rdflib>=4.2.0' \
-            shellescape mistune typing avro ruamel.ordereddict)
+            shellescape mistune typing avro ruamel.ordereddict
+            cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
         ;;
     debian8)
         FORMAT=deb
         PYTHON_BACKPORTS=(python-gflags==2.0 google-api-python-client==1.4.2 \
             oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
-            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            rsa uritemplate httplib2 ws4py pykka six  \
             ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
             'pycurl<7.21.5' pyyaml 'rdflib>=4.2.0' \
-            shellescape mistune typing avro ruamel.ordereddict)
+            shellescape mistune typing avro ruamel.ordereddict
+            cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
         ;;
     ubuntu1204)
         FORMAT=deb
         PYTHON_BACKPORTS=(python-gflags==2.0 google-api-python-client==1.4.2 \
             oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
-            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            rsa uritemplate httplib2 ws4py pykka six  \
             ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
             contextlib2 'pycurl<7.21.5' pyyaml 'rdflib>=4.2.0' \
-            shellescape mistune typing avro isodate ruamel.ordereddict)
+            shellescape mistune typing avro isodate ruamel.ordereddict
+            cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
         ;;
     ubuntu1404)
@@ -131,7 +134,8 @@ case "$TARGET" in
         PYTHON_BACKPORTS=(pyasn1==0.1.7 pyasn1-modules==0.0.5 llfuse==0.41.1 ciso8601 \
             google-api-python-client==1.4.2 six uritemplate oauth2client==1.5.2 httplib2 \
             rsa 'pycurl<7.21.5' backports.ssl_match_hostname pyyaml 'rdflib>=4.2.0' \
-            shellescape mistune typing avro ruamel.ordereddict)
+            shellescape mistune typing avro ruamel.ordereddict
+            cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 requests websocket-client)
         ;;
     centos6)
@@ -146,12 +150,12 @@ case "$TARGET" in
         PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/site-packages
         PYTHON_BACKPORTS=(python-gflags==2.0 google-api-python-client==1.4.2 \
             oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
-            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            rsa uritemplate httplib2 ws4py pykka six  \
             ciso8601 pycrypto backports.ssl_match_hostname 'pycurl<7.21.5' \
-            python-daemon lockfile llfuse==0.41.1 'pbr<1.0' pyyaml \
+            python-daemon llfuse==0.41.1 'pbr<1.0' pyyaml \
             'rdflib>=4.2.0' shellescape mistune typing avro requests \
-            isodate pyparsing sparqlwrapper html5lib keepalive \
-            ruamel.ordereddict)
+            isodate pyparsing sparqlwrapper html5lib==0.9999999 keepalive \
+            ruamel.ordereddict cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
         export PYCURL_SSL_LIBRARY=nss
         ;;
@@ -166,12 +170,12 @@ case "$TARGET" in
         PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/site-packages
         PYTHON_BACKPORTS=(python-gflags==2.0 google-api-python-client==1.4.2 \
             oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
-            rsa uritemplate httplib2 ws4py pykka pyexecjs jsonschema \
+            rsa uritemplate httplib2 ws4py pykka  \
             ciso8601 pycrypto 'pycurl<7.21.5' \
             python-daemon llfuse==0.41.1 'pbr<1.0' pyyaml \
             'rdflib>=4.2.0' shellescape mistune typing avro \
-            isodate pyparsing sparqlwrapper html5lib keepalive \
-            ruamel.ordereddict)
+            isodate pyparsing sparqlwrapper html5lib==0.9999999 keepalive \
+            ruamel.ordereddict cachecontrol)
         PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
         export PYCURL_SSL_LIBRARY=nss
         ;;
@@ -444,10 +448,12 @@ cd $WORKSPACE/packages/$TARGET
 rm -rf "$WORKSPACE/sdk/cwl/build"
 fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/cwl/arvados_cwl_runner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados CWL runner" --iteration 3
 
+fpm_build lockfile "" "" python 0.12.2 --epoch 1
+
 # schema_salad. This is a python dependency of arvados-cwl-runner,
 # but we can't use the usual PYTHONPACKAGES way to build this package due to the
-# intricacies of how version numbers get generated in setup.py: we need version
-# 1.7.20160316203940. If we don't explicitly list that version with the -v
+# intricacies of how version numbers get generated in setup.py: we need a specific version,
+# e.g. 1.7.20160316203940. If we don't explicitly list that version with the -v
 # argument to fpm, and instead specify it as schema_salad==1.7.20160316203940, we get
 # a package with version 1.7. That's because our gittagger hack is not being
 # picked up by self.distribution.get_version(), which is called from
@@ -459,14 +465,18 @@ fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curover
 # So we build this thing separately.
 #
 # Ward, 2016-03-17
-fpm_build schema_salad "" "" python 1.12.20160610104117
+fpm_build schema_salad "" "" python 1.18.20160930145650 --depends "${PYTHON2_PKG_PREFIX}-lockfile >= 1:0.12.2-2"
 
 # And schema_salad now depends on ruamel-yaml, which apparently has a braindead setup.py that requires special arguments to build (otherwise, it aborts with 'error: you have to install with "pip install ."'). Sigh.
 # Ward, 2016-05-26
-fpm_build ruamel.yaml "" "" python "" --python-setup-py-arguments "--single-version-externally-managed"
+fpm_build ruamel.yaml "" "" python 0.12.4 --python-setup-py-arguments "--single-version-externally-managed"
+
+# Dependency of cwltool.  Fpm doesn't produce a package with the correct version
+# number unless we build it explicitly
+fpm_build cwltest "" "" python 1.0.20160907111242
 
 # And for cwltool we have the same problem as for schema_salad. Ward, 2016-03-17
-fpm_build cwltool "" "" python 1.0.20160609160402
+fpm_build cwltool "" "" python 1.0.20160930152149
 
 # FPM eats the trailing .0 in the python-rdflib-jsonld package when built with 'rdflib-jsonld>=0.3.0'. Force the version. Ward, 2016-03-25
 fpm_build rdflib-jsonld "" "" python 0.3.0
index 32cf2057f19d3608ab530b68dbf5d4e46053a016..73a99dabd7b3626582a418040e1ce6713a68ada9 100755 (executable)
@@ -100,7 +100,18 @@ package_go_binary() {
 
     cd $WORKSPACE/packages/$TARGET
     go get "git.curoverse.com/arvados.git/$src_path"
-    fpm_build "$GOPATH/bin/$basename=/usr/bin/$prog" "$prog" 'Curoverse, Inc.' dir "$version" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=$description" "$WORKSPACE/$license_file=/usr/share/doc/$prog/$license_file"
+
+    declare -a switches=()
+    systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
+    if [[ -e "${systemd_unit}" ]]; then
+        switches+=(
+            --after-install "${WORKSPACE}/build/go-python-package-scripts/postinst"
+            --before-remove "${WORKSPACE}/build/go-python-package-scripts/prerm"
+            "${systemd_unit}=/lib/systemd/system/${prog}.service")
+    fi
+    switches+=("$WORKSPACE/${license_file}=/usr/share/doc/$prog/${license_file}")
+
+    fpm_build "$GOPATH/bin/${basename}=/usr/bin/${prog}" "${prog}" 'Curoverse, Inc.' dir "${version}" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=${description}" "${switches[@]}"
 }
 
 default_iteration() {
@@ -202,6 +213,7 @@ fpm_build () {
               --python-package-name-prefix "$PYTHON2_PKG_PREFIX" \
               --prefix "$PYTHON2_PREFIX" \
               --python-install-lib "$PYTHON2_INSTALL_LIB" \
+              --python-install-data . \
               --exclude "${PYTHON2_INSTALL_LIB#/}/tests" \
               --depends "$PYTHON2_PACKAGE"
           # Fix --iteration for #9242.
@@ -218,6 +230,7 @@ fpm_build () {
               --python-package-name-prefix "$PYTHON3_PKG_PREFIX" \
               --prefix "$PYTHON3_PREFIX" \
               --python-install-lib "$PYTHON3_INSTALL_LIB" \
+              --python-install-data . \
               --exclude "${PYTHON3_INSTALL_LIB#/}/tests" \
               --depends "$PYTHON3_PACKAGE"
           # Fix --iteration for #9242.
@@ -252,6 +265,14 @@ fpm_build () {
   # that will take precedence, as desired.
   COMMAND_ARR+=(--iteration "$default_iteration_value")
 
+  if [[ python = "$PACKAGE_TYPE" ]] && [[ -e "${PACKAGE}/${PACKAGE_NAME}.service" ]]
+  then
+      COMMAND_ARR+=(
+          --after-install "${WORKSPACE}/build/go-python-package-scripts/postinst"
+          --before-remove "${WORKSPACE}/build/go-python-package-scripts/prerm"
+      )
+  fi
+
   # Append --depends X and other arguments specified by fpm-info.sh in
   # the package source dir. These are added last so they can override
   # the arguments added by this script.
index 30a80f527afabcee038350a963a077268b02aaa2..2797ec31093fc5183289123aa916efcaf051533f 100755 (executable)
@@ -58,7 +58,10 @@ https://arvados.org/projects/arvados/wiki/Running_tests
 
 Available tests:
 
-apps/workbench
+apps/workbench (*)
+apps/workbench_units (*)
+apps/workbench_functionals (*)
+apps/workbench_integration (*)
 apps/workbench_benchmark
 apps/workbench_profile
 doc
@@ -93,6 +96,9 @@ tools/crunchstat-summary
 tools/keep-rsync
 tools/keep-block-check
 
+(*) apps/workbench is shorthand for apps/workbench_units +
+    apps/workbench_functionals + apps/workbench_integration
+
 EOF
 
 # First make sure to remove any ARVADOS_ variables from the calling
@@ -205,7 +211,13 @@ do
             ;;
         --skip)
             skipwhat="$1"; shift
-            skip[$skipwhat]=1
+            if [[ "$skipwhat" == "apps/workbench" ]]; then
+              skip["apps/workbench_units"]=1
+              skip["apps/workbench_functionals"]=1
+              skip["apps/workbench_integration"]=1
+            else
+              skip[$skipwhat]=1
+            fi
             ;;
         --only)
             only="$1"; skip[$1]=""; shift
@@ -342,9 +354,12 @@ setup_ruby_environment() {
         # complaint about not being in first place already.
         rvm use @default 2>/dev/null
 
-        # Create (if needed) and switch to an @arvados-tests
-        # gemset. (Leave the choice of ruby to the caller.)
-        rvm use @arvados-tests --create \
+        # Create (if needed) and switch to an @arvados-tests-* gemset,
+        # salting the gemset name so it doesn't interfere with
+        # concurrent builds in other workspaces. Leave the choice of
+        # ruby to the caller.
+        gemset="arvados-tests-$(echo -n "${WORKSPACE}" | md5sum | head -c16)"
+        rvm use "@${gemset}" --create \
             || fatal 'rvm gemset setup'
 
         rvm env
@@ -397,9 +412,9 @@ setup_virtualenv() {
     fi
     if [[ $("$venvdest/bin/python" --version 2>&1) =~ \ 3\.[012]\. ]]; then
         # pip 8.0.0 dropped support for python 3.2, e.g., debian wheezy
-        "$venvdest/bin/pip" install 'setuptools>=18' 'pip>=7,<8'
+        "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7,<8'
     else
-        "$venvdest/bin/pip" install 'setuptools>=18' 'pip>=7'
+        "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7'
     fi
     # ubuntu1404 can't seem to install mock via tests_require, but it can do this.
     "$venvdest/bin/pip" install 'mock>=1.0' 'pbr<1.7.0'
@@ -486,7 +501,13 @@ do_test() {
 
 do_test_once() {
     unset result
-    if [[ -z "${skip[$1]}" ]] && ( [[ -z "$only" ]] || [[ "$only" == "$1" ]] )
+    to_test=$1
+    if [[ "$only" == "apps/workbench" ]] &&
+       ( [[ "$to_test" == "apps/workbench_units" ]] || [[ "$to_test" == "apps/workbench_functionals" ]] ||
+         [[ "$to_test" == "apps/workbench_integration" ]] ); then
+      to_test="apps/workbench"
+    fi
+    if [[ -z "${skip[$1]}" ]] && ( [[ -z "$only" ]] || [[ "$only" == "$to_test" ]] )
     then
         title "Running $1 tests"
         timer_reset
@@ -499,6 +520,8 @@ do_test_once() {
             # mode makes Go show the wrong line numbers when reporting
             # compilation errors.
             go get -t "git.curoverse.com/arvados.git/$1" || return 1
+            cd "$WORKSPACE/$1" || return 1
+            gofmt -e -d . | egrep . && result=1
             if [[ -n "${testargs[$1]}" ]]
             then
                 # "go test -check.vv giturl" doesn't work, but this
@@ -509,7 +532,7 @@ do_test_once() {
                 # empty, so use this form in such cases:
                 go test ${short:+-short} ${coverflags[@]} "git.curoverse.com/arvados.git/$1"
             fi
-            result="$?"
+            result=${result:-$?}
             if [[ -f "$WORKSPACE/tmp/.$covername.tmp" ]]
             then
                 go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
@@ -517,10 +540,22 @@ do_test_once() {
             fi
         elif [[ "$2" == "pip" ]]
         then
-            # $3 can name a path directory for us to use, including trailing
-            # slash; e.g., the bin/ subdirectory of a virtualenv.
-            cd "$WORKSPACE/$1" \
-                && "${3}python" setup.py ${short:+--short-tests-only} test ${testargs[$1]}
+            tries=0
+            cd "$WORKSPACE/$1" && while :
+            do
+                tries=$((${tries}+1))
+                # $3 can name a path directory for us to use, including trailing
+                # slash; e.g., the bin/ subdirectory of a virtualenv.
+                "${3}python" setup.py ${short:+--short-tests-only} test ${testargs[$1]}
+                result=$?
+                if [[ ${tries} -lt 3 && ${result} -eq 137 ]]
+                then
+                    printf '\n*****\n%s tests killed -- retrying\n*****\n\n' "$1"
+                    continue
+                else
+                    break
+                fi
+            done
         elif [[ "$2" != "" ]]
         then
             "test_$2"
@@ -714,6 +749,7 @@ gostuff=(
     sdk/go/manifest
     sdk/go/streamer
     sdk/go/crunchrunner
+    lib/crunchstat
     services/arv-git-httpd
     services/crunchstat
     services/keep-web
@@ -803,12 +839,27 @@ do
     do_test "$g" go
 done
 
-test_workbench() {
+test_workbench_units() {
+    start_nginx_proxy_services \
+        && cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench_units workbench_units
+
+test_workbench_functionals() {
+    start_nginx_proxy_services \
+        && cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench_functionals workbench_functionals
+
+test_workbench_integration() {
     start_nginx_proxy_services \
         && cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]}
 }
-do_test apps/workbench workbench
+do_test apps/workbench_integration workbench_integration
+
 
 test_workbench_benchmark() {
     start_nginx_proxy_services \
index 71ba68d4232699efc540af50b23758a2bf6e6bef..89699f5eaf765aa997a223b2704934c4d5b38da7 100755 (executable)
@@ -12,14 +12,16 @@ import arvados
 import arvados_cwl
 import arvados.collection
 import arvados.util
-from cwltool.process import shortname
 import cwltool.main
 import logging
 import os
 import json
 import argparse
+import re
+import functools
+
 from arvados.api import OrderedJsonModel
-from cwltool.process import adjustFiles
+from cwltool.process import shortname, adjustFileObjs, adjustDirObjs, getListing, normalizeFilesDirs
 from cwltool.load_tool import load_tool
 
 # Print package versions
@@ -30,26 +32,34 @@ api = arvados.api("v1")
 try:
     job_order_object = arvados.current_job()['script_parameters']
 
+    pdh_path = re.compile(r'^[0-9a-f]{32}\+\d+(/.+)?$')
+
     def keeppath(v):
-        if arvados.util.keep_locator_pattern.match(v):
-            return "file://%s/%s" % (os.environ['TASK_KEEPMOUNT'], v)
+        if pdh_path.match(v):
+            return "keep:%s" % v
         else:
             return v
 
-    job_order_object["cwl:tool"] = keeppath(job_order_object["cwl:tool"])
+    def keeppathObj(v):
+        v["location"] = keeppath(v["location"])
+
+    job_order_object["cwl:tool"] = "file://%s/%s" % (os.environ['TASK_KEEPMOUNT'], job_order_object["cwl:tool"])
 
     for k,v in job_order_object.items():
         if isinstance(v, basestring) and arvados.util.keep_locator_pattern.match(v):
             job_order_object[k] = {
                 "class": "File",
-                "path": keeppath(v)
+                "location": "keep:%s" % v
             }
 
-    adjustFiles(job_order_object, keeppath)
+    adjustFileObjs(job_order_object, keeppathObj)
+    adjustDirObjs(job_order_object, keeppathObj)
+    normalizeFilesDirs(job_order_object)
+    adjustDirObjs(job_order_object, functools.partial(getListing, arvados_cwl.fsaccess.CollectionFsAccess("", api_client=api)))
 
-    runner = arvados_cwl.ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()), work_api="jobs")
+    runner = arvados_cwl.ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()))
 
-    t = load_tool(job_order_object, runner.arvMakeTool)
+    t = load_tool(job_order_object, runner.arv_make_tool)
 
     args = argparse.Namespace()
     args.project_uuid = arvados.current_job()["owner_uuid"]
@@ -60,18 +70,19 @@ try:
     args.ignore_docker_for_reuse = False
     args.basedir = os.getcwd()
     args.cwl_runner_job={"uuid": arvados.current_job()["uuid"], "state": arvados.current_job()["state"]}
-    outputObj = runner.arvExecutor(t, job_order_object, **vars(args))
+    outputObj = runner.arv_executor(t, job_order_object, **vars(args))
 
     files = {}
-    def capture(path):
+    def capture(fileobj):
+        path = fileobj["location"]
         sp = path.split("/")
         col = sp[0][5:]
         if col not in files:
             files[col] = set()
         files[col].add("/".join(sp[1:]))
-        return path
+        fileobj["location"] = path
 
-    adjustFiles(outputObj, capture)
+    adjustFileObjs(outputObj, capture)
 
     final = arvados.collection.Collection()
 
@@ -80,10 +91,10 @@ try:
             for f in c:
                 final.copy(f, f, c, True)
 
-    def makeRelative(path):
-        return "/".join(path.split("/")[1:])
+    def makeRelative(fileobj):
+        fileobj["location"] = "/".join(fileobj["location"].split("/")[1:])
 
-    adjustFiles(outputObj, makeRelative)
+    adjustFileObjs(outputObj, makeRelative)
 
     with final.open("cwl.output.json", "w") as f:
         json.dump(outputObj, f, indent=4)
index 4115bc9616521b342a757cac5e44b588a85c58c6..5ed5af5cb58c3757fe36758b5d8bea8fa6d4413c 100644 (file)
@@ -43,13 +43,16 @@ navbar:
       - user/tutorials/tutorial-keep-mount.html.textile.liquid
       - user/topics/keep.html.textile.liquid
       - user/topics/arv-copy.html.textile.liquid
-    - Run a pipeline on the command line:
+    - Using Common Workflow Language:
+      - user/cwl/cwl-runner.html.textile.liquid
+      - user/cwl/cwl-style.html.textile.liquid
+    - Working on the command line:
       - user/topics/running-pipeline-command-line.html.textile.liquid
       - user/topics/arv-run.html.textile.liquid
-    - Working with Arvados Repositories:
+    - Working with git repositories:
       - user/tutorials/add-new-repository.html.textile.liquid
       - user/tutorials/git-arvados-guide.html.textile.liquid
-    - Develop a new pipeline:
+    - Develop an Arvados pipeline:
       - user/tutorials/intro-crunch.html.textile.liquid
       - user/tutorials/running-external-program.html.textile.liquid
       - user/topics/crunch-tools-overview.html.textile.liquid
@@ -104,6 +107,8 @@ navbar:
       - api/methods/api_clients.html.textile.liquid
       - api/methods/authorized_keys.html.textile.liquid
       - api/methods/collections.html.textile.liquid
+      - api/methods/container_requests.html.textile.liquid
+      - api/methods/containers.html.textile.liquid
       - api/methods/groups.html.textile.liquid
       - api/methods/humans.html.textile.liquid
       - api/methods/jobs.html.textile.liquid
@@ -120,11 +125,14 @@ navbar:
       - api/methods/traits.html.textile.liquid
       - api/methods/users.html.textile.liquid
       - api/methods/virtual_machines.html.textile.liquid
+      - api/methods/workflows.html.textile.liquid
     - Schema:
       - api/schema/ApiClientAuthorization.html.textile.liquid
       - api/schema/ApiClient.html.textile.liquid
       - api/schema/AuthorizedKey.html.textile.liquid
       - api/schema/Collection.html.textile.liquid
+      - api/schema/Container.html.textile.liquid
+      - api/schema/ContainerRequest.html.textile.liquid
       - api/schema/Group.html.textile.liquid
       - api/schema/Human.html.textile.liquid
       - api/schema/Job.html.textile.liquid
@@ -141,6 +149,7 @@ navbar:
       - api/schema/Trait.html.textile.liquid
       - api/schema/User.html.textile.liquid
       - api/schema/VirtualMachine.html.textile.liquid
+      - api/schema/Workflow.html.textile.liquid
   installguide:
     - Overview:
       - install/index.html.textile.liquid
@@ -159,6 +168,12 @@ navbar:
       - install/configure-azure-blob-storage.html.textile.liquid
       - install/install-keepproxy.html.textile.liquid
       - install/install-keep-web.html.textile.liquid
+    - Install Crunch v2 on SLURM:
+      - install/crunch2-slurm/install-prerequisites.html.textile.liquid
+      - install/crunch2-slurm/install-compute-node.html.textile.liquid
+      - install/crunch2-slurm/install-dispatch.html.textile.liquid
+      - install/crunch2-slurm/install-test.html.textile.liquid
+    - Install Crunch v1:
       - install/install-crunch-dispatch.html.textile.liquid
       - install/install-compute-node.html.textile.liquid
     - Helpful hints:
diff --git a/doc/_includes/_container_runtime_constraints.liquid b/doc/_includes/_container_runtime_constraints.liquid
new file mode 100644 (file)
index 0000000..fbf4b74
--- /dev/null
@@ -0,0 +1,9 @@
+Runtime constraints
+
+Runtime constraints restrict the container's access to compute resources and the outside world (in addition to its explicitly stated inputs and output).
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
+|ram|integer|Number of bytes of RAM to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.|
+|vcpus|integer|Number of cores to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.|
+|API|boolean|When set, ARVADOS_API_HOST and ARVADOS_API_TOKEN will be set, and container will have networking enabled to access the Arvados API server.|Optional.|
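+
+For illustration, a complete @runtime_constraints@ hash for a committed request might look like this (the values are placeholders, not recommendations):
+
+<notextile>
+<pre><code>{
+  "ram":12000000000,
+  "vcpus":2,
+  "API":false
+}</code></pre>
+</notextile>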
diff --git a/doc/_includes/_install_compute_docker.liquid b/doc/_includes/_install_compute_docker.liquid
new file mode 100644 (file)
index 0000000..1a2e21c
--- /dev/null
@@ -0,0 +1,45 @@
+h2. Install Docker
+
+Compute nodes must have Docker installed to run containers.  This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported).  Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
+
+For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
+
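+For example, on a Debian-based system with the Arvados package repository configured, installing the backported package might look like:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install docker.io</span>
+</code></pre>
+</notextile>
+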
+h2(#configure_docker_daemon). Configure the Docker daemon
+
+Crunch runs Docker containers with relatively little configuration.  You may need to start the Docker daemon with specific options to make sure these jobs run smoothly in your environment.  This section highlights options that are useful to most installations.  Refer to the "Docker daemon reference":https://docs.docker.com/reference/commandline/daemon/ for complete information about all available options.
+
+The best way to configure these options varies by distribution.
+
+* If you're using our backported @docker.io@ package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker.io@.
+* If you're using another Debian-based package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker@.
+* On Red Hat-based distributions, you can list these options in the @other_args@ setting in @/etc/sysconfig/docker@.
+
+h3. Default ulimits
+
+Docker containers inherit ulimits from the Docker daemon.  However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job.  You may want to increase default limits for compute containers by passing @--default-ulimit@ options to the Docker daemon.  For example, to allow containers to open 10,000 files, set @--default-ulimit nofile=10000:10000@.
+
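+For example, with our backported @docker.io@ package, the corresponding line in @/etc/default/docker.io@ might be (the limit shown is illustrative):
+
+<notextile>
+<pre><code>DOCKER_OPTS="--default-ulimit nofile=10000:10000"
+</code></pre>
+</notextile>
+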
+h3. DNS
+
+Your containers must be able to resolve the hostname of your API server and any hostnames returned in Keep service records.  If these names are not in public DNS records, you may need to specify a DNS resolver for the containers by setting the @--dns@ address to an IP address of an appropriate nameserver.  You may specify this option more than once to use multiple nameservers.
+
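+For example (the nameserver addresses below are placeholders; substitute your own):
+
+<notextile>
+<pre><code>DOCKER_OPTS="--dns 10.0.0.2 --dns 10.0.0.3"
+</code></pre>
+</notextile>
+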
+h2. Configure Linux cgroups accounting
+
+Linux can report what compute resources are used by processes in a specific cgroup or Docker container.  Crunch can use these reports to share that information with users running compute work.  This can help pipeline authors debug and optimize their workflows.
+
+To enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.
+
+On Debian-based systems, open the file @/etc/default/grub@ in an editor.  Find where the string @GRUB_CMDLINE_LINUX@ is set.  Add @cgroup_enable=memory swapaccount=1@ to that string.  Save the file and exit the editor.  Then run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo update-grub</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems, run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1'</span>
+</code></pre>
+</notextile>
+
+Finally, reboot the system to make these changes effective.
diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
new file mode 100644 (file)
index 0000000..2bf3152
--- /dev/null
@@ -0,0 +1,17 @@
+h2. Configure FUSE
+
+FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
+
+<notextile>
+<pre>
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+</pre>
+</notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
new file mode 100644 (file)
index 0000000..5671a54
--- /dev/null
@@ -0,0 +1,41 @@
+h2. Configure the Docker cleaner
+
+The arvados-docker-cleaner program removes the least recently used Docker images as needed to keep disk usage below a configured limit.
+
+{% include 'notebox_begin' %}
+This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or run it with @--remove-stopped-containers never@.
+{% include 'notebox_end' %}
+
+Create a file @/etc/systemd/system/arvados-docker-cleaner.service@ in an editor.  Include the text below as its contents.  Make sure to edit the @ExecStart@ line appropriately for your compute node.
+
+<notextile>
+<pre><code>[Service]
+# Most deployments will want a quota that's at least 10G.  From there,
+# a larger quota can help reduce compute overhead by preventing reloading
+# the same Docker image repeatedly, but will leave less space for other
+# files on the same storage (usually Docker volumes).  Make sure the quota
+# is less than the total space available for Docker images.
+# If your deployment uses a Python 3 Software Collection, uncomment the
+# ExecStart line below, and delete the following one:
+# ExecStart=scl enable python33 "python3 -m arvados_docker.cleaner --quota <span class="userinput">20G</span>"
+ExecStart=python3 -m arvados_docker.cleaner --quota <span class="userinput">20G</span>
+Restart=always
+RestartPreventExitStatus=2
+
+[Install]
+WantedBy=default.target
+
+[Unit]
+After=docker.service
+</code></pre>
+</notextile>
+
+Then enable and start the service:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl enable arvados-docker-cleaner.service</span>
+~$ <span class="userinput">sudo systemctl start arvados-docker-cleaner.service</span>
+</code></pre>
+</notextile>
+
+If you are using a different daemon supervisor, or if you want to test the daemon in a terminal window, use the command on the @ExecStart@ line above.
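+
+For example, matching the @ExecStart@ line above (adjust the quota for your node):
+
+<notextile>
+<pre><code>~$ <span class="userinput">python3 -m arvados_docker.cleaner --quota 20G</span>
+</code></pre>
+</notextile>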
diff --git a/doc/_includes/_mount_types.liquid b/doc/_includes/_mount_types.liquid
new file mode 100644 (file)
index 0000000..3ad810d
--- /dev/null
@@ -0,0 +1,63 @@
+Mount types
+
+The "mounts" hash is the primary mechanism for adding data to the container at runtime (beyond what is already in the container image).
+
+Each value of the "mounts" hash is itself a hash, whose "kind" key determines the handler used to attach data to the container.
+
+table(table table-bordered table-condensed).
+|_. Mount type|_. Kind|_. Description|_. Examples|
+|Arvados data collection|@collection@|@"portable_data_hash"@ _or_ @"uuid"@ _may_ be provided. If not provided, a new collection will be created. This is useful when @"writable":true@ and the container's @output_path@ is (or is a subdirectory of) this mount target.
+@"writable"@ may be provided with a @true@ or @false@ to indicate the path must (or must not) be writable. If not specified, the system can choose.
+@"path"@ may be provided, and defaults to @"/"@.
+At container startup, the target path will have the same directory structure as the given path within the collection. Even if the files/directories are writable in the container, modifications will _not_ be saved back to the original collections when the container ends.|<pre><code>{
+ "kind":"collection",
+ "uuid":"...",
+ "path":"/foo.txt"
+}
+{
+ "kind":"collection",
+ "uuid":"..."
+}</code></pre>|
+|Git tree|@git_tree@|One of { @"git-url"@, @"repository_name"@, @"uuid"@ } must be provided.
+One of { @"commit"@, @"revisions"@ } must be provided.
+"path" may be provided. The default path is "/".
+At container startup, the target path will have the source tree indicated by the given revision. The @.git@ metadata directory _will not_ be available: typically the system will use @git-archive@ rather than @git-checkout@ to prepare the target directory.
+- If a value is given for @"revisions"@, it will be resolved to a set of commits (as desribed in the "ranges" section of git-revisions(1)) and the container request will be satisfiable by any commit in that set.
+- If a value is given for @"commit"@, it will be resolved to a single commit, and the tree resulting from that commit will be used.
+- @"path"@ can be used to select a subdirectory or a single file from the tree indicated by the selected commit.
+- Multiple commits can resolve to the same tree: for example, the file/directory given in @"path"@ might not have changed between commits A and B.
+- The resolved mount (found in the Container record) will have only the "kind" key and a "blob" or "tree" key indicating the 40-character hash of the git tree/blob used.|<pre><code>{
+ "kind":"git_tree",
+ "uuid":"zzzzz-s0uqq-xxxxxxxxxxxxxxx",
+ "commit":"master"
+}
+{
+ "kind":"git_tree",
+ "uuid":"zzzzz-s0uqq-xxxxxxxxxxxxxxx",
+ "commit_range":"bugfix^..master",
+ "path":"/crunch_scripts/grep"
+}</code></pre>|
+|Temporary directory|@tmp@|@"capacity"@: capacity (in bytes) of the storage device.
+@"device_type"@ (optional, default "network"): one of @{"ram", "ssd", "disk", "network"}@ indicating the acceptable level of performance.
+At container startup, the target path will be empty. When the container finishes, the content will be discarded. This will be backed by a storage mechanism no slower than the specified type.|<pre><code>{
+ "kind":"tmp",
+ "capacity":100000000000
+}
+{
+ "kind":"tmp",
+ "capacity":1000000000,
+ "device_type":"ram"
+}</code></pre>|
+|Keep|@keep@|Expose all readable collections via arv-mount.
+Requires suitable runtime constraints.|<pre><code>{
+ "kind":"keep"
+}</code></pre>|
+|Mounted file or directory|@file@|@"path"@: absolute path (inside the container) of a file or directory that is (or is inside) another mount target.
+Can be used for "stdin" and "stdout" targets.|<pre><code>{
+ "kind":"file",
+ "path":"/mounted_tmp/a.out"
+}</code></pre>|
+|JSON document|@json@|A JSON-encoded string, array, or object.|<pre><code>{
+ "kind":"json",
+ "content":{"foo":"bar"}
+}</code></pre>|
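+
+Putting these together, a complete @"mounts"@ hash is keyed by target path inside the container. A sketch (the paths and sizes are illustrative):
+
+<notextile>
+<pre><code>{
+ "/keep": {"kind":"keep"},
+ "/tmp": {"kind":"tmp", "capacity":1000000000},
+ "/out": {"kind":"collection", "writable":true}
+}</code></pre>
+</notextile>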
diff --git a/doc/_includes/_pipeline_deprecation_notice.liquid b/doc/_includes/_pipeline_deprecation_notice.liquid
new file mode 100644 (file)
index 0000000..682511f
--- /dev/null
@@ -0,0 +1,3 @@
+{% include 'notebox_begin' %}
+Arvados pipeline templates are deprecated.  The recommended way to develop new workflows for Arvados is using the "Common Workflow Language":{{site.baseurl}}/user/cwl/cwl-runner.html.
+{% include 'notebox_end' %}
diff --git a/doc/api/methods/container_requests.html.textile.liquid b/doc/api/methods/container_requests.html.textile.liquid
new file mode 100644 (file)
index 0000000..2603079
--- /dev/null
@@ -0,0 +1,75 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "container_requests"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/container_requests@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2(#create). create
+
+Create a new ContainerRequest.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|container_request|object|See "ContainerRequest resource":{{site.baseurl}}/api/schema/ContainerRequest.html|request body||
+
+The request body must include the required attributes command, container_image, cwd, and output_path. It can also include other attributes such as environment, mounts, and runtime_constraints.
+
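+As a sketch, a minimal create call with only the required attributes might look like this (attribute values are placeholders; assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -X POST -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+  -d 'container_request={
+        "command": ["echo", "hello"],
+        "container_image": "arvados/jobs",
+        "cwd": "/tmp",
+        "output_path": "/tmp"
+      }' \
+  https://$ARVADOS_API_HOST/arvados/v1/container_requests</span>
+</code></pre>
+</notextile>
+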
+h2. delete
+
+Delete an existing ContainerRequest.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ContainerRequest in question.|path||
+
+h2. get
+
+Get a ContainerRequest's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ContainerRequest in question.|path||
+
+h2. list
+
+List container_requests.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of container_requests to return.|query||
+|order|string|Order in which to return matching container_requests.|query||
+|filters|array|Conditions for filtering container_requests.|query||
+
+See the create method documentation for more information about ContainerRequest-specific filters.
+
+h2. update
+
+Update attributes of an existing ContainerRequest.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ContainerRequest in question.|path||
+|container_request|object||query||
+
+{% include 'notebox_begin' %}
+Setting the priority of a committed container_request to 0 may cancel a running container assigned for it.
+See "Canceling a ContainerRequest":{{site.baseurl}}/api/schema/ContainerRequest.html#cancel_container for further details.
+{% include 'notebox_end' %}
diff --git a/doc/api/methods/containers.html.textile.liquid b/doc/api/methods/containers.html.textile.liquid
new file mode 100644 (file)
index 0000000..c39b092
--- /dev/null
@@ -0,0 +1,76 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "containers"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/containers@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2(#create). create
+
+Create a new Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|container|object|See "Container resource":{{site.baseurl}}/api/schema/Container.html|request body||
+
+h2. delete
+
+Delete an existing Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+
+h2. get
+
+Get a Container's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+
+h2. list
+
+List containers.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of containers to return.|query||
+|order|string|Order in which to return matching containers.|query||
+|filters|array|Conditions for filtering containers.|query||
+
+See the create method documentation for more information about Container-specific filters.
+
+h2. update
+
+Update attributes of an existing Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+|container|object||query||
+
+h2. auth
+
+Get the api_client_authorization record indicated by this container's auth_uuid, which belongs to the container's locked_by_uuid.
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
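+
+For example, a sketch of an auth call (the UUID is a placeholder):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+  https://$ARVADOS_API_HOST/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxxx/auth</span>
+</code></pre>
+</notextile>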
index 9f20a88a9519d09eb5d7fe040c93706379bc089d..cd9633db427aa1807d4a600f6533225f543e4b34 100644 (file)
@@ -29,6 +29,8 @@ table(table table-bordered table-condensed).
 
 Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections.  If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
 
+Note: Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["pipeline_instances.state", "=", "Complete"]@ to filter @pipeline_instances@ where @state@ is @Complete@. All other types of items owned by this group will be unaffected by this filter and will still be included.
+
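+For example, a sketch of a group contents request using such a filter (the group UUID is a placeholder):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -G -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+  --data-urlencode 'filters=[["pipeline_instances.state","=","Complete"]]' \
+  https://$ARVADOS_API_HOST/arvados/v1/groups/zzzzz-j7d0g-xxxxxxxxxxxxxxx/contents</span>
+</code></pre>
+</notextile>
+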
 h2. create
 
 Create a new Group.
diff --git a/doc/api/methods/workflows.html.textile.liquid b/doc/api/methods/workflows.html.textile.liquid
new file mode 100644 (file)
index 0000000..95be013
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "workflows"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/workflows@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|workflow|object|See "Workflow resource":{{site.baseurl}}/api/schema/Workflow.html|request body||
+
+h2. delete
+
+Delete an existing Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+
+h2. get
+
+Get a Workflow's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+
+h2. list
+
+List workflows.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of workflows to return.|query||
+|order|string|Order in which to return matching workflows.|query||
+|filters|array|Conditions for filtering workflows.|query||
+
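+For example, a sketch of a list call filtering on name (assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -G -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+  --data-urlencode 'filters=[["name","like","%demo%"]]' \
+  https://$ARVADOS_API_HOST/arvados/v1/workflows</span>
+</code></pre>
+</notextile>
+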
+h2. update
+
+Update attributes of an existing Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+|workflow|object||query||
diff --git a/doc/api/schema/Container.html.textile.liquid b/doc/api/schema/Container.html.textile.liquid
new file mode 100644 (file)
index 0000000..d29d420
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Container
+
+...
+
+A Container:
+* Precisely describes the environment in which a Crunch2 process should run. For example, git trees, data collections, and docker images are stored as content addresses. This makes it possible to reason about the difference between two processes, and to replay a process at a different time and place.
+* Is created by the system to fulfill container requests.
+
+h2. Methods
+
+See "containers":{{site.baseurl}}/api/methods/containers.html
+
+h2. Resource
+
+Each Container offers the following attributes, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|state|string|The allowed states are "Queued", "Locked", "Running", "Cancelled" and "Complete".|See "Container states":#container_states for more details.|
+|started_at|datetime|When this container started running.|Null if container has not yet started.|
+|finished_at|datetime|When this container finished.|Null if container has not yet finished.|
+|log|string|Portable data hash of the collection containing logs from a completed container run.|Null if the container is not yet finished.|
+|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|Must be equal to a ContainerRequest's environment in order to satisfy the ContainerRequest.|
+|cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest|
+|command|array of strings|Command to execute.| Must be equal to a ContainerRequest's command in order to satisfy the ContainerRequest.|
+|output_path|string|Path to a directory or file inside the container that should be preserved as this container's output when it finishes.|Must be equal to a ContainerRequest's output_path in order to satisfy the ContainerRequest.|
+|mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|See "Mount types":#mount_types for more details.|
+|runtime_constraints|hash|Compute resources, and access to the outside world, that are / were available to the container.
+Generally this will contain additional keys that are not present in any corresponding ContainerRequests: for example, even if no ContainerRequests specified constraints on the number of CPU cores, the number of cores actually used will be recorded here.|e.g.,
+<pre><code>{
+  "ram":12000000000,
+  "vcpus":2,
+  "API":true
+}</code></pre>See "Runtime constraints":#runtime_constraints for more details.|
+|output|string|Portable data hash of the output collection.|Null if the container is not yet finished.|
+|container_image|string|Portable data hash of a collection containing the docker image used to run the container.||
+|progress|number|A number between 0.0 and 1.0 describing the fraction of work done.||
+|priority|integer|Priority assigned by the system, taking into account the priorities of all associated ContainerRequests.||
+|exit_code|integer|Process exit code.|Null if state!="Complete"|
+|auth_uuid|string|UUID of a token to be passed into the container itself, used to access Keep-backed mounts, etc.|Null if state∉{"Locked","Running"}|
+|locked_by_uuid|string|UUID of a token, indicating which dispatch process changed state to Locked. If null, any token can be used to lock. If not null, only the indicated token can modify this container.|Null if state∉{"Locked","Running"}|
+
+h2(#container_states). Container states
+
+table(table table-bordered table-condensed).
+|_. State|_. Significance|_. Allowed next|
+|Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled|
+|Locked|A dispatcher has "taken" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled|
+|Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled|
+|Complete|Container was running, and the contained process/command has exited.|-|
+|Cancelled|The container did not run long enough to produce an exit code. This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-|
+
+h2(#mount_types). {% include 'mount_types' %}
+
+h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
diff --git a/doc/api/schema/ContainerRequest.html.textile.liquid b/doc/api/schema/ContainerRequest.html.textile.liquid
new file mode 100644 (file)
index 0000000..48c624a
--- /dev/null
@@ -0,0 +1,70 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: ContainerRequest
+
+...
+
+A ContainerRequest:
+* Is a client's expression of interest in knowing the outcome of a computational process.
+* The system is responsible for finding suitable containers and assigning them to container_requests.
+* The client's description of the ContainerRequest is less precise than that of a Container: a ContainerRequest describes container constraints which can have different interpretations over time. For example, a ContainerRequest with a @{"kind":"git_tree","commit_range":"abc123..master",...}@ mount might be satisfiable by any of several different source trees, and this set of satisfying source trees can change when the repository's "master" branch is updated.
+
+h2. Methods
+
+See "container_requests":{{site.baseurl}}/api/methods/container_requests.html
+
+h2. Resource
+
+Each ContainerRequest offers the following attributes, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+All attributes are optional, unless otherwise marked as required.
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|name|string|The name of the container_request.||
+|description|string|The description of the container_request.||
+|properties|hash|Client-defined structured data that does not affect how the container is run.||
+|state|string|The allowed states are "Uncommitted", "Committed", and "Final".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the "Final" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).|
+|requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container. If the parent container is cancelled, this container_request will be cancelled as well.|
+|container_uuid|string|The uuid of the container that satisfies this container_request. The system will find and reuse any preexisting Container that matches this ContainerRequest's criteria. See "Container reuse":#container_reuse for more details.|Currently, container reuse is the default behavior and a mechanism to skip reuse is not supported.|
+|container_count_max|integer|Maximum number of containers to start, i.e., the maximum number of "attempts" to be made.||
+|mounts|hash|Objects to attach to the container's filesystem and stdin/stdout.|See "Mount types":#mount_types for more details.|
+|runtime_constraints|hash|Restrict the container's access to compute resources and the outside world.|Required when in "Committed" state. e.g.,<pre><code>{
+  "ram":12000000000,
+  "vcpus":2,
+  "API":true
+}</code></pre>See "Runtime constraints":#runtime_constraints for more details.|
+|container_image|string|Portable data hash of a collection containing the docker image to run the container.|Required.|
+|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.||
+|cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Required.|
+|command|array of strings|Command to execute in the container.|Required. e.g., @["echo","hello"]@|
+|output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be, or be inside, one of the mount targets. For best performance, point output_path to a writable collection mount.|Required.|
+|priority|integer|Higher value means spend more resources on this container_request, i.e., go ahead of other queued containers, bring up more nodes, etc.|Priority 0 means a container should not be run on behalf of this request. Clients are expected to submit ContainerRequests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed".|
+|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
+|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.||
+
+h2(#mount_types). {% include 'mount_types' %}
+
+h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
+
+h2(#container_reuse). Container reuse
+
+When a ContainerRequest is "Committed", the system will try to find and reuse any preexisting Container with the same exact command, cwd, environment, output_path, container_image, mounts, and runtime_constraints as this ContainerRequest. The serialized fields environment, mounts and runtime_constraints are sorted to facilitate comparison.
+
+The system will use the following scheme to determine which Container to consider for reuse: A Container with the same exact command, cwd, environment, output_path, container_image, mounts, and runtime_constraints as this ContainerRequest and,
+* The oldest successfully finished container, i.e., in state "Complete" with exit_code of 0. If matching containers with different outputs are found, the system will forgo reusing any of these finished containers and instead look for suitable containers in other states
+* The oldest "Running" container with the highest progress, i.e., the container that is most likely to finish first
+* The oldest "Locked" container with the highest priority, i.e., the container that is most likely to start first
+* The oldest "Queued" container with the highest priority, i.e, the container that is most likely to start first
+
+{% include 'notebox_begin' %}
+Currently, container reuse is the default behavior and a mechanism to skip reuse is not supported.
+{% include 'notebox_end' %}
+
+h2(#cancel_container). Canceling a ContainerRequest
+
+A ContainerRequest may be canceled by setting its priority to 0, using an update call.
+
+When a ContainerRequest is canceled, it will still reflect the state of the Container it is associated with via the container_uuid attribute. If that Container is being reused by any other container_requests that are still active, i.e., not yet canceled, that Container may continue to run or be scheduled to run by the system in the future. However, if no other container_requests are using that Container, then the Container will get canceled as well.
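+
+For example, a sketch of such an update call (the UUID is a placeholder):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -X PUT -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+  -d 'container_request={"priority":0}' \
+  https://$ARVADOS_API_HOST/arvados/v1/container_requests/zzzzz-xvhdp-xxxxxxxxxxxxxxx</span>
+</code></pre>
+</notextile>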
diff --git a/doc/api/schema/Workflow.html.textile.liquid b/doc/api/schema/Workflow.html.textile.liquid
new file mode 100644 (file)
index 0000000..05cd998
--- /dev/null
@@ -0,0 +1,23 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Workflow
+
+...
+
+A *Workflow* is a definition of work to be performed by a Crunch2 process. It defines the steps and inputs for the process.
+
+h2. Methods
+
+See "workflows":{{site.baseurl}}/api/methods/workflows.html
+
+h2. Resource
+
+Each Workflow offers the following optional attributes, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|If not specified, will be set to any "name" from the "definition" attribute.||
+|description|string|If not specified, will be set to any "description" from the "definition" attribute.||
+|definition|string|A "Common Workflow Language" document.|Visit "Common Workflow Language":http://www.commonwl.org/ for details.|
index 96a838909096e01a393228c3d3b697a144f03ca0..9a80fd7531ba0fc50479e4200b4b77974d5bca21 100644 (file)
@@ -67,3 +67,13 @@ Here we create a default project for the standard Arvados Docker images, and giv
 }
 EOF</span>
 </code></pre></notextile>
+
+h3. Download and tag the latest arvados/jobs docker image
+
+The @arvados-cwl-runner@ needs access to an arvados/jobs image that is tagged as 'latest'. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as 'latest'.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-keepdocker --pull arvados/jobs latest</span>
+</code></pre></notextile>
+
+If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
diff --git a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid b/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
new file mode 100644 (file)
index 0000000..19f8662
--- /dev/null
@@ -0,0 +1,39 @@
+---
+layout: default
+navsection: installguide
+title: Set up a compute node
+...
+
+h2. Install dependencies
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% include 'note_python_sc' %}
+
+On CentOS 6 and RHEL 6:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install python27-python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+On other Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+{% include 'install_compute_docker' %}
+
+{% include 'install_compute_fuse' %}
+
+{% include 'install_docker_cleaner' %}
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
new file mode 100644 (file)
index 0000000..1d07873
--- /dev/null
@@ -0,0 +1,111 @@
+---
+layout: default
+navsection: installguide
+title: Install the SLURM dispatcher
+
+...
+
+The SLURM dispatcher can run on any node that can submit requests to both the Arvados API server and the SLURM controller.  It is not resource-intensive, so you can run it on the API server node.
+
+h2. Install the dispatcher
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install crunch-dispatch-slurm</span>
+~$ <span class="userinput">sudo systemctl enable crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+h2. Create a dispatcher token
+
+Create a privileged Arvados API token for use by the dispatcher. If you have multiple dispatch processes, you should give each one a different token.  *On the API server*, run:
+
+<notextile>
+<pre><code>apiserver:~$ <span class="userinput">cd /var/www/arvados-api/current</span>
+apiserver:/var/www/arvados-api/current$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production bundle exec script/create_superuser_token.rb</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre>
+</notextile>
+
+h2. Configure the dispatcher
+
+Set up crunch-dispatch-slurm's configuration directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
+~$ <span class="userinput">sudo install -d -o -root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authenticate to your Arvados API server, using the token you generated in the previous step.  Follow this YAML format:
+
+<notextile>
+<pre><code class="userinput">Client:
+  APIHost: <b>zzzzz.arvadosapi.com</b>
+  AuthToken: <b>zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</b>
+</code></pre>
+</notextile>
+
+This is the only configuration required by crunch-dispatch-slurm.  The subsections below describe optional configuration flags you can set inside the main configuration object.
+
+h3. PollPeriod
+
+crunch-dispatch-slurm polls the API server periodically for new containers to run.  The @PollPeriod@ option controls how often this poll happens.  Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@.  For example:
+
+<notextile>
+<pre><code class="userinput">"PollPeriod": "3m30s"
+</code></pre>
+</notextile>
+
+h3. SbatchArguments
+
+When crunch-dispatch-slurm invokes @sbatch@, you can add switches to the command by specifying @SbatchArguments@.  You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @SbatchArguments@ to an array of strings.  For example:
+
+<notextile>
+<pre><code class="userinput">"SbatchArguments": ["--partition=PartitionName"]
+</code></pre>
+</notextile>
+
+h3. CrunchRunCommand: Dispatch to SLURM cgroups
+
+If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups.  This provides consistent enforcement of resource constraints.  To do this, add the following to your crunch-dispatch-slurm configuration:
+
+<notextile>
+<pre><code class="userinput">"CrunchRunCommand": ["crunch-run", "-cgroup-parent-subsystem=<b>memory</b>"]
+</code></pre>
+</notextile>
+
+The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected.  The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM.
+
+{% include 'notebox_begin' %}
+
+Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice.  This causes an error when specifying a cgroup parent created outside systemd, such as those created by SLURM.
+
+You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use SLURM's cgroups as container parents.  To do this, "configure the Docker daemon on all compute nodes":install-compute-node.html#configure_docker_daemon to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.
+
+{% include 'notebox_end' %}
+
+h2. Restart the dispatcher
+
+{% include 'notebox_begin' %}
+
+The crunch-dispatch-slurm package includes configuration files for systemd.  If you're using a different init system, you'll need to configure a service to start and stop a @crunch-dispatch-slurm@ process as desired.  The process should run from a directory where the @crunch@ user has write permission on all compute nodes, such as its home directory or @/tmp@.  You do not need to specify any additional switches or environment variables.
+
+{% include 'notebox_end' %}
+
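+For example, a minimal runit-style @run@ script for this purpose might look like the following sketch (@chpst@ comes with runit; adjust the user and directory for your site):
+
+<notextile>
+<pre><code>#!/bin/sh
+# Run the dispatcher from a directory writable by the crunch user.
+cd /tmp
+exec chpst -u <b>crunch</b> crunch-dispatch-slurm
+</code></pre>
+</notextile>
+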
+Restart the dispatcher to run with your new configuration:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid b/doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid
new file mode 100644 (file)
index 0000000..c4dc929
--- /dev/null
@@ -0,0 +1,9 @@
+---
+layout: default
+navsection: installguide
+title: Crunch v2 SLURM prerequisites
+...
+
+Crunch v2 containers can be dispatched to a SLURM cluster.  The dispatcher sends work to the cluster using SLURM's @sbatch@ command, so it works in a variety of SLURM configurations.
+
+In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
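+
+For example, a sketch of one way to set up such an account on a compute node (the UID/GID value 1001 is arbitrary; use the same unused value on every node):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo groupadd --gid <b>1001</b> crunch</span>
+~$ <span class="userinput">sudo useradd --uid <b>1001</b> --gid <b>1001</b> --groups fuse,docker crunch</span>
+</code></pre>
+</notextile>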
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
new file mode 100644 (file)
index 0000000..d51cfce
--- /dev/null
@@ -0,0 +1,109 @@
+---
+layout: default
+navsection: installguide
+title: Test SLURM dispatch
+...
+
+h2. Test compute node setup
+
+You should now be able to submit SLURM jobs that run in Docker containers.  On the node where you're running the dispatcher, you can test this by running:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -u <b>crunch</b> srun -N1 docker run busybox echo OK
+</code></pre>
+</notextile>
+
+If it works, this command should print @OK@ (it may also show some status messages from SLURM and/or Docker).  If it does not print @OK@, double-check your compute node setup, and that the @crunch@ user can submit SLURM jobs.
+
+h2. Test the dispatcher
+
+On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo journalctl -o cat -fu crunch-dispatch-slurm.service</span>
+</code></pre>
+</notextile>
+
+*On your shell server*, submit a simple container request:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv container_request create --container-request '{
+  "name":            "test",
+  "state":           "Committed",
+  "priority":        1,
+  "container_image": "arvados/jobs:latest",
+  "command":         ["echo", "Hello, Crunch!"],
+  "output_path":     "/out",
+  "mounts": {
+    "/out": {
+      "kind":        "tmp",
+      "capacity":    1000
+    }
+  },
+  "runtime_constraints": {
+    "vcpus": 1,
+    "ram": 8388608
+  }
+}'</span>
+</code></pre>
+</notextile>
+
+This command should return a record with a @container_uuid@ field.  Once crunch-dispatch-slurm polls the API server for new containers to run, you should see it dispatch that same container.  It will log messages like:
+
+<notextile>
+<pre><code>2016/08/05 13:52:54 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 started
+2016/08/05 13:53:04 About to submit queued container zzzzz-dz642-hdp2vpu9nq14tx0
+2016/08/05 13:53:04 sbatch succeeded: Submitted batch job 8102
+</code></pre>
+</notextile>
+
+If you do not see crunch-dispatch-slurm try to dispatch the container, double-check that it is running and that the API hostname and token in @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ are correct.
+
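+You can check that the dispatcher service is running with:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl status crunch-dispatch-slurm.service</span>
+</code></pre>
+</notextile>
+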
+Before the container finishes, SLURM's @squeue@ command will show the new job in the list of queued and running jobs.  For example, you might see:
+
+<notextile>
+<pre><code>~$ <span class="userinput">squeue --long</span>
+Fri Aug  5 13:57:50 2016
+  JOBID PARTITION     NAME     USER    STATE       TIME TIMELIMIT  NODES NODELIST(REASON)
+   8103   compute zzzzz-dz   crunch  RUNNING       1:56 UNLIMITED      1 compute0
+</code></pre>
+</notextile>
+
+The job's name corresponds to the container's UUID.  You can get more information about it by running, e.g., <notextile><code>scontrol show job Name=<b>UUID</b></code></notextile>.
+
+When the container finishes, the dispatcher will log that, with the final result:
+
+<notextile>
+<pre><code>2016/08/05 13:53:14 Container zzzzz-dz642-hdp2vpu9nq14tx0 now in state "Complete" with locked_by_uuid ""
+2016/08/05 13:53:14 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 finished
+</code></pre>
+</notextile>
+
+After the container finishes, you can get the container record by UUID *from a shell server* to see its results:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv get <b>zzzzz-dz642-hdp2vpu9nq14tx0</b></span>
+{
+ ...
+ "exit_code":0,
+ "log":"a01df2f7e5bc1c2ad59c60a837e90dc6+166",
+ "output":"d41d8cd98f00b204e9800998ecf8427e+0",
+ "state":"Complete",
+ ...
+}
+</code></pre>
+</notextile>
+
+You can use standard Keep tools to view the container's output and logs from their corresponding fields.  For example, to see the logs from the collection referenced in the @log@ field:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b></span>
+./crunch-run.txt
+./stderr.txt
+./stdout.txt
+~$ <span class="userinput">arv keep get <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b>/stdout.txt</span>
+2016-08-05T13:53:06.201011Z Hello, Crunch!
+</code></pre>
+</notextile>
+
+If the container does not dispatch successfully, refer to the crunch-dispatch-slurm logs for information about why it failed.
index f55bceb561555bf845254fa2c7e9a9a0d6a99fc3..b4d0d596f275d83401d1ba36d018c9e376f9665e 100644 (file)
@@ -32,29 +32,7 @@ On Debian-based systems:
 </code></pre>
 </notextile>
 
-h2. Install Docker
-
-Compute nodes must have Docker installed to run jobs inside containers.  This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported).  Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
-
-For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
-
-h2. Configure Docker
-
-Crunch runs jobs in Docker containers with relatively little configuration.  You may need to start the Docker daemon with specific options to make sure these jobs run smoothly in your environment.  This section highlights options that are useful to most installations.  Refer to the "Docker daemon reference":https://docs.docker.com/reference/commandline/daemon/ for complete information about all available options.
-
-The best way to configure these options varies by distribution.
-
-* If you're using our backported @docker.io@ package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker.io@.
-* If you're using another Debian-based package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker@.
-* On Red Hat-based distributions, you can list these options in the @other_args@ setting in @/etc/sysconfig/docker@.
-
-h3. Default ulimits
-
-Docker containers inherit ulimits from the Docker daemon.  However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job.  You may want to increase default limits for compute jobs by passing @--default-ulimit@ options to the Docker daemon.  For example, to allow jobs to open 10,000 files, set @--default-ulimit nofile=10000:10000@.
-
-h3. DNS
-
-Your containers must be able to resolve the hostname in the ARVADOS_API_HOST environment variable (provided by the Crunch dispatcher) and any hostnames returned in Keep service records.  If these names are not in public DNS records, you may need to set a DNS resolver for the containers by specifying the @--dns@ address with the IP address of an appropriate nameserver.  You may specify this option more than once to use multiple nameservers.
+{% include 'install_compute_docker' %}
 
 h2. Set up SLURM
 
@@ -64,63 +42,9 @@ h2. Copy configuration files from the dispatcher (API server)
 
 The @slurm.conf@ and @/etc/munge/munge.key@ files need to be identical across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":install-crunch-dispatch.html step to this compute node.
 
-h2. Configure FUSE
-
-Install this file as @/etc/fuse.conf@:
-
-<notextile>
-<pre>
-# Set the maximum number of FUSE mounts allowed to non-root users.
-# The default is 1000.
-#
-#mount_max = 1000
-
-# Allow non-root users to specify the 'allow_other' or 'allow_root'
-# mount options.
-#
-user_allow_other
-</pre>
-</notextile>
+{% include 'install_compute_fuse' %}
 
-h2. Configure the Docker cleaner
-
-The arvados-docker-cleaner program removes least recently used docker images as needed to keep disk usage below a configured limit.
-
-{% include 'notebox_begin' %}
-This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or run it with @--remove-stopped-containers never@.
-{% include 'notebox_end' %}
-
-Install runit to supervise the Docker cleaner daemon.  {% include 'install_runit' %}
-
-Configure runit to run the image cleaner using a suitable quota for your compute nodes and workload:
-
-<notextile>
-<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/sv</span>
-~$ <span class="userinput">cd /etc/sv</span>
-/etc/sv$ <span class="userinput">sudo mkdir arvados-docker-cleaner; cd arvados-docker-cleaner</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo mkdir log log/main</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo sh -c 'cat &gt;log/run' &lt;&lt;'EOF'
-#!/bin/sh
-exec svlogd -tt main
-EOF</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo sh -c 'cat &gt;run' &lt;&lt;'EOF'
-#!/bin/sh
-if [ -d /opt/rh/python33 ]; then
-  source scl_source enable python33
-fi
-exec python3 -m arvados_docker.cleaner --quota <b>50G</b>
-EOF</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo chmod +x run log/run</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo ln -s "$(pwd)" /etc/service/</span>
-</code></pre>
-</notextile>
-
-If you are using a different daemon supervisor, or if you want to test the daemon in a terminal window, an equivalent shell command to run arvados-docker-cleaner is:
-
-<notextile>
-<pre><code><span class="userinput">python3 -m arvados_docker.cleaner --quota <b>50G</b></span>
-</code></pre>
-</notextile>
+{% include 'install_docker_cleaner' %}
 
 h2. Add a Crunch user account
 
index 6548422f4f8d0492cfac61a25257c365f238bcde..102a3f470ee661d076b14b7c209eacdee525e415 100644 (file)
@@ -35,28 +35,66 @@ Verify that Keepstore is functional:
 
 <notextile>
 <pre><code>~$ <span class="userinput">keepstore -h</span>
-2015/05/08 13:41:16 keepstore starting, pid 2565
+2016/07/01 14:06:21 keepstore starting, pid 32339
 Usage of ./keepstore:
-  -azure-storage-account-key-file="": File containing the account key used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-account-name="": Azure storage account name used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-container-volume=[]: Use the given container as a storage volume. Can be given multiple times.
-  -azure-storage-replication=3: Replication level to report to clients when data is stored in an Azure container.
-  -blob-signature-ttl=1209600: Lifetime of blob permission signatures. Modifying the ttl will invalidate all existing signatures. See services/api/config/application.default.yml.
-  -blob-signing-key-file="": File containing the secret key for generating and verifying blob permission signatures.
-  -data-manager-token-file="": File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
-  -enforce-permissions=false: Enforce permission signatures on requests.
-  -listen=":25107": Listening address, in the form "host:port". e.g., 10.0.1.24:8000. Omit the host part to listen on all interfaces.
-  -max-buffers=128: Maximum RAM to use for data buffers, given in multiples of block size (64 MiB). When this limit is reached, HTTP requests requiring buffers (like GET and PUT) will wait for buffer space to be released.
+  -azure-max-get-bytes int
+       Maximum bytes to request in a single GET request. If smaller than 67108864, use multiple concurrent range requests to retrieve a block. (default 67108864)
+  -azure-storage-account-key-file string
+       File containing the account key used for subsequent --azure-storage-container-volume arguments.
+  -azure-storage-account-name string
+       Azure storage account name used for subsequent --azure-storage-container-volume arguments.
+  -azure-storage-container-volume value
+       Use the given container as a storage volume. Can be given multiple times. (default [])
+  -azure-storage-replication int
+       Replication level to report to clients when data is stored in an Azure container. (default 3)
+  -blob-signature-ttl int
+       Lifetime of blob permission signatures in seconds. Modifying the ttl will invalidate all existing signatures. See services/api/config/application.default.yml. (default 1209600)
+  -blob-signing-key-file string
+       File containing the secret key for generating and verifying blob permission signatures.
+  -data-manager-token-file string
+       File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
+  -enforce-permissions
+       Enforce permission signatures on requests.
+  -listen string
+       Listening address, in the form "host:port". e.g., 10.0.1.24:8000. Omit the host part to listen on all interfaces. (default ":25107")
+  -max-buffers int
+       Maximum RAM to use for data buffers, given in multiples of block size (64 MiB). When this limit is reached, HTTP requests requiring buffers (like GET and PUT) will wait for buffer space to be released. (default 128)
   -max-requests int
-   Maximum concurrent requests. When this limit is reached, new requests will receive 503 responses. Note: this limit does not include idle connections from clients using HTTP keepalive, so it does not strictly limit the number of concurrent connections. (default 2 * max-buffers)
-  -never-delete=false: If set, nothing will be deleted. HTTP 405 will be returned for valid DELETE requests.
-  -permission-key-file="": Synonym for -blob-signing-key-file.
-  -permission-ttl=0: Synonym for -blob-signature-ttl.
-  -pid="": Path to write pid file during startup. This file is kept open and locked with LOCK_EX until keepstore exits, so `fuser -k pidfile` is one way to shut down. Exit immediately if there is an error opening, locking, or writing the pid file.
-  -readonly=false: Do not write, delete, or touch anything on the following volumes.
-  -serialize=false: Serialize read and write operations on the following volumes.
-  -volume=[]: Local storage directory. Can be given more than once to add multiple directories. If none are supplied, the default is to use all directories named "keep" that exist in the top level directory of a mount point at startup time. Can be a comma-separated list, but this is deprecated: use multiple -volume arguments instead.
-  -volumes=[]: Deprecated synonym for -volume.
+       Maximum concurrent requests. When this limit is reached, new requests will receive 503 responses. Note: this limit does not include idle connections from clients using HTTP keepalive, so it does not strictly limit the number of concurrent connections. (default 2 * max-buffers)
+  -never-delete
+       If true, nothing will be deleted. Warning: the relevant features in keepstore and data manager have not been extensively tested. You should leave this option alone unless you can afford to lose data. (default true)
+  -permission-key-file string
+       Synonym for -blob-signing-key-file.
+  -permission-ttl int
+       Synonym for -blob-signature-ttl.
+  -pid fuser -k pidfile
+       Path to write pid file during startup. This file is kept open and locked with LOCK_EX until keepstore exits, so fuser -k pidfile is one way to shut down. Exit immediately if there is an error opening, locking, or writing the pid file.
+  -readonly
+       Do not write, delete, or touch anything on the following volumes.
+  -s3-access-key-file string
+       File containing the access key used for subsequent -s3-bucket-volume arguments.
+  -s3-bucket-volume value
+       Use the given bucket as a storage volume. Can be given multiple times. (default [])
+  -s3-endpoint string
+       Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use "https://storage.googleapis.com".
+  -s3-region string
+       AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are ["ap-southeast-1" "eu-west-1" "us-gov-west-1" "sa-east-1" "cn-north-1" "ap-northeast-1" "ap-southeast-2" "eu-central-1" "us-east-1" "us-west-1" "us-west-2"].
+  -s3-replication int
+       Replication level reported to clients for subsequent -s3-bucket-volume arguments. (default 2)
+  -s3-secret-key-file string
+       File containing the secret key used for subsequent -s3-bucket-volume arguments.
+  -s3-unsafe-delete
+       EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.
+  -serialize
+       Serialize read and write operations on the following volumes.
+  -trash-check-interval duration
+       Time duration at which the emptyTrash goroutine will check and delete expired trashed blocks. Default is one day. (default 24h0m0s)
+  -trash-lifetime duration
+       Time duration after a block is trashed during which it can be recovered using an /untrash request
+  -volume value
+       Local storage directory. Can be given more than once to add multiple directories. If none are supplied, the default is to use all directories named "keep" that exist in the top level directory of a mount point at startup time. Can be a comma-separated list, but this is deprecated: use multiple -volume arguments instead. (default [])
+  -volumes value
+       Deprecated synonym for -volume. (default [])
 </code></pre>
 </notextile>
 
index 7ef45aa30dbf090347739731aca0d1514128601a..8cde514f68f769a7ab336bb606f0e1ff8743b0c4 100644 (file)
@@ -6,7 +6,7 @@ title: "Installation"
 
 ...
 
-To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
+Arvados CLI tools are written in Ruby and Python.  To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.  The @arv@ command also relies on other Arvados tools.  To get those, install the @arvados-python-client@ and @arvados-cwl-runner@ packages, either from PyPI or source.
 
 h3. Prerequisites: Ruby, Bundler, and curl libraries
 
@@ -16,15 +16,21 @@ Install curl libraries with your system's package manager. For example, on Debia
 
 <notextile>
 <pre>
-$ <code class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev</code>
+~$ <code class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev</code>
 </pre>
 </notextile>
 
-h3. Option 1: Install with RubyGems
+h3. Option 1: Install from RubyGems and PyPI
 
 <notextile>
 <pre>
-$ <code class="userinput">sudo -i gem install arvados-cli</code>
+~$ <code class="userinput">sudo -i gem install arvados-cli</code>
+</pre>
+</notextile>
+
+<notextile>
+<pre>
+~$ <code class="userinput">pip install arvados-python-client arvados-cwl-runner</code>
 </pre>
 </notextile>
 
@@ -32,9 +38,13 @@ h3. Option 2: Build and install from source
 
 <notextile>
 <pre>
-$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/cli</code>
-$ <code class="userinput">gem build arvados-cli.gemspec</code>
-$ <code class="userinput">sudo -i gem install arvados-cli-*.gem</code>
+~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+~$ <code class="userinput">cd arvados/sdk/cli</code>
+~/arvados/sdk/cli$ <code class="userinput">gem build arvados-cli.gemspec</code>
+~/arvados/sdk/cli$ <code class="userinput">sudo -i gem install arvados-cli-*.gem</code>
+~/arvados/sdk/cli$ <code class="userinput">cd ../python</code>
+~/arvados/sdk/python$ <code class="userinput">python setup.py install</code>
+~/arvados/sdk/python$ <code class="userinput">cd ../cwl</code>
+~/arvados/sdk/cwl$ <code class="userinput">python setup.py install</code>
 </pre>
 </notextile>
index 11b1172399f2ebb217bb3a9b5ffef8b2a492f356..48f72d3a7dedf35c48ff219ba29a1a59b17fc4f1 100644 (file)
@@ -60,7 +60,7 @@ h3. Implementing your code to use SDK
 <code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExampleWithPrompt.java</code> can be
         used to make calls to API server interactively.
 
-Please use these implementations to see how you would want use the SDK from your java program.
+Please use these implementations to see how you would use the SDK from your java program.
 
 Also, refer to <code class="userinput">$ARVADOS_HOME/arvados/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java</code>
 for more sample API invocation examples.
@@ -73,7 +73,7 @@ make various <code class="userinput">call</code> requests.
 * To compile the examples
 <notextile>
 <pre>
-$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
 ArvadosSDKJavaExample*.java</code>
 This results in the generation of the ArvadosSDKJavaExample*.class files
 in the same directory as the java files
@@ -83,9 +83,9 @@ in the same directory as the java files
 * To run the samples
 <notextile>
 <pre>
-$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
 ArvadosSDKJavaExample</code>
-$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
 ArvadosSDKJavaExampleWithPrompt</code>
 </pre>
 </notextile>
index a6a0b565c9cb3bcdf8b5a03a6eb79370ce34ab47..0b0f77d377e209afc7c0f2a7717f9a0360293a6d 100644 (file)
@@ -6,9 +6,7 @@ title: "Python SDK"
 
 ...
 
-The Python SDK provides a generic set of wrappers so you can make API calls easily. It performs some validation before connecting to the API server: for example, it refuses to do an API call if a required parameter is missing.
-
-The library also includes some conveniences for use in Crunch scripts; see "Crunch utility libraries":crunch-utility-libraries.html for details.
+The Python SDK provides access from Python to the Arvados API and Keep.  It also includes a number of command line tools for using and administering Arvados and Keep, and some conveniences for use in Crunch scripts; see "Crunch utility libraries":crunch-utility-libraries.html for details.
 
 h3. Installation
 
@@ -61,7 +59,7 @@ Install the @python-setuptools@ package from your distribution.  Then run the fo
 <notextile>
 <pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
 ~$ <span class="userinput">cd arvados/sdk/python</span>
-~$ <span class="userinput">python2.7 setup.py install</span>
+~/arvados/sdk/python$ <span class="userinput">python2.7 setup.py install</span>
 </code></pre>
 </notextile>
 
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-input-local.yml b/doc/user/cwl/bwa-mem/bwa-mem-input-local.yml
new file mode 100755 (executable)
index 0000000..9939bcb
--- /dev/null
@@ -0,0 +1,14 @@
+#!/usr/bin/env cwltool
+cwl:tool: bwa-mem.cwl
+reference:
+  class: File
+  location: 19.fasta.bwt
+read_p1:
+  class: File
+  location: HWI-ST1027_129_D0THKACXX.1_1.fastq
+read_p2:
+  class: File
+  location: HWI-ST1027_129_D0THKACXX.1_2.fastq
+group_id: arvados_tutorial
+sample_id: HWI-ST1027_129
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-input.yml b/doc/user/cwl/bwa-mem/bwa-mem-input.yml
new file mode 100755 (executable)
index 0000000..af248c9
--- /dev/null
@@ -0,0 +1,14 @@
+#!/usr/bin/env cwl-runner
+cwl:tool: bwa-mem.cwl
+reference:
+  class: File
+  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+read_p1:
+  class: File
+  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_1.fastq
+read_p2:
+  class: File
+  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_2.fastq
+group_id: arvados_tutorial
+sample_id: HWI-ST1027_129
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-template.yml b/doc/user/cwl/bwa-mem/bwa-mem-template.yml
new file mode 100644 (file)
index 0000000..448f765
--- /dev/null
@@ -0,0 +1,4 @@
+reference:
+  class: File
+  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem.cwl b/doc/user/cwl/bwa-mem/bwa-mem.cwl
new file mode 100755 (executable)
index 0000000..ba3b6a0
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+
+hints:
+  DockerRequirement:
+    dockerPull: biodckr/bwa
+
+baseCommand: [bwa, mem]
+
+arguments:
+  - {prefix: "-t", valueFrom: $(runtime.cores)}
+  - {prefix: "-R", valueFrom: "@RG\tID:$(inputs.group_id)\tPL:$(inputs.PL)\tSM:$(inputs.sample_id)"}
+
+inputs:
+  reference:
+    type: File
+    inputBinding:
+      position: 1
+      valueFrom: $(self.dirname)/$(self.nameroot)
+    secondaryFiles:
+      - ^.ann
+      - ^.amb
+      - ^.pac
+      - ^.sa
+    doc: The index files produced by `bwa index`
+  read_p1:
+    type: File
+    inputBinding:
+      position: 2
+    doc: The reads, in fastq format.
+  read_p2:
+    type: File?
+    inputBinding:
+      position: 3
+    doc: For mate-paired reads, the second file (optional).
+  group_id: string
+  sample_id: string
+  PL: string
+
+stdout: $(inputs.read_p1.nameroot).sam
+
+outputs:
+  aligned_sam:
+    type: stdout
diff --git a/doc/user/cwl/cwl-runner.html.textile.liquid b/doc/user/cwl/cwl-runner.html.textile.liquid
new file mode 100644 (file)
index 0000000..c00f475
--- /dev/null
@@ -0,0 +1,297 @@
+---
+layout: default
+navsection: userguide
+title: Using Common Workflow Language
+...
+
+The "Common Workflow Language (CWL)":http://commonwl.org is a multi-vendor open standard for describing analysis tools and workflows that are portable across a variety of platforms.  CWL is the recommended way to develop and run workflows for Arvados.  Arvados supports the "CWL v1.0":http://commonwl.org/v1.0 specification.
+
+{% include 'tutorial_expectations' %}
+
+h2. Setting up
+
+The @arvados-cwl-runner@ client is installed by default on Arvados shell nodes.  However, if you do not have @arvados-cwl-runner@, you may install it using @pip@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">virtualenv ~/venv</span>
+~$ <span class="userinput">. ~/venv/bin/activate</span>
+~$ <span class="userinput">pip install arvados-cwl-runner</span>
+</code></pre>
+</notextile>
+
+h3. Docker
+
+Certain features of @arvados-cwl-runner@ require access to Docker.  You can determine if you have access to Docker by running @docker version@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker version</span>
+Client:
+ Version:      1.9.1
+ API version:  1.21
+ Go version:   go1.4.2
+ Git commit:   a34a1d5
+ Built:        Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch:      linux/amd64
+
+Server:
+ Version:      1.9.1
+ API version:  1.21
+ Go version:   go1.4.2
+ Git commit:   a34a1d5
+ Built:        Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch:      linux/amd64
+</code></pre>
+</notextile>
+
+If this returns an error, contact the sysadmin of your cluster for assistance.  Alternatively, if you have Docker installed on your local workstation, you may follow the instructions above to install @arvados-cwl-runner@.
+
+h3. Getting the example files
+
+The tutorial files are located in the documentation section of the Arvados source repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados</span>
+~$ <span class="userinput">cd arvados/doc/user/cwl/bwa-mem</span>
+</code></pre>
+</notextile>
+
+The tutorial data is hosted on "https://cloud.curoverse.com":https://cloud.curoverse.com (also referred to by the identifier *qr1hi*).  If you are using a different Arvados instance, you may need to copy the data to your own instance.  The easiest way to do this is with "arv-copy":{{site.baseurl}}/user/topics/arv-copy.html (this requires signing up for a free cloud.curoverse.com account).
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst settings 2463fa9efeb75e099685528b3b9071e0+438</span>
+~$ <span class="userinput">arv-copy --src qr1hi --dst settings ae480c5099b81e17267b7445e35b4bc7+180</span>
+</code></pre>
+</notextile>
+
+If you do not wish to create an account on "https://cloud.curoverse.com":https://cloud.curoverse.com, you may download the files anonymously and upload them to your local Arvados instance:
+
+"https://cloud.curoverse.com/collections/2463fa9efeb75e099685528b3b9071e0+438":https://cloud.curoverse.com/collections/2463fa9efeb75e099685528b3b9071e0+438
+
+"https://cloud.curoverse.com/collections/ae480c5099b81e17267b7445e35b4bc7+180":https://cloud.curoverse.com/collections/ae480c5099b81e17267b7445e35b4bc7+180
+
+h2. Submitting a workflow to an Arvados cluster
+
+Use @arvados-cwl-runner@ to submit CWL workflows to Arvados.  After submitting the workflow, @arvados-cwl-runner@ will wait for it to complete and print the final result to standard output.  Note that once submitted, the workflow runs entirely on Arvados, so even if you interrupt @arvados-cwl-runner@ or log out, the workflow will continue to run.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+To submit a workflow and exit immediately, use the @--no-wait@ option.  This will print out the uuid of the job that was submitted to standard output.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --no-wait bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Uploaded to qr1hi-4zz18-eqnfwrow8aysa9q
+2016-06-30 15:07:52 arvados.cwl-runner[12480] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+qr1hi-8i9sb-fm2n3b1w0l6bskg
+</code></pre>
+</notextile>
+
+To run a workflow with local control, use @--local@.  This means that the host where you run @arvados-cwl-runner@ will be responsible for submitting jobs. With @--local@, if you interrupt @arvados-cwl-runner@ or log out, the workflow will be terminated.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --local bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 10:05:19 arvados.cwl-runner[16290] INFO: Pipeline instance qr1hi-d1hrv-92wcu6ldtio74r4
+2016-07-01 10:05:28 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Queued
+2016-07-01 10:05:29 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Running
+2016-07-01 10:05:45 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Complete
+2016-07-01 10:05:46 arvados.cwl-runner[16290] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "size": 30738986,
+        "path": "keep:15f56bad0aaa7364819bf14ca2a27c63+88/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File"
+    }
+}
+</code></pre>
+</notextile>
+
+h2. Work reuse
+
+Workflows submitted with @arvados-cwl-runner@ take advantage of Arvados job reuse.  If you submit a workflow identical to one that has run before, it will short-circuit the execution and return the result of the previous run.  This also applies to individual workflow steps.  For example, in a two-step workflow where the first step has run before, the results of the first step are reused and only the new second step is executed.  You can disable this behavior with @--disable-reuse@.
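+
+For example, to force every step of the tutorial workflow to re-run instead of reusing earlier results:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --disable-reuse bwa-mem.cwl bwa-mem-input.yml</span>
+</code></pre>
+</notextile>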
+
+h2. Referencing files
+
+When running a workflow on an Arvados cluster, the input files must be stored in Keep.  There are several ways this can happen.
+
+A URI reference to Keep uses the @keep:@ scheme followed by the portable data hash, collection size, and path to the file inside the collection.  For example, @keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@.
+
+If you reference a file in "arv-mount":{{site.baseurl}}/user/tutorials/tutorial-keep-mount.html, such as @/home/example/keep/by_id/2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@, then @arvados-cwl-runner@ will automatically determine the appropriate Keep URI reference.
+
+If you reference a local file which is not in @arv-mount@, then @arvados-cwl-runner@ will upload the file to Keep and use the Keep URI reference from the upload.
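+
+For example, these three @location@ values all refer to the same tutorial file (assuming the latter two paths exist on your machine):
+
+<notextile>
+<pre><code># A Keep URI reference:
+location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+# The same file seen through arv-mount; converted to the Keep URI above:
+location: /home/example/keep/by_id/2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+# A plain local file; uploaded to Keep automatically:
+location: 19.fasta.bwt
+</code></pre>
+</notextile>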
+
+h2. Registering a workflow with Workbench
+
+Use @--create-template@ to register a CWL workflow with Arvados Workbench.  This enables you to run workflows by clicking the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a pipeline...</span> button on the Workbench Dashboard.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --create-template bwa-mem.cwl</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Upload local files: "bwa-mem.cwl"
+2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Uploaded to qr1hi-4zz18-7e0hedrmkuyoei3
+2016-07-01 12:21:01 arvados.cwl-runner[15796] INFO: Created template qr1hi-p5p6p-rjleou1dwr167v5
+qr1hi-p5p6p-rjleou1dwr167v5
+</code></pre>
+</notextile>
+
+You can provide a partial input file to set default values for the workflow input parameters:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --create-template bwa-mem.cwl bwa-mem-template.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Upload local files: "bwa-mem.cwl"
+2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Uploaded to qr1hi-4zz18-0f91qkovk4ml18o
+2016-07-01 14:09:50 arvados.cwl-runner[3730] INFO: Created template qr1hi-p5p6p-0deqe6nuuyqns2i
+qr1hi-p5p6p-0deqe6nuuyqns2i
+</code></pre>
+</notextile>
+
+h2. Making workflows directly executable
+
+You can make a workflow file directly executable (@cwl-runner@ should be an alias to @arvados-cwl-runner@) by adding the following line to the top of the file:
+
+<notextile>
+<pre><code>#!/usr/bin/env cwl-runner
+</code></pre>
+</notextile>
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">./bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+You can even make an input file directly executable the same way with the following two lines at the top:
+
+<notextile>
+<pre><code>#!/usr/bin/env cwl-runner
+cwl:tool: <span class="userinput">bwa-mem.cwl</span>
+</code></pre>
+</notextile>
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">./bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+h2. Developing workflows
+
+For an introduction and detailed documentation about writing CWL, see the "User Guide":http://commonwl.org/v1.0/UserGuide.html and the "Specification":http://commonwl.org/v1.0 .
+
+To run on Arvados, a workflow should provide a @DockerRequirement@ in the @hints@ section.
+
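+For example, the tutorial's @bwa-mem.cwl@ declares:
+
+<notextile>
+<pre><code>hints:
+  DockerRequirement:
+    dockerPull: biodckr/bwa
+</code></pre>
+</notextile>
+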
+When developing a workflow, it is often helpful to run it on the local host to avoid the overhead of submitting it to the cluster.  To execute a workflow only on the local host (without submitting jobs to an Arvados cluster), use the @cwltool@ command.  Note that you must also have the input data accessible on the local host.  You can use @arv-get@ to fetch the data from Keep.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arv-get 2463fa9efeb75e099685528b3b9071e0+438/ .</span>
+156 MiB / 156 MiB 100.0%
+~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arv-get ae480c5099b81e17267b7445e35b4bc7+180/ .</span>
+23 MiB / 23 MiB 100.0%
+~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">cwltool bwa-mem-input.yml bwa-mem-input-local.yml</span>
+cwltool 1.0.20160629140624
+[job bwa-mem.cwl] /home/example/arvados/doc/user/cwl/bwa-mem$ docker \
+    run \
+    -i \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.ann:/var/lib/cwl/job979368791_bwa-mem/19.fasta.ann:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.sa:/var/lib/cwl/job979368791_bwa-mem/19.fasta.sa:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.amb:/var/lib/cwl/job979368791_bwa-mem/19.fasta.amb:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.pac:/var/lib/cwl/job979368791_bwa-mem/19.fasta.pac:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.bwt:/var/lib/cwl/job979368791_bwa-mem/19.fasta.bwt:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem:/var/spool/cwl:rw \
+    --volume=/tmp/tmpgzyou9:/tmp:rw \
+    --workdir=/var/spool/cwl \
+    --read-only=true \
+    --log-driver=none \
+    --user=1001 \
+    --rm \
+    --env=TMPDIR=/tmp \
+    --env=HOME=/var/spool/cwl \
+    biodckr/bwa \
+    bwa \
+    mem \
+    -t \
+    1 \
+    -R \
+    '@RG       ID:arvados_tutorial     PL:illumina     SM:HWI-ST1027_129' \
+    /var/lib/cwl/job979368791_bwa-mem/19.fasta \
+    /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq \
+    /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq > /home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam
+[M::bwa_idx_load_from_disk] read 0 ALT contigs
+[M::process] read 100000 sequences (10000000 bp)...
+[M::mem_pestat] # candidate unique pairs for (FF, FR, RF, RR): (0, 4745, 1, 0)
+[M::mem_pestat] skip orientation FF as there are not enough pairs
+[M::mem_pestat] analyzing insert size distribution for orientation FR...
+[M::mem_pestat] (25, 50, 75) percentile: (154, 181, 214)
+[M::mem_pestat] low and high boundaries for computing mean and std.dev: (34, 334)
+[M::mem_pestat] mean and std.dev: (185.63, 44.88)
+[M::mem_pestat] low and high boundaries for proper pairs: (1, 394)
+[M::mem_pestat] skip orientation RF as there are not enough pairs
+[M::mem_pestat] skip orientation RR as there are not enough pairs
+[M::mem_process_seqs] Processed 100000 reads in 9.848 CPU sec, 9.864 real sec
+[main] Version: 0.7.12-r1039
+[main] CMD: bwa mem -t 1 -R @RG        ID:arvados_tutorial     PL:illumina     SM:HWI-ST1027_129 /var/lib/cwl/job979368791_bwa-mem/19.fasta /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq
+[main] Real time: 10.061 sec; CPU: 10.032 sec
+Final process status is success
+{
+    "aligned_sam": {
+        "size": 30738959,
+        "path": "/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0c668cca45fef02397bb5302880526d300ee4dac",
+        "class": "File"
+    }
+}
+</code></pre>
+</notextile>
+
+If you get the error @JavascriptException: Long-running script killed after 20 seconds.@ this may be due to the Dockerized Node.js engine taking too long to start.  You may address this by installing Node.js locally (run @apt-get install nodejs@ on Debian or Ubuntu) or by specifying a longer timeout with the @--eval-timeout@ option.  For example, run the workflow with @cwltool --eval-timeout=40@ for a 40-second timeout.
diff --git a/doc/user/cwl/cwl-style.html.textile.liquid b/doc/user/cwl/cwl-style.html.textile.liquid
new file mode 100644 (file)
index 0000000..5c6d049
--- /dev/null
@@ -0,0 +1,168 @@
+---
+layout: default
+navsection: userguide
+title: Best Practices for writing CWL
+...
+
+* Build a reusable library of components.  Share tool wrappers and subworkflows between projects.  Make use of and contribute to "community maintained workflows and tools":https://github.com/common-workflow-language/workflows and tool registries such as "Dockstore":http://dockstore.org .
+
+* When combining a parameter value with a string, such as adding a filename extension, write @$(inputs.file.basename).ext@ instead of @$(inputs.file.basename + 'ext')@.  The first form is evaluated as a simple text substitution, while the second form (using the @+@ operator) is evaluated as an arbitrary Javascript expression and requires that you declare @InlineJavascriptRequirement@.
+
+* Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@ unless you specifically need them.  Don't include them "just in case" because they change the default behavior and may imply extra overhead.
+
+* Don't write CWL scripts that access the Arvados SDK.  This is non-portable; a script that accesses Arvados directly won't work with @cwltool@ or crunch v2.
+
+* CommandLineTools wrapping custom scripts should represent the script as an input parameter with the script file as a default value.  Use @secondaryFiles@ for scripts that consist of multiple files.  For example:
+
+<pre>
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: python
+inputs:
+  script:
+    type: File
+    inputBinding: {position: 1}
+    default:
+      class: File
+      location: bclfastq.py
+      secondaryFiles:
+        - class: File
+          location: helper1.py
+        - class: File
+          location: helper2.py
+  inputfile:
+    type: File
+    inputBinding: {position: 2}
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: "*.fastq"
+</pre>
+
+* You can get the designated temporary directory using @$(runtime.tmpdir)@ in your CWL file, or from the @$TMPDIR@ environment variable in your script.
+
+* Similarly, you can get the designated output directory using @$(runtime.outdir)@, or from the @$HOME@ environment variable in your script.
+
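+For example, a sketch of passing both directories on a tool's command line (the @--tmp@ and @--out@ flags here are hypothetical):
+
+<pre>
+arguments:
+  - {prefix: "--tmp", valueFrom: $(runtime.tmpdir)}
+  - {prefix: "--out", valueFrom: $(runtime.outdir)}
+</pre>
+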
+* Use @ExpressionTool@ to efficiently rearrange input files between steps of a Workflow.  For example, the following expression accepts a directory containing files paired by @_R1_@ and @_R2_@ and produces an array of Directories containing each pair.
+
+<pre>
+class: ExpressionTool
+cwlVersion: v1.0
+inputs:
+  inputdir: Directory
+outputs:
+  out: Directory[]
+requirements:
+  InlineJavascriptRequirement: {}
+expression: |
+  ${
+    var samples = {};
+    for (var i = 0; i < inputs.inputdir.listing.length; i++) {
+      var file = inputs.inputdir.listing[i];
+      var groups = file.basename.match(/^(.+)(_R[12]_)(.+)$/);
+      if (groups) {
+        if (!samples[groups[1]]) {
+          samples[groups[1]] = [];
+        }
+        samples[groups[1]].push(file);
+      }
+    }
+    var dirs = [];
+    for (var key in samples) {
+      dirs.push({"class": "Directory",
+                 "basename": key,
+                 "listing": [samples[key]]});
+    }
+    return {"out": dirs};
+  }
+</pre>
+
+* Avoid specifying resource requirements in CommandLineTool.  Prefer to specify them in the workflow.  You can provide a default resource requirement in the top level @hints@ section, and individual steps can override it with their own resource requirement.
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp: File
+outputs:
+  out:
+    type: File
+    outputSource: step2/out
+hints:
+  ResourceRequirement:
+    ramMin: 1000
+    coresMin: 1
+    tmpdirMin: 45000
+steps:
+  step1:
+    in: {inp: inp}
+    out: [out]
+    run: tool1.cwl
+  step2:
+    in: {inp: step1/out}
+    out: [out]
+    run: tool2.cwl
+    hints:
+      ResourceRequirement:
+        ramMin: 2000
+        coresMin: 2
+        tmpdirMin: 90000
+</pre>
+
+* Instead of scattering separate steps, prefer to scatter over a subworkflow.
+
+With the following pattern, every sample must complete @step1@ before @step2@ can start computing on any of them.  This means a single long-running sample can prevent the rest of the workflow from moving on:
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp: File[]
+outputs:
+  out:
+    type: File[]
+    outputSource: step3/out
+steps:
+  step1:
+    in: {inp: inp}
+    scatter: inp
+    out: [out]
+    run: tool1.cwl
+  step2:
+    in: {inp: step1/out}
+    scatter: inp
+    out: [out]
+    run: tool2.cwl
+  step3:
+    in: {inp: step2/out}
+    scatter: inp
+    out: [out]
+    run: tool3.cwl
+</pre>
+
+Instead, scatter over a subworkflow.  In this pattern, a sample can proceed to @step2@ as soon as @step1@ is done, independently of any other samples.
+For example (note that the subworkflow could also be placed in a separate file):
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp: File[]
+outputs:
+  out:
+    type: File[]
+    outputSource: step1/out
+requirements:
+  ScatterFeatureRequirement: {}
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    in: {inp: inp}
+    scatter: inp
+    out: [out]
+    run:
+      class: Workflow
+      inputs:
+        inp: File
+      outputs:
+        out:
+          type: File
+          outputSource: step3/out
+      steps:
+        step1:
+          in: {inp: inp}
+          out: [out]
+          run: tool1.cwl
+        step2:
+          in: {inp: step1/out}
+          out: [out]
+          run: tool2.cwl
+        step3:
+          in: {inp: step2/out}
+          out: [out]
+          run: tool3.cwl
+</pre>
index 58ad868e5e50083576ade0dd9854ebc309dc580f..4ede3b97e31252d9d9654dde47e67acf337d4104 100644 (file)
@@ -12,7 +12,7 @@ Webshell gives you access to an arvados virtual machine from your browser with n
 
 In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Virtual machines* to see the list of virtual machines you can access.  If you do not have access to any virtual machines, please click on <span class="btn btn-sm btn-primary">Send request for shell access</span> or send an email to "support@curoverse.com":mailto:support@curoverse.com.
 
-Each row in the Virtual Machines panel lists the hostname of the VM, along with a <code>Log in as *you*</code> button under the column "Web shell beta". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.
+Each row in the Virtual Machines panel lists the hostname of the VM, along with a <code>Log in as *you*</code> button under the "Web shell" column. Clicking this button opens a web shell terminal in a new browser tab and logs you in.
 
 !{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/vm-access-with-webshell.png!
 
index 1ec80a619b7368ecc0b1b44ddaa74c001374ac92..ed0a126a41cf6d07f018ff7fe00cad49d5b32fd8 100644 (file)
@@ -13,9 +13,9 @@ h2. arv-copy
 
 @arv-copy@ allows users to copy collections and pipeline templates from one cluster to another. By default, @arv-copy@ will recursively go through a template and copy all dependencies associated with the object.
 
-For example, let's copy from our <a href="https://cloud.curoverse.com/">beta cloud instance *qr1hi*</a> to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangable with any cluster name. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
+For example, let's copy from the <a href="https://cloud.curoverse.com/">cloud instance *qr1hi*</a> to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangeable with any cluster name. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
 
-In order for the clusters to be able to communicate with each other, you must create custom configuration files for both clusters. In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Current token*. Copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ in both of your clusters. Then, create two configuration files, one for each cluster. The names of the files must have the format of *uuid_prefix.conf*. In our example, let's make two files, one for *qr1hi* and one for *dst_cluster*. From your *Current token* page in *qr1hi* and *dst_cluster*, copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@.
+In order to communicate with both clusters, you must create a custom configuration file for each cluster. In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Current token*. Create two configuration files, one for each cluster; the names of the files must have the format of *uuid_prefix.conf*. In our example, let's make two files, one for *qr1hi* and one for *dst_cluster*. From your *Current token* page in *qr1hi* and *dst_cluster*, copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@.
 
 !{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/api-token-host.png!
 
@@ -39,7 +39,7 @@ First, select the uuid of the collection you want to copy from the source cluste
 Now copy the collection from *qr1hi* to *dst_cluster*. We will use the uuid @qr1hi-4zz18-tci4vn4fa95w0zx@ as an example. You can find this collection in the <a href="https://cloud.curoverse.com/collections/qr1hi-4zz18-tci4vn4fa95w0zx">lobSTR v.3 project on cloud.curoverse.com</a>.
 <notextile>
 <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster qr1hi-4zz18-tci4vn4fa95w0zx</span>
-qr1hi-4zz18-tci4vn4fa95w0zx: 6.1M / 6.1M 100.0% 
+qr1hi-4zz18-tci4vn4fa95w0zx: 6.1M / 6.1M 100.0%
 arvados.arv-copy[1234] INFO: Success: created copy with uuid dst_cluster-4zz18-8765943210cdbae
 </code></pre>
 </notextile>
@@ -48,7 +48,7 @@ The output of arv-copy displays the uuid of the collection generated in the dest
 
 For example, this will copy the collection to project dst_cluster-j7d0g-a894213ukjhal12 in the destination cluster.
 
-<notextile> <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --project-uuid dst_cluster-j7d0g-a894213ukjhal12 qr1hi-4zz18-tci4vn4fa95w0zx</span> 
+<notextile> <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --project-uuid dst_cluster-j7d0g-a894213ukjhal12 qr1hi-4zz18-tci4vn4fa95w0zx</span>
 </code></pre>
 </notextile>
 
@@ -66,7 +66,7 @@ arvados.arv-copy[19694] INFO: Success: created copy with uuid dst_cluster-p5p6p-
 </code></pre>
 </notextile>
 
-New branches in the destination git repo will be created for each branch used in the pipeline template. For example, if your source branch was named ac21f0d45a76294aaca0c0c0fdf06eb72d03368d, your new branch will be named @git_git_qr1hi_arvadosapi_com_reponame_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d@. 
+New branches in the destination git repo will be created for each branch used in the pipeline template. For example, if your source branch was named ac21f0d45a76294aaca0c0c0fdf06eb72d03368d, your new branch will be named @git_git_qr1hi_arvadosapi_com_reponame_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d@.
 
 By default, if you copy a pipeline template recursively, you will find that the template as well as all the dependencies are in your home project.
 
index 994f437b5b3d7cb5cd7299a60dd85774c03707de..0e19be12b6613895aa6045fa8f55cbe9e63e03db 100644 (file)
@@ -4,6 +4,8 @@ navsection: userguide
 title: "Tools for writing Crunch pipelines"
 ...
 
+{% include 'pipeline_deprecation_notice' %}
+
 Arvados includes a number of tools to help you develop pipelines and jobs for Crunch.  This overview explains each tool's intended use to help you choose the right one.
 
 h2. Use the "arv-run command-line utility":arv-run.html
index 9f10fe43df97b6b87e454663fd92d911db917fcc..9a2e12c09677beb59495b95404caea2c8622be5a 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: userguide
-title: "Running a pipeline on the command line"
+title: "Running an Arvados pipeline"
 ...
 
 This tutorial demonstrates how to use the command line to run the same pipeline as described in "running a pipeline using Workbench.":{{site.baseurl}}/user/tutorials/tutorial-pipeline-workbench.html
index 6d0058b5e950e8c1b0866158ee815a859fcef4a4..3a7f85cc9a1f96972b05ea5cf71a08b601099bd2 100644 (file)
@@ -4,6 +4,8 @@ navsection: userguide
 title: "Concurrent Crunch tasks"
 ...
 
+{% include 'pipeline_deprecation_notice' %}
+
 In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically create concurrent jobs by creating a separate task per file.  For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file.  This tutorial will demonstrate how to create Crunch tasks directly.
 
 Start by entering the @crunch_scripts@ directory of your Git repository:
index 90dc1970a79f483ad328e56f0fd616a42c8998f5..ef4634ee742dd26a35ea3f39c5414c2bc383127f 100644 (file)
@@ -4,6 +4,8 @@ navsection: userguide
 title: "Writing a pipeline template"
 ...
 
+{% include 'pipeline_deprecation_notice' %}
+
 This tutorial demonstrates how to construct a two stage pipeline template that uses the "bwa mem":http://bio-bwa.sourceforge.net/ tool to produce a "Sequence Alignment/Map (SAM)":https://samtools.github.io/ file, then uses the "Picard SortSam tool":http://picard.sourceforge.net/command-line-overview.shtml#SortSam to produce a BAM (Binary Alignment/Map) file.
 
 {% include 'tutorial_expectations' %}
@@ -64,7 +66,7 @@ For more information and examples for writing pipelines, see the "pipeline templ
 
 h2. Re-using your pipeline run
 
-Arvados allows users to re-use jobs that have the same inputs in order to save computing time and resources. Users are able to change a job downstream without re-computing earlier jobs. This section shows which version control parameters should be tuned to make sure Arvados will not re-compute your jobs. 
+Arvados allows users to re-use jobs that have the same inputs in order to save computing time and resources. Users are able to change a job downstream without re-computing earlier jobs. This section shows which version control parameters should be tuned to make sure Arvados will not re-compute your jobs.
 
 Note: Job reuse can only happen if none of the input collections change.
 
index bf73c8cc1943dce1bd22f9df03756edb947a8111..d4caafef5cd4342583118ac96cad0376df10cd9a 100644 (file)
@@ -5,6 +5,8 @@ navmenu: Tutorials
 title: "Writing a Crunch script"
 ...
 
+{% include 'pipeline_deprecation_notice' %}
+
 This tutorial demonstrates how to write a script using Arvados Python SDK.  The Arvados SDK supports access to advanced features not available using the @run-command@ wrapper, such as scheduling concurrent tasks across nodes.
 
 {% include 'tutorial_expectations' %}
index b17f951e74c84867e63de156557efea21ff308f9..47e8dc750cf0691e8f859468cbc433e50ea358de 100644 (file)
@@ -5,6 +5,8 @@ navmenu: Tutorials
 title: "Running on an Arvados cluster"
 ...
 
+{% include 'pipeline_deprecation_notice' %}
+
 This tutorial demonstrates how to create a pipeline to run your crunch script on an Arvados cluster.  Cluster jobs can scale out to multiple nodes, and use @git@ and @docker@ to store the complete system snapshot required to achieve reproducibility.
 
 {% include 'tutorial_expectations' %}
diff --git a/docker/.gitignore b/docker/.gitignore
deleted file mode 100644 (file)
index ff626a3..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-*-image
-build/
diff --git a/docker/README.md b/docker/README.md
deleted file mode 100644 (file)
index 9c03e1b..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-Deploying Arvados in Docker Containers
-======================================
-
-This file explains how to build and deploy Arvados servers in Docker
-containers, so that they can be run easily in different environments
-(a dedicated server, a developer's laptop, a virtual machine,
-etc).
-
-Prerequisites
--------------
-
-* Docker
-
-  Docker is a Linux container management system. It is a very young system but
-  is being developed rapidly.
-  [Installation packages](http://www.docker.io/gettingstarted/)
-  are available for several platforms.
-  
-  If a prebuilt docker package is not available for your platform, the
-  short instructions for installing it are:
-  
-  1. Create a `docker` group and add yourself to it.
-
-     <pre>
-     $ sudo addgroup docker
-     $ sudo adduser `whoami` docker
-     </pre>
-
-     Log out and back in.
-        
-  2. Add a `cgroup` filesystem and mount it:
-
-     <pre>
-     $ mkdir -p /cgroup
-     $ grep cgroup /etc/fstab
-     none   /cgroup    cgroup    defaults    0    0
-     $ sudo mount /cgroup
-        </pre>
-        
-  3. [Download and run a docker binary from docker.io.](http://docs.docker.io/en/latest/installation/binaries/)
-
-* Ruby (version 1.9.3 or greater)
-
-* sudo privileges to run `debootstrap`
-
-Building
---------
-
-Type `./build.sh` to configure and build the following Docker images:
-
-   * arvados/api       - the Arvados API server
-   * arvados/compute   - Arvados compute node image
-   * arvados/doc       - Arvados documentation
-   * arvados/keep      - Keep, the Arvados content-addressable filesystem
-   * arvados/keepproxy - Keep proxy
-   * arvados/shell     - Arvados shell node image
-   * arvados/sso       - the Arvados single-signon authentication server
-   * arvados/workbench - the Arvados console
-
-`build.sh` will generate reasonable defaults for all configuration
-settings.  If you want more control over the way Arvados is
-configured, first copy `config.yml.example` to `config.yml` and edit
-it with appropriate configuration settings, and then run `./build.sh`.
-
-Running
--------
-
-The `arvdock` script in this directory is used to start, stop and
-restart Arvados servers on your machine.  The simplest and easiest way
-to use it is `./arvdock start` to start the full complement of Arvados
-servers, and `./arvdock stop` and `./arvdock restart` to stop and
-restart all servers, respectively.
-
-Developers who are working on individual servers can start, stop or
-restart just those containers, e.g.:
-
-* `./arvdock start --api --sso` to start just the API and SSO services.
-* `./arvdock stop --keep` to stop just the Keep services.
-* `./arvdock restart --workbench=8000` restarts just the Workbench service on port 8000.
-
-For a full set of arguments, use `./arvdock --help`.
diff --git a/docker/api/.gitolite.rc b/docker/api/.gitolite.rc
deleted file mode 100644 (file)
index 855e103..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-# configuration variables for gitolite
-
-# This file is in perl syntax.  But you do NOT need to know perl to edit it --
-# just mind the commas, use single quotes unless you know what you're doing,
-# and make sure the brackets and braces stay matched up!
-
-# (Tip: perl allows a comma after the last item in a list also!)
-
-# HELP for commands can be had by running the command with "-h".
-
-# HELP for all the other FEATURES can be found in the documentation (look for
-# "list of non-core programs shipped with gitolite" in the master index) or
-# directly in the corresponding source file.
-
-my $repo_aliases;
-my $aliases_src = "$ENV{HOME}/.gitolite/arvadosaliases.pl";
-if ($ENV{HOME} && (-e $aliases_src)) {
-    $repo_aliases = do $aliases_src;
-}
-$repo_aliases ||= {};
-
-%RC = (
-
-    # ------------------------------------------------------------------
-
-    # default umask gives you perms of '0700'; see the rc file docs for
-    # how/why you might change this
-    UMASK                           =>  0022,
-
-    # look for "git-config" in the documentation
-    GIT_CONFIG_KEYS                 =>  '',
-
-    # comment out if you don't need all the extra detail in the logfile
-    LOG_EXTRA                       =>  1,
-
-    # roles.  add more roles (like MANAGER, TESTER, ...) here.
-    #   WARNING: if you make changes to this hash, you MUST run 'gitolite
-    #   compile' afterward, and possibly also 'gitolite trigger POST_COMPILE'
-    ROLES => {
-        READERS                     =>  1,
-        WRITERS                     =>  1,
-    },
-
-    REPO_ALIASES => $repo_aliases,
-
-    # ------------------------------------------------------------------
-
-    # rc variables used by various features
-
-    # the 'info' command prints this as additional info, if it is set
-        # SITE_INFO                 =>  'Please see http://blahblah/gitolite for more help',
-
-    # the 'desc' command uses this
-        # WRITER_CAN_UPDATE_DESC    =>  1,
-    # the 'readme' command uses this
-        # WRITER_CAN_UPDATE_README  =>  1,
-
-    # the CpuTime feature uses these
-        # display user, system, and elapsed times to user after each git operation
-        # DISPLAY_CPU_TIME          =>  1,
-        # display a warning if total CPU times (u, s, cu, cs) crosses this limit
-        # CPU_TIME_WARN_LIMIT       =>  0.1,
-
-    # the Mirroring feature needs this
-        # HOSTNAME                  =>  "foo",
-
-    # if you enabled 'Shell', you need this
-        # SHELL_USERS_LIST          =>  "$ENV{HOME}/.gitolite.shell-users",
-
-    # ------------------------------------------------------------------
-
-    # suggested locations for site-local gitolite code (see cust.html)
-
-        # this one is managed directly on the server
-        # LOCAL_CODE                =>  "$ENV{HOME}/local",
-
-        # or you can use this, which lets you put everything in a subdirectory
-        # called "local" in your gitolite-admin repo.  For a SECURITY WARNING
-        # on this, see http://gitolite.com/gitolite/cust.html#pushcode
-        # LOCAL_CODE                =>  "$rc{GL_ADMIN_BASE}/local",
-
-    # ------------------------------------------------------------------
-
-    # List of commands and features to enable
-
-    ENABLE => [
-
-        # COMMANDS
-
-            # These are the commands enabled by default
-            'help',
-            'desc',
-            'info',
-            'perms',
-            'writable',
-
-            # Uncomment or add new commands here.
-            # 'create',
-            # 'fork',
-            # 'mirror',
-            # 'readme',
-            # 'sskm',
-            # 'D',
-
-        # These FEATURES are enabled by default.
-
-            # essential (unless you're using smart-http mode)
-            'ssh-authkeys',
-
-            # creates git-config enties from gitolite.conf file entries like 'config foo.bar = baz'
-            'git-config',
-
-            # creates git-daemon-export-ok files; if you don't use git-daemon, comment this out
-            'daemon',
-
-            # creates projects.list file; if you don't use gitweb, comment this out
-            'gitweb',
-
-        # These FEATURES are disabled by default; uncomment to enable.  If you
-        # need to add new ones, ask on the mailing list :-)
-
-        # user-visible behaviour
-
-            # prevent wild repos auto-create on fetch/clone
-            # 'no-create-on-read',
-            # no auto-create at all (don't forget to enable the 'create' command!)
-            # 'no-auto-create',
-
-            # access a repo by another (possibly legacy) name
-            'Alias',
-
-            # give some users direct shell access
-            # 'Shell',
-
-            # set default roles from lines like 'option default.roles-1 = ...', etc.
-            # 'set-default-roles',
-
-            # show more detailed messages on deny
-            # 'expand-deny-messages',
-
-        # system admin stuff
-
-            # enable mirroring (don't forget to set the HOSTNAME too!)
-            # 'Mirroring',
-
-            # allow people to submit pub files with more than one key in them
-            # 'ssh-authkeys-split',
-
-            # selective read control hack
-            # 'partial-copy',
-
-            # manage local, gitolite-controlled, copies of read-only upstream repos
-            # 'upstream',
-
-            # updates 'description' file instead of 'gitweb.description' config item
-            # 'cgit',
-
-            # allow repo-specific hooks to be added
-            # 'repo-specific-hooks',
-
-        # performance, logging, monitoring...
-
-            # be nice
-            # 'renice 10',
-
-            # log CPU times (user, system, cumulative user, cumulative system)
-            # 'CpuTime',
-
-        # syntactic_sugar for gitolite.conf and included files
-
-            # allow backslash-escaped continuation lines in gitolite.conf
-            # 'continuation-lines',
-
-            # create implicit user groups from directory names in keydir/
-            # 'keysubdirs-as-groups',
-
-            # allow simple line-oriented macros
-            # 'macros',
-
-    ],
-
-);
-
-# ------------------------------------------------------------------------------
-# per perl rules, this should be the last line in such a file:
-1;
-
-# Local variables:
-# mode: perl
-# End:
-# vim: set syn=perl:
diff --git a/docker/api/Dockerfile b/docker/api/Dockerfile
deleted file mode 100644 (file)
index 6a3428c..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Arvados API server Docker container.
-
-FROM arvados/passenger
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-# Install postgres and apache.
-RUN apt-get update -q
-RUN apt-get install -qy \
-    procps postgresql postgresql-server-dev-9.1 slurm-llnl munge \
-    supervisor sudo libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
-    libjson-perl cron openssh-server
-
-ADD munge.key /etc/munge/
-RUN chown munge:munge /etc/munge/munge.key && chmod 600 /etc/munge/munge.key
-ADD generated/slurm.conf /etc/slurm-llnl/
-
-RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
-# /for crunch-dispatch
-
-RUN /bin/mkdir -p /usr/src/arvados/services
-ADD generated/api.tar.gz /usr/src/arvados/services/
-
-# Install generated config files
-ADD generated/database.yml /usr/src/arvados/services/api/config/database.yml
-ADD generated/omniauth.rb /usr/src/arvados/services/api/config/initializers/omniauth.rb
-RUN /bin/cp /usr/src/arvados/services/api/config/environments/production.rb.example /usr/src/arvados/services/api/config/environments/production.rb
-ADD generated/application.yml /usr/src/arvados/services/api/config/application.yml
-
-# Configure Rails databases.
-ENV RAILS_ENV production
-ADD generated/config_databases.sh /tmp/config_databases.sh
-ADD generated/superuser_token /tmp/superuser_token
-RUN /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
-    sh /tmp/config_databases.sh && \
-    rm /tmp/config_databases.sh && \
-    /etc/init.d/postgresql start && \
-    cd /usr/src/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec rake db:structure:load && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec rake db:seed && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
-    /usr/local/rvm/bin/rvm-exec default ./script/create_superuser_token.rb $(cat /tmp/superuser_token) && \
-    chown www-data:www-data config.ru && \
-    chown www-data:www-data log -R && \
-    mkdir -p tmp && \
-    chown www-data:www-data tmp -R
-
-# Install a token for root
-RUN mkdir -p /root/.config/arvados; echo "ARVADOS_API_HOST=api" >> /root/.config/arvados/settings.conf && echo "ARVADOS_API_HOST_INSECURE=yes" >> /root/.config/arvados/settings.conf && echo "ARVADOS_API_TOKEN=$(cat /tmp/superuser_token)" >> /root/.config/arvados/settings.conf && chmod 600 /root/.config/arvados/settings.conf
-
-# Set up directory for job commit repo
-RUN mkdir -p /var/lib/arvados
-# Add crunch user
-RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid 4005 crunch && chown crunch:crunch /home/crunch
-
-# Create keep and compute node objects
-ADD generated/keep_server_0.json /root/
-ADD generated/keep_server_1.json /root/
-ADD keep_proxy.json /root/
-
-# Set up update-gitolite.rb
-RUN mkdir /usr/local/arvados/config -p
-ADD generated/arvados-clients.yml /usr/src/arvados/services/api/config/
-ADD .gitolite.rc /usr/local/arvados/config/
-RUN ln /usr/src/arvados/services/api/script/arvados-git-sync.rb /usr/local/bin/
-
-# Supervisor.
-ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
-ADD generated/setup.sh /usr/local/bin/setup.sh
-ADD generated/setup-gitolite.sh /usr/local/bin/setup-gitolite.sh
-ADD crunch-dispatch-run.sh /usr/local/bin/crunch-dispatch-run.sh
-ADD munge.sh /usr/local/bin/munge.sh
-ADD passenger.sh /usr/local/bin/passenger.sh
-
-# Start the supervisor.
-CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/api/apache2_foreground.sh b/docker/api/apache2_foreground.sh
deleted file mode 100755 (executable)
index fc6028e..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/bash
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
-
-source /etc/apache2/envvars
-/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/api/apache2_vhost.in b/docker/api/apache2_vhost.in
deleted file mode 100644 (file)
index 344e36d..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# VirtualHost definition for the Arvados API server
-
-<VirtualHost *:80>
-  ServerName @@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
-  ServerAdmin sysadmin@curoverse.com
-
-  RedirectPermanent / https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/error.log
-  CustomLog ${APACHE_LOG_DIR}/access.log combined
-
-</VirtualHost>
-
-<VirtualHost *:443>
-  ServerName @@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
-  ServerAdmin sysadmin@curoverse.com
-
-  RailsEnv production
-  RackBaseURI /
-  RailsAppSpawnerIdleTime 1200
-
-  # Enable streaming
-  PassengerBufferResponse off
-
-  # Index file and Document Root (where the public files are located)
-  DirectoryIndex index.html
-  DocumentRoot /usr/src/arvados/services/api/public
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/ssl_error.log
-  CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined
-
-  <Directory /usr/src/arvados/services/api/public>
-    Options Indexes FollowSymLinks MultiViews IncludesNoExec
-    AllowOverride None
-    Order allow,deny
-    allow from all
-  </Directory>
-
-  <IfModule mod_ssl.c>
-    SSLEngine on
-    # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
-    # SSLCACertificateFile    /etc/ssl/certs/startcom.ca.pem
-    SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem
-    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
-    SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
-  </IfModule>
-
-</VirtualHost>
diff --git a/docker/api/application.yml.in b/docker/api/application.yml.in
deleted file mode 100644 (file)
index 97eb66f..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copy this file to application.yml and edit to suit.
-#
-# Consult application.default.yml for the full list of configuration
-# settings.
-#
-# The order of precedence is:
-# 1. config/environments/{RAILS_ENV}.rb (deprecated)
-# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
-# 3. Section in application.yml called "common"
-# 4. Section in application.default.yml corresponding to RAILS_ENV
-# 5. Section in application.default.yml called "common"
-
-development:
-  # The blob_signing_key is a string of alphanumeric characters used
-  # to sign permission hints for Keep locators. It must be identical
-  # to the permission key given to Keep.  If you run both apiserver
-  # and Keep in development, change this to a hardcoded string and
-  # make sure both systems use the same value.
-  blob_signing_key: ~
-
-production:
-  host: api.@@ARVADOS_DOMAIN@@
-
-  git_repo_ssh_base: "git@api.@@ARVADOS_DOMAIN@@:"
-
-  # Docker setup doesn't include arv-git-httpd yet.
-  git_repo_https_base: false
-
-  # At minimum, you need a nice long randomly generated secret_token here.
-  # Use a long string of alphanumeric characters (at least 36).
-  secret_token: @@API_SECRET@@
-
-  # blob_signing_key is required and must be identical to the
-  # permission secret provisioned to Keep.
-  # Use a long string of alphanumeric characters (at least 36).
-  blob_signing_key: @@KEEP_SIGNING_SECRET@@
-
-  uuid_prefix: @@API_HOSTNAME@@
-
-  # compute_node_domain: example.org
-  # compute_node_nameservers:
-  #   - 127.0.0.1
-  #   - 192.168.1.1
-  #
-  permit_create_collection_with_unsigned_manifest: true
-  git_repositories_dir: /home/git/repositories
-  crunch_job_wrapper: :slurm_immediate
-  action_mailer.raise_delivery_errors: false
-  action_mailer.perform_deliveries: false
-
-  workbench_address: @@API_WORKBENCH_ADDRESS@@
-
-  auto_setup_new_users: true
-
-  auto_admin_first_user: true
-
-  auto_setup_new_users_with_repository: true
-
-  auto_setup_new_users_with_vm_uuid: @@API_HOSTNAME@@-2x53u-csbtkecoa669vkz
-
-test:
-  uuid_prefix: zzzzz
-  secret_token: <%= rand(2**512).to_s(36) %>
-
-common:
-  #git_repositories_dir: /var/cache/git
-  #git_internal_dir: /var/cache/arvados/internal.git
diff --git a/docker/api/apt.arvados.org.list b/docker/api/apt.arvados.org.list
deleted file mode 100644 (file)
index 7eb8716..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# apt.arvados.org
-deb http://apt.arvados.org/ wheezy main
diff --git a/docker/api/arvados-clients.yml.in b/docker/api/arvados-clients.yml.in
deleted file mode 100644 (file)
index 6741328..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-production:
-  gitolite_url: 'git@api.@@ARVADOS_DOMAIN@@:gitolite-admin.git'
-  gitolite_tmp: 'gitolite-tmp'
-  arvados_api_host: 'api'
-  arvados_api_token: '@@API_SUPERUSER_SECRET@@'
-  arvados_api_host_insecure: true
diff --git a/docker/api/config_databases.sh.in b/docker/api/config_databases.sh.in
deleted file mode 100755 (executable)
index b548c21..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#! /bin/sh
-
-# Configure postgresql in a docker instance.
-
-/bin/su postgres -c '/usr/lib/postgresql/9.1/bin/postgres --single -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf' <<EOF
-alter role postgres with encrypted password '@@POSTGRES_ROOT_PW@@';
-
-create user @@ARVADOS_DEV_USER@@ with encrypted password '@@ARVADOS_DEV_PW@@';
-create database @@ARVADOS_DEV_DB@@ with owner @@ARVADOS_DEV_USER@@;
-
-create user @@ARVADOS_TEST_USER@@ with createdb encrypted password '@@ARVADOS_TEST_PW@@';
-
-create user @@ARVADOS_PROD_USER@@ with encrypted password '@@ARVADOS_PROD_PW@@';
-create database @@ARVADOS_PROD_DB@@ with owner @@ARVADOS_PROD_USER@@;
-EOF
diff --git a/docker/api/crunch-dispatch-run.sh b/docker/api/crunch-dispatch-run.sh
deleted file mode 100755 (executable)
index 5103b1d..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-set -e
-export PATH="$PATH":/usr/src/arvados/services/crunch
-export PERLLIB=/usr/src/arvados/sdk/perl/lib
-export ARVADOS_API_HOST=api
-export ARVADOS_API_HOST_INSECURE=yes
-export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
-
-if [[ ! -e $CRUNCH_DISPATCH_LOCKFILE ]]; then
-  touch $CRUNCH_DISPATCH_LOCKFILE
-fi
-
-export CRUNCH_JOB_BIN=/usr/src/arvados/services/crunch/crunch-job
-export HOME=`pwd`
-fuser -TERM -k $CRUNCH_DISPATCH_LOCKFILE || true
-
-# Give the compute nodes some time to start up
-sleep 5
-
-cd /usr/src/arvados/services/api
-export RAILS_ENV=production
-/usr/local/rvm/bin/rvm-exec default bundle install
-exec /usr/local/rvm/bin/rvm-exec default bundle exec ./script/crunch-dispatch.rb 2>&1
-
diff --git a/docker/api/database.yml.in b/docker/api/database.yml.in
deleted file mode 100644 (file)
index 5990319..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-development:
-  adapter: postgresql
-  encoding: utf8
-  database: @@ARVADOS_DEV_DB@@
-  username: @@ARVADOS_DEV_USER@@
-  password: @@ARVADOS_DEV_PW@@
-  host: localhost
-
-test:
-  adapter: postgresql
-  encoding: utf8
-  template: template0
-  database: @@ARVADOS_TEST_DB@@
-  username: @@ARVADOS_TEST_USER@@
-  password: @@ARVADOS_TEST_PW@@
-  host: localhost
-
-production:
-  adapter: postgresql
-  encoding: utf8
-  database: @@ARVADOS_PROD_DB@@
-  username: @@ARVADOS_PROD_USER@@
-  password: @@ARVADOS_PROD_PW@@
-  host: localhost
-
diff --git a/docker/api/keep_proxy.json b/docker/api/keep_proxy.json
deleted file mode 100644 (file)
index 117e590..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "service_host": "localhost",
-  "service_port": 9902,
-  "service_ssl_flag": "false",
-  "service_type": "proxy"
-}
diff --git a/docker/api/keep_server_0.json.in b/docker/api/keep_server_0.json.in
deleted file mode 100644 (file)
index d63c590..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "service_host": "keep_server_0.keep.@@ARVADOS_DOMAIN@@",
-  "service_port": 25107,
-  "service_ssl_flag": "false",
-  "service_type": "disk"
-}
diff --git a/docker/api/keep_server_1.json.in b/docker/api/keep_server_1.json.in
deleted file mode 100644 (file)
index 53d5c64..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "service_host": "keep_server_1.keep.@@ARVADOS_DOMAIN@@",
-  "service_port": 25107,
-  "service_ssl_flag": "false",
-  "service_type": "disk"
-}
diff --git a/docker/api/munge.key b/docker/api/munge.key
deleted file mode 100644 (file)
index 34036a0..0000000
Binary files a/docker/api/munge.key and /dev/null differ
diff --git a/docker/api/munge.sh b/docker/api/munge.sh
deleted file mode 100755 (executable)
index ef10d01..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-rm -rf /var/run/munge
-exec /etc/init.d/munge start
diff --git a/docker/api/omniauth.rb.in b/docker/api/omniauth.rb.in
deleted file mode 100644 (file)
index 8daa300..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-# Change this omniauth configuration to point to your registered provider
-# Since this is a registered application, add the app id and secret here
-APP_ID = '@@SSO_CLIENT_APP_ID@@'
-APP_SECRET = '@@SSO_CLIENT_SECRET@@'
-
-# Update your custom Omniauth provider URL here
-if '@@OMNIAUTH_URL@@' != ''
-  CUSTOM_PROVIDER_URL = '@@OMNIAUTH_URL@@'
-else
-  CUSTOM_PROVIDER_URL = 'https://@@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@'
-end
-
-# This is a development sandbox, we use self-signed certificates
-OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE
-
-Rails.application.config.middleware.use OmniAuth::Builder do
-  provider :josh_id, APP_ID, APP_SECRET, CUSTOM_PROVIDER_URL
-end
diff --git a/docker/api/passenger.sh b/docker/api/passenger.sh
deleted file mode 100755 (executable)
index a62d9d5..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-cd /usr/src/arvados/services/api
-export ARVADOS_WEBSOCKETS=1
-export RAILS_ENV=production
-/usr/local/rvm/bin/rvm-exec default bundle exec rake db:migrate
-exec /usr/local/rvm/bin/rvm-exec default bundle exec passenger start -p443 --ssl --ssl-certificate=/etc/ssl/certs/ssl-cert-snakeoil.pem --ssl-certificate-key=/etc/ssl/private/ssl-cert-snakeoil.key
diff --git a/docker/api/setup-gitolite.sh.in b/docker/api/setup-gitolite.sh.in
deleted file mode 100755 (executable)
index 023ca5d..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-ssh-keygen -q -N '' -t rsa -f /root/.ssh/id_rsa
-
-useradd git
-mkdir /home/git
-
-# Set up gitolite repository
-cp ~root/.ssh/id_rsa.pub ~git/root-authorized_keys.pub
-chown git:git /home/git -R
-su - git -c "mkdir -p ~/bin"
-
-su - git -c "git clone git://github.com/sitaramc/gitolite"
-su - git -c "gitolite/install -ln ~/bin"
-su - git -c "PATH=/home/git/bin:$PATH gitolite setup -pk ~git/root-authorized_keys.pub"
-install -o git -g git -m 600 /usr/local/arvados/config/.gitolite.rc /home/git/
-
-# And make sure that the existing repos are equally readable, or the API server commit model will freak out...
-chmod 755 /home/git/repositories
-chmod +rx /home/git/repositories/*git -R
-
-# Now set up the gitolite repo(s) we use
-mkdir -p /usr/local/arvados/gitolite-tmp/
-# Make ssh store the host key
-ssh -o "StrictHostKeyChecking no" git@api.@@ARVADOS_DOMAIN@@ info
-# Now check out the tree
-git clone git@api.@@ARVADOS_DOMAIN@@:gitolite-admin.git /usr/local/arvados/gitolite-tmp/gitolite-admin/
-cd /usr/local/arvados/gitolite-tmp/gitolite-admin
-mkdir keydir/arvados
-mkdir conf/admin
-mkdir conf/auto
-echo "
-
-@arvados_git_user = arvados_git_user
-
-repo @all
-     RW+                 = @arvados_git_user
-
-" > conf/admin/arvados.conf
-echo '
-include "auto/*.conf"
-include "admin/*.conf"
-' >> conf/gitolite.conf
-
-#su - git -c "ssh-keygen -t rsa"
-cp /root/.ssh/id_rsa.pub keydir/arvados/arvados_git_user.pub
-# Replace the 'root' key with the user key, just in case
-cp /root/.ssh/authorized_keys keydir/root-authorized_keys.pub
-# But also make sure we have the root key installed so it can access all keys
-git add keydir/root-authorized_keys.pub
-git add keydir/arvados/arvados_git_user.pub
-git add conf/admin/arvados.conf
-git add keydir/arvados/
-git add conf/gitolite.conf
-git commit -a -m 'git server setup'
-git push
-
-# Prepopulate the arvados.git repo with our source. Silly, but until we can check out from remote trees,
-# we need this to make the tutorials work.
-su - git -c "git clone --bare git://github.com/curoverse/arvados.git /home/git/repositories/arvados.git"
-
-echo "ARVADOS_API_HOST_INSECURE=yes" > /etc/cron.d/gitolite-update
-echo "*/2 * * * * root /bin/bash -c 'source /etc/profile.d/rvm.sh && /usr/src/arvados/services/api/script/arvados-git-sync.rb production'" >> /etc/cron.d/gitolite-update
-
-# Create/update the repos now
-. /etc/profile.d/rvm.sh
-export ARVADOS_API_HOST=api
-export ARVADOS_API_HOST_INSECURE=yes
-export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
-/usr/local/arvados/update-gitolite.rb production
-
-echo "PATH=/usr/bin:/bin:/sbin" > /etc/cron.d/arvados-repo-update
-echo "*/5 * * * * git cd ~git/repositories/arvados.git; git fetch https://github.com/curoverse/arvados.git master:master" >> /etc/cron.d/arvados-repo-update
diff --git a/docker/api/setup.sh.in b/docker/api/setup.sh.in
deleted file mode 100755 (executable)
index 2c7da92..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-set -x
-
-if test -f /root/finished_arvados_setup ; then
-   exit
-fi
-
-. /etc/profile.d/rvm.sh
-
-export ARVADOS_API_HOST=api
-export ARVADOS_API_HOST_INSECURE=yes
-export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
-export HOME=/root
-
-# Wait for API server to come up.
-while ! arv user current ; do sleep 1 ; done
-
-# Arvados repository object
-all_users_group_uuid="@@API_HOSTNAME@@-j7d0g-fffffffffffffff"
-
-arv user update --uuid @@API_HOSTNAME@@-tpzed-000000000000000 --user '{"username":"root"}'
-repo_uuid=`arv --format=uuid repository create --repository '{"owner_uuid":"@@API_HOSTNAME@@-tpzed-000000000000000", "name":"arvados"}'`
-echo "Arvados repository uuid is $repo_uuid"
-
-read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
-{
- "tail_uuid":"$all_users_group_uuid",
- "head_uuid":"$repo_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF
-
-# Make sure the necessary keep_service objects exist
-arv keep_service list > /tmp/keep_service.list
-
-grep -q keep_server_0 /tmp/keep_service.list
-if [[ "$?" != "0" ]]; then
-  arv keep_service create --keep-service "$(cat /root/keep_server_0.json)"
-fi
-
-grep -q keep_server_1 /tmp/keep_service.list
-if [[ "$?" != "0" ]]; then
-  arv keep_service create --keep-service "$(cat /root/keep_server_1.json)"
-fi
-
-grep -q keep_proxy /tmp/keep_service.list
-if [[ "$?" != "0" ]]; then
-  arv keep_service create --keep-service "$(cat /root/keep_proxy.json)"
-fi
-
-# User repository object
-# user_uuid=`arv --format=uuid user current`
-# repo_uuid=`arv --format=uuid repository create --repository '{"name":"@@ARVADOS_USER_NAME@@","fetch_url":"git@api.dev.arvados:@@ARVADOS_USER_NAME@@.git","push_url":"git@api.dev.arvados:@@ARVADOS_USER_NAME@@.git"}'`
-
-# echo "User repository uuid is $repo_uuid"
-
-# read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
-# {
-#  "tail_uuid":"$user_uuid",
-#  "head_uuid":"$repo_uuid",
-#  "link_class":"permission",
-#  "name":"can_write"
-# }
-# EOF
-
-# # Shell machine object
-shell_uuid=`arv --format=uuid virtual_machine create --virtual-machine '{"hostname":"shell"}'`
-arv virtual_machine create --virtual-machine '{"hostname":"shell.dev", "uuid": "@@API_HOSTNAME@@-2x53u-csbtkecoa669vkz"}'
-
-# read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
-# {
-#  "tail_uuid":"$user_uuid",
-#  "head_uuid":"$shell_uuid",
-#  "link_class":"permission",
-#  "name":"can_login",
-#  "properties": {"username": "@@ARVADOS_USER_NAME@@"}
-# }
-# EOF
-
-touch /root/finished_arvados_setup
diff --git a/docker/api/slurm.conf.in b/docker/api/slurm.conf.in
deleted file mode 100644 (file)
index 7312a0e..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-
-ControlMachine=api
-#SlurmUser=slurmd
-SlurmctldPort=6817
-SlurmdPort=6818
-AuthType=auth/munge
-#JobCredentialPrivateKey=/etc/slurm-llnl/slurm-key.pem
-#JobCredentialPublicCertificate=/etc/slurm-llnl/slurm-cert.pem
-StateSaveLocation=/tmp
-SlurmdSpoolDir=/tmp/slurmd
-SwitchType=switch/none
-MpiDefault=none
-SlurmctldPidFile=/var/run/slurmctld.pid
-SlurmdPidFile=/var/run/slurmd.pid
-ProctrackType=proctrack/pgid
-CacheGroups=0
-ReturnToService=2
-TaskPlugin=task/affinity
-#
-# TIMERS
-SlurmctldTimeout=300
-SlurmdTimeout=300
-InactiveLimit=0
-MinJobAge=300
-KillWait=30
-Waittime=0
-#
-# SCHEDULING
-SchedulerType=sched/backfill
-#SchedulerType=sched/builtin
-SchedulerPort=7321
-#SchedulerRootFilter=
-#SelectType=select/linear
-SelectType=select/cons_res
-SelectTypeParameters=CR_CPU_Memory
-FastSchedule=1
-#
-# LOGGING
-SlurmctldDebug=3
-#SlurmctldLogFile=
-SlurmdDebug=3
-#SlurmdLogFile=
-JobCompType=jobcomp/none
-#JobCompLoc=
-JobAcctGatherType=jobacct_gather/none
-#JobAcctLogfile=
-#JobAcctFrequency=
-#
-# COMPUTE NODES
-NodeName=DEFAULT
-# CPUs=8 State=UNKNOWN RealMemory=6967 Weight=6967
-PartitionName=DEFAULT MaxTime=INFINITE State=UP
-PartitionName=compute Default=YES Shared=yes
-#PartitionName=sysadmin Hidden=YES Shared=yes
-
-NodeName=compute[0-1]
-#NodeName=compute0 RealMemory=6967 Weight=6967
-
-PartitionName=compute Nodes=compute[0-1]
-PartitionName=crypto Nodes=compute[0-1]
diff --git a/docker/api/superuser_token.in b/docker/api/superuser_token.in
deleted file mode 100644 (file)
index 49bb34e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-@@API_SUPERUSER_SECRET@@
diff --git a/docker/api/supervisor.conf b/docker/api/supervisor.conf
deleted file mode 100644 (file)
index b24e552..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-[program:ssh]
-user=root
-command=/etc/init.d/ssh start
-startsecs=0
-
-[program:postgres]
-user=postgres
-command=/usr/lib/postgresql/9.1/bin/postgres -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf
-autorestart=true
-
-[program:passenger]
-command=/usr/local/bin/passenger.sh
-autorestart=true
-
-[program:munge]
-user=root
-command=/usr/local/bin/munge.sh
-startsecs=0
-
-[program:slurm]
-user=root
-command=/etc/init.d/slurm-llnl start
-startsecs=0
-
-[program:cron]
-user=root
-command=/etc/init.d/cron start
-startsecs=0
-
-[program:setup]
-user=root
-command=/usr/local/bin/setup.sh
-startsecs=0
-
-[program:setup-gitolite]
-user=root
-command=/usr/local/bin/setup-gitolite.sh
-startsecs=0
-
-[program:crunch-dispatch]
-user=root
-command=/usr/local/bin/crunch-dispatch-run.sh
-autorestart=true
diff --git a/docker/arv-web/Dockerfile b/docker/arv-web/Dockerfile
deleted file mode 100644 (file)
index 11a9c17..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM arvados/passenger
-MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
-
-ADD apache2_foreground.sh /etc/apache2/foreground.sh
-
-ADD apache2_vhost /etc/apache2/sites-available/arv-web
-RUN \
-  mkdir /var/run/apache2 && \
-  a2dissite default && \
-  a2ensite arv-web && \
-  a2enmod rewrite
-
-EXPOSE 80
-
-CMD ["/etc/apache2/foreground.sh"]
\ No newline at end of file
diff --git a/docker/arv-web/apache2_foreground.sh b/docker/arv-web/apache2_foreground.sh
deleted file mode 100755 (executable)
index 76766a6..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#! /bin/bash
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -HUP -$pgrp" HUP
-trap "kill -TERM -$pgrp; exit" EXIT TERM QUIT
-
-source /etc/apache2/envvars
-/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/arv-web/apache2_vhost b/docker/arv-web/apache2_vhost
deleted file mode 100644 (file)
index 5268201..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-<VirtualHost *:80>
-  # Index file and Document Root (where the public files are located)
-  DirectoryIndex index.html
-  DocumentRoot /mnt/public
-  RackBaseURI /
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/error.log
-  CustomLog ${APACHE_LOG_DIR}/access.log combined
-
-  <Directory /mnt/public>
-    Options Indexes IncludesNoExec
-    Options -MultiViews
-    AllowOverride All
-    Order allow,deny
-    Allow from all
-  </Directory>
-
-</VirtualHost>
diff --git a/docker/arvdock b/docker/arvdock
deleted file mode 100755 (executable)
index 43a384e..0000000
+++ /dev/null
@@ -1,559 +0,0 @@
-#!/bin/bash
-
-DOCKER=`which docker.io`
-
-if [[ "$DOCKER" == "" ]]; then
-    DOCKER=`which docker`
-fi
-
-CURL=`which curl`
-
-COMPUTE_COUNTER=0
-
-ARVADOS_DOMAIN=dev.arvados
-
-function usage {
-    echo >&2
-    echo >&2 "usage: $0 (start|stop|restart|reset|test) [options]"
-    echo >&2
-    echo >&2 "start    run new or restart stopped arvados containers"
-    echo >&2 "stop     stop arvados containers"
-    echo >&2 "restart  stop and then start arvados containers"
-    echo >&2 "reset    stop and delete containers WARNING: this will delete the data inside Arvados!"
-    echo >&2 "test     run tests"
-    echo >&2
-    echo >&2 "$0 options:"
-    echo >&2 "  -b[bridge], --bridge[=bridge] Docker bridge (default bridge docker0)"
-    echo >&2 "  -d[port], --doc[=port]        Documentation server (default port 9898)"
-    echo >&2 "  -w[port], --workbench[=port]  Workbench server (default port 9899)"
-    echo >&2 "  -s[port], --sso[=port]        SSO server (default port 9901)"
-    echo >&2 "  -a[port], --api[=port]        API server (default port 9900)"
-    echo >&2 "  -c, --compute                 Compute nodes (starts 2)"
-    echo >&2 "  -v, --vm                      Shell server"
-    echo >&2 "  -n, --nameserver              Nameserver"
-    echo >&2 "  -k, --keep                    Keep servers"
-    echo >&2 "  -p, --keepproxy               Keepproxy server"
-    echo >&2 "  -h, --help                    Display this help and exit"
-    echo >&2 "      --domain=dns.domain       DNS domain used by containers (default dev.arvados)"
-    echo >&2
-    echo >&2 "  If no options are given, the action is applied to all servers."
-    echo >&2
-    echo >&2 "$0 test [testname] [testname] ..."
-    echo >&2 "  By default, all tests are run."
-}
-
-function ip_address {
-    local container=$1
-    echo `$DOCKER inspect $container  |grep IPAddress |cut -f4 -d\"`
-}
-
-function bridge_ip_address {
-    local bridge_name=$1
-    # FIXME: add a more robust check here.
-    # because ip command could be mising, multiple docker bridges could be there.. etc.
-    echo $(ip --oneline --family inet addr show dev "$bridge_name" | awk '{ print $4 }'| cut -d/ -f1 )
-}
-
-function start_container {
-    bridge_ip=$(bridge_ip_address "$bridge")
-
-    local args="-d -i -t"
-    if [[ "$1" != '' ]]; then
-      local port="$1"
-      args="$args -p $port"
-    fi
-    if [[ "$2" != '' ]]; then
-      local name="$2"
-      if [[ "$name" == "api_server" ]]; then
-        args="$args --dns=$bridge_ip --dns-search=compute.$ARVADOS_DOMAIN --hostname api -P --name $name"
-      elif [[ "$name" == "compute" ]]; then
-        name=$name$COMPUTE_COUNTER
-        # We need --privileged because we run docker-inside-docker on the compute nodes
-        args="$args --dns=$bridge_ip --dns-search=compute.$ARVADOS_DOMAIN --hostname compute$COMPUTE_COUNTER -P --privileged --name $name"
-        let COMPUTE_COUNTER=$(($COMPUTE_COUNTER + 1))
-      else
-        args="$args --dns=$bridge_ip --dns-search=$ARVADOS_DOMAIN --hostname ${name#_server} --name $name"
-      fi
-    fi
-    if [[ "$3" != '' ]]; then
-      local volume="$3"
-      args="$args --volumes-from $volume"
-    fi
-    if [[ "$4" != '' ]]; then
-      local link="$4"
-      args="$args --link $link"
-    fi
-    local image=$5
-
-    `$DOCKER ps |grep -E "\b$name\b" -q`
-    if [[ "$?" == "0" ]]; then
-      echo "You have a running container with name $name -- skipping."
-      return
-    fi
-
-    echo "Starting container: $name"
-    `$DOCKER ps --all |grep -E "\b$name\b" -q`
-    if [[ "$?" == "0" ]]; then
-        echo "  $DOCKER start $name"
-        container=`$DOCKER start $name`
-    else
-        echo "  $DOCKER run $args $image"
-        container=`$DOCKER run $args $image`
-    fi
-
-    if [ "$?" != "0" -o "$container" = "" ]; then
-      echo "Unable to start container"
-      exit 1
-    else
-      echo "Started container: $container"
-    fi
-
-}
-
-# Create a Docker data volume
-function make_keep_volumes () {
-    `$DOCKER ps --all |grep -E "\bkeep_data\b" -q`
-    if [[ "$?" == "0" ]]; then
-      return
-    fi
-    docker create -v /keep-data --name keep_data arvados/keep
-}
-
-function do_start {
-    local start_doc=false
-    local start_sso=false
-    local start_api=false
-    local start_compute=false
-    local start_workbench=false
-    local start_vm=false
-    local start_nameserver=false
-    local start_keep=false
-    local start_keepproxy=false
-    local bridge="docker0"
-    local
-
-    # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-    local TEMP=`getopt -o d::s::b:a::cw::nkpvh \
-                  --long doc::,sso::,api::,bridge:,compute,workbench::,nameserver,keep,keepproxy,vm,help,domain:: \
-                  -n "$0" -- "$@"`
-
-    if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
-
-    # Note the quotes around `$TEMP': they are essential!
-    eval set -- "$TEMP"
-
-    while [ $# -ge 1 ]
-    do
-        case $1 in
-            -b | --bridge)
-                case "$2" in
-                    *)  start_bridge=$2; shift 2 ;;
-                esac
-                ;;
-            -d | --doc)
-                case "$2" in
-                    "") start_doc=9898; shift 2 ;;
-                    *)  start_doc=$2; shift 2 ;;
-                esac
-                ;;
-            -s | --sso)
-                case "$2" in
-                    "") start_sso=9901; shift 2 ;;
-                    *)  start_sso=$2; shift 2 ;;
-                esac
-                ;;
-            -a | --api)
-                case "$2" in
-                    "") start_api=9900; shift 2 ;;
-                    *)  start_api=$2; shift 2 ;;
-                esac
-                ;;
-            -c | --compute)
-                start_compute=2
-                shift
-                ;;
-            -w | --workbench)
-                case "$2" in
-                    "") start_workbench=9899; shift 2 ;;
-                    *)  start_workbench=$2; shift 2 ;;
-                esac
-                ;;
-            -v | --vm)
-                start_vm=true
-                shift
-                ;;
-            -n | --nameserver)
-                start_nameserver=true
-                shift
-                ;;
-            -k | --keep)
-                start_keep=true
-                shift
-                ;;
-            -p | --keepproxy)
-                start_keepproxy=true
-                shift
-                ;;
-            --domain)
-                case "$2" in
-                    *) ARVADOS_DOMAIN="$2"; shift 2 ;;
-                esac
-                ;;
-            --)
-                shift
-                break
-                ;;
-            *)
-                usage
-                exit 1
-                ;;
-        esac
-    done
-
-    # If no options were selected, then start all servers.
-    if [[ $start_doc == false &&
-          $start_sso == false &&
-          $start_api == false &&
-          $start_compute == false &&
-          $start_workbench == false &&
-          $start_vm == false &&
-          $start_nameserver == false &&
-          $start_keep == false &&
-          $start_keepproxy == false ]]
-    then
-        start_doc=9898
-        start_sso=9901
-        start_api=9900
-        start_compute=2
-        start_workbench=9899
-        #start_vm=true
-        start_nameserver=true
-        start_keep=true
-        start_keepproxy=true
-    fi
-
-    if [[ $start_nameserver != false ]]
-    then
-      $DOCKER ps | grep skydns >/dev/null
-      need_skydns="$?"
-
-      $DOCKER ps | grep skydock >/dev/null
-      need_skydock="$?"
-
-      if [[ "$need_skydns" != 0 || "$need_skydock" != 0 ]]
-      then
-          # skydns and skydock need to both be running before everything else.
-          # If they are not running we need to shut everything down and start
-          # over, otherwise DNS will be broken and the containers won't find each other.
-          do_stop
-          need_skydns=1
-          need_skydock=1
-      fi
-
-      # We rely on skydock and skydns for dns discovery between the slurm controller and compute nodes,
-      # so make sure they are running
-      $DOCKER ps | grep skydns >/dev/null
-      if [[ $need_skydns != "0" ]]; then
-        echo "Detecting bridge '$bridge' IP for crosbymichael/skydns"
-        bridge_ip=$(bridge_ip_address "$bridge")
-
-        echo "Starting crosbymichael/skydns container..."
-        $DOCKER rm "skydns" 2>/dev/null
-        echo $DOCKER run -d -p $bridge_ip:53:53/udp --name skydns crosbymichael/skydns -nameserver 8.8.8.8:53 -domain arvados
-        $DOCKER run -d -p $bridge_ip:53:53/udp --name skydns crosbymichael/skydns -nameserver 8.8.8.8:53 -domain arvados
-      fi
-      $DOCKER ps | grep skydock >/dev/null
-      if [[ "$need_skydock" != "0" ]]; then
-        echo "Starting crosbymichael/skydock container..."
-        $DOCKER rm "skydock" 2>/dev/null
-        echo $DOCKER run -d -v /var/run/docker.sock:/docker.sock --name skydock crosbymichael/skydock -ttl 30 -environment dev -s /docker.sock -domain arvados -name skydns
-        $DOCKER run -d -v /var/run/docker.sock:/docker.sock --name skydock crosbymichael/skydock -ttl 30 -environment dev -s /docker.sock -domain arvados -name skydns
-      fi
-    fi
-
-    if [[ $start_sso != false ]]
-    then
-        start_container "$start_sso:443" "sso_server" '' '' "arvados/sso"
-    fi
-
-    if [[ $start_api != false ]]
-    then
-      if [[ $start_sso != false ]]; then
-        start_container "$start_api:443" "api_server" '' "sso_server:sso" "arvados/api"
-      else
-        start_container "$start_api:443" "api_server" '' '' "arvados/api"
-      fi
-    fi
-
-    if [[ $start_compute != false ]]
-    then
-        for i in `seq 0 $(($start_compute - 1))`; do
-          start_container "" "compute" '' "api_server:api" "arvados/compute"
-        done
-    fi
-
-    if [[ $start_keep != false ]]
-    then
-        # create `keep_volumes' array with a list of keep mount points
-        # remove any stale metadata from those volumes before starting them
-        make_keep_volumes
-        start_container "25107:25107" "keep_server_0" \
-            "keep_data" \
-            "api_server:api" \
-            "arvados/keep"
-        start_container "25108:25107" "keep_server_1" \
-            "keep_data" \
-            "api_server:api" \
-            "arvados/keep"
-    fi
-
-    if [[ $start_keepproxy != false ]]
-    then
-        start_container "9902:9100" "keepproxy_server" '' \
-            "api_server:api" \
-            "arvados/keepproxy"
-    fi
-
-    if [[ $start_doc != false ]]
-    then
-        start_container "$start_doc:80" "doc_server" '' '' "arvados/doc"
-    fi
-
-    if [[ $start_vm != false ]]
-    then
-        start_container "" "shell" '' "api_server:api" "arvados/shell"
-    fi
-
-    if [[ $start_workbench != false ]]
-    then
-        start_container "" "workbench_server" '' "" "arvados/workbench"
-    fi
-
-    if [[ $start_api != false ]]
-    then
-        if [[ -f "api/generated/superuser_token" ]]
-        then
-          if [ -d $HOME/.config/arvados ] || mkdir -p $HOME/.config/arvados
-          then
-            cat >$HOME/.config/arvados/settings.conf <<EOF
-ARVADOS_API_HOST=$(ip_address "api_server")
-ARVADOS_API_HOST_INSECURE=yes
-ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
-EOF
-          fi
-        fi
-    fi
-
-    if [ "$(awk '($1 == "nameserver"){print $2; exit}' </etc/resolv.conf)" != "$bridge_ip" ]; then
-        echo
-        echo "******************************************************************"
-        echo "To access Arvados you must add the Arvados nameserver to the top"
-        echo "of your DNS configuration in /etc/resolv.conf:"
-        echo "nameserver $bridge_ip"
-        echo
-        echo "Then run '$0 start' again"
-        echo "******************************************************************"
-        echo
-    else
-        while ! $CURL -k -L -f http://workbench.$ARVADOS_DOMAIN >/dev/null 2>/dev/null ; do
-            echo "Waiting for Arvados to be ready."
-            sleep 1
-        done
-
-        if $DOCKER ps | grep -qE "\bdoc_server\b"; then
-            echo
-            echo "******************************************************************"
-            echo "You can access the Arvados documentation at http://doc.$ARVADOS_DOMAIN"
-            echo "******************************************************************"
-            echo
-        fi
-
-        if $DOCKER ps | grep -qE "\bworkbench_server\b"; then
-            echo
-            echo "********************************************************************"
-            echo "You can access the Arvados workbench at http://workbench.$ARVADOS_DOMAIN"
-            echo "********************************************************************"
-            echo
-        fi
-    fi
-
-}
-
-function do_stop {
-    local stop_doc=""
-    local stop_sso=""
-    local stop_api=""
-    local stop_compute=""
-    local stop_workbench=""
-    local stop_nameserver=""
-    local stop_vm=""
-    local stop_keep=""
-    local stop_keepproxy=""
-
-    # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-    local TEMP=`getopt -o dsacwnkpvh \
-                  --long doc,sso,api,compute,workbench,nameserver,keep,keepproxy,vm,help,domain:: \
-                  -n "$0" -- "$@"`
-
-    if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
-
-    # Note the quotes around `$TEMP': they are essential!
-    eval set -- "$TEMP"
-
-    while [ $# -ge 1 ]
-    do
-        case $1 in
-            -d | --doc)
-                stop_doc=doc_server ; shift ;;
-            -s | --sso)
-                stop_sso=sso_server ; shift ;;
-            -a | --api)
-                stop_api=api_server ; shift ;;
-            -c | --compute)
-                stop_compute=`$DOCKER ps |grep -E "\bcompute[0-9]+\b" |grep -v api_server |cut -f1 -d ' '` ; shift ;;
-            -w | --workbench)
-                stop_workbench=workbench_server ; shift ;;
-            -n | --nameserver )
-                stop_nameserver="skydock skydns" ; shift ;;
-            -v | --vm )
-                stop_vm="shell" ; shift ;;
-            -k | --keep )
-                stop_keep="keep_server_0 keep_server_1" ; shift ;;
-            -p | --keepproxy )
-                stop_keepproxy="keepproxy_server" ; shift ;;
-            --domain)
-                case "$2" in
-                    *) ARVADOS_DOMAIN="$2"; shift 2 ;;
-                esac
-                ;;
-            --)
-                shift
-                break
-                ;;
-            *)
-                usage
-                exit 1
-                ;;
-        esac
-    done
-
-    # If no options were selected, then stop all servers.
-    if [[ $stop_doc == "" &&
-          $stop_sso == "" &&
-          $stop_api == "" &&
-          $stop_compute == "" &&
-          $stop_workbench == "" &&
-          $stop_vm == "" &&
-          $stop_nameserver == "" &&
-          $stop_keep == "" &&
-          $stop_keepproxy == "" ]]
-    then
-        stop_doc=doc_server
-        stop_sso=sso_server
-        stop_api=api_server
-        stop_compute=`$DOCKER ps |grep -E "\bcompute[0-9]+\b" |grep -v api_server |cut -f1 -d ' '`
-        stop_workbench=workbench_server
-        stop_vm=shell
-        stop_nameserver="skydock skydns"
-        stop_keep="keep_server_0 keep_server_1"
-        stop_keepproxy="keepproxy_server"
-    fi
-
-    $DOCKER stop $stop_doc $stop_sso $stop_api $stop_compute $stop_workbench $stop_nameserver $stop_keep $stop_keepproxy $stop_vm \
-        2>/dev/null
-}
-
-function do_test {
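-    # Example (hypothetical invocation): "$0 test python-sdk" starts the
-    # API, Keep and SSO containers, then runs the Python SDK unit tests.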
-    local alltests
-    if [ $# -lt 1 ]
-    then
-        alltests="python-sdk api"
-    else
-        alltests="$@"
-    fi
-
-    for testname in $alltests
-    do
-        echo "testing $testname..."
-        case $testname in
-            python-sdk)
-                do_start --api --keep --sso
-                export ARVADOS_API_HOST=$(ip_address "api_server")
-                export ARVADOS_API_HOST_INSECURE=yes
-                export ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
-                python -m unittest discover ../sdk/python
-                ;;
-            api)
-                $DOCKER run -t -i arvados/api \
-                    /usr/src/arvados/services/api/script/rake_test.sh
-                ;;
-            *)
-                echo >&2 "unknown test $testname"
-                ;;
-        esac
-    done
-}
-
-function do_reset {
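-    # Stop and remove every known Arvados container so the next "start"
-    # begins from a clean slate.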
-    for name in skydock skydns workbench_server shell doc_server keepproxy_server keep_server_0 keep_server_1 compute0 compute1 api_server keepproxy keep_data sso_server
-    do
-        if $DOCKER ps | grep -qE "\b$name\b"; then
-            echo "  $DOCKER stop $name"
-            $DOCKER stop $name
-        fi
-        if $DOCKER ps --all | grep -qE "\b$name\b"; then
-            echo "  $DOCKER rm $name"
-            $DOCKER rm $name
-        fi
-    done
-}
-
-if [ "$DOCKER" == '' ]
-then
-  echo "Docker not found. Please install it first."
-  exit 2
-fi
-
-if [ "$CURL" == '' ]
-then
-  echo "Curl not found. Please install it first."
-  exit 3
-fi
-
-if [ $# -lt 1 ]
-then
-  usage
-  exit 1
-fi
-
-case $1 in
-    start)
-        shift
-        do_start $@
-        ;;
-    stop)
-        shift
-        do_stop $@
-        ;;
-    restart)
-        shift
-        do_stop $@
-        do_start $@
-        ;;
-    test)
-        shift
-        do_test $@
-        ;;
-    reset)
-        shift
-        do_reset $@
-        ;;
-    *)
-        usage
-        exit 1
-        ;;
-esac
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
deleted file mode 100644 (file)
index 5eeabc8..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Arvados base image (wheezy+rvm+Arvados source) in Docker
-
-# Based on Debian Wheezy
-FROM arvados/debian:wheezy
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Install prerequisite packages for Arvados
-#   * git, curl, rvm
-#   * Arvados source code in /usr/src/arvados, for preseeding gem installation
-
-ADD apt.arvados.org.list /etc/apt/sources.list.d/
-RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
-RUN apt-get update -q
-
-## 2015-06-29 nico
-## KNOWN BUG:  python-oauth2client needs specific versions
-## python-pyasn1=0.1.7 python-pyasn1-modules=0.0.5
-## but apt-get doesn't resolve them correctly. We have to
-## do it by hand here (or add apt_preferences if it gets too hairy)
-RUN apt-get install -qy apt-utils git curl \
-             libcurl3 libcurl3-gnutls libcurl4-openssl-dev locales \
-             postgresql-server-dev-9.1 python-arvados-python-client \
-             python-google-api-python-client python-oauth2client python-pyasn1=0.1.7 python-pyasn1-modules=0.0.5
-
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
-    /bin/sed -ri 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
-    /usr/sbin/locale-gen && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.1 && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
-    /bin/mkdir -p /usr/src/arvados
-
-ADD generated/arvados.tar.gz /usr/src/arvados/
-
-# Update gem. This (hopefully) fixes
-# https://github.com/rubygems/rubygems.org/issues/613.
-RUN /usr/local/rvm/bin/rvm-exec default gem update --system && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler  -v 1.9.9 && \
-    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
-    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
-    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile
diff --git a/docker/base/apt.arvados.org.list b/docker/base/apt.arvados.org.list
deleted file mode 100644 (file)
index 7eb8716..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# apt.arvados.org
-deb http://apt.arvados.org/ wheezy main
diff --git a/docker/bcbio-nextgen/Dockerfile b/docker/bcbio-nextgen/Dockerfile
deleted file mode 100644 (file)
index 8f6e774..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Install Arvados SDK into bcbio-nextgen Docker image.
-#
-# To build bcbio-nextgen:
-#
-# $ git clone https://github.com/chapmanb/bcbio-nextgen.git
-# $ cd bcbio-nextgen
-# $ docker build .
-# $ docker tag <image> bcbio-nextgen
-#
-
-FROM bcbio-nextgen
-MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
-
-USER root
-
-# Install Ruby 2.1.0
-RUN apt-get remove --quiet --assume-yes ruby && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.1.0 && \
-    /bin/mkdir -p /usr/src/arvados
-
-ADD generated/arvados.tar.gz /usr/src/arvados/
-ENV GEM_HOME /usr/local/rvm/gems/ruby-2.1.0
-ENV GEM_PATH /usr/local/rvm/gems/ruby-2.1.0:/usr/local/rvm/gems/ruby-2.1.0@global
-ENV PATH /usr/local/rvm/gems/ruby-2.1.0/bin:/usr/local/rvm/gems/ruby-2.1.0@global/bin:/usr/local/rvm/rubies/ruby-2.1.0/bin:/usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-
-# Install dependencies and set up system.
-# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
-RUN /usr/bin/apt-get update && \
-    /usr/bin/apt-get install --quiet --assume-yes python-dev python-llfuse python-pip \
-      libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
-      fuse libattr1-dev libfuse-dev && \
-    /usr/sbin/adduser --disabled-password \
-      --gecos 'Crunch execution user' crunch && \
-    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
-    /bin/ln -s /usr/src/arvados /usr/local/src/arvados
-
-# Install Arvados packages.
-RUN gem update --system && \
-    find /usr/src/arvados/sdk -name '*.gem' -print0 | \
-      xargs -0rn 1 gem install && \
-    cd /usr/src/arvados/services/fuse && \
-    python setup.py install && \
-    cd /usr/src/arvados/sdk/python && \
-    python setup.py install
-
-USER crunch
diff --git a/docker/build.sh b/docker/build.sh
deleted file mode 100755 (executable)
index 77aeb1f..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#! /bin/bash
-
-# make sure a Ruby version greater than or equal to 1.9.3 is installed before proceeding
-if ! ruby -e 'exit RUBY_VERSION >= "1.9.3"' 2>/dev/null
-then
-    echo "Building the Arvados docker containers requires at least Ruby 1.9.3."
-    echo "Please install ruby 1.9.3 or higher before executing this script."
-    exit 1
-fi
-
-function usage {
-    echo >&2
-    echo >&2 "usage: $0 [options]"
-    echo >&2
-    echo >&2 "Calling $0 without arguments will build all Arvados docker images"
-    echo >&2
-    echo >&2 "$0 options:"
-    echo >&2 "  -h, --help   Print this help text"
-    echo >&2 "  clean        Clear all build information"
-    echo >&2 "  realclean    clean and remove all Arvados Docker images except arvados/debian"
-    echo >&2 "  deepclean    realclean and remove arvados/debian, crosbymichael/skydns and "
-    echo >&2 "               crosbymichael/skydns Docker images"
-    echo >&2
-}
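-# Example invocations (hypothetical):
-#   ./build.sh              # build all Arvados Docker images
-#   ./build.sh realclean    # remove build state and the Arvados images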
-
-if [ "$1" = '-h' ] || [ "$1" = '--help' ]; then
-  usage
-  exit 1
-fi
-
-if build_tools/build.rb; then
-    DOCKER=`which docker.io`
-
-    if [[ "$DOCKER" == "" ]]; then
-      DOCKER=`which docker`
-    fi
-
-    DOCKER=$DOCKER /usr/bin/make -f build_tools/Makefile $*
-fi
diff --git a/docker/build_tools/Makefile b/docker/build_tools/Makefile
deleted file mode 100644 (file)
index 8a757d0..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-# This is the 'shell hack'. Call make with DUMP=1 to see the effect.
-ifdef DUMP
-OLD_SHELL := $(SHELL)
-SHELL = $(warning [$@])$(OLD_SHELL) -x
-endif
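-# For example (hypothetical): "make DUMP=1 base-image" prints each target
-# name and traces its recipe commands via "sh -x".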
-
-all: skydns-image skydock-image api-image compute-image doc-image workbench-image keep-image keep-proxy-image sso-image shell-image
-
-IMAGE_FILES := $(shell ls *-image 2>/dev/null |grep -v -E 'debian-arvados-image|skydns-image|skydock-image')
-GENERATED_DIRS := $(shell ls */generated 2>/dev/null)
-
-# `make clean' removes the files generated in the build directory
-# but does not remove any docker images generated in previous builds
-clean:
-       @echo "make clean"
-       -@rm -rf build
-       +@[ "$(IMAGE_FILES)" = "" ] || rm -f $(IMAGE_FILES) 2>/dev/null
-       +@[ "$(GENERATED_DIRS)" = "" ] || rm -rf */generated 2>/dev/null
-
-DEBIAN_IMAGE := $(shell $(DOCKER) images -q arvados/debian |head -n1)
-
-REALCLEAN_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e arvados -e api_server -e keep_server -e keep_proxy_server -e doc_server -e workbench_server |cut -f 1 -d' ')
-# Generate a list of docker images tagged as arvados/*
-# but exclude those tagged as arvados/build
-ADI_TEMPFILE := $(shell mktemp)
-ARVADOS_DOCKER_IMAGES := $(shell $(DOCKER) images -q arvados/* |sort > $(ADI_TEMPFILE))
-ABDI_TEMPFILE := $(shell mktemp)
-ARVADOS_BUILD_DOCKER_IMAGES := $(shell $(DOCKER) images -q arvados/build |sort > $(ABDI_TEMPFILE))
-REALCLEAN_IMAGES := $(shell comm -3 $(ADI_TEMPFILE) $(ABDI_TEMPFILE) |grep -v $(DEBIAN_IMAGE) 2>/dev/null)
-DEEPCLEAN_IMAGES := $(shell comm -3 $(ADI_TEMPFILE) $(ABDI_TEMPFILE))
-SKYDNS_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e crosbymichael/skydns -e crosbymichael/skydock |cut -f 1 -d' ')
-SKYDNS_IMAGES := $(shell $(DOCKER) images -q crosbymichael/skyd*)
-
-# `make realclean' will also remove the Arvados docker images (but not the
-# arvados/debian image) and force subsequent makes to build the entire chain
-# from the ground up
-realclean: clean
-       @echo "make realclean"
-       +@[ "`$(DOCKER) ps -q`" = '' ] || $(DOCKER) stop `$(DOCKER) ps -q`
-       +@[ "$(REALCLEAN_CONTAINERS)" = '' ] || $(DOCKER) rm $(REALCLEAN_CONTAINERS)
-       +@[ "$(REALCLEAN_IMAGES)" = '' ] || $(DOCKER) rmi $(REALCLEAN_IMAGES)
-
-# `make deepclean' will remove all Arvados docker images and the skydns/skydock
-# images and force subsequent makes to build the entire chain from the ground up
-deepclean: clean
-       @echo "make deepclean"
-       -@rm -f debian-arvados-image 2>/dev/null
-       -@rm -f skydns-image skydock-image 2>/dev/null
-       +@[ "`$(DOCKER) ps -q`" = '' ] || $(DOCKER) stop `$(DOCKER) ps -q`
-       +@[ "$(REALCLEAN_CONTAINERS)" = '' ] || $(DOCKER) rm $(REALCLEAN_CONTAINERS)
-       +@[ "$(DEEPCLEAN_IMAGES)" = '' ] || $(DOCKER) rmi $(DEEPCLEAN_IMAGES)
-       +@[ "$(SKYDNS_CONTAINERS)" = '' ] || $(DOCKER) rm $(SKYDNS_CONTAINERS)
-       +@[ "$(SKYDNS_IMAGES)" = '' ] || $(DOCKER) rmi $(SKYDNS_IMAGES)
-
-# ============================================================
-# Dependencies for */generated files which are prerequisites
-# for building docker images.
-
-CONFIG_RB = build_tools/config.rb
-
-BUILD = build/.buildstamp
-
-BASE_DEPS = base/Dockerfile config.yml $(BASE_GENERATED)
-
-SLURM_DEPS = slurm/Dockerfile config.yml $(SLURM_GENERATED)
-
-JOBS_DEPS = jobs/Dockerfile
-
-ARV_WEB_DEPS = arv-web/Dockerfile arv-web/apache2_foreground.sh arv-web/apache2_vhost
-
-JAVA_BWA_SAMTOOLS_DEPS = java-bwa-samtools/Dockerfile
-
-API_DEPS = api/* config.yml $(API_GENERATED)
-
-SHELL_DEPS = shell/* config.yml $(SHELL_GENERATED)
-
-COMPUTE_DEPS = compute/* config.yml $(COMPUTE_GENERATED)
-
-DOC_DEPS = doc/Dockerfile $(DOC_GENERATED)
-
-WORKBENCH_DEPS = workbench/Dockerfile \
-                 config.yml \
-                 $(WORKBENCH_GENERATED)
-
-KEEP_DEPS = keep/Dockerfile config.yml $(KEEP_GENERATED)
-
-KEEP_PROXY_DEPS = keepproxy/Dockerfile config.yml $(KEEP_PROXY_GENERATED)
-
-SSO_DEPS = config.yml $(SSO_GENERATED)
-
-BCBIO_NEXTGEN_DEPS = bcbio-nextgen/Dockerfile
-
-BASE_GENERATED = base/generated/arvados.tar.gz
-
-COMPUTE_GENERATED_IN   = compute/*.in
-COMPUTE_GENERATED      = compute/generated/*
-
-KEEP_GENERATED_IN      = keep/*.in
-KEEP_GENERATED         = keep/generated/*
-
-KEEP_PROXY_GENERATED_IN      = keepproxy/*.in
-KEEP_PROXY_GENERATED         = keepproxy/generated/*
-
-API_GENERATED_IN       = api/*.in
-API_GENERATED          = api/generated/*
-
-SHELL_GENERATED_IN     = shell/*.in
-SHELL_GENERATED        = shell/generated/*
-
-SLURM_GENERATED_IN     = slurm/*.in
-SLURM_GENERATED        = slurm/generated/*
-
-WORKBENCH_GENERATED_IN = workbench/*.in
-WORKBENCH_GENERATED    = workbench/generated/*
-
-SSO_GENERATED_IN       = sso/*.in
-SSO_GENERATED          = sso/generated/*
-
-DOC_GENERATED_IN       = doc/*.in
-DOC_GENERATED          = doc/generated/*
-
-KEEP_DEPS += keep/generated/bin/keepproxy
-KEEP_DEPS += keep/generated/bin/keepstore
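-# The pattern rule below compiles the Go services into keep/generated/bin
-# by symlinking this source tree into a throwaway GOPATH, so "go get"
-# resolves the git.curoverse.com import path locally.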
-keep/generated/bin/%: $(wildcard build/services/%/*.go)
-       mkdir -p keep/generated/src/git.curoverse.com
-       ln -sfn ../../../../.. keep/generated/src/git.curoverse.com/arvados.git
-       GOPATH=$(shell pwd)/keep/generated go get $(@:keep/generated/bin/%=git.curoverse.com/arvados.git/services/%)
-
-KEEP_PROXY_DEPS += keepproxy/generated/bin/keepproxy
-keepproxy/generated/bin/%: $(wildcard build/services/%/*.go)
-       mkdir -p keepproxy/generated/src/git.curoverse.com
-       ln -sfn ../../../../.. keepproxy/generated/src/git.curoverse.com/arvados.git
-       GOPATH=$(shell pwd)/keepproxy/generated go get $(@:keepproxy/generated/bin/%=git.curoverse.com/arvados.git/services/%)
-
-$(BUILD):
-       mkdir -p build
-       rsync -rlp --exclude=docker/ --exclude='**/log/*' --exclude='**/tmp/*' \
-               --chmod=Da+rx,Fa+rX ../ build/
-       find build/ -name \*.gem -delete
-       cd build/services/fuse/ && python setup.py build
-       cd build/sdk/python/ && python setup.py build
-       cd build/sdk/cli && gem build arvados-cli.gemspec
-       cd build/sdk/ruby && gem build arvados.gemspec
-       touch build/.buildstamp
-
-$(SLURM_GENERATED): $(BUILD)
-       $(CONFIG_RB) slurm
-       mkdir -p slurm/generated
-
-$(BASE_GENERATED): $(BUILD)
-       $(CONFIG_RB) base
-       mkdir -p base/generated
-       tar -czf base/generated/arvados.tar.gz -C build .
-
-$(API_GENERATED): $(API_GENERATED_IN)
-       $(CONFIG_RB) api
-
-$(SHELL_GENERATED): $(SHELL_GENERATED_IN)
-       $(CONFIG_RB) shell
-
-$(WORKBENCH_GENERATED): $(WORKBENCH_GENERATED_IN)
-       $(CONFIG_RB) workbench
-
-$(COMPUTE_GENERATED): $(COMPUTE_GENERATED_IN)
-       $(CONFIG_RB) compute
-
-$(SSO_GENERATED): $(SSO_GENERATED_IN)
-       $(CONFIG_RB) sso
-
-$(DOC_GENERATED): $(DOC_GENERATED_IN)
-       $(CONFIG_RB) doc
-
-$(KEEP_GENERATED): $(KEEP_GENERATED_IN)
-       $(CONFIG_RB) keep
-
-$(KEEP_PROXY_GENERATED): $(KEEP_PROXY_GENERATED_IN)
-       $(CONFIG_RB) keepproxy
-
-DOCKER_BUILD = $(DOCKER) build --rm=true
-
-# ============================================================
-# The main Arvados servers: api, doc, workbench, compute
-
-api-image: passenger-image $(BUILD) $(API_DEPS)
-       @echo "Building api-image"
-       mkdir -p api/generated
-       tar -czf api/generated/api.tar.gz -C build/services api
-       $(DOCKER_BUILD) -t arvados/api api
-       date >api-image
-
-shell-image: base-image $(BUILD) $(SHELL_DEPS)
-       @echo "Building shell-image"
-       mkdir -p shell/generated
-       $(DOCKER_BUILD) -t arvados/shell shell
-       date >shell-image
-
-compute-image: slurm-image $(BUILD) $(COMPUTE_DEPS)
-       @echo "Building compute-image"
-       $(DOCKER_BUILD) -t arvados/compute compute
-       date >compute-image
-
-doc-image: base-image $(BUILD) $(DOC_DEPS)
-       @echo "Building doc-image"
-       mkdir -p doc/generated
-       tar -czf doc/generated/doc.tar.gz -C build doc
-       $(DOCKER_BUILD) -t arvados/doc doc
-       date >doc-image
-
-keep-image: debian-arvados-image $(BUILD) $(KEEP_DEPS)
-       @echo "Building keep-image"
-       $(DOCKER_BUILD) -t arvados/keep keep
-       date >keep-image
-
-keep-proxy-image: debian-arvados-image $(BUILD) $(KEEP_PROXY_DEPS)
-       @echo "Building keep-proxy-image"
-       $(DOCKER_BUILD) -t arvados/keepproxy keepproxy
-       date >keep-proxy-image
-
-jobs-image: debian-arvados-image $(BUILD) $(JOBS_DEPS)
-       $(DOCKER_BUILD) --build-arg COMMIT=$(COMMIT) -t arvados/jobs jobs
-       date >jobs-image
-
-java-bwa-samtools-image: jobs-image $(BUILD) $(JAVA_BWA_SAMTOOLS_DEPS)
-       $(DOCKER_BUILD) -t arvados/jobs-java-bwa-samtools java-bwa-samtools
-       date >java-bwa-samtools-image
-
-bcbio-nextgen-image: $(BUILD) $(BASE_GENERATED) $(BCBIO_NEXTGEN_DEPS)
-       rm -rf bcbio-nextgen/generated
-       cp -r base/generated bcbio-nextgen
-       $(DOCKER_BUILD) -t arvados/bcbio-nextgen bcbio-nextgen
-       date >bcbio-nextgen-image
-
-workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
-       @echo "Building workbench-image"
-       mkdir -p workbench/generated
-       tar -czf workbench/generated/workbench.tar.gz -C build/apps workbench
-       $(DOCKER_BUILD) -t arvados/workbench workbench
-       date >workbench-image
-
-sso-image: passenger-image $(SSO_DEPS)
-       @echo "Building sso-image"
-       $(DOCKER_BUILD) -t arvados/sso sso
-       date >sso-image
-
-arv-web-image: passenger-image $(ARV_WEB_DEPS)
-       $(DOCKER_BUILD) -t arvados/arv-web arv-web
-       date >arv-web-image
-
-# ============================================================
-# The arvados/base image is the base Debian image plus packages
-# that are dependencies for every Arvados service.
-
-passenger-image: base-image
-       @echo "Building passenger-image"
-       $(DOCKER_BUILD) -t arvados/passenger passenger
-       date >passenger-image
-
-slurm-image: base-image $(SLURM_DEPS)
-       @echo "Building slurm-image"
-       $(DOCKER_BUILD) -t arvados/slurm slurm
-       date >slurm-image
-
-base-image: debian-arvados-image $(BASE_DEPS)
-       @echo "Building base-image"
-       $(DOCKER_BUILD) -t arvados/base base
-       date >base-image
-
-debian-arvados-image:
-       @echo "Building debian-arvados-image"
-       ./mkimage-debootstrap.sh arvados/debian wheezy http://ftp.us.debian.org/debian/
-       date >debian-arvados-image
-
-skydns-image:
-       @echo "Downloading skydns-image"
-       $(DOCKER) pull crosbymichael/skydns
-       date >skydns-image
-
-skydock-image:
-       @echo "Downloading skydock-image"
-       $(DOCKER) pull crosbymichael/skydock
-       date >skydock-image
diff --git a/docker/build_tools/build.rb b/docker/build_tools/build.rb
deleted file mode 100755 (executable)
index e3309a9..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-#! /usr/bin/env ruby
-
-require 'optparse'
-require 'tempfile'
-require 'yaml'
-
-def main options
-  if not ip_forwarding_enabled?
-    warn "NOTE: IP forwarding must be enabled in the kernel."
-    warn "Turning IP forwarding on now."
-    sudo %w(/sbin/sysctl net.ipv4.ip_forward=1)
-  end
-
-  # Check that:
-  #   * Docker is installed and can be found in the user's path
-  #   * Docker can be run as a non-root user
-  #      - TODO: put the user in the docker group if necessary
-  #      - TODO: mount cgroup automatically
-  #      - TODO: start the docker service if not started
-
-  docker_path = %x(which docker.io).chomp
-
-  if docker_path.empty?
-    docker_path = %x(which docker).chomp
-  end
-
-  if docker_path.empty?
-    warn "Docker not found."
-    warn ""
-    warn "Please make sure that Docker has been installed and"
-    warn "can be found in your PATH."
-    warn ""
-    warn "Installation instructions for a variety of platforms can be found at"
-    warn "http://docs.docker.io/en/latest/installation/"
-    exit 1
-  elsif not docker_ok? docker_path
-    warn "WARNING: docker could not be run."
-    warn "Please make sure that:"
-    warn "  * You have permission to read and write /var/run/docker.sock"
-    warn "  * a 'cgroup' volume is mounted on your machine"
-    warn "  * the docker daemon is running"
-    exit 2
-  end
-
-  # Check that debootstrap is installed.
-  if not debootstrap_ok?
-    warn "Installing debootstrap."
-    sudo '/usr/bin/apt-get', 'install', 'debootstrap'
-  end
-
-  # Generate a config.yml if it does not exist or is empty
-  if not File.size? 'config.yml'
-    print "Generating config.yml.\n"
-    # print "Arvados needs to know the email address of the administrative user,\n"
-    # print "so that when that user logs in they are automatically made an admin.\n"
-    # print "This should be an email address associated with a Google account.\n"
-    # print "\n"
-    # admin_email_address = ""
-    # until is_valid_email? admin_email_address
-    #   print "Enter your Google ID email address here: "
-    #   admin_email_address = gets.strip
-    #   if not is_valid_email? admin_email_address
-    #     print "That doesn't look like a valid email address. Please try again.\n"
-    #   end
-    # end
-
-    # print "Arvados needs to know the shell login name for the administrative user.\n"
-    # print "This will also be used as the name for your git repository.\n"
-    # print "\n"
-    # user_name = ""
-    # until is_valid_user_name? user_name
-    #   print "Enter a shell login name here: "
-    #   user_name = gets.strip
-    #   if not is_valid_user_name? user_name
-    #     print "That doesn't look like a valid shell login name. Please try again.\n"
-    #   end
-    # end
-
-    File.open 'config.yml', 'w' do |config_out|
-      config_out.write "# If a _PW or _SECRET variable is set to an empty string, a password\n"
-      config_out.write "# will be chosen randomly at build time. This is the\n"
-      config_out.write "# recommended setting.\n\n"
-      config = YAML.load_file 'config.yml.example'
-      #config['API_AUTO_ADMIN_USER'] = admin_email_address
-      #config['ARVADOS_USER_NAME'] = user_name
-      config['API_HOSTNAME'] = generate_api_hostname
-      config['API_WORKBENCH_ADDRESS'] = 'false'
-      config.each_key do |var|
-        config_out.write "#{var}: #{config[var]}\n"
-      end
-    end
-  end
-
-  # If all prerequisites are met, go ahead and build.
-  if ip_forwarding_enabled? and
-      docker_ok? docker_path and
-      debootstrap_ok? and
-      File.exists? 'config.yml'
-    exit 0
-  else
-    exit 6
-  end
-end
-
-# sudo
-#   Execute the arg list 'cmd' under sudo.
-#   cmd can be passed either as a series of arguments or as a
-#   single argument consisting of a list, e.g.:
-#     sudo 'apt-get', 'update'
-#     sudo(['/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'])
-#     sudo %w(/usr/bin/apt-get install lxc-docker)
-#
-def sudo(*cmd)
-  # user can pass a single list in as an argument
-  # to allow usage like: sudo %w(apt-get install foo)
-  warn "You may need to enter your password here."
-  if cmd.length == 1 and cmd[0].class == Array
-    cmd = cmd[0]
-  end
-  system '/usr/bin/sudo', *cmd
-end
-
-# is_valid_email?
-#   Returns true if its arg looks like a valid email address.
-#   This is a very very loose sanity check.
-#
-def is_valid_email? str
-  str.match /^\S+@\S+\.\S+$/
-end
-
-# is_valid_user_name?
-#   Returns true if its arg looks like a valid unix username.
-#   This is a very very loose sanity check.
-#
-def is_valid_user_name? str
-  # borrowed from Debian's adduser (version 3.110)
-  str.match /^[_.A-Za-z0-9][-\@_.A-Za-z0-9]*\$?$/
-end
-
-# generate_api_hostname
-#   Generates a 5-character randomly chosen API hostname.
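-#   For example (hypothetical output): "x3k9p".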
-#
-def generate_api_hostname
-  rand(2**256).to_s(36)[0...5]
-end
-
-# ip_forwarding_enabled?
-#   Returns 'true' if IP forwarding is enabled in the kernel
-#
-def ip_forwarding_enabled?
-  %x(/sbin/sysctl -n net.ipv4.ip_forward) == "1\n"
-end
-
-# debootstrap_ok?
-#   Returns 'true' if debootstrap is installed and working.
-#
-def debootstrap_ok?
-  return system '/usr/sbin/debootstrap --version > /dev/null 2>&1'
-end
-
-# docker_ok?
-#   Returns 'true' if docker can be run as the current user.
-#
-def docker_ok?(docker_path)
-  return system "#{docker_path} images > /dev/null 2>&1"
-end
-
-# install_docker
-#   Determines which Docker package is suitable for this Linux distro
-#   and installs it, resolving any dependencies.
-#   NOTE: not in use yet.
-
-def install_docker
-  linux_distro = %x(lsb_release --id).split.last
-  linux_release = %x(lsb_release --release).split.last
-  linux_version = linux_distro + " " + linux_release
-  kernel_release = `uname -r`.strip
-
-  case linux_distro
-  when 'Ubuntu'
-    if not linux_release.match '^1[234]\.'
-      warn "Arvados requires at least Ubuntu 12.04 (Precise Pangolin)."
-      warn "Your system is Ubuntu #{linux_release}."
-      exit 3
-    end
-    if linux_release.match '^12' and kernel_release.start_with? '3.2'
-      # Ubuntu Precise ships with a 3.2 kernel, which must be upgraded.
-      warn "Your kernel #{kernel_release} must be upgraded to run Docker."
-      warn "To do this:"
-      warn "  sudo apt-get update"
-      warn "  sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring"
-      warn "  sudo reboot"
-      exit 4
-    else
-      # install AUFS
-      sudo 'apt-get', 'update'
-      sudo 'apt-get', 'install', "linux-image-extra-#{kernel_release}"
-    end
-
-    # add Docker repository
-    sudo %w(/usr/bin/apt-key adv
-              --keyserver keyserver.ubuntu.com
-              --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9)
-    source_file = Tempfile.new('arv')
-    source_file.write("deb http://get.docker.io/ubuntu docker main\n")
-    source_file.close
-    sudo '/bin/mv', source_file.path, '/etc/apt/sources.list.d/docker.list'
-    sudo %w(/usr/bin/apt-get update)
-    sudo %w(/usr/bin/apt-get install lxc-docker)
-
-    # Set up for non-root access
-    sudo %w(/usr/sbin/groupadd docker)
-    sudo '/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'
-    sudo %w(/usr/sbin/service docker restart)
-  when 'Debian'
-  else
-    warn "Must be running a Debian or Ubuntu release in order to run Docker."
-    exit 5
-  end
-end
-
-
-if __FILE__ == $PROGRAM_NAME
-  options = { :makefile => File.join(File.dirname(__FILE__), 'Makefile') }
-  OptionParser.new do |opts|
-    opts.on('-m', '--makefile MAKEFILE-PATH',
-            'Path to the Makefile used to build Arvados Docker images') do |mk|
-      options[:makefile] = mk
-    end
-  end
-  main options
-end
diff --git a/docker/build_tools/config.rb b/docker/build_tools/config.rb
deleted file mode 100755 (executable)
index 296bc20..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-#! /usr/bin/env ruby
-
-require 'yaml'
-require 'fileutils'
-require 'digest'
-
-abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
-
-# Initialize config settings from config.yml
-config = YAML.load_file('config.yml')
-
-# ============================================================
-# Add dynamically chosen config settings. These settings should
-# be suitable for any installation.
-
-# Any _PW/_SECRET config settings represent passwords/secrets. If they
-# are blank, choose a password. Make sure the generated password
-# doesn't change if config.yml doesn't change. Otherwise, keys won't
-# match any more if (say) keep's files get regenerated but apiserver's
-# don't.
-config.sort.map do |var,val|
-  if (var.end_with?('_PW') || var.end_with?('_SECRET')) && (config[var].nil? || config[var].empty?)
-    config[var] = Digest::SHA1.hexdigest(`hostname` + var + config.to_yaml)
-  end
-end
-
-# ============================================================
-# For each *.in file in the docker directories, substitute any
-# @@variables@@ found in the file with the appropriate config
-# variable. Support up to 10 levels of nesting.
-#
-# TODO(twp): add the *.in files directory to the source tree, and
-# when expanding them, add them to the "generated" directory with
-# the same tree structure as in the original source. Then all
-# the files can be added to the docker container with a single ADD.
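-#
-# For example (hypothetical template line), with ARVADOS_DOMAIN set to
-# "dev.arvados" in config.yml:
-#   ServerName doc.@@ARVADOS_DOMAIN@@   ->   ServerName doc.dev.arvados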
-
-if ARGV[0] and ARGV[0].length > 0
-  globdir = ARGV[0]
-else
-  globdir = '*'
-end
-
-FileUtils.rm_r Dir.glob(globdir + '/generated/*')
-
-File.umask(022)
-Dir.glob(globdir + '/*.in') do |template_file|
-  generated_dir = File.join(File.dirname(template_file), 'generated')
-  Dir.mkdir(generated_dir) unless Dir.exists? generated_dir
-  output_path = File.join(generated_dir, File.basename(template_file, '.in'))
-  output_mode = (File.stat(template_file).mode & 0100) ? 0755 : 0644
-  File.open(output_path, "w", output_mode) do |output|
-    File.open(template_file) do |input|
-      input.each_line do |line|
-
-        # This count is used to short-circuit potential
-        # infinite loops of variable substitution.
-        @count = 0
-        while @count < 10
-          @out = line.gsub!(/@@(.*?)@@/) do |var|
-            if config.key?(Regexp.last_match[1])
-              config[Regexp.last_match[1]]
-            else
-              var.gsub!(/@@/, '@_NOT_FOUND_@')
-            end
-          end
-          break if @out.nil?
-          @count += 1
-        end
-
-        output.write(line)
-      end
-    end
-  end
-end
diff --git a/docker/compute/Dockerfile b/docker/compute/Dockerfile
deleted file mode 100644 (file)
index 60b5fa4..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Arvados compute node Docker container.
-
-FROM arvados/slurm
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-RUN apt-get update -q
-## 20150915 nico -- fuse.postinst has sporadic failures; splitting this up to see if it helps
-RUN apt-get install -qy fuse
-RUN apt-get install -qy supervisor python-pip python-gflags python-google-api-python-client python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse crunchstat python-arvados-fuse cron dnsmasq
-
-ADD fuse.conf /etc/fuse.conf
-RUN chmod 644 /etc/fuse.conf
-
-RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
-
-# Install Docker from the Arvados package repository (cf. arvados/base)
-RUN apt-get install -qy iptables ca-certificates lxc apt-transport-https docker.io
-
-RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid 4005 crunch && usermod crunch -G fuse,docker && chown crunch:crunch /home/crunch
-
-# Supervisor.
-ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
-ADD generated/setup.sh /usr/local/bin/setup.sh
-ADD wrapdocker /usr/local/bin/wrapdocker.sh
-ADD munge.sh /usr/local/bin/munge.sh
-
-VOLUME /var/lib/docker
-# Start the supervisor.
-CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/compute/fuse.conf b/docker/compute/fuse.conf
deleted file mode 100644 (file)
index 4ed21ba..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# Set the maximum number of FUSE mounts allowed to non-root users.
-# The default is 1000.
-#
-#mount_max = 1000
-
-# Allow non-root users to specify the 'allow_other' or 'allow_root'
-# mount options.
-#
-user_allow_other
-
diff --git a/docker/compute/munge.sh b/docker/compute/munge.sh
deleted file mode 100755 (executable)
index ef10d01..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-rm -rf /var/run/munge
-exec /etc/init.d/munge start
diff --git a/docker/compute/setup.sh.in b/docker/compute/setup.sh.in
deleted file mode 100755 (executable)
index efb2c41..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-. /etc/profile.d/rvm.sh
-
-export ARVADOS_API_HOST=api
-export ARVADOS_API_HOST_INSECURE=yes
-export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
-export HOME=/root
-
-# Wait for API server to come up.
-while ! arv user current ; do sleep 1 ; done
-
-if ! test -f /root/node.json ; then
-    arv node create --node "{\"hostname\": \"$(hostname)\"}" > /root/node.json
-
-    # Make sure /dev/fuse permissions are correct (the device appears after fuse is loaded)
-    chmod 1660 /dev/fuse && chgrp fuse /dev/fuse
-fi
-
-UUID=`grep \"uuid\" /root/node.json  |cut -f4 -d\"`
-PING_SECRET=`grep \"ping_secret\" /root/node.json  |cut -f4 -d\"`
-
-if ! test -f /etc/cron.d/node_ping ; then
-    echo "*/5 * * * * root /usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping" > /etc/cron.d/node_ping
-fi
-
-/usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping?ping_secret=$PING_SECRET
diff --git a/docker/compute/supervisor.conf b/docker/compute/supervisor.conf
deleted file mode 100644 (file)
index b3c715b..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-[program:munge]
-user=root
-command=/usr/local/bin/munge.sh
-startsecs=0
-
-[program:slurm]
-user=root
-command=/etc/init.d/slurm-llnl start
-startsecs=0
-
-[program:cron]
-user=root
-command=/etc/init.d/cron start
-startsecs=0
-
-[program:setup]
-user=root
-command=/usr/local/bin/setup.sh
-startsecs=0
-
-[program:docker]
-user=root
-command=/usr/local/bin/wrapdocker.sh
-
-[program:dnsmasq]
-user=root
-command=/etc/init.d/dnsmasq start
-startsecs=0
diff --git a/docker/compute/wrapdocker b/docker/compute/wrapdocker
deleted file mode 100755 (executable)
index cee1302..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-
-# Borrowed from https://github.com/jpetazzo/dind under Apache2
-# and slightly modified.
-
-# First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
-: ${LOG:=stdio}
-
-[ -d $CGROUP ] ||
-       mkdir $CGROUP
-
-mountpoint -q $CGROUP ||
-       mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
-               echo "Could not make a tmpfs mount. Did you use -privileged?"
-               exit 1
-       }
-
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
-then
-    mount -t securityfs none /sys/kernel/security || {
-        echo "Could not mount /sys/kernel/security."
-        echo "AppArmor detection and -privileged mode might break."
-    }
-fi
-
-# Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
-do
-        [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
-        mountpoint -q $CGROUP/$SUBSYS ||
-                mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
-
-        # The two following sections address a bug which manifests itself
-        # by a cryptic "lxc-start: no ns_cgroup option specified" when
-        # trying to start containers within a container.
-        # The bug seems to appear when the cgroup hierarchies are not
-        # mounted on the exact same directories in the host, and in the
-        # container.
-
-        # Named, control-less cgroups are mounted with "-o name=foo"
-        # (and appear as such under /proc/<pid>/cgroup) but are usually
-        # mounted on a directory named "foo" (without the "name=" prefix).
-        # Systemd and OpenRC (and possibly others) both create such a
-        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
-        # "name=foo". This shouldn't have any adverse effect.
-        echo $SUBSYS | grep -q ^name= && {
-                NAME=$(echo $SUBSYS | sed s/^name=//)
-                ln -s $SUBSYS $CGROUP/$NAME
-        }
-
-        # Likewise, on at least one system, it has been reported that
-        # systemd would mount the CPU and CPU accounting controllers
-        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
-        # but on a directory called "cpu,cpuacct" (note the inversion
-        # in the order of the groups). This tries to work around it.
-        [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
-done
-
-# Note: as I write those lines, the LXC userland tools cannot setup
-# a "sub-container" properly if the "devices" cgroup is not in its
-# own hierarchy. Let's detect this and issue a warning.
-grep -q :devices: /proc/1/cgroup ||
-       echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
-grep -qw devices /proc/1/cgroup ||
-       echo "WARNING: it looks like the 'devices' cgroup is not mounted."
-
-# Now, close extraneous file descriptors.
-pushd /proc/self/fd >/dev/null
-for FD in *
-do
-       case "$FD" in
-       # Keep stdin/stdout/stderr
-       [012])
-               ;;
-       # Nuke everything else
-       *)
-               eval exec "$FD>&-"
-               ;;
-       esac
-done
-popd >/dev/null
-
-
-# If a pidfile is still around (for example after a container restart),
-# delete it so that docker can start.
-rm -rf /var/run/docker.pid
-
-exec docker -d
-
diff --git a/docker/config.yml.example b/docker/config.yml.example
deleted file mode 100644 (file)
index f40c0fe..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-# Configuration for the Rails databases (database names,
-# usernames and passwords).
-
-# Username for your Arvados user. This will be used as your shell login name
-# as well as the name for your git repository.
-ARVADOS_USER_NAME:
-
-# ARVADOS_DOMAIN: the Internet domain of this installation.
-# ARVADOS_DNS_SERVER: the authoritative nameserver for ARVADOS_DOMAIN.
-ARVADOS_DOMAIN: dev.arvados
-ARVADOS_DNS_SERVER:     # e.g. 192.168.0.1
-
-# ==============================
-# API server settings
-# ==============================
-
-# The API server hostname. Must be a 5-character
-# string unique within this installation. This string
-# will also be used as config.uuid_prefix.
-API_HOSTNAME:           # e.g. qr1hi
-
-# The e-mail address of the user who should automatically be made an
-# admin user on their first login.
-# In the default configuration, authentication happens through the Arvados SSO
-# server, which uses openid against Google's servers, so in that case this
-# should be an address associated with a Google account.
-API_AUTO_ADMIN_USER:
-
-# The location of the Workbench application where users should be
-# redirected if they point their browsers at the API server, e.g.,
-# https://localhost:9899
-API_WORKBENCH_ADDRESS:
-
-# If a _PW variable is set to an empty string, a password
-# will be chosen randomly at build time. This is the
-# recommended setting.
-ARVADOS_DEV_DB: arvados_development
-ARVADOS_DEV_USER: arvados_dev
-ARVADOS_DEV_PW:
-ARVADOS_TEST_DB: arvados_test
-ARVADOS_TEST_USER: arvados_test
-ARVADOS_TEST_PW:
-ARVADOS_PROD_DB: arvados_production
-ARVADOS_PROD_USER: arvados_prod
-ARVADOS_PROD_PW:
-
-# If a _SECRET variable is set to an empty string, a password
-# will be chosen randomly at build time. This is the
-# recommended setting.
-
-# The signing key shared by Keep and the API server to verify
-# blob permission signatures.
-KEEP_SIGNING_SECRET:
-
-# The value for the Rails config.secret_token setting.
-API_SECRET:
-
-# A "superuser" token with which servers can authenticate to
-# the API server, before an administrative user has been created.
-# Leave this blank to generate a secret randomly at build time (recommended).
-API_SUPERUSER_SECRET:
-
-# Ideally this would be auto-generated, but we don't presently have a
-# good place to store it, so be sure to change it from this default.
-POSTGRES_ROOT_PW: dummy_pw
-
-# The URL of the SSO server that you want your API server to use. If
-# blank, use the sso docker container.
-OMNIAUTH_URL:
-
-# ==============================
-# Workbench settings
-# ==============================
-WORKBENCH_RAILS_MODE: production
-WORKBENCH_DATA_IMPORT_DIR: /data/arvados-workbench-upload/data
-WORKBENCH_DATA_EXPORT_DIR: /data/arvados-workbench-download/data
-WORKBENCH_VCF_PIPELINE_UUID:
-WORKBENCH_SITE_NAME: Arvados Workbench
-WORKBENCH_INSECURE_HTTPS: true
-WORKBENCH_ACTIVATION_CONTACT_LINK: mailto:arvados@curoverse.com
-WORKBENCH_SECRET:
-
-# ==============================
-# SSO settings
-# ==============================
-SSO_HOSTNAME: sso
-SSO_SECRET:
-SSO_CLIENT_NAME: devsandbox
-SSO_CLIENT_APP_ID: local_docker_installation
-SSO_CLIENT_SECRET:
diff --git a/docker/doc/Dockerfile b/docker/doc/Dockerfile
deleted file mode 100644 (file)
index 1492675..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Arvados Documentation Docker container.
-
-FROM arvados/base
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-# Install packages
-RUN /bin/mkdir -p /usr/src/arvados && \
-    apt-get update -q && \
-    apt-get install -qy curl procps apache2-mpm-worker
-
-ADD generated/doc.tar.gz /usr/src/arvados/
-
-# Build static site
-RUN /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile && \
-    /bin/sed -ri 's/^baseurl: .*$/baseurl: /' /usr/src/arvados/doc/_config.yml && \
-    cd /usr/src/arvados/doc && \
-    LANG="en_US.UTF-8" LC_ALL="en_US.UTF-8" /usr/local/rvm/bin/rvm-exec default bundle exec rake generate arvados_api_host=api.dev.arvados arvados_workbench_host=workbench.dev.arvados
-
-
-# Configure Apache
-ADD generated/apache2_vhost /etc/apache2/sites-available/doc
-RUN \
-  a2dissite default && \
-  a2ensite doc
-
-ADD apache2_foreground.sh /etc/apache2/foreground.sh
-
-# Start Apache
-CMD ["/etc/apache2/foreground.sh"]
diff --git a/docker/doc/apache2_foreground.sh b/docker/doc/apache2_foreground.sh
deleted file mode 100755 (executable)
index fc6028e..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/bash
-
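-# Determine our process group from /proc/self/stat so the trap below
-# can signal every child in the group when this script exits.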
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
-
-source /etc/apache2/envvars
-/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/doc/apache2_vhost.in b/docker/doc/apache2_vhost.in
deleted file mode 100644 (file)
index 76da6d0..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-
-ServerName doc.@@ARVADOS_DOMAIN@@
-
-<VirtualHost *:80>
-  ServerAdmin sysadmin@curoverse.com
-
-  ServerName doc.@@ARVADOS_DOMAIN@@
-
-  DocumentRoot /usr/src/arvados/doc/.site/
-
-</VirtualHost>
diff --git a/docker/install_sdk.sh b/docker/install_sdk.sh
deleted file mode 100755 (executable)
index 1c07c9d..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#! /bin/sh
-
-# Install prerequisites.
-sudo apt-get install curl libcurl3 libcurl3-gnutls libcurl4-openssl-dev python-pip
-
-# Install RVM.
-curl -sSL https://get.rvm.io | bash -s stable
-source ~/.rvm/scripts/rvm
-rvm install 2.1.0
-
-# Install arvados-cli.
-gem install arvados-cli
-sudo pip install --upgrade httplib2
diff --git a/docker/java-bwa-samtools/Dockerfile b/docker/java-bwa-samtools/Dockerfile
deleted file mode 100644 (file)
index 2a73977..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM arvados/jobs
-MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
-
-USER root
-
-RUN apt-get update -q
-RUN apt-get install -qy openjdk-7-jre-headless && \
-    cd /tmp && \
-    curl --location http://cache.arvados.org/sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
-    tar xjf bwa-0.7.9a.tar.bz2 && \
-    cd bwa-0.7.9a && \
-    make && \
-    (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
-    rm -r /tmp/bwa-0.7.9a* && \
-    cd /tmp && \
-    curl --location http://cache.arvados.org/sourceforge.net/project/samtools/samtools/0.1.19/samtools-0.1.19.tar.bz2 -o samtools-0.1.19.tar.bz2 && \
-    tar xjf samtools-0.1.19.tar.bz2 && \
-    cd samtools-0.1.19 && \
-    make && \
-    (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
-    rm -r /tmp/samtools-0.1.19*
-
-USER crunch
diff --git a/docker/jobs/Dockerfile b/docker/jobs/Dockerfile
index d80c3a882defe43676476df144401eee64d97728..e1e7e87c5e53d0c297ec6d2e3ad0870890f402ff 100644 (file)
@@ -11,7 +11,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3
 ARG COMMIT=latest
 RUN echo $COMMIT && apt-get update -q
 
-RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
+RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
 
 # Install dependencies and set up system.
 RUN /usr/sbin/adduser --disabled-password \
diff --git a/docker/keep/Dockerfile b/docker/keep/Dockerfile
deleted file mode 100644 (file)
index 08e5175..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-# Based on Debian Wheezy
-FROM arvados/debian:wheezy
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-ADD generated/bin/keepstore /usr/local/bin/
-ADD generated/bin/keepproxy /usr/local/bin/
-ADD generated/run-keep /usr/local/bin/
-
-ADD generated/keep_signing_secret /etc/
-
-RUN mkdir /keep-data
-
-# Start keep
-CMD ["/usr/local/bin/run-keep"]
diff --git a/docker/keep/keep_signing_secret.in b/docker/keep/keep_signing_secret.in
deleted file mode 100644 (file)
index e5b39c8..0000000
+++ /dev/null
@@ -1 +0,0 @@
-@@KEEP_SIGNING_SECRET@@
\ No newline at end of file
diff --git a/docker/keep/run-keep.in b/docker/keep/run-keep.in
deleted file mode 100755 (executable)
index 385f0e6..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-pkf="/etc/keep_signing_secret"
-if [ -s "$pkf" ]
-then
-    permission_args="-permission-key-file=$pkf -enforce-permissions"
-else
-    permission_args=""
-fi
-
-exec keepstore $permission_args -listen=":25107" -volume="/keep-data"
diff --git a/docker/keepproxy/Dockerfile b/docker/keepproxy/Dockerfile
deleted file mode 100644 (file)
index e8df168..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# Based on Debian Wheezy
-FROM arvados/debian:wheezy
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-RUN apt-get update -q
-RUN apt-get install -qy ca-certificates
-
-ADD generated/bin/keepproxy /usr/local/bin/
-ADD generated/run-keepproxy /usr/local/bin/
-
-# Start keep
-CMD ["/usr/local/bin/run-keepproxy"]
diff --git a/docker/keepproxy/run-keepproxy.in b/docker/keepproxy/run-keepproxy.in
deleted file mode 100755 (executable)
index 4bd934d..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-export ARVADOS_API_HOST=api
-export ARVADOS_API_HOST_INSECURE=yes
-# This should be an anonymous token, but we don't have a good way
-# to get one while building the images.
-export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -TERM -$pgrp; exit" HUP EXIT TERM QUIT
-
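-# Keep keepproxy running: if it exits, restart it after a short pause.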
-while /bin/true ; do
-    keepproxy -listen=':9100'
-    sleep 1
-done
diff --git a/docker/mkimage-debootstrap.sh b/docker/mkimage-debootstrap.sh
deleted file mode 100755 (executable)
index 2ad79ef..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/bin/bash
-set -e
-
-variant='minbase'
-include='iproute,iputils-ping'
-arch='amd64' # intentionally undocumented for now
-skipDetection=
-strictDebootstrap=
-justTar=
-
-usage() {
-       echo >&2
-       
-       echo >&2 "usage: $0 [options] repo suite [mirror]"
-       
-       echo >&2
-       echo >&2 'options: (not recommended)'
-       echo >&2 "  -p set an http_proxy for debootstrap"
-       echo >&2 "  -v $variant # change default debootstrap variant"
-       echo >&2 "  -i $include # change default package includes"
-       echo >&2 "  -d # strict debootstrap (do not apply any docker-specific tweaks)"
-       echo >&2 "  -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
-       echo >&2 "     # note that this will also skip adding universe and/or security/updates to sources.list"
-       echo >&2 "  -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
-       
-       echo >&2
-       echo >&2 "   ie: $0 username/debian squeeze"
-       echo >&2 "       $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
-       
-       echo >&2
-       echo >&2 "   ie: $0 username/ubuntu precise"
-       echo >&2 "       $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
-       
-       echo >&2
-       echo >&2 "   ie: $0 -t precise.tar.bz2 precise"
-       echo >&2 "       $0 -t wheezy.tgz wheezy"
-       echo >&2 "       $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
-       
-       echo >&2
-}
-
-# these should match the names found at http://www.debian.org/releases/
-debianStable=wheezy
-debianUnstable=sid
-# this should match the name found at http://releases.ubuntu.com/
-ubuntuLatestLTS=precise
-
-while getopts v:i:a:p:dst name; do
-       case "$name" in
-               p)
-                       http_proxy="$OPTARG"
-                       ;;
-               v)
-                       variant="$OPTARG"
-                       ;;
-               i)
-                       include="$OPTARG"
-                       ;;
-               a)
-                       arch="$OPTARG"
-                       ;;
-               d)
-                       strictDebootstrap=1
-                       ;;
-               s)
-                       skipDetection=1
-                       ;;
-               t)
-                       justTar=1
-                       ;;
-               ?)
-                       usage
-                       exit 0
-                       ;;
-       esac
-done
-shift $(($OPTIND - 1))
-
-repo="$1"
-suite="$2"
-mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
-
-if [ ! "$repo" ] || [ ! "$suite" ]; then
-       usage
-       exit 1
-fi
-
-# some rudimentary detection for whether we need to "sudo" our docker calls
-set +e
-docker=`which docker.io`
-if [[ "$docker" == "" ]]; then
-       docker=`which docker`
-fi
-set -e
-
-if $docker version > /dev/null 2>&1; then
-       docker="$docker"
-elif sudo $docker version > /dev/null 2>&1; then
-       docker="sudo $docker"
-elif command -v $docker > /dev/null 2>&1; then
-       docker="$docker"
-else
-       echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
-       echo >&2 "         this script is not likely to work as expected"
-       sleep 3
-       docker='docker' # give us a command-not-found later
-fi
-
-# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory
-if [ "$justTar" ]; then
-       if [ ! -d "$(dirname "$repo")" ]; then
-               echo >&2 "error: $(dirname "$repo") does not exist"
-               exit 1
-       fi
-       repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")"
-fi
-
-# will be filled in later, if [ -z "$skipDetection" ]
-lsbDist=''
-
-target="${TMPDIR:-/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
-
-cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
-returnTo="$(pwd -P)"
-
-set -x
-
-# bootstrap
-mkdir -p "$target"
-sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror"
-
-cd "$target"
-
-if [ -z "$strictDebootstrap" ]; then
-       # prevent init scripts from running during install/update
-       #  policy-rc.d (for most scripts)
-       echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
-       sudo chmod +x usr/sbin/policy-rc.d
-       #  initctl (for some pesky upstart scripts)
-       sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
-       sudo ln -sf /bin/true sbin/initctl
-       # see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
-       
-       # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
-       sudo chroot . apt-get clean
-       
-       # while we're at it, apt is unnecessarily slow inside containers
-       #  this forces dpkg not to call sync() after package extraction and speeds up install
-       #    the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
-       echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
-       #  we want to effectively run "apt-get clean" after every install to keep images small
-       echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
-       
-       # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
-       #  rm /usr/sbin/policy-rc.d
-       #  rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
-       #  rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
-       #  rm /etc/apt/apt.conf.d/no-cache
-       
-       if [ -z "$skipDetection" ]; then
-               # see also rudimentary platform detection in hack/install.sh
-               lsbDist=''
-               if [ -r etc/lsb-release ]; then
-                       lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")"
-               fi
-               if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
-                       lsbDist='Debian'
-               fi
-               
-               case "$lsbDist" in
-                       Debian)
-                               # add the updates and security repositories
-                               if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
-                                       # ${suite}-updates only applies to non-unstable
-                                       sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
-                                       
-                                       # same for security updates
-                                       echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
-                               fi
-                               ;;
-                       Ubuntu)
-                               # add the universe, updates, and security repositories
-                               sudo sed -i "
-                                       s/ $suite main$/ $suite main universe/; p;
-                                       s/ $suite main/ ${suite}-updates main/; p;
-                                       s/ $suite-updates main/ ${suite}-security main/
-                               " etc/apt/sources.list
-                               ;;
-               esac
-       fi
-fi
-
-if [ "$justTar" ]; then
-       # create the tarball file so it has the right permissions (ie, not root)
-       touch "$repo"
-       
-       # fill the tarball
-       sudo tar --numeric-owner -caf "$repo" .
-else
-       # create the image (and tag $repo:$suite)
-       sudo tar --numeric-owner -c . | $docker import - $repo:$suite
-       
-       # test the image
-       [[ "$(/usr/bin/tty || true)" != "not a tty" ]] && RUN_OPTS="-i -t"
-	$docker run $RUN_OPTS $repo:$suite echo success
-       
-       if [ -z "$skipDetection" ]; then
-               case "$lsbDist" in
-                       Debian)
-                               if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
-                                       # tag latest
-                                       $docker tag -f $repo:$suite $repo:latest
-                                       
-                                       if [ -r etc/debian_version ]; then
-                                               # tag the specific debian release version (which is only reasonable to tag on debian stable)
-                                               ver=$(cat etc/debian_version)
-                                               $docker tag -f $repo:$suite $repo:$ver
-                                       fi
-                               fi
-                               ;;
-                       Ubuntu)
-                               if [ "$suite" = "$ubuntuLatestLTS" ]; then
-                                       # tag latest
-                                       $docker tag -f $repo:$suite $repo:latest
-                               fi
-                               if [ -r etc/lsb-release ]; then
-                                       lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
-                                       if [ "$lsbRelease" ]; then
-                                               # tag specific Ubuntu version number, if available (12.04, etc.)
-                                               $docker tag -f $repo:$suite $repo:$lsbRelease
-                                       fi
-                               fi
-                               ;;
-               esac
-       fi
-fi
-
-# cleanup
-cd "$returnTo"
-sudo rm -rf "$target"
diff --git a/docker/passenger/Dockerfile b/docker/passenger/Dockerfile
deleted file mode 100644 (file)
index 77aeb66..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Arvados passenger image
-
-FROM arvados/base
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-# Install packages and build the passenger apache module
-
-RUN apt-get update -q
-RUN apt-get install -qy \
-        apt-utils git curl procps apache2-mpm-worker \
-        libcurl4-openssl-dev apache2-threaded-dev \
-        libapr1-dev libaprutil1-dev
-
-RUN cd /usr/src/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --auto --languages ruby
-
-RUN cd /usr/src/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --snippet > /etc/apache2/conf.d/passenger
-
diff --git a/docker/postgresql/Dockerfile b/docker/postgresql/Dockerfile
deleted file mode 100644 (file)
index a99a886..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# PostgreSQL Docker container for Arvados.
-
-FROM arvados/debian:wheezy
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-# TODO(twp): parameterize variables via autoconf or similar.
-ENV POSTGRES_ROOT_PW   dummy_pw
-
-ENV ARVADOS_DEV_DB     arvados_development
-ENV ARVADOS_DEV_USER   arvados
-ENV ARVADOS_DEV_PW     dummy_pw
-
-ENV ARVADOS_TEST_DB    arvados_test
-ENV ARVADOS_TEST_USER  arvados
-ENV ARVADOS_TEST_PW    dummy_pw
-
-ENV ARVADOS_PROD_DB    arvados_production
-ENV ARVADOS_PROD_USER  arvados
-ENV ARVADOS_PROD_PW    dummy_pw
-
-# Install postgres and apache
-RUN apt-get install -qy procps postgresql postgresql-server-dev-9.1
-
-# Configure databases and users.
-ADD postgresql.conf /etc/postgresql/9.1/main/
-ADD pg_hba.conf     /etc/postgresql/9.1/main/
-
-ADD postgresql_config.sh /tmp/postgresql_config.sh
-RUN /tmp/postgresql_config.sh
-RUN rm /tmp/postgresql_config.sh
-
-# Accept database connections on port 5432 from outside the container.
-EXPOSE 5432
-
-CMD ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.1/bin/postgres -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf"]
diff --git a/docker/postgresql/pg_hba.conf b/docker/postgresql/pg_hba.conf
deleted file mode 100644 (file)
index c5486ad..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# For full documentation see
-# http://www.postgresql.org/docs/9.1/static/auth-pg-hba-conf.html
-
-# Database administrative login by Unix domain socket
-local   all             postgres                                peer
-
-# TYPE  DATABASE        USER            ADDRESS                 METHOD
-host    all             all             0.0.0.0/0               md5
-
-# "local" is for Unix domain socket connections only
-local   all             all                                     peer
-# IPv4 local connections:
-host    all             all             127.0.0.1/32            md5
-# IPv6 local connections:
-host    all             all             ::1/128                 md5
diff --git a/docker/postgresql/postgresql.conf b/docker/postgresql/postgresql.conf
deleted file mode 100644 (file)
index 3da7c1b..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# For full documentation on run-time settings see
-# http://www.postgresql.org/docs/9.2/static/runtime-config.html
-
-listen_addresses = '*'
-data_directory = '/var/lib/postgresql/9.1/main'                # use data in another directory
-hba_file = '/etc/postgresql/9.1/main/pg_hba.conf'      # host-based authentication file
-ident_file = '/etc/postgresql/9.1/main/pg_ident.conf'  # ident configuration file
-external_pid_file = '/var/run/postgresql/9.1-main.pid'         # write an extra PID file
-port = 5432                            # (change requires restart)
-max_connections = 100                  # (change requires restart)
-unix_socket_directory = '/var/run/postgresql'          # (change requires restart)
-ssl = true                             # (change requires restart)
-shared_buffers = 24MB                  # min 128kB
-log_line_prefix = '%t '                        # special values:
-datestyle = 'iso, mdy'
-lc_messages = 'C'                      # locale for system error message
-lc_monetary = 'C'                      # locale for monetary formatting
-lc_numeric = 'C'                       # locale for number formatting
-lc_time = 'C'                          # locale for time formatting
-default_text_search_config = 'pg_catalog.english'
diff --git a/docker/shell/Dockerfile b/docker/shell/Dockerfile
deleted file mode 100644 (file)
index 3e6e3e4..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Slurm node Docker container.
-
-FROM arvados/base
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-RUN apt-get update -q
-RUN apt-get install -qy \
-    python-pip python-gflags python-google-api-python-client \
-    python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse \
-    crunchstat python-arvados-fuse cron vim supervisor openssh-server
-
-ADD fuse.conf /etc/fuse.conf
-RUN chmod 644 /etc/fuse.conf
-
-ADD generated/superuser_token /tmp/superuser_token
-
-RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
-
-# Supervisor.
-ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
-ADD generated/setup.sh /usr/local/bin/setup.sh
-
-# Start the supervisor.
-CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/shell/fuse.conf b/docker/shell/fuse.conf
deleted file mode 100644 (file)
index 4ed21ba..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# Set the maximum number of FUSE mounts allowed to non-root users.
-# The default is 1000.
-#
-#mount_max = 1000
-
-# Allow non-root users to specify the 'allow_other' or 'allow_root'
-# mount options.
-#
-user_allow_other
-
diff --git a/docker/shell/setup.sh.in b/docker/shell/setup.sh.in
deleted file mode 100755 (executable)
index 03beb4b..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-USER_NAME="@@ARVADOS_USER_NAME@@"
-
-useradd $USER_NAME -s /bin/bash
-mkdir /home/$USER_NAME/.ssh -p
-
-# Install our token
-mkdir -p /home/$USER_NAME/.config/arvados;
-echo "ARVADOS_API_HOST=api" >> /home/$USER_NAME/.config/arvados/settings.conf
-echo "ARVADOS_API_HOST_INSECURE=yes" >> /home/$USER_NAME/.config/arvados/settings.conf
-echo "ARVADOS_API_TOKEN=$(cat /tmp/superuser_token)" >> /home/$USER_NAME/.config/arvados/settings.conf
-chmod 600 /home/$USER_NAME/.config/arvados/settings.conf
-
-chown $USER_NAME:$USER_NAME /home/$USER_NAME -R
-
-rm -f /tmp/superuser_token
-
-
diff --git a/docker/shell/superuser_token.in b/docker/shell/superuser_token.in
deleted file mode 100644 (file)
index 49bb34e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-@@API_SUPERUSER_SECRET@@
diff --git a/docker/shell/supervisor.conf b/docker/shell/supervisor.conf
deleted file mode 100644 (file)
index 97ad540..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-[program:ssh]
-user=root
-command=/etc/init.d/ssh start
-startsecs=0
-
-[program:cron]
-user=root
-command=/etc/init.d/cron start
-startsecs=0
-
-[program:setup]
-user=root
-command=/usr/local/bin/setup.sh
-startsecs=0
-
diff --git a/docker/slurm/Dockerfile b/docker/slurm/Dockerfile
deleted file mode 100644 (file)
index 28a3c3b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# Slurm node Docker container.
-
-FROM arvados/base
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-RUN apt-get update -q
-RUN apt-get install -qy slurm-llnl munge
-
-ADD munge.key /etc/munge/
-RUN chown munge:munge /etc/munge/munge.key && chmod 600 /etc/munge/munge.key
-ADD generated/slurm.conf /etc/slurm-llnl/
-
diff --git a/docker/slurm/munge.key b/docker/slurm/munge.key
deleted file mode 100644 (file)
index 34036a0..0000000
Binary files a/docker/slurm/munge.key and /dev/null differ
diff --git a/docker/slurm/slurm.conf.in b/docker/slurm/slurm.conf.in
deleted file mode 100644 (file)
index 7312a0e..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-
-ControlMachine=api
-#SlurmUser=slurmd
-SlurmctldPort=6817
-SlurmdPort=6818
-AuthType=auth/munge
-#JobCredentialPrivateKey=/etc/slurm-llnl/slurm-key.pem
-#JobCredentialPublicCertificate=/etc/slurm-llnl/slurm-cert.pem
-StateSaveLocation=/tmp
-SlurmdSpoolDir=/tmp/slurmd
-SwitchType=switch/none
-MpiDefault=none
-SlurmctldPidFile=/var/run/slurmctld.pid
-SlurmdPidFile=/var/run/slurmd.pid
-ProctrackType=proctrack/pgid
-CacheGroups=0
-ReturnToService=2
-TaskPlugin=task/affinity
-#
-# TIMERS
-SlurmctldTimeout=300
-SlurmdTimeout=300
-InactiveLimit=0
-MinJobAge=300
-KillWait=30
-Waittime=0
-#
-# SCHEDULING
-SchedulerType=sched/backfill
-#SchedulerType=sched/builtin
-SchedulerPort=7321
-#SchedulerRootFilter=
-#SelectType=select/linear
-SelectType=select/cons_res
-SelectTypeParameters=CR_CPU_Memory
-FastSchedule=1
-#
-# LOGGING
-SlurmctldDebug=3
-#SlurmctldLogFile=
-SlurmdDebug=3
-#SlurmdLogFile=
-JobCompType=jobcomp/none
-#JobCompLoc=
-JobAcctGatherType=jobacct_gather/none
-#JobAcctLogfile=
-#JobAcctFrequency=
-#
-# COMPUTE NODES
-NodeName=DEFAULT
-# CPUs=8 State=UNKNOWN RealMemory=6967 Weight=6967
-PartitionName=DEFAULT MaxTime=INFINITE State=UP
-PartitionName=compute Default=YES Shared=yes
-#PartitionName=sysadmin Hidden=YES Shared=yes
-
-NodeName=compute[0-1]
-#NodeName=compute0 RealMemory=6967 Weight=6967
-
-PartitionName=compute Nodes=compute[0-1]
-PartitionName=crypto Nodes=compute[0-1]
diff --git a/docker/slurm/supervisor.conf b/docker/slurm/supervisor.conf
deleted file mode 100644 (file)
index 64f86b1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-[program:munge]
-user=root
-command=/etc/init.d/munge start
-
-[program:slurm]
-user=root
-command=/etc/init.d/slurm-llnl start
diff --git a/docker/sso/Dockerfile b/docker/sso/Dockerfile
deleted file mode 100644 (file)
index 7d99ac6..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Arvados API server Docker container.
-
-FROM arvados/passenger
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-RUN git clone git://github.com/curoverse/sso-devise-omniauth-provider.git /usr/src/sso-provider && \
-    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/sso-provider/Gemfile
-
-# Install generated config files
-ADD generated/seeds.rb /usr/src/sso-provider/db/seeds.rb
-ADD generated/database.yml /usr/src/sso-provider/config/database.yml
-ADD generated/application.yml /usr/src/sso-provider/config/application.yml
-ADD generated/apache2_vhost /etc/apache2/sites-available/sso-provider
-
-# Configure Apache and Passenger.
-RUN a2dissite default && \
-    a2ensite sso-provider && \
-    a2enmod rewrite && \
-    a2enmod ssl && \
-    cd /usr/src/sso-provider && \
-    cp config/environments/production.rb.example config/environments/production.rb && \
-    RAILS_ENV=production /usr/local/rvm/bin/rvm-exec default bundle exec rake db:setup && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
-    chown www-data:www-data log config.ru -R && \
-    chown www-data:www-data db db/production.sqlite3 && \
-    /bin/mkdir /var/run/apache2
-
-ADD apache2_foreground.sh /etc/apache2/foreground.sh
-
-# Start the supervisor.
-CMD ["/etc/apache2/foreground.sh"]
diff --git a/docker/sso/apache2_foreground.sh b/docker/sso/apache2_foreground.sh
deleted file mode 100755 (executable)
index fc6028e..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/bash
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
-
-source /etc/apache2/envvars
-/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/sso/apache2_vhost.in b/docker/sso/apache2_vhost.in
deleted file mode 100644 (file)
index 465a1e6..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# VirtualHost definition for the Arvados API server
-
-<VirtualHost *:80>
-  ServerName @@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@
-  ServerAdmin sysadmin@curoverse.com
-
-  RedirectPermanent / https://@@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@/
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/error.log
-  CustomLog ${APACHE_LOG_DIR}/access.log combined
-
-</VirtualHost>
-
-<VirtualHost *:443>
-  ServerName @@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@
-  ServerAdmin sysadmin@curoverse.com
-
-  RailsEnv production
-  RackBaseURI /
-  RailsAppSpawnerIdleTime 1200
-
-  # Enable streaming
-  PassengerBufferResponse off
-
-  # Index file and Document Root (where the public files are located)
-  DirectoryIndex index.html
-  DocumentRoot /usr/src/sso-provider/public
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/ssl_error.log
-  CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined
-
-  <Directory /usr/src/sso-provider/public>
-    Options Indexes FollowSymLinks MultiViews IncludesNoExec
-    AllowOverride None
-    Order allow,deny
-    allow from all
-  </Directory>
-
-  <IfModule mod_ssl.c>
-    SSLEngine on
-    # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
-    # SSLCACertificateFile    /etc/ssl/certs/startcom.ca.pem
-    SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem
-    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
-    SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
-  </IfModule>
-
-</VirtualHost>
diff --git a/docker/sso/application.yml.in b/docker/sso/application.yml.in
deleted file mode 100644 (file)
index 6063851..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Consult application.default.yml for the full list of configuration
-# settings.
-#
-# The order of precedence is:
-# 1. config/environments/{RAILS_ENV}.rb (deprecated)
-# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
-# 3. Section in application.yml called "common"
-# 4. Section in application.default.yml corresponding to RAILS_ENV
-# 5. Section in application.default.yml called "common"
-
-production:
-  allow_account_registration: true
-
-  secret_token: @@SSO_SECRET@@
-  uuid_prefix: 'zzzzz'
-
-  # If true, allow creation of new accounts in the SSO server's internal
-  # user database.
-  allow_account_registration: true
-
-development:
-  # No development settings 
-
-test:
-  # No test settings 
-
-common:
-  # No common settings 
-
diff --git a/docker/sso/database.yml.in b/docker/sso/database.yml.in
deleted file mode 100644 (file)
index 025d62a..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# SQLite version 3.x
-#   gem install sqlite3-ruby (not necessary on OS X Leopard)
-development:
-  adapter: sqlite3
-  database: db/development.sqlite3
-  pool: 5
-  timeout: 5000
-
-# Warning: The database defined as "test" will be erased and
-# re-generated from your development database when you run "rake".
-# Do not set this db to the same as development or production.
-test:
-  adapter: sqlite3
-  database: db/test.sqlite3
-  pool: 5
-  timeout: 5000
-
-production:
-  adapter: sqlite3
-  database: db/production.sqlite3
-  pool: 5
-  timeout: 5000
diff --git a/docker/sso/seeds.rb.in b/docker/sso/seeds.rb.in
deleted file mode 100644 (file)
index b35b939..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-
-Client.delete_all
-
-c = Client.new()
-c.name = "@@SSO_CLIENT_NAME@@"
-c.app_id = "@@SSO_CLIENT_APP_ID@@"
-c.app_secret = "@@SSO_CLIENT_SECRET@@"
-c.save!
-
diff --git a/docker/workbench/.gitignore b/docker/workbench/.gitignore
deleted file mode 100644 (file)
index bf969c3..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-apache2_vhost
-production.rb
-secret_token.rb
diff --git a/docker/workbench/Dockerfile b/docker/workbench/Dockerfile
deleted file mode 100644 (file)
index 148153a..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Arvados Workbench Docker container.
-
-FROM arvados/passenger
-MAINTAINER Ward Vandewege <ward@curoverse.com>
-
-# We need graphviz for the provenance graphs
-RUN apt-get update -q
-RUN apt-get install -qy graphviz
-
-# Update Arvados source
-RUN /bin/mkdir -p /usr/src/arvados/apps
-ADD generated/workbench.tar.gz /usr/src/arvados/apps/
-ADD generated/workbench_rails_env /etc/
-RUN /bin/cp /usr/src/arvados/apps/workbench/config/environments/$(cat /etc/workbench_rails_env).rb.example /usr/src/arvados/apps/workbench/config/environments/$(cat /etc/workbench_rails_env).rb
-ADD generated/application.yml /usr/src/arvados/apps/workbench/config/application.yml
-
-RUN RAILS_ENV=$(cat /etc/workbench_rails_env) && \
-    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
-    touch /usr/src/arvados/apps/workbench/log/$RAILS_ENV.log && \
-    chmod 666 /usr/src/arvados/apps/workbench/log/$RAILS_ENV.log && \
-    touch /usr/src/arvados/apps/workbench/db/$RAILS_ENV.sqlite3 && \
-    cd /usr/src/arvados/apps/workbench && \
-    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
-    chown -R www-data:www-data /usr/src/arvados/apps/workbench
-
-# Configure Apache
-ADD generated/apache2_vhost /etc/apache2/sites-available/workbench
-RUN \
-  a2dissite default && \
-  a2ensite workbench && \
-  a2enmod rewrite && \
-  /bin/mkdir /var/run/apache2
-
-ADD apache2_foreground.sh /etc/apache2/foreground.sh
-
-# Start Apache
-CMD ["/etc/apache2/foreground.sh"]
diff --git a/docker/workbench/apache2_foreground.sh b/docker/workbench/apache2_foreground.sh
deleted file mode 100755 (executable)
index fc6028e..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/bash
-
-read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
-trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
-
-source /etc/apache2/envvars
-/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/workbench/apache2_vhost.in b/docker/workbench/apache2_vhost.in
deleted file mode 100644 (file)
index ba9e7f8..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-
-<VirtualHost *:80>
-
-  ServerName workbench.@@ARVADOS_DOMAIN@@
-  ServerAdmin sysadmin@curoverse.com
-
-  RailsEnv @@WORKBENCH_RAILS_MODE@@
-  RackBaseURI /
-  RailsAppSpawnerIdleTime 1200
-
-  # Index file and Document Root (where the public files are located)
-  DirectoryIndex index.html
-  DocumentRoot /usr/src/arvados/apps/workbench/public
-
-  LogLevel warn
-  ErrorLog  ${APACHE_LOG_DIR}/error.log
-  CustomLog ${APACHE_LOG_DIR}/access.log combined
-
-  <Directory /usr/src/arvados/apps/workbench>
-    Options Indexes FollowSymLinks MultiViews IncludesNoExec
-    AllowOverride None
-    Order allow,deny
-    allow from all
-  </Directory>
-
-  <IfModule mod_ssl.c>
-    SSLEngine off
-    # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
-    # SSLCACertificateFile    /etc/ssl/certs/startcom.ca.pem
-    SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem
-    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
-    SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
-  </IfModule>
-
-</VirtualHost>
diff --git a/docker/workbench/application.yml.in b/docker/workbench/application.yml.in
deleted file mode 100644 (file)
index 5e16928..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copy this file to application.yml and edit to suit.
-#
-# Consult application.default.yml for the full list of configuration
-# settings.
-#
-# The order of precedence is:
-# 1. config/environments/{RAILS_ENV}.rb (deprecated)
-# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
-# 3. Section in application.yml called "common"
-# 4. Section in application.default.yml corresponding to RAILS_ENV
-# 5. Section in application.default.yml called "common"
-
-common:
-  # At minimum, you need a nice long randomly generated secret_token here.
-  secret_token: @@WORKBENCH_SECRET@@
-
-  # You probably also want to point to your API server.
-  arvados_login_base: 'https://api.@@ARVADOS_DOMAIN@@/login'
-  arvados_v1_base: 'https://api.@@ARVADOS_DOMAIN@@/arvados/v1'
-  arvados_insecure_https: @@WORKBENCH_INSECURE_HTTPS@@
-
-  data_import_dir: @@WORKBENCH_DATA_IMPORT_DIR@@
-  data_export_dir: @@WORKBENCH_DATA_EXPORT_DIR@@
-
-  site_name: @@WORKBENCH_SITE_NAME@@
-  activation_contact_link: @@WORKBENCH_ACTIVATION_CONTACT_LINK@@
-
-  arvados_docsite: http://doc.@@ARVADOS_DOMAIN@@
-  force_ssl: false
diff --git a/docker/workbench/production.rb.in b/docker/workbench/production.rb.in
deleted file mode 100644 (file)
index bc3bd33..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-ArvadosWorkbench::Application.configure do
-  # Settings specified here will take precedence over those in config/application.rb
-
-  # Code is not reloaded between requests
-  config.cache_classes = true
-
-  # Full error reports are disabled and caching is turned on
-  config.consider_all_requests_local       = false
-  config.action_controller.perform_caching = true
-
-  # Disable Rails's static asset server (Apache or nginx will already do this)
-  config.serve_static_assets = false
-
-  # Compress JavaScripts and CSS
-  config.assets.compress = true
-
-  # Don't fall back to the assets pipeline if a precompiled asset is missing
-  config.assets.compile = false
-
-  # Generate digests for assets URLs
-  config.assets.digest = true
-
-  # Defaults to nil and saved in location specified by config.assets.prefix
-  # config.assets.manifest = YOUR_PATH
-
-  # Specifies the header that your server uses for sending files
-  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
-  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
-
-  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
-  # config.force_ssl = true
-
-  # See everything in the log (default is :info)
-  # config.log_level = :debug
-
-  # Prepend all log lines with the following tags
-  # config.log_tags = [ :subdomain, :uuid ]
-
-  # Use a different logger for distributed setups
-  # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
-
-  # Use a different cache store in production
-  # config.cache_store = :mem_cache_store
-
-  # Enable serving of images, stylesheets, and JavaScripts from an asset server
-  # config.action_controller.asset_host = "http://assets.example.com"
-
-  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
-  # config.assets.precompile += %w( search.js )
-
-  # Disable delivery errors, bad email addresses will be ignored
-  # config.action_mailer.raise_delivery_errors = false
-
-  # Enable threaded mode
-  # config.threadsafe!
-
-  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
-  # the I18n.default_locale when a translation can not be found)
-  config.i18n.fallbacks = true
-
-  # Send deprecation notices to registered listeners
-  config.active_support.deprecation = :notify
-
-  # Log the query plan for queries taking more than this (works
-  # with SQLite, MySQL, and PostgreSQL)
-  # config.active_record.auto_explain_threshold_in_seconds = 0.5
-
-  # Log timing data for API transactions
-  config.profiling_enabled = false
-
-  config.arvados_login_base = 'https://' + ENV['API_PORT_443_TCP_ADDR'].to_s + '/login'
-  config.arvados_v1_base = 'https://' + ENV['API_PORT_443_TCP_ADDR'].to_s + '/arvados/v1'
-  config.arvados_insecure_https = @@WORKBENCH_INSECURE_HTTPS@@ # true = do not check server certificate
-
-  config.data_import_dir = '@@WORKBENCH_DATA_IMPORT_DIR@@'
-  config.data_export_dir = '@@WORKBENCH_DATA_EXPORT_DIR@@'
-
-  # Authentication stub: hard code pre-approved API tokens.
-  # config.accept_api_token = { rand(2**256).to_s(36) => true }
-  config.accept_api_token = {}
-
-  config.vcf_pipeline_uuid = '@@WORKBENCH_VCF_PIPELINE_UUID@@'
-
-  config.site_name = '@@WORKBENCH_SITE_NAME@@'
-  config.activation_contact_link = '@@WORKBENCH_ACTIVATION_CONTACT_LINK@@'
-  config.arvados_docsite = 'http://doc.arvados.org'
-
-  config.arvados_theme = 'default'
-
-  config.show_user_agreement_inline = false
-end
diff --git a/docker/workbench/secret_token.rb.in b/docker/workbench/secret_token.rb.in
deleted file mode 100644 (file)
index 91c1a5c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-# Be sure to restart your server when you modify this file.
-
-# Your secret key for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-ArvadosWorkbench::Application.config.secret_token = '@@WORKBENCH_SECRET@@'
diff --git a/docker/workbench/workbench_rails_env.in b/docker/workbench/workbench_rails_env.in
deleted file mode 100644 (file)
index f4f7638..0000000
+++ /dev/null
@@ -1 +0,0 @@
-@@WORKBENCH_RAILS_MODE@@
\ No newline at end of file
diff --git a/lib/crunchstat/crunchstat.go b/lib/crunchstat/crunchstat.go
new file mode 100644 (file)
index 0000000..fa3cd2c
--- /dev/null
@@ -0,0 +1,444 @@
+// Package crunchstat reports resource usage (CPU, memory, disk,
+// network) for a cgroup.
+package crunchstat
+
+import (
+       "bufio"
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// This magically allows us to look up userHz via _SC_CLK_TCK:
+
+/*
+#include <unistd.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// A Reporter gathers statistics for a cgroup and writes them to a
+// log.Logger.
+type Reporter struct {
+       // CID of the container to monitor. If empty, read the CID
+       // from CIDFile (first waiting until a non-empty file appears
+       // at CIDFile). If CIDFile is also empty, report host
+       // statistics.
+       CID string
+
+       // Path to a file we can read CID from.
+       CIDFile string
+
+       // Where cgroup accounting files live on this system, e.g.,
+       // "/sys/fs/cgroup".
+       CgroupRoot string
+
+       // Parent cgroup, e.g., "docker".
+       CgroupParent string
+
+       // Interval between samples. Must be positive.
+       PollPeriod time.Duration
+
+       // Where to write statistics. Must not be nil.
+       Logger *log.Logger
+
+       reportedStatFile map[string]string
+       lastNetSample    map[string]ioSample
+       lastDiskSample   map[string]ioSample
+       lastCPUSample    cpuSample
+
+       done    chan struct{} // closed when we should stop reporting
+       flushed chan struct{} // closed when we have made our last report
+}
+
+// Start starts monitoring in a new goroutine, and returns
+// immediately.
+//
+// The monitoring goroutine waits for a non-empty CIDFile to appear
+// (unless CID is non-empty). Then it waits for the accounting files
+// to appear for the monitored container. Then it collects and reports
+// statistics until Stop is called.
+//
+// Callers should not call Start more than once.
+//
+// Callers should not modify public data fields after calling Start.
+func (r *Reporter) Start() {
+       r.done = make(chan struct{})
+       r.flushed = make(chan struct{})
+       go r.run()
+}
+
+// Stop reporting. Do not call more than once, or before calling
+// Start.
+//
+// Nothing will be logged after Stop returns.
+func (r *Reporter) Stop() {
+       close(r.done)
+       <-r.flushed
+}
+
+func (r *Reporter) readAllOrWarn(in io.Reader) ([]byte, error) {
+       content, err := ioutil.ReadAll(in)
+       if err != nil {
+               r.Logger.Print(err)
+       }
+       return content, err
+}
+
+// Open the cgroup stats file in /sys/fs corresponding to the target
+// cgroup, and return an io.ReadCloser. If no stats file is available,
+// return nil.
+//
+// Log the file that was opened, if it isn't the same file opened on
+// the last openStatFile for this stat.
+//
+// Log "not available" if no file is found and either this stat has
+// been available in the past, or verbose==true.
+//
+// TODO: Instead of trying all options, choose a process in the
+// container, and read /proc/PID/cgroup to determine the appropriate
+// cgroup root for the given statgroup. (This will avoid falling back
+// to host-level stats during container setup and teardown.)
+func (r *Reporter) openStatFile(statgroup, stat string, verbose bool) (io.ReadCloser, error) {
+       var paths []string
+       if r.CID != "" {
+               // Collect container's stats
+               paths = []string{
+                       fmt.Sprintf("%s/%s/%s/%s/%s", r.CgroupRoot, statgroup, r.CgroupParent, r.CID, stat),
+                       fmt.Sprintf("%s/%s/%s/%s", r.CgroupRoot, r.CgroupParent, r.CID, stat),
+               }
+       } else {
+               // Collect this host's stats
+               paths = []string{
+                       fmt.Sprintf("%s/%s/%s", r.CgroupRoot, statgroup, stat),
+                       fmt.Sprintf("%s/%s", r.CgroupRoot, stat),
+               }
+       }
+       var path string
+       var file *os.File
+       var err error
+       for _, path = range paths {
+               file, err = os.Open(path)
+               if err == nil {
+                       break
+               } else {
+                       path = ""
+               }
+       }
+       if pathWas := r.reportedStatFile[stat]; pathWas != path {
+               // Log whenever we start using a new/different cgroup
+               // stat file for a given statistic. This typically
+               // happens 1 to 3 times per statistic, depending on
+               // whether we happen to collect stats [a] before any
+               // processes have been created in the container and
+               // [b] after all contained processes have exited.
+               if path == "" && verbose {
+                       r.Logger.Printf("notice: stats not available: stat %s, statgroup %s, cid %s, parent %s, root %s\n", stat, statgroup, r.CID, r.CgroupParent, r.CgroupRoot)
+               } else if pathWas != "" {
+                       r.Logger.Printf("notice: stats moved from %s to %s\n", r.reportedStatFile[stat], path)
+               } else {
+                       r.Logger.Printf("notice: reading stats from %s\n", path)
+               }
+               r.reportedStatFile[stat] = path
+       }
+       return file, err
+}
+
+func (r *Reporter) getContainerNetStats() (io.Reader, error) {
+       procsFile, err := r.openStatFile("cpuacct", "cgroup.procs", true)
+       if err != nil {
+               return nil, err
+       }
+       defer procsFile.Close()
+       reader := bufio.NewScanner(procsFile)
+       for reader.Scan() {
+               taskPid := reader.Text()
+               statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
+               stats, err := ioutil.ReadFile(statsFilename)
+               if err != nil {
+                       r.Logger.Print(err)
+                       continue
+               }
+               return strings.NewReader(string(stats)), nil
+       }
+	return nil, errors.New("could not read stats for any proc in container")
+}
+
+type ioSample struct {
+       sampleTime time.Time
+       txBytes    int64
+       rxBytes    int64
+}
+
+func (r *Reporter) doBlkIOStats() {
+       c, err := r.openStatFile("blkio", "blkio.io_service_bytes", true)
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       var sampleTime = time.Now()
+       newSamples := make(map[string]ioSample)
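+	// Each line of blkio.io_service_bytes looks like "8:0 Read 4096":
+	// device major:minor, an operation name, and a cumulative byte count.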
+       for b.Scan() {
+               var device, op string
+               var val int64
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
+                       continue
+               }
+               var thisSample ioSample
+               var ok bool
+               if thisSample, ok = newSamples[device]; !ok {
+                       thisSample = ioSample{sampleTime, -1, -1}
+               }
+               switch op {
+               case "Read":
+                       thisSample.rxBytes = val
+               case "Write":
+                       thisSample.txBytes = val
+               }
+               newSamples[device] = thisSample
+       }
+       for dev, sample := range newSamples {
+               if sample.txBytes < 0 || sample.rxBytes < 0 {
+                       continue
+               }
+               delta := ""
+               if prev, ok := r.lastDiskSample[dev]; ok {
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
+                               sample.sampleTime.Sub(prev.sampleTime).Seconds(),
+                               sample.txBytes-prev.txBytes,
+                               sample.rxBytes-prev.rxBytes)
+               }
+               r.Logger.Printf("blkio:%s %d write %d read%s\n", dev, sample.txBytes, sample.rxBytes, delta)
+               r.lastDiskSample[dev] = sample
+       }
+}
+
+type memSample struct {
+       sampleTime time.Time
+       memStat    map[string]int64
+}
+
+func (r *Reporter) doMemoryStats() {
+       c, err := r.openStatFile("memory", "memory.stat", true)
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       thisSample := memSample{time.Now(), make(map[string]int64)}
+       wantStats := [...]string{"cache", "swap", "pgmajfault", "rss"}
+       for b.Scan() {
+               var stat string
+               var val int64
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err != nil {
+                       continue
+               }
+               thisSample.memStat[stat] = val
+       }
+       var outstat bytes.Buffer
+       for _, key := range wantStats {
+               if val, ok := thisSample.memStat[key]; ok {
+                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+               }
+       }
+       r.Logger.Printf("mem%s\n", outstat.String())
+}
+
+func (r *Reporter) doNetworkStats() {
+       sampleTime := time.Now()
+       stats, err := r.getContainerNetStats()
+       if err != nil {
+               return
+       }
+
+       scanner := bufio.NewScanner(stats)
+       for scanner.Scan() {
+               var ifName string
+               var rx, tx int64
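+		// A /proc/net/dev data line has 17 fields: the interface name
+		// (with a trailing ":") followed by 8 receive and 8 transmit
+		// counters; received bytes is field 2, transmitted bytes field 10.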
+               words := strings.Fields(scanner.Text())
+               if len(words) != 17 {
+                       // Skip lines with wrong format
+                       continue
+               }
+               ifName = strings.TrimRight(words[0], ":")
+               if ifName == "lo" || ifName == "" {
+                       // Skip loopback interface and lines with wrong format
+                       continue
+               }
+               if tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {
+                       continue
+               }
+               if rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {
+                       continue
+               }
+               nextSample := ioSample{}
+               nextSample.sampleTime = sampleTime
+               nextSample.txBytes = tx
+               nextSample.rxBytes = rx
+               var delta string
+               if prev, ok := r.lastNetSample[ifName]; ok {
+                       interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d tx %d rx",
+                               interval,
+                               tx-prev.txBytes,
+                               rx-prev.rxBytes)
+               }
+               r.Logger.Printf("net:%s %d tx %d rx%s\n", ifName, tx, rx, delta)
+               r.lastNetSample[ifName] = nextSample
+       }
+}
+
+type cpuSample struct {
+       hasData    bool // to distinguish the zero value from real data
+       sampleTime time.Time
+       user       float64
+       sys        float64
+       cpus       int64
+}
+
+// Return the number of CPUs available in the container. Return 0 if
+// we can't figure out the real number of CPUs.
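+//
+// cpuset.cpus lists comma-separated entries such as "0-3,8": the range
+// 0-3 counts as 4 CPUs and the lone "8" as 1, for a total of 5.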
+func (r *Reporter) getCPUCount() int64 {
+       cpusetFile, err := r.openStatFile("cpuset", "cpuset.cpus", true)
+       if err != nil {
+               return 0
+       }
+       defer cpusetFile.Close()
+       b, err := r.readAllOrWarn(cpusetFile)
+       if err != nil {
+               return 0
+       }
+       sp := strings.Split(string(b), ",")
+       cpus := int64(0)
+       for _, v := range sp {
+               var min, max int64
+               n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+               if n == 2 {
+                       cpus += (max - min) + 1
+               } else {
+                       cpus++
+               }
+       }
+       return cpus
+}
+
+func (r *Reporter) doCPUStats() {
+       statFile, err := r.openStatFile("cpuacct", "cpuacct.stat", true)
+       if err != nil {
+               return
+       }
+       defer statFile.Close()
+       b, err := r.readAllOrWarn(statFile)
+       if err != nil {
+               return
+       }
+
+       var userTicks, sysTicks int64
+       fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
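+	// cpuacct.stat reports times in USER_HZ ticks (typically 100 per
+	// second); convert to seconds using sysconf(_SC_CLK_TCK).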
+       userHz := float64(C.sysconf(C._SC_CLK_TCK))
+       nextSample := cpuSample{
+               hasData:    true,
+               sampleTime: time.Now(),
+               user:       float64(userTicks) / userHz,
+               sys:        float64(sysTicks) / userHz,
+               cpus:       r.getCPUCount(),
+       }
+
+       delta := ""
+       if r.lastCPUSample.hasData {
+               delta = fmt.Sprintf(" -- interval %.4f seconds %.4f user %.4f sys",
+                       nextSample.sampleTime.Sub(r.lastCPUSample.sampleTime).Seconds(),
+                       nextSample.user-r.lastCPUSample.user,
+                       nextSample.sys-r.lastCPUSample.sys)
+       }
+       r.Logger.Printf("cpu %.4f user %.4f sys %d cpus%s\n",
+               nextSample.user, nextSample.sys, nextSample.cpus, delta)
+       r.lastCPUSample = nextSample
+}
+
+// Report stats periodically until we learn (via r.done) that someone
+// called Stop.
+func (r *Reporter) run() {
+       defer close(r.flushed)
+
+       r.reportedStatFile = make(map[string]string)
+
+       if !r.waitForCIDFile() || !r.waitForCgroup() {
+               return
+       }
+
+       r.lastNetSample = make(map[string]ioSample)
+       r.lastDiskSample = make(map[string]ioSample)
+
+       ticker := time.NewTicker(r.PollPeriod)
+       for {
+               r.doMemoryStats()
+               r.doCPUStats()
+               r.doBlkIOStats()
+               r.doNetworkStats()
+               select {
+               case <-r.done:
+                       return
+               case <-ticker.C:
+               }
+       }
+}
+
+// If CID is empty, wait for it to appear in CIDFile. Return true if
+// we get it before we learn (via r.done) that someone called Stop.
+func (r *Reporter) waitForCIDFile() bool {
+       if r.CID != "" || r.CIDFile == "" {
+               return true
+       }
+
+       ticker := time.NewTicker(100 * time.Millisecond)
+       defer ticker.Stop()
+       for {
+               cid, err := ioutil.ReadFile(r.CIDFile)
+               if err == nil && len(cid) > 0 {
+                       r.CID = string(cid)
+                       return true
+               }
+               select {
+               case <-ticker.C:
+               case <-r.done:
+                       r.Logger.Printf("CID never appeared in %+q: %v", r.CIDFile, err)
+                       return false
+               }
+       }
+}
+
+// Wait for the cgroup stats files to appear in cgroup_root. Return
+// true if they appear before r.done indicates someone called Stop. If
+// they don't appear within one poll interval, log a warning and keep
+// waiting.
+func (r *Reporter) waitForCgroup() bool {
+       ticker := time.NewTicker(100 * time.Millisecond)
+       defer ticker.Stop()
+       warningTimer := time.After(r.PollPeriod)
+       for {
+               c, err := r.openStatFile("cpuacct", "cgroup.procs", false)
+               if err == nil {
+                       c.Close()
+                       return true
+               }
+               select {
+               case <-ticker.C:
+               case <-warningTimer:
+                       r.Logger.Printf("cgroup stats files have not appeared after %v (config error?) -- still waiting...", r.PollPeriod)
+               case <-r.done:
+                       r.Logger.Printf("cgroup stats files never appeared for %v", r.CID)
+                       return false
+               }
+       }
+}
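
For reference, a minimal usage sketch of the new crunchstat package (not part of the commit): the import path and the CID-file location are assumptions, while the Reporter fields and the Start/Stop semantics are taken from the code above.

    package main

    import (
        "log"
        "os"
        "time"

        "git.curoverse.com/arvados.git/lib/crunchstat" // assumed import path
    )

    func main() {
        r := &crunchstat.Reporter{
            CIDFile:      "/tmp/container.cid", // hypothetical file written by docker run --cidfile
            CgroupRoot:   "/sys/fs/cgroup",
            CgroupParent: "docker",
            PollPeriod:   10 * time.Second,
            Logger:       log.New(os.Stderr, "crunchstat: ", 0),
        }
        r.Start() // returns immediately; sampling runs in a goroutine
        // ... run the monitored container here ...
        r.Stop() // blocks until the final set of samples has been logged
    }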
diff --git a/lib/crunchstat/crunchstat_test.go b/lib/crunchstat/crunchstat_test.go
new file mode 100644 (file)
index 0000000..697f235
--- /dev/null
@@ -0,0 +1,62 @@
+package crunchstat
+
+import (
+       "bufio"
+       "io"
+       "log"
+       "os"
+       "regexp"
+       "testing"
+)
+
+func bufLogger() (*log.Logger, *bufio.Reader) {
+       r, w := io.Pipe()
+       logger := log.New(w, "", 0)
+       return logger, bufio.NewReader(r)
+}
+
+func TestReadAllOrWarnFail(t *testing.T) {
+       logger, rcv := bufLogger()
+       rep := Reporter{Logger: logger}
+
+       done := make(chan bool)
+       var msg []byte
+       var err error
+       go func() {
+               msg, err = rcv.ReadBytes('\n')
+               close(done)
+       }()
+       {
+               // The special file /proc/self/mem can be opened for
+               // reading, but reading from byte 0 returns an error.
+               f, err := os.Open("/proc/self/mem")
+               if err != nil {
+                       t.Fatalf("Opening /proc/self/mem: %s", err)
+               }
+               if x, err := rep.readAllOrWarn(f); err == nil {
+                       t.Fatalf("Expected error, got %v", x)
+               }
+       }
+       <-done
+       if err != nil {
+               t.Fatal(err)
+       } else if matched, err := regexp.MatchString("^read /proc/self/mem: .*", string(msg)); err != nil || !matched {
+               t.Fatalf("Expected error message about unreadable file, got \"%s\"", msg)
+       }
+}
+
+func TestReadAllOrWarnSuccess(t *testing.T) {
+       rep := Reporter{Logger: log.New(os.Stderr, "", 0)}
+
+       f, err := os.Open("./crunchstat_test.go")
+       if err != nil {
+               t.Fatalf("Opening ./crunchstat_test.go: %s", err)
+       }
+       data, err := rep.readAllOrWarn(f)
+       if err != nil {
+               t.Fatalf("got error %s", err)
+       }
+       if matched, err := regexp.MatchString("^package crunchstat\n", string(data)); err != nil || !matched {
+               t.Fatalf("data failed regexp: err %v, matched %v", err, matched)
+       }
+}
index 39238b0fc649d400a380e397c8f0520fbc75b476..7584d3a83d427de4cebf29b50109c21971c9fe59 100755 (executable)
@@ -355,6 +355,7 @@ my @jobstep_done = ();
 my @jobstep_tomerge = ();
 my $jobstep_tomerge_level = 0;
 my $squeue_checked = 0;
+my $sinfo_checked = 0;
 my $latest_refresh = scalar time;
 
 
@@ -1401,6 +1402,37 @@ sub check_squeue
   }
 }
 
+sub check_sinfo
+{
+  # If a node fails in a multi-node "srun" call during job setup, the call
+  # may hang instead of exiting with a nonzero code.  This function checks
+  # "sinfo" for the health of the nodes that were allocated and ensures that
+  # they are all still in the "alloc" state.  If a node that is allocated to
+  # this job is not in "alloc" state, then set please_freeze.
+  #
+  # This is only called from srun_sync() for node configuration.  If a
+  # node fails doing actual work, there are other recovery mechanisms.
+
+  # Do not call `sinfo` more than once every 15 seconds.
+  return if $sinfo_checked > time - 15;
+  $sinfo_checked = time;
+
+  # The output format "%t" means output node states.
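+  # A trailing "*" (e.g. "alloc*") marks a node that is not responding.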
+  my @sinfo = `sinfo --nodes=\Q$ENV{SLURM_NODELIST}\E --noheader -o "%t"`;
+  if ($? != 0)
+  {
+    Log(undef, "warning: sinfo exit status $? ($!)");
+    return;
+  }
+  chomp @sinfo;
+
+  foreach (@sinfo)
+  {
+    if ($_ ne "alloc" && $_ ne "alloc*") {
+      $main::please_freeze = 1;
+    }
+  }
+}
 
 sub release_allocation
 {
@@ -1478,8 +1510,14 @@ sub preprocess_stderr
     substr $jobstep[$jobstepidx]->{stderr}, 0, 1+length($line), "";
     Log ($jobstepidx, "stderr $line");
     if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOB_ID} has expired|Unable to confirm allocation for job $ENV{SLURM_JOB_ID})/) {
-      # whoa.
+      # If the allocation is revoked, we can't possibly continue, so mark all
+      # nodes as failed.  This will cause the overall exit code to be
+      # EX_RETRY_UNLOCKED instead of failure so that crunch_dispatch can re-run
+      # this job.
       $main::please_freeze = 1;
+      foreach my $st (@slot) {
+        $st->{node}->{fail_count}++;
+      }
     }
     elsif ($line =~ /srun: error: (Node failure on|Aborting, .*\bio error\b)/) {
       $jobstep[$jobstepidx]->{tempfail} = 1;
@@ -1906,7 +1944,6 @@ sub freezeunquote
   return $s;
 }
 
-
 sub srun_sync
 {
   my $srunargs = shift;
@@ -1961,6 +1998,7 @@ sub srun_sync
     if (!$busy || ($latest_refresh + 2 < scalar time)) {
       check_refresh_wanted();
       check_squeue();
+      check_sinfo();
     }
     if (!$busy) {
       select(undef, undef, undef, 0.1);
index 962a690d6813cb96e14f06c7d5fb430d5d5c8e98..5262cb4971aaa09c002c1d52a56cc1899bf9ed4b 100644 (file)
@@ -8,26 +8,36 @@ import logging
 import os
 import sys
 import threading
+import hashlib
+from functools import partial
 import pkg_resources  # part of setuptools
 
 from cwltool.errors import WorkflowException
 import cwltool.main
 import cwltool.workflow
+import schema_salad
 
 import arvados
-import arvados.events
+import arvados.config
 
 from .arvcontainer import ArvadosContainer, RunnerContainer
 from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
 from .arvtool import ArvadosCommandTool
+from .arvworkflow import ArvadosWorkflow, upload_workflow
 from .fsaccess import CollectionFsAccess
+from .perf import Perf
+from cwltool.pack import pack
 
 from cwltool.process import shortname, UnsupportedRequirement
+from cwltool.pathmapper import adjustFileObjs
+from cwltool.draft2tool import compute_checksums
 from arvados.api import OrderedJsonModel
 
 logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
 logger.setLevel(logging.INFO)
 
+
 class ArvCwlRunner(object):
     """Execute a CWL tool or workflow, submit work (using either jobs or
     containers API), wait for them to complete, and report output.
@@ -45,6 +55,9 @@ class ArvCwlRunner(object):
         self.num_retries = 4
         self.uuid = None
         self.work_api = work_api
+        self.stop_polling = threading.Event()
+        self.poll_api = None
+        self.pipeline = None
 
         if self.work_api is None:
             # todo: autodetect API to use.
@@ -53,9 +66,12 @@ class ArvCwlRunner(object):
         if self.work_api not in ("containers", "jobs"):
             raise Exception("Unsupported API '%s'" % self.work_api)
 
-    def arvMakeTool(self, toolpath_object, **kwargs):
+    def arv_make_tool(self, toolpath_object, **kwargs):
+        kwargs["work_api"] = self.work_api
         if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
-            return ArvadosCommandTool(self, toolpath_object, work_api=self.work_api, **kwargs)
+            return ArvadosCommandTool(self, toolpath_object, **kwargs)
+        elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
+            return ArvadosWorkflow(self, toolpath_object, **kwargs)
         else:
             return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
 
@@ -89,27 +105,73 @@ class ArvCwlRunner(object):
                         self.cond.acquire()
                         j = self.processes[uuid]
                         logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
-                        j.done(event["properties"]["new_attributes"])
+                        with Perf(metrics, "done %s" % j.name):
+                            j.done(event["properties"]["new_attributes"])
                         self.cond.notify()
                     finally:
                         self.cond.release()
 
+    def poll_states(self):
+        """Poll status of jobs or containers listed in the processes dict.
+
+        Runs in a separate thread.
+        """
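+        # This replaces the websocket event subscription used previously;
+        # arv_executor starts this thread and stops it via stop_polling.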
+
+        while True:
+            self.stop_polling.wait(15)
+            if self.stop_polling.is_set():
+                break
+            with self.lock:
+                keys = self.processes.keys()
+            if not keys:
+                continue
+
+            if self.work_api == "containers":
+                table = self.poll_api.containers()
+            elif self.work_api == "jobs":
+                table = self.poll_api.jobs()
+
+            try:
+                proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+            except Exception as e:
+                logger.warn("Error checking states on API server: %s", e)
+                continue
+
+            for p in proc_states["items"]:
+                self.on_message({
+                    "object_uuid": p["uuid"],
+                    "event_type": "update",
+                    "properties": {
+                        "new_attributes": p
+                    }
+                })
+
     def get_uploaded(self):
         return self.uploaded.copy()
 
     def add_uploaded(self, src, pair):
         self.uploaded[src] = pair
 
-    def arvExecutor(self, tool, job_order, **kwargs):
+    def check_writable(self, obj):
+        if isinstance(obj, dict):
+            if obj.get("writable"):
+                raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
+            for v in obj.itervalues():
+                self.check_writable(v)
+        if isinstance(obj, list):
+            for v in obj:
+                self.check_writable(v)
+
+    def arv_executor(self, tool, job_order, **kwargs):
         self.debug = kwargs.get("debug")
 
-        if kwargs.get("quiet"):
-            logger.setLevel(logging.WARN)
-            logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+        tool.visit(self.check_writable)
 
         useruuid = self.api.users().current().execute()["uuid"]
         self.project_uuid = kwargs.get("project_uuid") if kwargs.get("project_uuid") else useruuid
         self.pipeline = None
+        make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess, api_client=self.api)
+        self.fs_access = make_fs_access(kwargs["basedir"])
 
         if kwargs.get("create_template"):
             tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
@@ -117,18 +179,26 @@ class ArvCwlRunner(object):
             # cwltool.main will write our return value to stdout.
             return tmpl.uuid
 
-        self.debug = kwargs.get("debug")
+        if kwargs.get("create_workflow") or kwargs.get("update_workflow"):
+            return upload_workflow(self, tool, job_order, self.project_uuid, kwargs.get("update_workflow"))
+
         self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
-        self.fs_access = CollectionFsAccess(kwargs["basedir"])
 
-        kwargs["fs_access"] = self.fs_access
+        kwargs["make_fs_access"] = make_fs_access
         kwargs["enable_reuse"] = kwargs.get("enable_reuse")
+        kwargs["use_container"] = True
+        kwargs["tmpdir_prefix"] = "tmp"
+        kwargs["on_error"] = "continue"
+        kwargs["compute_checksum"] = kwargs.get("compute_checksum")
 
         if self.work_api == "containers":
             kwargs["outdir"] = "/var/spool/cwl"
+            kwargs["docker_outdir"] = "/var/spool/cwl"
             kwargs["tmpdir"] = "/tmp"
+            kwargs["docker_tmpdir"] = "/tmp"
         elif self.work_api == "jobs":
             kwargs["outdir"] = "$(task.outdir)"
+            kwargs["docker_outdir"] = "$(task.outdir)"
             kwargs["tmpdir"] = "$(task.tmpdir)"
 
         runnerjob = None
@@ -157,10 +227,9 @@ class ArvCwlRunner(object):
             runnerjob.run()
             return runnerjob.uuid
 
-        if self.work_api == "containers":
-            events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#container"]], self.on_message)
-        if self.work_api == "jobs":
-            events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)
+        self.poll_api = arvados.api('v1')
+        self.polling_thread = threading.Thread(target=self.poll_states)
+        self.polling_thread.start()
 
         if runnerjob:
             jobiter = iter((runnerjob,))
@@ -169,7 +238,6 @@ class ArvCwlRunner(object):
                 self.uuid = kwargs.get("cwl_runner_job").get('uuid')
             jobiter = tool.job(job_order,
                                self.output_callback,
-                               docker_outdir="$(task.outdir)",
                                **kwargs)
 
         try:
@@ -178,20 +246,25 @@ class ArvCwlRunner(object):
             # except when in cond.wait(), at which point on_message can update
             # job state and process output callbacks.
 
+            loopperf = Perf(metrics, "jobiter")
+            loopperf.__enter__()
             for runnable in jobiter:
+                loopperf.__exit__()
                 if runnable:
-                    runnable.run(**kwargs)
+                    with Perf(metrics, "run"):
+                        runnable.run(**kwargs)
                 else:
                     if self.processes:
                         self.cond.wait(1)
                     else:
                         logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
                         break
+                loopperf.__enter__()
+            loopperf.__exit__()
 
             while self.processes:
                 self.cond.wait(1)
 
-            events.close()
         except UnsupportedRequirement:
             raise
         except:
@@ -207,13 +280,21 @@ class ArvCwlRunner(object):
                                                      body={"priority": "0"}).execute(num_retries=self.num_retries)
         finally:
             self.cond.release()
+            self.stop_polling.set()
+            self.polling_thread.join()
 
         if self.final_status == "UnsupportedRequirement":
             raise UnsupportedRequirement("Check log for details.")
 
+        if self.final_status != "success":
+            raise WorkflowException("Workflow failed.")
+
         if self.final_output is None:
             raise WorkflowException("Workflow did not return a result.")
 
+        if kwargs.get("compute_checksum"):
+            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
+
         return self.final_output
 
 
@@ -248,6 +329,8 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
     exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
 
+    parser.add_argument("--metrics", action="store_true", help="Print timing metrics")
+
     parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
 
     exgroup = parser.add_mutually_exclusive_group()
@@ -258,7 +341,7 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default=True, dest="enable_reuse",
                         help="")
 
-    parser.add_argument("--project-uuid", type=str, help="Project that will own the workflow jobs, if not provided, will go to home project.")
+    parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs, if not provided, will go to home project.")
     parser.add_argument("--ignore-docker-for-reuse", action="store_true",
                         help="Ignore Docker image version when deciding whether to reuse past jobs.",
                         default=False)
@@ -269,6 +352,8 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
                         default=True, dest="submit")
     exgroup.add_argument("--create-template", action="store_true", help="Create an Arvados pipeline template.")
+    exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow.")
+    exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update existing Arvados workflow with uuid.")
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
@@ -280,20 +365,36 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default=None, dest="work_api",
                         help="Select work submission API, one of 'jobs' or 'containers'.")
 
+    parser.add_argument("--compute-checksum", action="store_true", default=False,
+                        help="Compute checksum of contents while collecting outputs",
+                        dest="compute_checksum")
+
     parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
     parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
 
     return parser
 
+def add_arv_hints():
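+    # Load the Arvados extension schema (arv-cwl-schema.yml) and register any
+    # of its names that the core CWL v1.0 schema does not already define.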
+    cache = {}
+    res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema.yml')
+    cache["http://arvados.org/cwl"] = res.read()
+    res.close()
+    _, cwlnames, _, _ = cwltool.process.get_schema("v1.0")
+    _, extnames, _, _ = schema_salad.schema.load_schema("http://arvados.org/cwl", cache=cache)
+    for n in extnames.names:
+        if not cwlnames.has_name("http://arvados.org/cwl#"+n, ""):
+            cwlnames.add_name("http://arvados.org/cwl#"+n, "", extnames.get_name(n, ""))
 
 def main(args, stdout, stderr, api_client=None):
     parser = arg_parser()
 
     job_order_object = None
     arvargs = parser.parse_args(args)
-    if arvargs.create_template and not arvargs.job_order:
+    if (arvargs.create_template or arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
         job_order_object = ({}, "")
 
+    add_arv_hints()
+
     try:
         if api_client is None:
             api_client=arvados.api('v1', model=OrderedJsonModel())
@@ -302,12 +403,25 @@ def main(args, stdout, stderr, api_client=None):
         logger.error(e)
         return 1
 
+    if arvargs.debug:
+        logger.setLevel(logging.DEBUG)
+
+    if arvargs.quiet:
+        logger.setLevel(logging.WARN)
+        logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+
+    if arvargs.metrics:
+        metrics.setLevel(logging.DEBUG)
+        logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG)
+
     arvargs.conformance_test = None
+    arvargs.use_container = True
 
     return cwltool.main.main(args=arvargs,
                              stdout=stdout,
                              stderr=stderr,
-                             executor=runner.arvExecutor,
-                             makeTool=runner.arvMakeTool,
+                             executor=runner.arv_executor,
+                             makeTool=runner.arv_make_tool,
                              versionfunc=versionstring,
-                             job_order_object=job_order_object)
+                             job_order_object=job_order_object,
+                             make_fs_access=partial(CollectionFsAccess, api_client=api_client))
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema.yml
new file mode 100644 (file)
index 0000000..44b1b06
--- /dev/null
@@ -0,0 +1,47 @@
+$base: "http://arvados.org/cwl#"
+$graph:
+- name: RunInSingleContainer
+  type: record
+  doc: |
+    Indicates that a subworkflow should run in a single container
+    and not be scheduled as separate steps.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:RunInSingleContainer'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+
+- name: RuntimeConstraints
+  type: record
+  doc: |
+    Set Arvados-specific runtime hints.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:RuntimeConstraints'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    - name: keep_cache
+      type: int?
+      doc: |
+        Size of file data buffer for Keep mount in MiB. Default is 256
+        MiB. Increase this to reduce cache thrashing in situations such as
+        accessing multiple large (64+ MiB) files at the same time, or
+        performing random access on a large file.
+
+- name: APIRequirement
+  type: record
+  doc: |
+    Indicates that the process wants to access the Arvados API.  The process
+    will be granted limited network access and have ARVADOS_API_HOST and
+    ARVADOS_API_TOKEN set in the environment.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:APIRequirement'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
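+
+# Illustrative sketch (not part of the schema): a workflow would declare the
+# arv namespace and can then attach these records as hints, e.g.:
+#
+#   $namespaces:
+#     arv: "http://arvados.org/cwl#"
+#   hints:
+#     - class: arv:RuntimeConstraints
+#       keep_cache: 512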
index 94a7579f202a62d88f7a69481e4f2fe65dfa0f7b..aaae7d9f66ce9d72a2dcade588f58f0e5f6894e9 100644 (file)
@@ -3,7 +3,8 @@ import json
 import os
 
 from cwltool.errors import WorkflowException
-from cwltool.process import get_feature, adjustFiles, UnsupportedRequirement, shortname
+from cwltool.process import get_feature, UnsupportedRequirement, shortname
+from cwltool.pathmapper import adjustFiles
 
 import arvados.collection
 
@@ -29,38 +30,51 @@ class ArvadosContainer(object):
             "command": self.command_line,
             "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
-            "output_path": "/var/spool/cwl",
-            "cwd": "/var/spool/cwl",
+            "output_path": self.outdir,
+            "cwd": self.outdir,
             "priority": 1,
             "state": "Committed"
         }
         runtime_constraints = {}
         mounts = {
-            "/var/spool/cwl": {
+            self.outdir: {
                 "kind": "tmp"
             }
         }
 
+        dirs = set()
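+        # Two passes over the pathmapper: first mount whole top-level Directory
+        # collections, then mount individual files whose parent collection is
+        # not already mounted as a directory.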
         for f in self.pathmapper.files():
-            _, p = self.pathmapper.mapper(f)
-            mounts[p] = {
-                "kind": "collection",
-                "portable_data_hash": p[6:]
-            }
+            _, p, tp = self.pathmapper.mapper(f)
+            if tp == "Directory" and '/' not in p[6:]:
+                mounts[p] = {
+                    "kind": "collection",
+                    "portable_data_hash": p[6:]
+                }
+                dirs.add(p[6:])
+        for f in self.pathmapper.files():
+            _, p, tp = self.pathmapper.mapper(f)
+            if p[6:].split("/")[0] not in dirs:
+                mounts[p] = {
+                    "kind": "collection",
+                    "portable_data_hash": p[6:]
+                }
 
-        if self.generatefiles:
+        if self.generatefiles["listing"]:
             raise UnsupportedRequirement("Generate files not supported")
 
-        container_request["environment"] = {"TMPDIR": "/tmp"}
+        container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
         if self.environment:
             container_request["environment"].update(self.environment)
 
         if self.stdin:
             raise UnsupportedRequirement("Stdin redirection currently not suppported")
 
+        if self.stderr:
+            raise UnsupportedRequirement("Stderr redirection currently not suppported")
+
         if self.stdout:
             mounts["stdout"] = {"kind": "file",
-                                "path": "/var/spool/cwl/%s" % (self.stdout)}
+                                "path": "%s/%s" % (self.outdir, self.stdout)}
 
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
         if not docker_req:
@@ -76,6 +90,14 @@ class ArvadosContainer(object):
             runtime_constraints["vcpus"] = resources.get("cores", 1)
             runtime_constraints["ram"] = resources.get("ram") * 2**20
 
+        api_req, _ = get_feature(self, "http://arvados.org/cwl#APIRequirement")
+        if api_req:
+            runtime_constraints["API"] = True
+
+        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+        if runtime_req:
+            logger.warn("RuntimeConstraints not yet supported by container API")
+
         container_request["mounts"] = mounts
         container_request["runtime_constraints"] = runtime_constraints
 
@@ -86,7 +108,7 @@ class ArvadosContainer(object):
 
             self.arvrunner.processes[response["container_uuid"]] = self
 
-            logger.info("Container %s (%s) request state is %s", self.name, response["container_uuid"], response["state"])
+            logger.info("Container %s (%s) request state is %s", self.name, response["uuid"], response["state"])
 
             if response["state"] == "Final":
                 self.done(response)
@@ -114,7 +136,7 @@ class ArvadosContainer(object):
             try:
                 outputs = {}
                 if record["output"]:
-                    outputs = done.done(self, record, "/tmp", "/var/spool/cwl", "/keep")
+                    outputs = done.done(self, record, "/tmp", self.outdir, "/keep")
             except WorkflowException as e:
                 logger.error("Error while collecting container outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                 processStatus = "permanentFail"
index f129dfa80436451b0e0fc04dd0aef93df60f3417..0818d5d62b4dcd023153daf1e69ace6b34175655 100644 (file)
@@ -1,10 +1,11 @@
 import logging
 import re
 import copy
+import json
 
 from cwltool.process import get_feature, shortname
 from cwltool.errors import WorkflowException
-from cwltool.draft2tool import revmap_file, remove_hostfs, CommandLineTool
+from cwltool.draft2tool import revmap_file, CommandLineTool
 from cwltool.load_tool import fetch_document
 from cwltool.builder import Builder
 
@@ -12,9 +13,12 @@ import arvados.collection
 
 from .arvdocker import arv_docker_get_image
 from .runner import Runner
+from .pathmapper import InitialWorkDirPathMapper
+from .perf import Perf
 from . import done
 
 logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
 
 tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
 outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
@@ -34,35 +38,54 @@ class ArvadosJob(object):
         }
         runtime_constraints = {}
 
-        if self.generatefiles:
-            vwd = arvados.collection.Collection()
-            script_parameters["task.vwd"] = {}
-            for t in self.generatefiles:
-                if isinstance(self.generatefiles[t], dict):
-                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
-                    vwd.copy(rest, t, source_collection=src)
-                else:
-                    with vwd.open(t, "w") as f:
-                        f.write(self.generatefiles[t].encode('utf-8'))
-            vwd.save_new()
-            for t in self.generatefiles:
-                script_parameters["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
-
-        script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
+        with Perf(metrics, "generatefiles %s" % self.name):
+            if self.generatefiles["listing"]:
+                vwd = arvados.collection.Collection()
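+                # vwd ("virtual working directory"): CreateFile literals are
+                # written into this new collection, then referenced from Keep
+                # via $(task.keep) paths in task.vwd.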
+                script_parameters["task.vwd"] = {}
+                generatemapper = InitialWorkDirPathMapper([self.generatefiles], "", "",
+                                                          separateDirs=False)
+
+                with Perf(metrics, "createfiles %s" % self.name):
+                    for f, p in generatemapper.items():
+                        if p.type == "CreateFile":
+                            with vwd.open(p.target, "w") as n:
+                                n.write(p.resolved.encode("utf-8"))
+
+                with Perf(metrics, "generatefiles.save_new %s" % self.name):
+                    vwd.save_new()
+
+                for f, p in generatemapper.items():
+                    if p.type == "File":
+                        script_parameters["task.vwd"][p.target] = p.resolved
+                    if p.type == "CreateFile":
+                        script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)
+
+        script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
         if self.environment:
             script_parameters["task.env"].update(self.environment)
 
         if self.stdin:
-            script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]
+            script_parameters["task.stdin"] = self.stdin
 
         if self.stdout:
             script_parameters["task.stdout"] = self.stdout
 
-        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
-        if docker_req and kwargs.get("use_container") is not False:
-            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
-        else:
-            runtime_constraints["docker_image"] = "arvados/jobs"
+        if self.stderr:
+            script_parameters["task.stderr"] = self.stderr
+
+        if self.successCodes:
+            script_parameters["task.successCodes"] = self.successCodes
+        if self.temporaryFailCodes:
+            script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
+        if self.permanentFailCodes:
+            script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
+
+        with Perf(metrics, "arv_docker_get_image %s" % self.name):
+            (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
+            if docker_req and kwargs.get("use_container") is not False:
+                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
+            else:
+                runtime_constraints["docker_image"] = "arvados/jobs"
 
         resources = self.builder.resources
         if resources is not None:
@@ -70,6 +93,10 @@ class ArvadosJob(object):
             runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
             runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
 
+        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+        if runtime_req:
+            runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
+
         filters = [["repository", "=", "arvados"],
                    ["script", "=", "crunchrunner"],
                    ["script_version", "in git", "9e5b98e8f5f4727856b53447191f9c06e3da2ba6"]]
@@ -77,19 +104,20 @@ class ArvadosJob(object):
             filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
 
         try:
-            response = self.arvrunner.api.jobs().create(
-                body={
-                    "owner_uuid": self.arvrunner.project_uuid,
-                    "script": "crunchrunner",
-                    "repository": "arvados",
-                    "script_version": "master",
-                    "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
-                    "script_parameters": {"tasks": [script_parameters]},
-                    "runtime_constraints": runtime_constraints
-                },
-                filters=filters,
-                find_or_create=kwargs.get("enable_reuse", True)
-            ).execute(num_retries=self.arvrunner.num_retries)
+            with Perf(metrics, "create %s" % self.name):
+                response = self.arvrunner.api.jobs().create(
+                    body={
+                        "owner_uuid": self.arvrunner.project_uuid,
+                        "script": "crunchrunner",
+                        "repository": "arvados",
+                        "script_version": "master",
+                        "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
+                        "script_parameters": {"tasks": [script_parameters]},
+                        "runtime_constraints": runtime_constraints
+                    },
+                    filters=filters,
+                    find_or_create=kwargs.get("enable_reuse", True)
+                ).execute(num_retries=self.arvrunner.num_retries)
 
             self.arvrunner.processes[response["uuid"]] = self
 
@@ -98,7 +126,8 @@ class ArvadosJob(object):
             logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])
 
             if response["state"] in ("Complete", "Failed", "Cancelled"):
-                self.done(response)
+                with Perf(metrics, "done %s" % self.name):
+                    self.done(response)
         except Exception as e:
             logger.error("Got error %s" % str(e))
             self.output_callback({}, "permanentFail")
@@ -106,7 +135,8 @@ class ArvadosJob(object):
     def update_pipeline_component(self, record):
         if self.arvrunner.pipeline:
             self.arvrunner.pipeline["components"][self.name] = {"job": record}
-            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
+            with Perf(metrics, "update_pipeline_component %s" % self.name):
+                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
                                                                                  body={
                                                                                     "components": self.arvrunner.pipeline["components"]
                                                                                  }).execute(num_retries=self.arvrunner.num_retries)
@@ -138,37 +168,41 @@ class ArvadosJob(object):
             outputs = {}
             try:
                 if record["output"]:
-                    logc = arvados.collection.Collection(record["log"])
-                    log = logc.open(logc.keys()[0])
-                    tmpdir = None
-                    outdir = None
-                    keepdir = None
-                    for l in log:
-                        # Determine the tmpdir, outdir and keepdir paths from
-                        # the job run.  Unfortunately, we can't take the first
-                        # values we find (which are expected to be near the
-                        # top) and stop scanning because if the node fails and
-                        # the job restarts on a different node these values
-                        # will different runs, and we need to know about the
-                        # final run that actually produced output.
-
-                        g = tmpdirre.match(l)
-                        if g:
-                            tmpdir = g.group(1)
-                        g = outdirre.match(l)
-                        if g:
-                            outdir = g.group(1)
-                        g = keepre.match(l)
-                        if g:
-                            keepdir = g.group(1)
-
-                    outputs = done.done(self, record, tmpdir, outdir, keepdir)
+                    with Perf(metrics, "inspect log %s" % self.name):
+                        logc = arvados.collection.Collection(record["log"])
+                        log = logc.open(logc.keys()[0])
+                        tmpdir = None
+                        outdir = None
+                        keepdir = None
+                        for l in log:
+                            # Determine the tmpdir, outdir and keepdir paths from
+                            # the job run.  Unfortunately, we can't take the first
+                            # values we find (which are expected to be near the
+                            # top) and stop scanning because if the node fails and
+                            # the job restarts on a different node these values
+                            # will different runs, and we need to know about the
+                            # final run that actually produced output.
+
+                            g = tmpdirre.match(l)
+                            if g:
+                                tmpdir = g.group(1)
+                            g = outdirre.match(l)
+                            if g:
+                                outdir = g.group(1)
+                            g = keepre.match(l)
+                            if g:
+                                keepdir = g.group(1)
+
+                    with Perf(metrics, "output collection %s" % self.name):
+                        outputs = done.done(self, record, tmpdir, outdir, keepdir)
             except WorkflowException as e:
                 logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                 processStatus = "permanentFail"
+                outputs = None
             except Exception as e:
                 logger.exception("Got unknown exception while collecting job outputs:")
                 processStatus = "permanentFail"
+                outputs = None
 
             self.output_callback(outputs, processStatus)
         finally:
@@ -188,7 +222,7 @@ class RunnerJob(Runner):
 
         workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
 
-        self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"])[1]
+        self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"]).target[5:]
         return {
             "script": "cwl-runner",
             "script_version": "master",
@@ -214,7 +248,7 @@ class RunnerJob(Runner):
         logger.info("Submitted job %s", response["uuid"])
 
         if kwargs.get("submit"):
-            self.pipeline = self.arvrunner.api.pipeline_instances().create(
+            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
                 body={
                     "owner_uuid": self.arvrunner.project_uuid,
                     "name": shortname(self.tool.tool["id"]),
@@ -231,6 +265,7 @@ class RunnerTemplate(object):
     type_to_dataclass = {
         'boolean': 'boolean',
         'File': 'File',
+        'Directory': 'Collection',
         'float': 'number',
         'int': 'number',
         'string': 'text',
@@ -284,7 +319,7 @@ class RunnerTemplate(object):
 
             # Title and description...
             title = param.pop('label', '')
-            descr = param.pop('description', '').rstrip('\n')
+            descr = param.pop('doc', '').rstrip('\n')
             if title:
                 param['title'] = title
             if descr:
@@ -297,8 +332,8 @@ class RunnerTemplate(object):
                 pass
             elif not isinstance(value, dict):
                 param['value'] = value
-            elif param.get('dataclass') == 'File' and value.get('path'):
-                param['value'] = value['path']
+            elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
+                param['value'] = value['location'][5:]
 
             spec['script_parameters'][param_id] = param
         spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
index ecc913ecee1e1f6f0227568f8c44e32e09e5d95c..987ce8967aef2934d6d0f17bf3316e9eb85d70cf 100644 (file)
@@ -17,7 +17,8 @@ class ArvadosCommandTool(CommandLineTool):
         elif self.work_api == "jobs":
             return ArvadosJob(self.arvrunner)
 
-    def makePathMapper(self, reffiles, **kwargs):
+    def makePathMapper(self, reffiles, stagedir, **kwargs):
+        # type: (List[Any], unicode, **Any) -> PathMapper
         if self.work_api == "containers":
             return ArvPathMapper(self.arvrunner, reffiles, kwargs["basedir"],
                                  "/keep/%s",
@@ -28,3 +29,14 @@ class ArvadosCommandTool(CommandLineTool):
                                  "$(task.keep)/%s",
                                  "$(task.keep)/%s/%s",
                                  **kwargs)
+
+    def job(self, joborder, output_callback, **kwargs):
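+        # Fixed in-container paths for the containers API; crunch $(task.*)
+        # substitutions (resolved at run time) for the jobs API.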
+        if self.work_api == "containers":
+            kwargs["outdir"] = "/var/spool/cwl"
+            kwargs["docker_outdir"] = "/var/spool/cwl"
+        elif self.work_api == "jobs":
+            kwargs["outdir"] = "$(task.outdir)"
+            kwargs["docker_outdir"] = "$(task.outdir)"
+            kwargs["tmpdir"] = "$(task.tmpdir)"
+            kwargs["docker_tmpdir"] = "$(task.tmpdir)"
+        return super(ArvadosCommandTool, self).job(joborder, output_callback, **kwargs)
diff --git a/sdk/cwl/arvados_cwl/arvworkflow.py b/sdk/cwl/arvados_cwl/arvworkflow.py
new file mode 100644 (file)
index 0000000..8eb8fe6
--- /dev/null
@@ -0,0 +1,128 @@
+import os
+import json
+import copy
+import logging
+
+from cwltool.pack import pack
+from cwltool.load_tool import fetch_document
+from cwltool.process import shortname
+from cwltool.workflow import Workflow, WorkflowException
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
+
+import ruamel.yaml as yaml
+
+from .runner import upload_docker, upload_dependencies, trim_listing
+from .arvtool import ArvadosCommandTool
+from .perf import Perf
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+
+def upload_workflow(arvRunner, tool, job_order, project_uuid, update_uuid):
+    upload_docker(arvRunner, tool)
+
+    document_loader, workflowobj, uri = (tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]), tool.tool["id"])
+
+    packed = pack(document_loader, workflowobj, uri, tool.metadata)
+
+    adjustDirObjs(job_order, trim_listing)
+
+    main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
+    for inp in main["inputs"]:
+        sn = shortname(inp["id"])
+        if sn in job_order:
+            inp["default"] = job_order[sn]
+
+    name = os.path.basename(tool.tool["id"])
+    upload_dependencies(arvRunner, name, document_loader,
+                        packed, uri, False)
+
+    body = {
+        "workflow": {
+            "owner_uuid": project_uuid,
+            "name": tool.tool.get("label", name),
+            "description": tool.tool.get("doc", ""),
+            "definition":yaml.safe_dump(packed)
+        }}
+
+    if update_uuid:
+        return arvRunner.api.workflows().update(uuid=update_uuid, body=body).execute(num_retries=arvRunner.num_retries)["uuid"]
+    else:
+        return arvRunner.api.workflows().create(body=body).execute(num_retries=arvRunner.num_retries)["uuid"]
+
+class ArvadosWorkflow(Workflow):
+    """Wrap cwltool Workflow to override selected methods."""
+
+    def __init__(self, arvrunner, toolpath_object, **kwargs):
+        super(ArvadosWorkflow, self).__init__(toolpath_object, **kwargs)
+        self.arvrunner = arvrunner
+        self.work_api = kwargs["work_api"]
+
+    def job(self, joborder, output_callback, **kwargs):
+        kwargs["work_api"] = self.work_api
+        req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
+        if req:
+            document_loader, workflowobj, uri = (self.doc_loader, self.doc_loader.fetch(self.tool["id"]), self.tool["id"])
+
+            with Perf(metrics, "subworkflow upload_deps"):
+                workflowobj["requirements"] = self.requirements + workflowobj.get("requirements", [])
+                workflowobj["hints"] = self.hints + workflowobj.get("hints", [])
+                packed = pack(document_loader, workflowobj, uri, self.metadata)
+
+                upload_dependencies(self.arvrunner,
+                                    kwargs.get("name", ""),
+                                    document_loader,
+                                    packed,
+                                    uri,
+                                    False)
+
+                upload_dependencies(self.arvrunner,
+                                    os.path.basename(joborder.get("id", "#")),
+                                    document_loader,
+                                    joborder,
+                                    joborder.get("id", "#"),
+                                    False)
+
+            with Perf(metrics, "subworkflow adjust"):
+                joborder_keepmount = copy.deepcopy(joborder)
+
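+                # Rewrite keep: locations to /keep/ mount paths so the inner
+                # cwltool run reads inputs directly from the Keep mount.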
+                def keepmount(obj):
+                    if obj["location"].startswith("keep:"):
+                        obj["location"] = "/keep/" + obj["location"][5:]
+                        if "listing" in obj:
+                            del obj["listing"]
+                    elif obj["location"].startswith("_:"):
+                        del obj["location"]
+                    else:
+                        raise WorkflowException("Location is not a keep reference or a literal: '%s'" % obj["location"])
+
+                adjustFileObjs(joborder_keepmount, keepmount)
+                adjustDirObjs(joborder_keepmount, keepmount)
+                adjustFileObjs(packed, keepmount)
+                adjustDirObjs(packed, keepmount)
+
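+            # Synthesize a CommandLineTool that stages the packed workflow and
+            # its inputs via InitialWorkDirRequirement and runs cwltool in a
+            # single container.  Backslashes and $(...)/${...} sequences are
+            # escaped so the embedded documents are not expanded as CWL
+            # parameter references.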
+            wf_runner = {
+                "class": "CommandLineTool",
+                "baseCommand": "cwltool",
+                "inputs": self.tool["inputs"],
+                "outputs": self.tool["outputs"],
+                "stdout": "cwl.output.json",
+                "requirements": workflowobj["requirements"]+[
+                    {
+                    "class": "InitialWorkDirRequirement",
+                    "listing": [{
+                            "entryname": "workflow.cwl",
+                            "entry": yaml.safe_dump(packed).replace("\\", "\\\\").replace('$(', '\$(').replace('${', '\${')
+                        }, {
+                            "entryname": "cwl.input.yml",
+                            "entry": yaml.safe_dump(joborder_keepmount).replace("\\", "\\\\").replace('$(', '\$(').replace('${', '\${')
+                        }]
+                }],
+                "hints": workflowobj["hints"],
+                "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"]
+            }
+            kwargs["loader"] = self.doc_loader
+            kwargs["avsc_names"] = self.doc_schema
+            return ArvadosCommandTool(self.arvrunner, wf_runner, **kwargs).job(joborder, output_callback, **kwargs)
+        else:
+            return super(ArvadosWorkflow, self).job(joborder, output_callback, **kwargs)
index 28b0feeff46fd74d6a3bb0a10ffbca48f912fb28..e44e7a928218c04e377b04373d7ec44ea59b04a0 100644 (file)
@@ -1,16 +1,20 @@
 import fnmatch
 import os
+import errno
 
-import cwltool.process
+import cwltool.stdfsaccess
+from cwltool.pathmapper import abspath
 
 import arvados.util
 import arvados.collection
+import arvados.arvfile
 
-class CollectionFsAccess(cwltool.process.StdFsAccess):
+class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
     """Implement the cwltool FsAccess interface for Arvados Collections."""
 
-    def __init__(self, basedir):
+    def __init__(self, basedir, api_client=None):
         super(CollectionFsAccess, self).__init__(basedir)
+        self.api_client = api_client
         self.collections = {}
 
     def get_collection(self, path):
@@ -18,7 +22,7 @@ class CollectionFsAccess(cwltool.process.StdFsAccess):
         if p[0].startswith("keep:") and arvados.util.keep_locator_pattern.match(p[0][5:]):
             pdh = p[0][5:]
             if pdh not in self.collections:
-                self.collections[pdh] = arvados.collection.CollectionReader(pdh)
+                self.collections[pdh] = arvados.collection.CollectionReader(pdh, api_client=self.api_client)
             return (self.collections[pdh], "/".join(p[1:]))
         else:
             return (None, path)
@@ -47,6 +51,8 @@ class CollectionFsAccess(cwltool.process.StdFsAccess):
 
     def glob(self, pattern):
         collection, rest = self.get_collection(pattern)
+        if collection and not rest:
+            return [pattern]
         patternsegments = rest.split("/")
         return self._match(collection, patternsegments, "keep:" + collection.manifest_locator())
 
@@ -55,11 +61,60 @@ class CollectionFsAccess(cwltool.process.StdFsAccess):
         if collection:
             return collection.open(rest, mode)
         else:
-            return open(self._abs(fn), mode)
+            return super(CollectionFsAccess, self).open(self._abs(fn), mode)
 
     def exists(self, fn):
         collection, rest = self.get_collection(fn)
         if collection:
             return collection.exists(rest)
         else:
-            return os.path.exists(self._abs(fn))
+            return super(CollectionFsAccess, self).exists(fn)
+
+    def isfile(self, fn):  # type: (unicode) -> bool
+        collection, rest = self.get_collection(fn)
+        if collection:
+            if rest:
+                return isinstance(collection.find(rest), arvados.arvfile.ArvadosFile)
+            else:
+                return False
+        else:
+            return super(CollectionFsAccess, self).isfile(fn)
+
+    def isdir(self, fn):  # type: (unicode) -> bool
+        collection, rest = self.get_collection(fn)
+        if collection:
+            if rest:
+                return isinstance(collection.find(rest), arvados.collection.RichCollectionBase)
+            else:
+                return True
+        else:
+            return super(CollectionFsAccess, self).isdir(fn)
+
+    def listdir(self, fn):  # type: (unicode) -> List[unicode]
+        collection, rest = self.get_collection(fn)
+        if collection:
+            if rest:
+                dir = collection.find(rest)
+            else:
+                dir = collection
+            if dir is None:
+                raise IOError(errno.ENOENT, "Directory '%s' in '%s' not found" % (rest, collection.portable_data_hash()))
+            if not isinstance(dir, arvados.collection.RichCollectionBase):
+                raise IOError(errno.ENOENT, "Path '%s' in '%s' is not a Directory" % (rest, collection.portable_data_hash()))
+            return [abspath(l, fn) for l in dir.keys()]
+        else:
+            return super(CollectionFsAccess, self).listdir(fn)
+
+    def join(self, path, *paths): # type: (unicode, *unicode) -> unicode
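+        # A trailing keep: locator is already absolute, so it overrides any
+        # preceding path components.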
+        if paths and paths[-1].startswith("keep:") and arvados.util.keep_locator_pattern.match(paths[-1][5:]):
+            return paths[-1]
+        return os.path.join(path, *paths)
+
+    def realpath(self, path):
+        if path.startswith("$(task.tmpdir)") or path.startswith("$(task.outdir)"):
+            return path
+        collection, rest = self.get_collection(path)
+        if collection:
+            return path
+        else:
+            return os.path.realpath(path)
index 9538a9176f4c7bd876376452a3da8ea38dd21bfb..0fd9a0ed332ececaf4474ca3939fe9b4785db82d 100644 (file)
 import re
+import logging
+import uuid
+import os
 
 import arvados.commands.run
 import arvados.collection
-import cwltool.pathmapper
 
-class ArvPathMapper(cwltool.pathmapper.PathMapper):
+from cwltool.pathmapper import PathMapper, MapperEnt, abspath, adjustFileObjs, adjustDirObjs
+from cwltool.workflow import WorkflowException
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+class ArvPathMapper(PathMapper):
     """Convert container-local paths to and from Keep collection ids."""
 
+    pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+$')
+    pdh_dirpath = re.compile(r'^keep:[0-9a-f]{32}\+\d+(/.+)?$')
+
     def __init__(self, arvrunner, referenced_files, input_basedir,
                  collection_pattern, file_pattern, name=None, **kwargs):
-        self._pathmap = arvrunner.get_uploaded()
-        uploadfiles = set()
-
-        pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+')
+        self.arvrunner = arvrunner
+        self.input_basedir = input_basedir
+        self.collection_pattern = collection_pattern
+        self.file_pattern = file_pattern
+        self.name = name
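+        # PathMapper.__init__ calls self.setup(), which populates self._pathmap
+        # by visiting each referenced file or directory.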
+        super(ArvPathMapper, self).__init__(referenced_files, input_basedir, None)
 
-        for src in referenced_files:
-            if isinstance(src, basestring) and pdh_path.match(src):
-                self._pathmap[src] = (src, collection_pattern % src[5:])
+    def visit(self, srcobj, uploadfiles):
+        src = srcobj["location"]
+        if srcobj["class"] == "File":
             if "#" in src:
                 src = src[:src.index("#")]
+            if isinstance(src, basestring) and ArvPathMapper.pdh_path.match(src):
+                self._pathmap[src] = MapperEnt(src, self.collection_pattern % src[5:], "File")
             if src not in self._pathmap:
-                ab = cwltool.pathmapper.abspath(src, input_basedir)
-                st = arvados.commands.run.statfile("", ab, fnPattern=file_pattern)
-                if kwargs.get("conformance_test"):
-                    self._pathmap[src] = (src, ab)
-                elif isinstance(st, arvados.commands.run.UploadFile):
+                # Local FS ref, may need to be uploaded or may be on keep
+                # mount.
+                ab = abspath(src, self.input_basedir)
+                st = arvados.commands.run.statfile("", ab, fnPattern=self.file_pattern)
+                if isinstance(st, arvados.commands.run.UploadFile):
                     uploadfiles.add((src, ab, st))
                 elif isinstance(st, arvados.commands.run.ArvFile):
-                    self._pathmap[src] = (ab, st.fn)
+                    self._pathmap[src] = MapperEnt(ab, st.fn, "File")
+                elif src.startswith("_:"):
+                    if "contents" in srcobj:
+                        pass
+                    else:
+                        raise WorkflowException("File literal '%s' is missing contents" % src)
                 else:
-                    raise cwltool.workflow.WorkflowException("Input file path '%s' is invalid" % st)
+                    raise WorkflowException("Input file path '%s' is invalid" % st)
+            if "secondaryFiles" in srcobj:
+                for l in srcobj["secondaryFiles"]:
+                    self.visit(l, uploadfiles)
+        elif srcobj["class"] == "Directory":
+            if isinstance(src, basestring) and ArvPathMapper.pdh_dirpath.match(src):
+                self._pathmap[src] = MapperEnt(src, self.collection_pattern % src[5:], "Directory")
+            for l in srcobj.get("listing", []):
+                self.visit(l, uploadfiles)
+
+    def addentry(self, obj, c, path, subdirs):
+        if obj["location"] in self._pathmap:
+            src, srcpath = self.arvrunner.fs_access.get_collection(self._pathmap[obj["location"]].resolved)
+            if srcpath == "":
+                srcpath = "."
+            c.copy(srcpath, path + "/" + obj["basename"], source_collection=src, overwrite=True)
+            for l in obj.get("secondaryFiles", []):
+                self.addentry(l, c, path, subdirs)
+        elif obj["class"] == "Directory":
+            for l in obj["listing"]:
+                self.addentry(l, c, path + "/" + obj["basename"], subdirs)
+            subdirs.append((obj["location"], path + "/" + obj["basename"]))
+        elif obj["location"].startswith("_:") and "contents" in obj:
+            with c.open(path + "/" + obj["basename"], "w") as f:
+                f.write(obj["contents"].encode("utf-8"))
+        else:
+            raise WorkflowException("Don't know what to do with '%s'" % obj["location"])
+
+    def setup(self, referenced_files, basedir):
+        # type: (List[Any], unicode) -> None
+        self._pathmap = self.arvrunner.get_uploaded()
+        uploadfiles = set()
+
+        for srcobj in referenced_files:
+            self.visit(srcobj, uploadfiles)
 
         if uploadfiles:
             arvados.commands.run.uploadfiles([u[2] for u in uploadfiles],
-                                             arvrunner.api,
-                                             dry_run=kwargs.get("dry_run"),
-                                             num_retries=3,
-                                             fnPattern=file_pattern,
-                                             name=name,
-                                             project=arvrunner.project_uuid)
+                                             self.arvrunner.api,
+                                             dry_run=False,
+                                             num_retries=self.arvrunner.num_retries,
+                                             fnPattern=self.file_pattern,
+                                             name=self.name,
+                                             project=self.arvrunner.project_uuid)
 
         for src, ab, st in uploadfiles:
-            arvrunner.add_uploaded(src, (ab, st.fn))
-            self._pathmap[src] = (ab, st.fn)
+            self._pathmap[src] = MapperEnt("keep:" + st.keepref, st.fn, "File")
+            self.arvrunner.add_uploaded(src, self._pathmap[src])
+
+        for srcobj in referenced_files:
+            if srcobj["class"] == "Directory":
+                if srcobj["location"] not in self._pathmap:
+                    c = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                      num_retries=self.arvrunner.num_retries)
+                    subdirs = []
+                    for l in srcobj["listing"]:
+                        self.addentry(l, c, ".", subdirs)
+
+                    check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
+                    if not check["items"]:
+                        c.save_new(owner_uuid=self.arvrunner.project_uuid)
+
+                    ab = self.collection_pattern % c.portable_data_hash()
+                    self._pathmap[srcobj["location"]] = MapperEnt(ab, ab, "Directory")
+                    for loc, sub in subdirs:
+                        ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
+                        self._pathmap[loc] = MapperEnt(ab, ab, "Directory")
+            elif srcobj["class"] == "File" and (srcobj.get("secondaryFiles") or
+                (srcobj["location"].startswith("_:") and "contents" in srcobj)):
+
+                c = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                  num_retries=self.arvrunner.num_retries)
+                subdirs = []
+                self.addentry(srcobj, c, ".", subdirs)
+
+                check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
+                if not check["items"]:
+                    c.save_new(owner_uuid=self.arvrunner.project_uuid)
+
+                ab = self.file_pattern % (c.portable_data_hash(), srcobj["basename"])
+                self._pathmap[srcobj["location"]] = MapperEnt(ab, ab, "File")
+                if srcobj.get("secondaryFiles"):
+                    ab = self.collection_pattern % c.portable_data_hash()
+                    self._pathmap["_:" + unicode(uuid.uuid4())] = MapperEnt(ab, ab, "Directory")
+                for loc, sub in subdirs:
+                    ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
+                    self._pathmap[loc] = MapperEnt(ab, ab, "Directory")
 
         self.keepdir = None
 
@@ -53,3 +145,37 @@ class ArvPathMapper(cwltool.pathmapper.PathMapper):
             return (target, "keep:" + target[len(self.keepdir)+1:])
         else:
             return super(ArvPathMapper, self).reversemap(target)
+
+class InitialWorkDirPathMapper(PathMapper):
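+    """Map InitialWorkDir listing entries to staging targets.
+
+    File literals ("_:" locations with contents) become CreateFile entries;
+    in setup(), keep: references are rewritten to $(task.keep) mount paths.
+    """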
+
+    def visit(self, obj, stagedir, basedir, copy=False):
+        # type: (Dict[unicode, Any], unicode, unicode, bool) -> None
+        if obj["class"] == "Directory":
+            self._pathmap[obj["location"]] = MapperEnt(obj["location"], stagedir, "Directory")
+            self.visitlisting(obj.get("listing", []), stagedir, basedir)
+        elif obj["class"] == "File":
+            loc = obj["location"]
+            if loc in self._pathmap:
+                return
+            tgt = os.path.join(stagedir, obj["basename"])
+            if "contents" in obj and obj["location"].startswith("_:"):
+                self._pathmap[loc] = MapperEnt(obj["contents"], tgt, "CreateFile")
+            else:
+                if copy:
+                    self._pathmap[loc] = MapperEnt(obj["path"], tgt, "WritableFile")
+                else:
+                    self._pathmap[loc] = MapperEnt(obj["path"], tgt, "File")
+                self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)
+
+    def setup(self, referenced_files, basedir):
+        # type: (List[Any], unicode) -> None
+
+        # Go through each file and set the target to its own directory along
+        # with any secondary files.
+        stagedir = self.stagedir
+        for fob in referenced_files:
+            self.visit(fob, stagedir, basedir)
+
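+        # Rewrite keep: references to their $(task.keep) mount paths for the
+        # crunch job runtime.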
+        for path, (ab, tgt, type) in self._pathmap.items():
+            if type in ("File", "Directory") and ab.startswith("keep:"):
+                self._pathmap[path] = MapperEnt("$(task.keep)/%s" % ab[5:], tgt, type)
diff --git a/sdk/cwl/arvados_cwl/perf.py b/sdk/cwl/arvados_cwl/perf.py
new file mode 100644 (file)
index 0000000..a418ced
--- /dev/null
@@ -0,0 +1,15 @@
+import time
+import uuid
+
+class Perf(object):
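+    """Context manager that logs ENTER/EXIT wall-clock timestamps to a logger.
+
+    Typical use:
+
+        with Perf(metrics, "some step"):
+            ...  # timed section
+    """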
+    def __init__(self, logger, name):
+        self.logger = logger
+        self.name = name
+
+    def __enter__(self):
+        self.time = time.time()
+        self.logger.debug("ENTER %s %s", self.name, self.time)
+
+    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
+        now = time.time()
+        self.logger.debug("EXIT %s %s %s", self.name, now, now - self.time)
index 629b1042bb75400b9e8c6b05dacd65e3876362fc..a4132ca3f762675ff9dccc0129f3f8139b313f1b 100644 (file)
@@ -3,19 +3,116 @@ import urlparse
 from functools import partial
 import logging
 import json
+import re
+from cStringIO import StringIO
 
+import cwltool.draft2tool
 from cwltool.draft2tool import CommandLineTool
 import cwltool.workflow
-from cwltool.process import get_feature, scandeps, adjustFiles, UnsupportedRequirement
+from cwltool.process import get_feature, scandeps, UnsupportedRequirement, normalizeFilesDirs
 from cwltool.load_tool import fetch_document
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
 
 import arvados.collection
+import ruamel.yaml as yaml
 
 from .arvdocker import arv_docker_get_image
 from .pathmapper import ArvPathMapper
 
 logger = logging.getLogger('arvados.cwl-runner')
 
+cwltool.draft2tool.ACCEPTLIST_RE = re.compile(r".*")
+
+def trim_listing(obj):
+    """Remove 'listing' field from Directory objects that are keep references.
+
+    When Directory objects represent Keep references, it is redundant and
+    potentially very expensive to pass fully enumerated Directory objects
+    between instances of cwl-runner (e.g. when submitting a job or using the
+    RunInSingleContainer feature), so delete the 'listing' field when it is
+    safe to do so.  Synthesized '_:' locations, which have no meaning outside
+    the current process, are also dropped.
+    """
+
+    if obj.get("location", "").startswith("keep:") and "listing" in obj:
+        del obj["listing"]
+    if obj.get("location", "").startswith("_:"):
+        del obj["location"]
+
+def upload_dependencies(arvrunner, name, document_loader,
+                        workflowobj, uri, loadref_run):
+    """Upload the dependencies of the workflowobj document to Keep.
+
+    Returns a pathmapper object mapping local paths to keep references.  Also
+    does an in-place update of references in "workflowobj".
+
+    Use scandeps to find $import, $include, $schemas, run, File and Directory
+    fields that represent external references.
+
+    If workflowobj has an "id" field, this will reload the document to ensure
+    it is scanning the raw document prior to preprocessing.
+    """
+
+    loaded = set()
+    def loadref(b, u):
+        joined = urlparse.urljoin(b, u)
+        defrg, _ = urlparse.urldefrag(joined)
+        if defrg not in loaded:
+            loaded.add(defrg)
+            # Use fetch_text to get raw file (before preprocessing).
+            text = document_loader.fetch_text(defrg)
+            if isinstance(text, bytes):
+                textIO = StringIO(text.decode('utf-8'))
+            else:
+                textIO = StringIO(text)
+            return yaml.safe_load(textIO)
+        else:
+            return {}
+
+    if loadref_run:
+        loadref_fields = set(("$import", "run"))
+    else:
+        loadref_fields = set(("$import",))
+
+    scanobj = workflowobj
+    if "id" in workflowobj:
+        # Need raw file content (before preprocessing) to ensure
+        # that external references in $include and $mixin are captured.
+        scanobj = loadref("", workflowobj["id"])
+
+    sc = scandeps(uri, scanobj,
+                  loadref_fields,
+                  set(("$include", "$schemas", "location")),
+                  loadref)
+
+    normalizeFilesDirs(sc)
+
+    if "id" in workflowobj:
+        sc.append({"class": "File", "location": workflowobj["id"]})
+
+    mapper = ArvPathMapper(arvrunner, sc, "",
+                           "keep:%s",
+                           "keep:%s/%s",
+                           name=name)
+
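+    # In-place update: rewrite each local File/Directory location to its
+    # uploaded keep: target.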
+    def setloc(p):
+        if "location" in p and (not p["location"].startswith("_:")) and (not p["location"].startswith("keep:")):
+            p["location"] = mapper.mapper(p["location"]).resolved
+    adjustFileObjs(workflowobj, setloc)
+    adjustDirObjs(workflowobj, setloc)
+
+    return mapper
+
+
+def upload_docker(arvrunner, tool):
+    if isinstance(tool, CommandLineTool):
+        (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
+        if docker_req:
+            arv_docker_get_image(arvrunner.api, docker_req, True, arvrunner.project_uuid)
+    elif isinstance(tool, cwltool.workflow.Workflow):
+        for s in tool.steps:
+            upload_docker(arvrunner, s.embedded_tool)
+
+
 class Runner(object):
     def __init__(self, runner, tool, job_order, enable_reuse):
         self.arvrunner = runner
@@ -28,60 +125,26 @@ class Runner(object):
     def update_pipeline_component(self, record):
         pass
 
-    def upload_docker(self, tool):
-        if isinstance(tool, CommandLineTool):
-            (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
-            if docker_req:
-                arv_docker_get_image(self.arvrunner.api, docker_req, True, self.arvrunner.project_uuid)
-        elif isinstance(tool, cwltool.workflow.Workflow):
-            for s in tool.steps:
-                self.upload_docker(s.embedded_tool)
-
-
     def arvados_job_spec(self, *args, **kwargs):
-        self.upload_docker(self.tool)
-
-        workflowfiles = set()
-        jobfiles = set()
-        workflowfiles.add(self.tool.tool["id"])
+        upload_docker(self.arvrunner, self.tool)
 
         self.name = os.path.basename(self.tool.tool["id"])
 
-        def visitFiles(files, path):
-            files.add(path)
-            return path
-
-        document_loader, workflowobj, uri = fetch_document(self.tool.tool["id"])
-        loaded = set()
-        def loadref(b, u):
-            joined = urlparse.urljoin(b, u)
-            if joined not in loaded:
-                loaded.add(joined)
-                return document_loader.fetch(urlparse.urljoin(b, u))
-            else:
-                return {}
-
-        sc = scandeps(uri, workflowobj,
-                      set(("$import", "run")),
-                      set(("$include", "$schemas", "path")),
-                      loadref)
-        adjustFiles(sc, partial(visitFiles, workflowfiles))
-        adjustFiles(self.job_order, partial(visitFiles, jobfiles))
-
-        keepprefix = kwargs.get("keepprefix", "")
-        workflowmapper = ArvPathMapper(self.arvrunner, workflowfiles, "",
-                                       keepprefix+"%s",
-                                       keepprefix+"%s/%s",
-                                       name=self.name,
-                                       **kwargs)
-
-        jobmapper = ArvPathMapper(self.arvrunner, jobfiles, "",
-                                  keepprefix+"%s",
-                                  keepprefix+"%s/%s",
-                                  name=os.path.basename(self.job_order.get("id", "#")),
-                                  **kwargs)
-
-        adjustFiles(self.job_order, lambda p: jobmapper.mapper(p)[1])
+        workflowmapper = upload_dependencies(self.arvrunner,
+                                             self.name,
+                                             self.tool.doc_loader,
+                                             self.tool.tool,
+                                             self.tool.tool["id"],
+                                             True)
+
+        jobmapper = upload_dependencies(self.arvrunner,
+                                        os.path.basename(self.job_order.get("id", "#")),
+                                        self.tool.doc_loader,
+                                        self.job_order,
+                                        self.job_order.get("id", "#"),
+                                        False)
+
+        adjustDirObjs(self.job_order, trim_listing)
 
         if "id" in self.job_order:
             del self.job_order["id"]
@@ -109,12 +172,12 @@ class Runner(object):
                 outc = arvados.collection.Collection(record["output"])
                 with outc.open("cwl.output.json") as f:
                     outputs = json.load(f)
-                def keepify(path):
+                def keepify(fileobj):
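+                    # Rewrite relative paths in cwl.output.json as keep:
+                    # references into the runner job's output collection.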
+                    path = fileobj["location"]
                     if not path.startswith("keep:"):
-                        return "keep:%s/%s" % (record["output"], path)
-                    else:
-                        return path
-                adjustFiles(outputs, keepify)
+                        fileobj["location"] = "keep:%s/%s" % (record["output"], path)
+                adjustFileObjs(outputs, keepify)
+                adjustDirObjs(outputs, keepify)
             except Exception as e:
                 logger.error("While getting final output object: %s", e)
             self.arvrunner.output_callback(outputs, processStatus)
index b1ff7f34b82100126efe56a62cf1cefc690b19bc..929011b3d6b642c44d170a4e88b4616fdd1cd962 100644 (file)
@@ -25,13 +25,16 @@ setup(name='arvados-cwl-runner',
       download_url="https://github.com/curoverse/arvados.git",
       license='Apache 2.0',
       packages=find_packages(),
+      package_data={'arvados_cwl': ['arv-cwl-schema.yml']},
       scripts=[
           'bin/cwl-runner',
           'bin/arvados-cwl-runner'
       ],
+      # Make sure to update arvados/build/run-build-packages.sh as well
+      # when updating the cwltool version pin.
       install_requires=[
-          'cwltool==1.0.20160609160402',
-          'arvados-python-client>=0.1.20160322001610'
+          'cwltool==1.0.20160930152149',
+          'arvados-python-client>=0.1.20160826210445'
       ],
       data_files=[
           ('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
index 5501e2964557fa5346d32ab798aa3e4a2381abb8..3b16bbcc200819f04386db92b698db5b46f276c8 100755 (executable)
@@ -1,5 +1,7 @@
 #!/bin/sh
 
+set -x
+
 if ! which arvbox >/dev/null ; then
     export PATH=$PATH:$(readlink -f $(dirname $0)/../../tools/arvbox/bin)
 fi
@@ -8,6 +10,7 @@ reset_container=1
 leave_running=0
 config=dev
 docker_pull=1
+tag=""
 
 while test -n "$1" ; do
     arg="$1"
@@ -28,8 +31,12 @@ while test -n "$1" ; do
             docker_pull=0
             shift
             ;;
+        --tag)
+            tag=$2
+            shift ; shift
+            ;;
         -h|--help)
-            echo "$0 [--no-reset-container] [--leave-running] [--no-docker-pull] [--config dev|localdemo]"
+            echo "$0 [--no-reset-container] [--leave-running] [--no-docker-pull] [--config dev|localdemo] [--tag docker_tag]"
             exit
             ;;
         *)
@@ -46,7 +53,7 @@ if test $reset_container = 1 ; then
     arvbox reset -f
 fi
 
-arvbox start $config
+arvbox start $config $tag
 
 arvbox pipe <<EOF
 set -eu -o pipefail
@@ -69,18 +76,18 @@ export ARVADOS_API_HOST_INSECURE=1
 export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
 
 if test $docker_pull = 1 ; then
-  arv-keepdocker --pull arvados/jobs
+  arv-keepdocker --pull arvados/jobs $tag
 fi
 
 cat >/tmp/cwltest/arv-cwl-jobs <<EOF2
 #!/bin/sh
-exec arvados-cwl-runner --api=jobs \\\$@
+exec arvados-cwl-runner --api=jobs --compute-checksum \\\$@
 EOF2
 chmod +x /tmp/cwltest/arv-cwl-jobs
 
 cat >/tmp/cwltest/arv-cwl-containers <<EOF2
 #!/bin/sh
-exec arvados-cwl-runner --api=containers \\\$@
+exec arvados-cwl-runner --api=containers --compute-checksum \\\$@
 EOF2
 chmod +x /tmp/cwltest/arv-cwl-containers
 
diff --git a/sdk/cwl/tests/arvados-tests.sh b/sdk/cwl/tests/arvados-tests.sh
new file mode 100755 (executable)
index 0000000..8646704
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+if ! arv-get d7514270f356df848477718d58308cc4+94 > /dev/null ; then
+    arv-put --portable-data-hash testdir
+fi
+exec cwltest --test arvados-tests.yml --tool $PWD/runner.sh
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
new file mode 100644 (file)
index 0000000..1187962
--- /dev/null
@@ -0,0 +1,10 @@
+- job: dir-job.yml
+  output:
+    "outlist": {
+        "size": 20,
+        "location": "output.txt",
+        "class": "File",
+        "checksum": "sha1$13cda8661796ae241da3a18668fb552161a72592"
+    }
+  tool: keep-dir-test-input.cwl
+  doc: Test directory in keep
diff --git a/sdk/cwl/tests/dir-job.yml b/sdk/cwl/tests/dir-job.yml
new file mode 100644 (file)
index 0000000..91204d7
--- /dev/null
@@ -0,0 +1,3 @@
+indir:
+  class: Directory
+  location: keep:d7514270f356df848477718d58308cc4+94
\ No newline at end of file
diff --git a/sdk/cwl/tests/keep-dir-test-input.cwl b/sdk/cwl/tests/keep-dir-test-input.cwl
new file mode 100644 (file)
index 0000000..93362b5
--- /dev/null
@@ -0,0 +1,21 @@
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  - class: ShellCommandRequirement
+inputs:
+  indir:
+    type: Directory
+    inputBinding:
+      prefix: cd
+      position: -1
+outputs:
+  outlist:
+    type: File
+    outputBinding:
+      glob: output.txt
+arguments: [
+  {shellQuote: false, valueFrom: "&&"},
+  "find", ".",
+  {shellQuote: false, valueFrom: "|"},
+  "sort"]
+stdout: output.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/runner.sh b/sdk/cwl/tests/runner.sh
new file mode 100755 (executable)
index 0000000..22ede5c
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec arvados-cwl-runner --disable-reuse --compute-checksum "$@"
index 95ff0ff981174e05caf292addf59cc5af24f7198..49d5944c06d81f6f3ece48c8f37ce5421859a942 100644 (file)
@@ -2,5 +2,22 @@
     "x": {
         "class": "File",
         "path": "input/blorp.txt"
+    },
+    "y": {
+        "class": "Directory",
+        "location": "keep:99999999999999999999999999999998+99",
+        "listing": [{
+            "class": "File",
+            "location": "keep:99999999999999999999999999999998+99/file1.txt"
+        }]
+    },
+    "z": {
+        "class": "Directory",
+        "basename": "anonymous",
+        "listing": [{
+            "basename": "renamed.txt",
+            "class": "File",
+            "location": "keep:99999999999999999999999999999998+99/file1.txt"
+        }]
     }
 }
index df7d1424db228e86abc2e169fca1dff5b91ae90f..822a213fe7a387383eaddec8e75386c785353d6c 100644 (file)
@@ -3,7 +3,11 @@ import logging
 import mock
 import unittest
 import os
+import functools
 import cwltool.process
+from schema_salad.ref_resolver import Loader
 
 if not os.getenv('ARVADOS_DEBUG'):
     logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
@@ -29,15 +33,20 @@ class TestContainer(unittest.TestCase):
         tool = {
             "inputs": [],
             "outputs": [],
-            "baseCommand": "ls"
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}]
         }
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names, basedir="")
+        make_fs_access = functools.partial(arvados_cwl.CollectionFsAccess, api_client=runner.api)
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
+                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run"):
+        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run",
+                             make_fs_access=make_fs_access, tmpdir="/tmp"):
             j.run()
             runner.api.container_requests().create.assert_called_with(
                 body={
                     'environment': {
+                        'HOME': '/var/spool/cwl',
                         'TMPDIR': '/tmp'
                     },
                     'name': 'test_run',
@@ -52,7 +61,7 @@ class TestContainer(unittest.TestCase):
                     'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
                     'output_path': '/var/spool/cwl',
                     'container_image': '99999999999999999999999999999993+99',
-                    'command': ['ls'],
+                    'command': ['ls', '/var/spool/cwl'],
                     'cwd': '/var/spool/cwl'
                 })
 
@@ -77,23 +86,34 @@ class TestContainer(unittest.TestCase):
                 "coresMin": 3,
                 "ramMin": 3000,
                 "tmpdirMin": 4000
+            }, {
+                "class": "http://arvados.org/cwl#RuntimeConstraints",
+                "keep_cache": 512
+            }, {
+                "class": "http://arvados.org/cwl#APIRequirement",
             }],
             "baseCommand": "ls"
         }
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names)
+        make_fs_access = functools.partial(arvados_cwl.CollectionFsAccess, api_client=runner.api)
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers",
+                                                 avsc_names=avsc_names, make_fs_access=make_fs_access,
+                                                 loader=Loader({}))
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_resource_requirements"):
+        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_resource_requirements",
+                             make_fs_access=make_fs_access, tmpdir="/tmp"):
             j.run()
 
         runner.api.container_requests().create.assert_called_with(
             body={
                 'environment': {
+                    'HOME': '/var/spool/cwl',
                     'TMPDIR': '/tmp'
                 },
                 'name': 'test_resource_requirements',
                 'runtime_constraints': {
                     'vcpus': 3,
-                    'ram': 3145728000
+                    'ram': 3145728000,
+                    'API': True
                 }, 'priority': 1,
                 'mounts': {
                     '/var/spool/cwl': {'kind': 'tmp'}
@@ -126,6 +146,7 @@ class TestContainer(unittest.TestCase):
         arvjob.output_callback = mock.MagicMock()
         arvjob.collect_outputs = mock.MagicMock()
         arvjob.successCodes = [0]
+        arvjob.outdir = "/var/spool/cwl"
 
         arvjob.done({
             "state": "Complete",
@@ -170,6 +191,7 @@ class TestContainer(unittest.TestCase):
         arvjob.output_callback = mock.MagicMock()
         arvjob.collect_outputs = mock.MagicMock()
         arvjob.successCodes = [0]
+        arvjob.outdir = "/var/spool/cwl"
 
         arvjob.done({
             "state": "Complete",
index abaf1614f1601d5296b1718af474513e0794ebcb..7f31520454662540aea97cbcfcbb254b0dccee8f 100644 (file)
@@ -1,9 +1,13 @@
-import arvados_cwl
 import logging
 import mock
 import unittest
 import os
+import functools
+import json
+
+import arvados_cwl
 import cwltool.process
+from schema_salad.ref_resolver import Loader
 
 if not os.getenv('ARVADOS_DEBUG'):
     logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
@@ -23,11 +27,14 @@ class TestJob(unittest.TestCase):
         tool = {
             "inputs": [],
             "outputs": [],
-            "baseCommand": "ls"
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}]
         }
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names, basedir="")
+        make_fs_access = functools.partial(arvados_cwl.CollectionFsAccess, api_client=runner.api)
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names,
+                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir=""):
+        for j in arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access):
             j.run()
             runner.api.jobs().create.assert_called_with(
                 body={
@@ -35,8 +42,8 @@ class TestJob(unittest.TestCase):
                     'runtime_constraints': {},
                     'script_parameters': {
                         'tasks': [{
-                            'task.env': {'TMPDIR': '$(task.tmpdir)'},
-                            'command': ['ls']
+                            'task.env': {'HOME': '$(task.outdir)', 'TMPDIR': '$(task.tmpdir)'},
+                            'command': ['ls', '$(task.outdir)']
                         }],
                     },
                     'script_version': 'master',
@@ -73,12 +80,19 @@ class TestJob(unittest.TestCase):
                 "coresMin": 3,
                 "ramMin": 3000,
                 "tmpdirMin": 4000
+            }, {
+                "class": "http://arvados.org/cwl#RuntimeConstraints",
+                "keep_cache": 512
+            }, {
+                "class": "http://arvados.org/cwl#APIRequirement",
             }],
             "baseCommand": "ls"
         }
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names)
+        make_fs_access = functools.partial(arvados_cwl.CollectionFsAccess, api_client=runner.api)
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names,
+                                                 make_fs_access=make_fs_access, loader=Loader({}))
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir=""):
+        for j in arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access):
             j.run()
         runner.api.jobs().create.assert_called_with(
             body={
@@ -86,7 +100,7 @@ class TestJob(unittest.TestCase):
                 'runtime_constraints': {},
                 'script_parameters': {
                     'tasks': [{
-                        'task.env': {'TMPDIR': '$(task.tmpdir)'},
+                        'task.env': {'HOME': '$(task.outdir)', 'TMPDIR': '$(task.tmpdir)'},
                         'command': ['ls']
                     }]
             },
@@ -98,7 +112,8 @@ class TestJob(unittest.TestCase):
                     'docker_image': 'arvados/jobs',
                     'min_cores_per_node': 3,
                     'min_ram_mb_per_node': 3000,
-                    'min_scratch_mb_per_node': 5024 # tmpdirSize + outdirSize
+                    'min_scratch_mb_per_node': 5024, # tmpdirSize + outdirSize
+                    'keep_cache_mb_per_task': 512
                 }
             },
             find_or_create=True,
@@ -184,3 +199,65 @@ class TestJob(unittest.TestCase):
             mock.call().execute(num_retries=0)])
 
         self.assertFalse(api.collections().create.called)
+
+
+class TestWorkflow(unittest.TestCase):
+    # The test passes no builder.resources, so the defaults apply:
+    # {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.collection.Collection")
+    def test_run(self, mockcollection):
+        arvados_cwl.add_arv_hints()
+
+        runner = arvados_cwl.ArvCwlRunner(mock.MagicMock())
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        tool, metadata = document_loader.resolve_ref("tests/wf/scatter2.cwl")
+        metadata["cwlVersion"] = tool["cwlVersion"]
+
+        mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
+
+        make_fs_access = functools.partial(arvados_cwl.CollectionFsAccess, api_client=runner.api)
+        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, work_api="jobs", avsc_names=avsc_names,
+                                              basedir="", make_fs_access=make_fs_access, loader=document_loader,
+                                              makeTool=runner.arv_make_tool, metadata=metadata)
+        arvtool.formatgraph = None
+        it = arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access)
+        it.next().run()
+        it.next().run()
+
+        with open("tests/wf/scatter2_subwf.cwl") as f:
+            subwf = f.read()
+
+        mockcollection().open().__enter__().write.assert_has_calls([mock.call(subwf)])
+        mockcollection().open().__enter__().write.assert_has_calls([mock.call('{sleeptime: 5}')])
+
+        runner.api.jobs().create.assert_called_with(
+            body={
+                'minimum_script_version': '9e5b98e8f5f4727856b53447191f9c06e3da2ba6',
+                'repository': 'arvados',
+                'script_version': 'master',
+                'script': 'crunchrunner',
+                'script_parameters': {
+                    'tasks': [{
+                        'task.env': {
+                            'HOME': '$(task.outdir)',
+                            'TMPDIR': '$(task.tmpdir)'
+                        },
+                        'task.vwd': {
+                            'workflow.cwl': '$(task.keep)/99999999999999999999999999999999+118/workflow.cwl',
+                            'cwl.input.yml': '$(task.keep)/99999999999999999999999999999999+118/cwl.input.yml'
+                        },
+                        'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
+                        'task.stdout': 'cwl.output.json'
+                    }]
+                },
+                'runtime_constraints': {
+                    'min_scratch_mb_per_node': 2048,
+                    'min_cores_per_node': 1,
+                    'docker_image': 'arvados/jobs',
+                    'min_ram_mb_per_node': 1024
+                },
+                'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'},
+            filters=[['repository', '=', 'arvados'],
+                     ['script', '=', 'crunchrunner'],
+                     ['script_version', 'in git', '9e5b98e8f5f4727856b53447191f9c06e3da2ba6'],
+                     ['docker_image_locator', 'in docker', 'arvados/jobs']],
+            find_or_create=True)
index 6f056ab0c4fcd1d2f33ac4106fb51442a0022e19..eb7199295144368d30da1122e435209e2c6323fa 100644 (file)
@@ -9,6 +9,7 @@ import hashlib
 import mock
 import sys
 import unittest
+import json
 
 from .matcher import JsonDiffMatcher
 
@@ -41,17 +42,24 @@ def stubs(func):
         stubs.api.collections().create().execute.side_effect = ({
             "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
             "portable_data_hash": "99999999999999999999999999999991+99",
+            "manifest_text": ""
         }, {
             "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
             "portable_data_hash": "99999999999999999999999999999992+99",
+            "manifest_text": "./tool 00000000000000000000000000000000+0 0:0:submit_tool.cwl 0:0:blub.txt"
         },
         {
             "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz4",
             "portable_data_hash": "99999999999999999999999999999994+99",
             "manifest_text": ""
-        })
+        },
+        {
+            "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz5",
+            "portable_data_hash": "99999999999999999999999999999995+99",
+            "manifest_text": ""
+        })
         stubs.api.collections().get().execute.return_value = {
-            "portable_data_hash": "99999999999999999999999999999993+99"}
+            "portable_data_hash": "99999999999999999999999999999993+99", "manifest_text": "./tool 00000000000000000000000000000000+0 0:0:submit_tool.cwl 0:0:blub.txt"}
 
         stubs.expect_job_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         stubs.api.jobs().create().execute.return_value = {
@@ -76,9 +84,24 @@ def stubs(func):
             },
             'script_parameters': {
                 'x': {
-                    'path': '99999999999999999999999999999992+99/blorp.txt',
+                    'basename': 'blorp.txt',
+                    'location': 'keep:99999999999999999999999999999994+99/blorp.txt',
                     'class': 'File'
                 },
+                'y': {
+                    'basename': '99999999999999999999999999999998+99',
+                    'location': 'keep:99999999999999999999999999999998+99',
+                    'class': 'Directory'
+                },
+                'z': {
+                    'basename': 'anonymous',
+                    "listing": [{
+                        "basename": "renamed.txt",
+                        "class": "File",
+                        "location": "keep:99999999999999999999999999999998+99/file1.txt"
+                    }],
+                    'class': 'Directory'
+                },
                 'cwl:tool':
                 '99999999999999999999999999999991+99/wf/submit_wf.cwl'
             },
@@ -103,7 +126,7 @@ def stubs(func):
                     'kind': 'file'
                 },
                 '/var/lib/cwl/job/cwl.input.json': {
-                    'portable_data_hash': '33be5c865fe12e1e4788d2f1bc627f7a+60/cwl.input.json',
+                    'portable_data_hash': 'd20d7cddd1984f105dd3702c7f125afb+60/cwl.input.json',
                     'kind': 'collection'
                 }
             },
@@ -120,6 +143,12 @@ def stubs(func):
                 'ram': 268435456
             }
         }
+
+        stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
+        stubs.api.workflows().create().execute.return_value = {
+            "uuid": stubs.expect_workflow_uuid,
+        }
+
         return func(self, stubs, *args, **kwargs)
     return wrapped
 
@@ -138,13 +167,20 @@ class TestSubmit(unittest.TestCase):
             mock.call(),
             mock.call(body={
                 'manifest_text':
-                './tool a3954c369b8924d40547ec8cf5f6a7f4+449 '
-                '0:16:blub.txt 16:433:submit_tool.cwl\n./wf '
-                'e046cace0b1a0a6ee645f6ea8688f7e2+364 0:364:submit_wf.cwl\n',
+                './tool d51232d96b6116d964a69bfb7e0c73bf+450 '
+                '0:16:blub.txt 16:434:submit_tool.cwl\n./wf '
+                'cc2ffb940e60adf1b2b282c67587e43d+413 0:413:submit_wf.cwl\n',
                 'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
                 'name': 'submit_wf.cwl',
             }, ensure_unique_name=True),
             mock.call().execute(),
+            mock.call(body={'manifest_text': '. d41d8cd98f00b204e9800998ecf8427e+0 '
+                            '0:0:blub.txt 0:0:submit_tool.cwl\n',
+                            'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
+                            'replication_desired': None,
+                            'name': 'New collection'
+            }, ensure_unique_name=True),
+            mock.call().execute(num_retries=4),
             mock.call(body={
                 'manifest_text':
                 '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
@@ -191,13 +227,20 @@ class TestSubmit(unittest.TestCase):
             mock.call(),
             mock.call(body={
                 'manifest_text':
-                './tool a3954c369b8924d40547ec8cf5f6a7f4+449 '
-                '0:16:blub.txt 16:433:submit_tool.cwl\n./wf '
-                'e046cace0b1a0a6ee645f6ea8688f7e2+364 0:364:submit_wf.cwl\n',
+                './tool d51232d96b6116d964a69bfb7e0c73bf+450 '
+                '0:16:blub.txt 16:434:submit_tool.cwl\n./wf '
+                'cc2ffb940e60adf1b2b282c67587e43d+413 0:413:submit_wf.cwl\n',
                 'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
                 'name': 'submit_wf.cwl',
             }, ensure_unique_name=True),
             mock.call().execute(),
+            mock.call(body={'manifest_text': '. d41d8cd98f00b204e9800998ecf8427e+0 '
+                            '0:0:blub.txt 0:0:submit_tool.cwl\n',
+                            'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
+                            'name': 'New collection',
+                            'replication_desired': None,
+            }, ensure_unique_name=True),
+            mock.call().execute(num_retries=4),
             mock.call(body={
                 'manifest_text':
                 '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
@@ -222,7 +265,7 @@ class TestCreateTemplate(unittest.TestCase):
         capture_stdout = cStringIO.StringIO()
 
         exited = arvados_cwl.main(
-            ["--create-template", "--no-wait",
+            ["--create-template", "--debug",
              "--project-uuid", project_uuid,
              "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
             capture_stdout, sys.stderr, api_client=stubs.api)
@@ -236,7 +279,18 @@ class TestCreateTemplate(unittest.TestCase):
             'dataclass': 'File',
             'required': True,
             'type': 'File',
-            'value': '99999999999999999999999999999992+99/blorp.txt',
+            'value': '99999999999999999999999999999994+99/blorp.txt',
+        }
+        expect_component['script_parameters']['y'] = {
+            'dataclass': 'Collection',
+            'required': True,
+            'type': 'Directory',
+            'value': '99999999999999999999999999999998+99',
+        }
+        expect_component['script_parameters']['z'] = {
+            'dataclass': 'Collection',
+            'required': True,
+            'type': 'Directory',
         }
         expect_template = {
             "components": {
@@ -252,6 +306,41 @@ class TestCreateTemplate(unittest.TestCase):
                          stubs.expect_pipeline_template_uuid + '\n')
 
 
+class TestCreateWorkflow(unittest.TestCase):
+    @stubs
+    def test_create(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        capture_stdout = cStringIO.StringIO()
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--project-uuid", project_uuid,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            capture_stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 0)
+
+        stubs.api.pipeline_templates().create.assert_not_called()
+        stubs.api.container_requests().create.assert_not_called()
+
+        with open("tests/wf/expect_packed.cwl") as f:
+            expect_workflow = f.read()
+
+        body = {
+            "workflow": {
+                "owner_uuid": project_uuid,
+                "name": "submit_wf.cwl",
+                "description": "",
+                "definition": expect_workflow
+                }
+        }
+        stubs.api.workflows().create.assert_called_with(
+            body=JsonDiffMatcher(body))
+
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_workflow_uuid + '\n')
+
+
 class TestTemplateInputs(unittest.TestCase):
     expect_template = {
         "components": {
@@ -327,7 +416,7 @@ class TestTemplateInputs(unittest.TestCase):
         expect_template["owner_uuid"] = stubs.fake_user_uuid
         params = expect_template[
             "components"]["inputs_test.cwl"]["script_parameters"]
-        params["fileInput"]["value"] = '99999999999999999999999999999992+99/blorp.txt'
+        params["fileInput"]["value"] = '99999999999999999999999999999994+99/blorp.txt'
         params["floatInput"]["value"] = 1.234
         params["boolInput"]["value"] = True
 
diff --git a/sdk/cwl/tests/testdir/a b/sdk/cwl/tests/testdir/a
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/testdir/b b/sdk/cwl/tests/testdir/b
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/testdir/c/d b/sdk/cwl/tests/testdir/c/d
new file mode 100644 (file)
index 0000000..e69de29
index e9fa423ee664bf1c3513d9d8ed80c01616ee2c71..19df1de2086b70b462579b8c46dfa02cfe995dc4 100644 (file)
@@ -4,7 +4,7 @@
 # value blub.txt) and uploading to Keep works as intended.
 
 class: CommandLineTool
-cwlVersion: draft-3
+cwlVersion: v1.0
 requirements:
   - class: DockerRequirement
     dockerPull: debian:8
@@ -13,7 +13,7 @@ inputs:
     type: File
     default:
       class: File
-      path: blub.txt
+      location: blub.txt
     inputBinding:
       position: 1
 outputs: []
diff --git a/sdk/cwl/tests/wf/expect_packed.cwl b/sdk/cwl/tests/wf/expect_packed.cwl
new file mode 100644 (file)
index 0000000..25d02b2
--- /dev/null
@@ -0,0 +1,36 @@
+$graph:
+- baseCommand: cat
+  class: CommandLineTool
+  id: '#submit_tool.cwl'
+  inputs:
+  - default: {class: File, location: 'keep:99999999999999999999999999999991+99/tool/blub.txt'}
+    id: '#submit_tool.cwl/x'
+    inputBinding: {position: 1}
+    type: File
+  outputs: []
+  requirements:
+  - {class: DockerRequirement, dockerImageId: 'debian:8', dockerPull: 'debian:8'}
+- class: Workflow
+  id: '#main'
+  inputs:
+  - default: {basename: blorp.txt, class: File, location: 'keep:99999999999999999999999999999991+99/input/blorp.txt'}
+    id: '#main/x'
+    type: File
+  - default: {basename: 99999999999999999999999999999998+99, class: Directory, location: 'keep:99999999999999999999999999999998+99'}
+    id: '#main/y'
+    type: Directory
+  - default:
+      basename: anonymous
+      class: Directory
+      listing:
+      - {basename: renamed.txt, class: File, location: 'keep:99999999999999999999999999999998+99/file1.txt'}
+    id: '#main/z'
+    type: Directory
+  outputs: []
+  steps:
+  - id: '#main/step1'
+    in:
+    - {id: '#main/step1/x', source: '#main/x'}
+    out: []
+    run: '#submit_tool.cwl'
+cwlVersion: v1.0
index ec43207c696aa8916f89981f4fc71d01d79a9773..5fea4fdddfe9058745f2ded5d070c324d9b6a4e0 100644 (file)
@@ -2,12 +2,12 @@
 # various input types as script_parameters in pipeline templates.
 
 class: Workflow
-cwlVersion: draft-3
+cwlVersion: v1.0
 inputs:
   - id: "#fileInput"
     type: File
     label: It's a file; we expect to find some characters in it.
-    description: |
+    doc: |
       If there were anything further to say, it would be said here,
       or here.
   - id: "#boolInput"
@@ -22,7 +22,7 @@ inputs:
 outputs: []
 steps:
   - id: step1
-    inputs:
-      - { id: x, source: "#x" }
-    outputs: []
+    in:
+      - { id: x, source: "#fileInput" }
+    out: []
     run: ../tool/submit_tool.cwl
diff --git a/sdk/cwl/tests/wf/scatter2.cwl b/sdk/cwl/tests/wf/scatter2.cwl
new file mode 100644 (file)
index 0000000..f73ec2b
--- /dev/null
@@ -0,0 +1,56 @@
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  sleeptime:
+    type: int[]
+    default: [5]
+outputs:
+  out:
+    type: string[]
+    outputSource: scatterstep/out
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  scatterstep:
+    in:
+      sleeptime: sleeptime
+    out: [out]
+    scatter: sleeptime
+    hints:
+      - class: arv:RunInSingleContainer
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        sleeptime: int
+      outputs:
+        out:
+          type: string
+          outputSource: sleep1/out
+      steps:
+        sleep1:
+          in:
+            sleeptime: sleeptime
+            blurb:
+              valueFrom: |
+                ${
+                  return String(inputs.sleeptime) + "b";
+                }
+          out: [out]
+          run:
+            class: CommandLineTool
+            inputs:
+              sleeptime:
+                type: int
+                inputBinding: {position: 1}
+            outputs:
+              out:
+                type: string
+                outputBinding:
+                  outputEval: "out"
+            baseCommand: sleep
diff --git a/sdk/cwl/tests/wf/scatter2_subwf.cwl b/sdk/cwl/tests/wf/scatter2_subwf.cwl
new file mode 100644 (file)
index 0000000..0ae1cf0
--- /dev/null
@@ -0,0 +1,33 @@
+$graph:
+- class: Workflow
+  hints:
+  - {class: 'http://arvados.org/cwl#RunInSingleContainer'}
+  id: '#main'
+  inputs:
+  - {id: '#main/sleeptime', type: int}
+  outputs:
+  - {id: '#main/out', outputSource: '#main/sleep1/out', type: string}
+  requirements:
+  - {class: InlineJavascriptRequirement}
+  - {class: ScatterFeatureRequirement}
+  - {class: StepInputExpressionRequirement}
+  - {class: SubworkflowFeatureRequirement}
+  steps:
+  - id: '#main/sleep1'
+    in:
+    - {id: '#main/sleep1/blurb', valueFrom: "${\n  return String(inputs.sleeptime)\
+        \ + \"b\";\n}\n"}
+    - {id: '#main/sleep1/sleeptime', source: '#main/sleeptime'}
+    out: ['#main/sleep1/out']
+    run:
+      baseCommand: sleep
+      class: CommandLineTool
+      inputs:
+      - id: '#main/sleep1/sleeptime'
+        inputBinding: {position: 1}
+        type: int
+      outputs:
+      - id: '#main/sleep1/out'
+        outputBinding: {outputEval: out}
+        type: string
+cwlVersion: v1.0
\ No newline at end of file
index 36db603cc6040ed181986b9322dc50e4d4634b32..9aab5cf4b008c20765598a50790a410cd6446820 100644 (file)
@@ -4,14 +4,18 @@
 # (e.g. submit_tool.cwl) and uploading to Keep works as intended.
 
 class: Workflow
-cwlVersion: draft-3
+cwlVersion: v1.0
 inputs:
   - id: x
     type: File
+  - id: y
+    type: Directory
+  - id: z
+    type: Directory
 outputs: []
 steps:
   - id: step1
-    inputs:
+    in:
       - { id: x, source: "#x" }
-    outputs: []
+    out: []
     run: ../tool/submit_tool.cwl
index d6d610da91f9d48d90a5dc5d750adb9c681b47a6..36f4eb52ae298982dfa09ddf82b0cea08c2604f7 100644 (file)
@@ -10,6 +10,8 @@ import (
        "net/http"
        "net/url"
        "os"
+       "strings"
+       "time"
 )
 
 // A Client is an HTTP client with an API endpoint and a set of
@@ -20,8 +22,8 @@ import (
 // of results using List APIs.
 type Client struct {
        // HTTP client used to make requests. If nil,
-       // http.DefaultClient or InsecureHTTPClient will be used.
-       Client *http.Client
+       // DefaultSecureClient or InsecureHTTPClient will be used.
+       Client *http.Client `json:"-"`
 
        // Hostname (or host:port) of Arvados API server.
        APIHost string
@@ -32,6 +34,13 @@ type Client struct {
        // Accept unverified certificates. This works only if the
        // Client field is nil: otherwise, it has no effect.
        Insecure bool
+
+       // Override keep service discovery with a list of base
+       // URIs. (Currently there are no Client methods for
+       // discovering keep services so this is just a convenience for
+       // callers who use a Client to initialize an
+       // arvadosclient.ArvadosClient.)
+       KeepServiceURIs []string `json:",omitempty"`
 }
 
 // The default http.Client used by a Client with Insecure==true and
@@ -39,16 +48,26 @@ type Client struct {
 var InsecureHTTPClient = &http.Client{
        Transport: &http.Transport{
                TLSClientConfig: &tls.Config{
-                       InsecureSkipVerify: true}}}
+                       InsecureSkipVerify: true}},
+       Timeout: 5 * time.Minute}
+
+// The default http.Client used by a Client otherwise.
+var DefaultSecureClient = &http.Client{
+       Timeout: 5 * time.Minute}
 
 // NewClientFromEnv creates a new Client that uses the default HTTP
 // client with the API endpoint and credentials given by the
 // ARVADOS_API_* environment variables.
 func NewClientFromEnv() *Client {
+       var svcs []string
+       if s := os.Getenv("ARVADOS_KEEP_SERVICES"); s != "" {
+               svcs = strings.Split(s, " ")
+       }
        return &Client{
-               APIHost:   os.Getenv("ARVADOS_API_HOST"),
-               AuthToken: os.Getenv("ARVADOS_API_TOKEN"),
-               Insecure:  os.Getenv("ARVADOS_API_HOST_INSECURE") != "",
+               APIHost:         os.Getenv("ARVADOS_API_HOST"),
+               AuthToken:       os.Getenv("ARVADOS_API_TOKEN"),
+               Insecure:        os.Getenv("ARVADOS_API_HOST_INSECURE") != "",
+               KeepServiceURIs: svcs,
        }
 }
 
@@ -169,7 +188,7 @@ func (c *Client) httpClient() *http.Client {
        case c.Insecure:
                return InsecureHTTPClient
        default:
-               return http.DefaultClient
+               return DefaultSecureClient
        }
 }
 
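
With NewClientFromEnv now reading ARVADOS_KEEP_SERVICES, a caller can bypass Keep service discovery entirely. A minimal sketch, assuming the import path used elsewhere in this tree; the keep hosts are hypothetical:

    package main

    import (
        "fmt"
        "os"

        "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
        // The variable is split on single spaces into KeepServiceURIs.
        os.Setenv("ARVADOS_KEEP_SERVICES",
            "https://keep0.example:25107 https://keep1.example:25107")
        c := arvados.NewClientFromEnv()
        fmt.Println(c.KeepServiceURIs)
    }
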
index 422ad9037acc45ed6a2a39b9f8be5ab9bedcf3c8..5011aa81f689c0bf1d6098c123bcaeee14ba6c41 100644 (file)
@@ -117,7 +117,7 @@ func TestAnythingToValues(t *testing.T) {
                        },
                },
                {
-                       in: map[string]interface{}{"foo": map[string]interface{}{"bar":1.234}},
+                       in: map[string]interface{}{"foo": map[string]interface{}{"bar": 1.234}},
                        ok: func(out url.Values) bool {
                                return out.Get("foo") == `{"bar":1.234}`
                        },
index ac129526fdda23652e0b7f351c9a9b89c9c6f088..bb36b17324ce06c808e25dd5fe83901f094dae1d 100644 (file)
@@ -18,12 +18,13 @@ type Container struct {
 
 // Mount is special behavior to attach to a filesystem path or device.
 type Mount struct {
-       Kind             string `json:"kind"`
-       Writable         bool   `json:"writable"`
-       PortableDataHash string `json:"portable_data_hash"`
-       UUID             string `json:"uuid"`
-       DeviceType       string `json:"device_type"`
-       Path             string `json:"path"`
+       Kind             string      `json:"kind"`
+       Writable         bool        `json:"writable"`
+       PortableDataHash string      `json:"portable_data_hash"`
+       UUID             string      `json:"uuid"`
+       DeviceType       string      `json:"device_type"`
+       Path             string      `json:"path"`
+       Content          interface{} `json:"content"`
 }
 
 // RuntimeConstraints specify a container's compute resources (RAM,
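
The new Content field lets a mount carry inline data rather than a reference. A sketch using a trimmed local copy of the type (the real Mount lives in the arvados package above; the "json" mount kind and the payload here are illustrative assumptions):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Mount is a trimmed local copy showing only the new field.
    type Mount struct {
        Kind    string      `json:"kind"`
        Content interface{} `json:"content,omitempty"`
    }

    func main() {
        m := Mount{
            Kind:    "json",
            Content: map[string]interface{}{"sleeptime": 5},
        }
        buf, _ := json.Marshal(m)
        fmt.Println(string(buf)) // {"kind":"json","content":{"sleeptime":5}}
    }
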
index 1639c5852a6573ca2f51deb0080558cec587e464..7b87aee6ab7b14c875aa044e3b88e11ed22bd567 100644 (file)
@@ -13,9 +13,7 @@ type Duration time.Duration
 // UnmarshalJSON implements json.Unmarshaler
 func (d *Duration) UnmarshalJSON(data []byte) error {
        if data[0] == '"' {
-               dur, err := time.ParseDuration(string(data[1 : len(data)-1]))
-               *d = Duration(dur)
-               return err
+               return d.Set(string(data[1 : len(data)-1]))
        }
        return fmt.Errorf("duration must be given as a string like \"600s\" or \"1h30m\"")
 }
@@ -29,3 +27,10 @@ func (d *Duration) MarshalJSON() ([]byte, error) {
 func (d Duration) String() string {
        return time.Duration(d).String()
 }
+
+// Value implements flag.Value
+func (d *Duration) Set(s string) error {
+       dur, err := time.ParseDuration(s)
+       *d = Duration(dur)
+       return err
+}
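
Together with the existing String method, Set makes *Duration satisfy flag.Value. A minimal sketch, assuming Duration is exported from the arvados package as above:

    package main

    import (
        "flag"
        "fmt"

        "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
        var timeout arvados.Duration
        // *Duration now satisfies flag.Value (Set plus String),
        // so it can back a command line flag directly.
        flag.Var(&timeout, "timeout", `duration, e.g. "600s" or "1h30m"`)
        flag.Parse()
        fmt.Println("timeout:", timeout)
    }
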
index 4af1b7910f6f3b111583ad91fa5416ef520b4ac5..b29748a2247342a2497a4d4018e41da5174e471e 100644 (file)
@@ -30,6 +30,7 @@ type KeepServiceList struct {
 // us about a stored block.
 type KeepServiceIndexEntry struct {
        SizedDigest
+       // Time of last write, in nanoseconds since Unix epoch
        Mtime int64
 }
 
@@ -108,6 +109,14 @@ func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry,
                if err != nil {
                        return nil, fmt.Errorf("Malformed index line %q: mtime: %v", line, err)
                }
+               if mtime < 1e12 {
+                       // An old version of keepstore is giving us
+                       // timestamps in seconds instead of
+                       // nanoseconds. (This threshold correctly
+                       // handles all times between 1970-01-02 and
+                       // 33658-09-27.)
+                       mtime = mtime * 1e9
+               }
                entries = append(entries, KeepServiceIndexEntry{
                        SizedDigest: SizedDigest(fields[0]),
                        Mtime:       mtime,
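
The seconds-vs-nanoseconds heuristic can be stated standalone; a sketch of the same normalization, with sample values that are assumptions:

    package main

    import (
        "fmt"
        "time"
    )

    // normalizeMtime applies the same 1e12 threshold as the index
    // parser: values below it are taken as seconds, above as nanoseconds.
    func normalizeMtime(mtime int64) time.Time {
        if mtime < 1e12 {
            mtime = mtime * 1e9
        }
        return time.Unix(0, mtime)
    }

    func main() {
        fmt.Println(normalizeMtime(1475500000))          // seconds-era keepstore
        fmt.Println(normalizeMtime(1475500000000000000)) // nanoseconds
    }
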
index 8cdfa484bd96df3f479c7a40c8bb6692eddc1da5..5f24c7107d72798621b4a3110030981297489fc9 100644 (file)
@@ -15,6 +15,8 @@ import (
        "regexp"
        "strings"
        "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
 )
 
 type StringMatcher func(string) bool
@@ -86,6 +88,12 @@ type ArvadosClient struct {
        // the client is outside the cluster.
        External bool
 
+       // Base URIs of Keep services, e.g., {"https://host1:8443",
+       // "https://host2:8443"}.  If this is nil, Keep clients will
+       // use the arvados.v1.keep_services.accessible API to discover
+       // available services.
+       KeepServiceURIs []string
+
        // Discovery document
        DiscoveryDoc Dict
 
@@ -95,15 +103,34 @@ type ArvadosClient struct {
        Retries int
 }
 
-// Create a new ArvadosClient, initialized with standard Arvados environment
-// variables ARVADOS_API_HOST, ARVADOS_API_TOKEN, and (optionally)
-// ARVADOS_API_HOST_INSECURE.
-func MakeArvadosClient() (ac ArvadosClient, err error) {
+// New returns an ArvadosClient using the given arvados.Client
+// configuration. This is useful for callers who load arvados.Client
+// fields from configuration files but still need to use the
+// arvadosclient.ArvadosClient package.
+func New(c *arvados.Client) (*ArvadosClient, error) {
+       return &ArvadosClient{
+               Scheme:      "https",
+               ApiServer:   c.APIHost,
+               ApiToken:    c.AuthToken,
+               ApiInsecure: c.Insecure,
+               Client: &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: &tls.Config{InsecureSkipVerify: c.Insecure}}},
+               External:          false,
+               Retries:           2,
+               lastClosedIdlesAt: time.Now(),
+       }, nil
+}
+
+// MakeArvadosClient creates a new ArvadosClient using the standard
+// environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,
+// ARVADOS_API_HOST_INSECURE, ARVADOS_EXTERNAL_CLIENT, and
+// ARVADOS_KEEP_SERVICES.
+func MakeArvadosClient() (ac *ArvadosClient, err error) {
        var matchTrue = regexp.MustCompile("^(?i:1|yes|true)$")
        insecure := matchTrue.MatchString(os.Getenv("ARVADOS_API_HOST_INSECURE"))
        external := matchTrue.MatchString(os.Getenv("ARVADOS_EXTERNAL_CLIENT"))
 
-       ac = ArvadosClient{
+       ac = &ArvadosClient{
                Scheme:      "https",
                ApiServer:   os.Getenv("ARVADOS_API_HOST"),
                ApiToken:    os.Getenv("ARVADOS_API_TOKEN"),
@@ -113,6 +140,18 @@ func MakeArvadosClient() (ac ArvadosClient, err error) {
                External: external,
                Retries:  2}
 
+       for _, s := range strings.Split(os.Getenv("ARVADOS_KEEP_SERVICES"), " ") {
+               if s == "" {
+                       continue
+               }
+               if u, err := url.Parse(s); err != nil {
+                       return ac, fmt.Errorf("ARVADOS_KEEP_SERVICES: %q: %s", s, err)
+               } else if !u.IsAbs() {
+                       return ac, fmt.Errorf("ARVADOS_KEEP_SERVICES: %q: not an absolute URI", s)
+               }
+               ac.KeepServiceURIs = append(ac.KeepServiceURIs, s)
+       }
+
        if ac.ApiServer == "" {
                return ac, MissingArvadosApiHost
        }
@@ -127,7 +166,7 @@ func MakeArvadosClient() (ac ArvadosClient, err error) {
 
 // CallRaw is the same as Call() but returns a Reader that reads the
 // response body, instead of taking an output object.
-func (c ArvadosClient) CallRaw(method string, resourceType string, uuid string, action string, parameters Dict) (reader io.ReadCloser, err error) {
+func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string, action string, parameters Dict) (reader io.ReadCloser, err error) {
        scheme := c.Scheme
        if scheme == "" {
                scheme = "https"
@@ -273,7 +312,7 @@ func newAPIServerError(ServerAddress string, resp *http.Response) APIServerError
 // Returns a non-nil error if an error occurs making the API call, the
 // API responds with a non-successful HTTP status, or an error occurs
 // parsing the response body.
-func (c ArvadosClient) Call(method, resourceType, uuid, action string, parameters Dict, output interface{}) error {
+func (c *ArvadosClient) Call(method, resourceType, uuid, action string, parameters Dict, output interface{}) error {
        reader, err := c.CallRaw(method, resourceType, uuid, action, parameters)
        if reader != nil {
                defer reader.Close()
@@ -292,22 +331,22 @@ func (c ArvadosClient) Call(method, resourceType, uuid, action string, parameter
 }
 
 // Create a new resource. See Call for argument descriptions.
-func (c ArvadosClient) Create(resourceType string, parameters Dict, output interface{}) error {
+func (c *ArvadosClient) Create(resourceType string, parameters Dict, output interface{}) error {
        return c.Call("POST", resourceType, "", "", parameters, output)
 }
 
 // Delete a resource. See Call for argument descriptions.
-func (c ArvadosClient) Delete(resource string, uuid string, parameters Dict, output interface{}) (err error) {
+func (c *ArvadosClient) Delete(resource string, uuid string, parameters Dict, output interface{}) (err error) {
        return c.Call("DELETE", resource, uuid, "", parameters, output)
 }
 
 // Modify attributes of a resource. See Call for argument descriptions.
-func (c ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
+func (c *ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
        return c.Call("PUT", resourceType, uuid, "", parameters, output)
 }
 
 // Get a resource. See Call for argument descriptions.
-func (c ArvadosClient) Get(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
+func (c *ArvadosClient) Get(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
        if !UUIDMatch(uuid) && !(resourceType == "collections" && PDHMatch(uuid)) {
                // No object has uuid == "": there is no need to make
                // an API call. Furthermore, the HTTP request for such
@@ -319,7 +358,7 @@ func (c ArvadosClient) Get(resourceType string, uuid string, parameters Dict, ou
 }
 
 // List resources of a given type. See Call for argument descriptions.
-func (c ArvadosClient) List(resource string, parameters Dict, output interface{}) (err error) {
+func (c *ArvadosClient) List(resource string, parameters Dict, output interface{}) (err error) {
        return c.Call("GET", resource, "", "", parameters, output)
 }
 
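
Since the methods now take pointer receivers and New builds an ArvadosClient from an arvados.Client, a typical call path might look like the following sketch (env-based configuration assumed; the users.current call is just an example):

    package main

    import (
        "log"

        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
    )

    func main() {
        ac, err := arvadosclient.New(arvados.NewClientFromEnv())
        if err != nil {
            log.Fatal(err)
        }
        var user map[string]interface{}
        // Pointer receivers mean ac can be shared and passed around
        // without copying the client state.
        if err := ac.Call("GET", "users", "", "current", nil, &user); err != nil {
            log.Fatal(err)
        }
        log.Printf("current user: %v", user["uuid"])
    }
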
index 87b67c39b67c6f3ee95a165f9cc50f83ba5be3a7..6b3171f7eb9877d4f71fa2e17769c85639bddec4 100644 (file)
@@ -10,22 +10,32 @@ import (
 // credentials. See arvados-git-httpd for an example, and sync.Pool
 // for more information about garbage collection.
 type ClientPool struct {
-       sync.Pool
-       lastErr error
+       // Initialize new clients by copying this one.
+       Prototype *ArvadosClient
+
+       pool      *sync.Pool
+       lastErr   error
+       setupOnce sync.Once
 }
 
-// MakeClientPool returns a new empty ClientPool.
+// MakeClientPool returns a new empty ClientPool, using environment
+// variables to initialize the prototype.
 func MakeClientPool() *ClientPool {
-       p := &ClientPool{}
-       p.Pool = sync.Pool{New: func() interface{} {
-               arv, err := MakeArvadosClient()
-               if err != nil {
-                       p.lastErr = err
+       proto, err := MakeArvadosClient()
+       return &ClientPool{
+               Prototype: proto,
+               lastErr:   err,
+       }
+}
+
+func (p *ClientPool) setup() {
+       p.pool = &sync.Pool{New: func() interface{} {
+               if p.lastErr != nil {
                        return nil
                }
-               return &arv
+               c := *p.Prototype
+               return &c
        }}
-       return p
 }
 
 // Err returns the error that was encountered last time Get returned
@@ -39,7 +49,8 @@ func (p *ClientPool) Err() error {
 // (including its ApiToken) will be just as it was when it was Put
 // back in the pool.
 func (p *ClientPool) Get() *ArvadosClient {
-       c, ok := p.Pool.Get().(*ArvadosClient)
+       p.setupOnce.Do(p.setup)
+       c, ok := p.pool.Get().(*ArvadosClient)
        if !ok {
                return nil
        }
@@ -48,5 +59,6 @@ func (p *ClientPool) Get() *ArvadosClient {
 
 // Put puts an ArvadosClient back in the pool.
 func (p *ClientPool) Put(c *ArvadosClient) {
-       p.Pool.Put(c)
+       p.setupOnce.Do(p.setup)
+       p.pool.Put(c)
 }
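
Usage is unchanged from the sync.Pool embedding this replaces; a short sketch:

    package main

    import (
        "log"

        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
    )

    func main() {
        pool := arvadosclient.MakeClientPool()
        ac := pool.Get()
        if ac == nil {
            log.Fatal(pool.Err())
        }
        // The client (including its ApiToken) goes back into the pool
        // exactly as it is here.
        defer pool.Put(ac)
        // ... use ac for one request's worth of work ...
    }
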
index 84a3bff06c0f09e3d326925d89b30ef4deaf0804..ebb5992eb43669ce4a7dd1726391aff3648da3a1 100644 (file)
@@ -18,8 +18,8 @@ const (
        Dispatch1AuthUUID = "zzzzz-gj3su-k9dvestay1plssr"
 )
 
-// A valid manifest designed to test various edge cases and parsing
-// requirements
+// PathologicalManifest is a valid manifest designed to test
+// various edge cases and parsing requirements.
 const PathologicalManifest = ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero@0 0:1:f 1:0:zero@1 1:4:ooba 4:0:zero@4 5:1:r 5:4:rbaz 9:0:zero@9\n" +
        "./overlapReverse acbd18db4cc2f85cedef654fccc4a4d8+3 acbd18db4cc2f85cedef654fccc4a4d8+3 5:1:o 4:2:oo 2:4:ofoo\n" +
        "./segmented acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:1:frob 5:1:frob 1:1:frob 1:2:oof 0:1:oof 5:0:frob 3:1:frob\n" +
@@ -37,4 +37,5 @@ var (
        MD5CollisionMD5 = "cee9a457e790cf20d4bdaa6d69f01e41"
 )
 
+// BlobSigningKey is the blob signing key used by the test servers.
 const BlobSigningKey = "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc"
index c61b68b319fbe50b5e71ea97e5751da57af687d9..d3b48ea9fb738fb897ce2ed0926dc619c4522bbf 100644 (file)
@@ -3,21 +3,35 @@ package arvadostest
 import (
        "bufio"
        "bytes"
+       "fmt"
+       "io/ioutil"
        "log"
        "os"
        "os/exec"
+       "path"
        "strconv"
        "strings"
 )
 
 var authSettings = make(map[string]string)
 
+// ResetEnv restores the environment variables recorded in authSettings.
 func ResetEnv() {
        for k, v := range authSettings {
                os.Setenv(k, v)
        }
 }
 
+// APIHost returns the address:port of the current test server.
+func APIHost() string {
+       h := authSettings["ARVADOS_API_HOST"]
+       if h == "" {
+               log.Fatal("arvadostest.APIHost() was called but authSettings is not populated")
+       }
+       return h
+}
+
+// ParseAuthSettings parses auth settings from the given script output
+// and records them for ResetEnv.
 func ParseAuthSettings(authScript []byte) {
        scanner := bufio.NewScanner(bytes.NewReader(authScript))
        for scanner.Scan() {
@@ -36,7 +50,7 @@ func ParseAuthSettings(authScript []byte) {
        log.Printf("authSettings: %v", authSettings)
 }
 
-var pythonTestDir string = ""
+var pythonTestDir string
 
 func chdirToPythonTests() {
        if pythonTestDir != "" {
@@ -59,6 +73,7 @@ func chdirToPythonTests() {
        }
 }
 
+// StartAPI starts the test API server.
 func StartAPI() {
        cwd, _ := os.Getwd()
        defer os.Chdir(cwd)
@@ -76,6 +91,7 @@ func StartAPI() {
        ResetEnv()
 }
 
+// StopAPI stops the test API server.
 func StopAPI() {
        cwd, _ := os.Getwd()
        defer os.Chdir(cwd)
@@ -132,3 +148,24 @@ func bgRun(cmd *exec.Cmd) {
                log.Fatalf("%+v: %s", cmd.Args, err)
        }
 }
+
+// CreateBadPath creates a temporary directory and returns a path to a
+// nonexistent "bad" entry inside it, guaranteeing that the returned
+// path does not exist.
+func CreateBadPath() (badpath string, err error) {
+       tempdir, err := ioutil.TempDir("", "bad")
+       if err != nil {
+               return "", fmt.Errorf("Could not create temporary directory for bad path: %v", err)
+       }
+       badpath = path.Join(tempdir, "bad")
+       return badpath, nil
+}
+
+// DestroyBadPath removes the temporary directory created by a previous CreateBadPath call.
+func DestroyBadPath(badpath string) error {
+       tempdir := path.Join(badpath, "..")
+       err := os.Remove(tempdir)
+       if err != nil {
+               return fmt.Errorf("Could not remove bad path temporary directory %v: %v", tempdir, err)
+       }
+       return nil
+}
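
A sketch of how a test might use the pair; the log output is illustrative:

    package main

    import (
        "log"

        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
    )

    func main() {
        badpath, err := arvadostest.CreateBadPath()
        if err != nil {
            log.Fatal(err)
        }
        defer arvadostest.DestroyBadPath(badpath)
        // badpath is guaranteed not to exist, e.g. for exercising
        // "no such file or directory" error handling.
        log.Println("nonexistent path:", badpath)
    }
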
diff --git a/sdk/go/config/load.go b/sdk/go/config/load.go
new file mode 100644 (file)
index 0000000..9c65d65
--- /dev/null
@@ -0,0 +1,24 @@
+package config
+
+import (
+       "fmt"
+       "io/ioutil"
+
+       "github.com/ghodss/yaml"
+)
+
+// LoadFile loads configuration from the file given by configPath and
+// decodes it into cfg.
+//
+// YAML and JSON formats are supported.
+func LoadFile(cfg interface{}, configPath string) error {
+       buf, err := ioutil.ReadFile(configPath)
+       if err != nil {
+               return err
+       }
+       err = yaml.Unmarshal(buf, cfg)
+       if err != nil {
+               return fmt.Errorf("Error decoding config %q: %v", configPath, err)
+       }
+       return nil
+}
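
A sketch of loading a config file into a caller-defined struct; the struct fields and file path are hypothetical:

    package main

    import (
        "log"

        "git.curoverse.com/arvados.git/sdk/go/config"
    )

    // exampleConfig's fields are assumptions; LoadFile decodes into
    // whatever struct the caller provides.
    type exampleConfig struct {
        Listen string
        Debug  bool
    }

    func main() {
        var cfg exampleConfig
        if err := config.LoadFile(&cfg, "/etc/arvados/example.yml"); err != nil {
            log.Fatal(err)
        }
        log.Printf("listen=%q debug=%v", cfg.Listen, cfg.Debug)
    }
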
index 14c75afff282cbfd6fc389f0d81678cadb502260..5c3d65c56144bab68e322b1040c0fb846e30ddaa 100644 (file)
@@ -20,6 +20,7 @@ type TaskDef struct {
        Env                map[string]string `json:"task.env"`
        Stdin              string            `json:"task.stdin"`
        Stdout             string            `json:"task.stdout"`
+       Stderr             string            `json:"task.stderr"`
        Vwd                map[string]string `json:"task.vwd"`
        SuccessCodes       []int             `json:"task.successCodes"`
        PermanentFailCodes []int             `json:"task.permanentFailCodes"`
@@ -80,13 +81,13 @@ func checkOutputFilename(outdir, fn string) error {
        return nil
 }
 
-func setupCommand(cmd *exec.Cmd, taskp TaskDef, outdir string, replacements map[string]string) (stdin, stdout string, err error) {
+func setupCommand(cmd *exec.Cmd, taskp TaskDef, outdir string, replacements map[string]string) (stdin, stdout, stderr string, err error) {
        if taskp.Vwd != nil {
                for k, v := range taskp.Vwd {
                        v = substitute(v, replacements)
                        err = checkOutputFilename(outdir, k)
                        if err != nil {
-                               return "", "", err
+                               return "", "", "", err
                        }
                        os.Symlink(v, outdir+"/"+k)
                }
@@ -97,26 +98,39 @@ func setupCommand(cmd *exec.Cmd, taskp TaskDef, outdir string, replacements map[
                stdin = substitute(taskp.Stdin, replacements)
                cmd.Stdin, err = os.Open(stdin)
                if err != nil {
-                       return "", "", err
+                       return "", "", "", err
                }
        }
 
        if taskp.Stdout != "" {
                err = checkOutputFilename(outdir, taskp.Stdout)
                if err != nil {
-                       return "", "", err
+                       return "", "", "", err
                }
                // Set up stdout redirection
                stdout = outdir + "/" + taskp.Stdout
                cmd.Stdout, err = os.Create(stdout)
                if err != nil {
-                       return "", "", err
+                       return "", "", "", err
                }
        } else {
                cmd.Stdout = os.Stdout
        }
 
-       cmd.Stderr = os.Stderr
+       if taskp.Stderr != "" {
+               err = checkOutputFilename(outdir, taskp.Stderr)
+               if err != nil {
+                       return "", "", "", err
+               }
+               // Set up stderr redirection
+               stderr = outdir + "/" + taskp.Stderr
+               cmd.Stderr, err = os.Create(stderr)
+               if err != nil {
+                       return "", "", "", err
+               }
+       } else {
+               cmd.Stderr = os.Stderr
+       }
 
        if taskp.Env != nil {
                // Set up subprocess environment
@@ -126,7 +140,7 @@ func setupCommand(cmd *exec.Cmd, taskp TaskDef, outdir string, replacements map[
                        cmd.Env = append(cmd.Env, k+"="+v)
                }
        }
-       return stdin, stdout, nil
+       return stdin, stdout, stderr, nil
 }
 
 // Set up signal handlers.  Go sends signal notifications to a "signal
@@ -227,8 +241,8 @@ func runner(api IArvadosClient,
 
        cmd.Dir = outdir
 
-       var stdin, stdout string
-       stdin, stdout, err = setupCommand(cmd, taskp, outdir, replacements)
+       var stdin, stdout, stderr string
+       stdin, stdout, stderr, err = setupCommand(cmd, taskp, outdir, replacements)
        if err != nil {
                return err
        }
@@ -240,7 +254,10 @@ func runner(api IArvadosClient,
        if stdout != "" {
                stdout = " > " + stdout
        }
-       log.Printf("Running %v%v%v", cmd.Args, stdin, stdout)
+       if stderr != "" {
+               stderr = " 2> " + stderr
+       }
+       log.Printf("Running %v%v%v%v", cmd.Args, stdin, stdout, stderr)
 
        var caughtSignal os.Signal
        sigChan := setupSignals(cmd)
@@ -362,7 +379,7 @@ func main() {
        }
 
        var kc IKeepClient
-       kc, err = keepclient.MakeKeepClient(&api)
+       kc, err = keepclient.MakeKeepClient(api)
        if err != nil {
                log.Fatal(err)
        }
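
A sketch of a task definition using the new task.stderr redirection, with a trimmed local copy of TaskDef (the real type lives in crunchrunner's package main; the command and filenames are hypothetical):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // TaskDef is a trimmed local copy of crunchrunner's task
    // definition, showing only the I/O redirection fields.
    type TaskDef struct {
        Command []string `json:"command"`
        Stdin   string   `json:"task.stdin,omitempty"`
        Stdout  string   `json:"task.stdout,omitempty"`
        Stderr  string   `json:"task.stderr,omitempty"`
    }

    func main() {
        t := TaskDef{
            Command: []string{"grep", "-v", "noise"},
            Stdin:   "$(task.keep)/input.txt",
            Stdout:  "kept.txt",
            Stderr:  "grep.log", // new: stderr is captured in the output dir
        }
        buf, _ := json.Marshal(t)
        fmt.Println(string(buf))
    }
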
index 52d5c1a64e5181fe78aecf891057dc89b18bc836..9805412d13fd5fb53d6809eee9a7e9d379ef74eb 100644 (file)
@@ -53,7 +53,7 @@ func (s *TestSuite) TestSimpleRun(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"echo", "foo"}}}}},
                Task{Sequence: 0})
        c.Check(err, IsNil)
@@ -89,8 +89,8 @@ func (s *TestSuite) TestSimpleRunSubtask(c *C) {
                tmpdir,
                "",
                Job{Script_parameters: Tasks{[]TaskDef{
-                       TaskDef{Command: []string{"echo", "bar"}},
-                       TaskDef{Command: []string{"echo", "foo"}}}}},
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
                Task{Parameters: TaskDef{
                        Command: []string{"echo", "foo"},
                        Stdout:  "output.txt"},
@@ -118,7 +118,7 @@ func (s *TestSuite) TestRedirect(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat"},
                        Stdout:  "output.txt",
                        Stdin:   tmpfile.Name()}}}},
@@ -140,7 +140,7 @@ func (s *TestSuite) TestEnv(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $BAR"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"BAR": "foo"}}}}},
@@ -161,7 +161,7 @@ func (s *TestSuite) TestEnvSubstitute(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "foo\n",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $BAR"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"BAR": "$(task.keep)"}}}}},
@@ -182,7 +182,7 @@ func (s *TestSuite) TestEnvReplace(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $PATH"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"PATH": "foo"}}}}},
@@ -211,12 +211,12 @@ func (t SubtaskTestClient) Update(resourceType string, uuid string, parameters a
 func (s *TestSuite) TestScheduleSubtask(c *C) {
 
        api := SubtaskTestClient{c, []Task{
-               Task{Job_uuid: "zzzz-8i9sb-111111111111111",
+               {Job_uuid: "zzzz-8i9sb-111111111111111",
                        Created_by_job_task_uuid: "zzzz-ot0gb-111111111111111",
                        Sequence:                 1,
                        Parameters: TaskDef{
                                Command: []string{"echo", "bar"}}},
-               Task{Job_uuid: "zzzz-8i9sb-111111111111111",
+               {Job_uuid: "zzzz-8i9sb-111111111111111",
                        Created_by_job_task_uuid: "zzzz-ot0gb-111111111111111",
                        Sequence:                 1,
                        Parameters: TaskDef{
@@ -234,8 +234,8 @@ func (s *TestSuite) TestScheduleSubtask(c *C) {
                tmpdir,
                "",
                Job{Script_parameters: Tasks{[]TaskDef{
-                       TaskDef{Command: []string{"echo", "bar"}},
-                       TaskDef{Command: []string{"echo", "foo"}}}}},
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
                Task{Sequence: 0})
        c.Check(err, IsNil)
 
@@ -252,7 +252,7 @@ func (s *TestSuite) TestRunFail(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "exit 1"}}}}},
                Task{Sequence: 0})
        c.Check(err, FitsTypeOf, PermFail{})
@@ -269,7 +269,7 @@ func (s *TestSuite) TestRunSuccessCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:      []string{"/bin/sh", "-c", "exit 1"},
                        SuccessCodes: []int{0, 1}}}}},
                Task{Sequence: 0})
@@ -287,7 +287,7 @@ func (s *TestSuite) TestRunFailCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:            []string{"/bin/sh", "-c", "exit 0"},
                        PermanentFailCodes: []int{0, 1}}}}},
                Task{Sequence: 0})
@@ -305,7 +305,7 @@ func (s *TestSuite) TestRunTempFailCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:            []string{"/bin/sh", "-c", "exit 1"},
                        TemporaryFailCodes: []int{1}}}}},
                Task{Sequence: 0})
@@ -329,7 +329,7 @@ func (s *TestSuite) TestVwd(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"ls", "output.txt"},
                        Vwd: map[string]string{
                                "output.txt": tmpfile.Name()}}}}},
@@ -361,7 +361,7 @@ func (s *TestSuite) TestSubstitutionStdin(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                keepmount,
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat"},
                        Stdout:  "output.txt",
                        Stdin:   "$(task.keep)/file1.txt"}}}},
@@ -389,7 +389,7 @@ func (s *TestSuite) TestSubstitutionCommandLine(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                keepmount,
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat", "$(task.keep)/file1.txt"},
                        Stdout:  "output.txt"}}}},
                Task{Sequence: 0})
@@ -417,7 +417,7 @@ func (s *TestSuite) TestSignal(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"sleep", "4"}}}}},
                Task{Sequence: 0})
        c.Check(err, FitsTypeOf, PermFail{})
@@ -437,7 +437,7 @@ func (s *TestSuite) TestQuoting(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"echo", "foo"},
                        Stdout:  "s ub:dir/:e vi\nl"}}}},
                Task{Sequence: 0})
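
Most of the test churn in this file is mechanical `gofmt -s` cleanup: inside a composite literal such as `[]TaskDef{...}`, repeating the element type on every element is redundant, so `TaskDef{...}` becomes `{...}`. A tiny illustration:

    package main

    import "fmt"

    type TaskDef struct {
        Command []string
    }

    func main() {
        // Old form: element type spelled out on each element.
        verbose := []TaskDef{TaskDef{Command: []string{"echo", "foo"}}}
        // Simplified form, as rewritten above: the type is implied.
        concise := []TaskDef{{Command: []string{"echo", "foo"}}}
        fmt.Println(verbose[0].Command[1] == concise[0].Command[1]) // true
    }
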
index ce536de47a07be129d7f9e1f9ed730bdf30f6918..4987c01055203e262b2534e2f001b200c7de21eb 100644 (file)
@@ -25,7 +25,7 @@ const (
 // Dispatcher holds the state of the dispatcher
 type Dispatcher struct {
        // The Arvados client
-       Arv arvadosclient.ArvadosClient
+       Arv *arvadosclient.ArvadosClient
 
        // When a new queued container appears and is either already owned by
        // this dispatcher or is successfully locked, the dispatcher will call
@@ -178,7 +178,7 @@ func (dispatcher *Dispatcher) handleUpdate(container arvados.Container) {
 
        if container.State == Queued && container.Priority > 0 {
                // Try to take the lock
-               if err := dispatcher.UpdateState(container.UUID, Locked); err != nil {
+               if err := dispatcher.Lock(container.UUID); err != nil {
                        return
                }
                container.State = Locked
@@ -203,6 +203,24 @@ func (dispatcher *Dispatcher) UpdateState(uuid string, newState arvados.Containe
        return err
 }
 
+// Lock makes the lock API call which updates the state of a container to Locked.
+func (dispatcher *Dispatcher) Lock(uuid string) error {
+       err := dispatcher.Arv.Call("POST", "containers", uuid, "lock", nil, nil)
+       if err != nil {
+               log.Printf("Error locking container %s: %q", uuid, err)
+       }
+       return err
+}
+
+// Unlock makes the unlock API call which updates the state of a container to Queued.
+func (dispatcher *Dispatcher) Unlock(uuid string) error {
+       err := dispatcher.Arv.Call("POST", "containers", uuid, "unlock", nil, nil)
+       if err != nil {
+               log.Printf("Error unlocking container %s: %q", uuid, err)
+       }
+       return err
+}
+
 // RunDispatcher runs the main loop of the dispatcher until receiving a message
 // on the dispatcher.DoneProcessing channel.  It also installs a signal handler
 // to terminate gracefully on SIGINT, SIGTERM or SIGQUIT.
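
With the new endpoints, taking ownership of a container is a POST to containers/{uuid}/lock rather than a generic state update, and unlock hands it back to the queue. A hedged sketch of how a caller might pair them — the tryRun helper and fakeDispatcher below are illustrative, not part of the SDK:

    package main

    import (
        "errors"
        "fmt"
    )

    // locker captures just the two calls this sketch needs; the real
    // Dispatcher above satisfies it via its Lock/Unlock methods.
    type locker interface {
        Lock(uuid string) error
        Unlock(uuid string) error
    }

    // tryRun locks a container, runs it, and returns it to the queue
    // on failure. Illustrative only.
    func tryRun(d locker, uuid string, run func(string) error) error {
        if err := d.Lock(uuid); err != nil {
            return err // another dispatcher probably won the race
        }
        if err := run(uuid); err != nil {
            d.Unlock(uuid)
            return err
        }
        return nil
    }

    type fakeDispatcher struct{}

    func (fakeDispatcher) Lock(string) error   { return nil }
    func (fakeDispatcher) Unlock(string) error { return nil }

    func main() {
        err := tryRun(fakeDispatcher{}, "zzzzz-dz642-000000000000000", func(string) error {
            return errors.New("node fell over")
        })
        fmt.Println(err) // the container was unlocked and requeued
    }
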
index bed60f499562a36c4585018932860fe35df34701..33bb58710e0c94e1cfa562b8bd1c56afff62a4d7 100644 (file)
@@ -243,7 +243,7 @@ GET:
        // In case we exited the above loop early: before returning,
        // drain the toGet channel so its sender doesn't sit around
        // blocking forever.
-       for _ = range r.toGet {
+       for range r.toGet {
        }
 }
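
The one-line change above is the Go 1.4+ spelling of a variable-free range clause; the loop's only job is to drain the channel. For example:

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 3)
        for i := 0; i < 3; i++ {
            ch <- i
        }
        close(ch)
        // Drain the channel so its sender never blocks; no loop
        // variable is needed, so none is declared.
        n := 0
        for range ch {
            n++
        }
        fmt.Println("drained", n, "items") // drained 3 items
    }
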
 
index 2cc23738855dfeab3cd8ab2ef33cb27055a35fa1..be4f386ff229f7227ccdb03a722b9c3eeb63f8ff 100644 (file)
@@ -19,7 +19,7 @@ import (
 var _ = check.Suite(&CollectionReaderUnit{})
 
 type CollectionReaderUnit struct {
-       arv     arvadosclient.ArvadosClient
+       arv     *arvadosclient.ArvadosClient
        kc      *KeepClient
        handler SuccessHandler
 }
@@ -30,7 +30,7 @@ func (s *CollectionReaderUnit) SetUpTest(c *check.C) {
        c.Assert(err, check.IsNil)
        s.arv.ApiToken = arvadostest.ActiveToken
 
-       s.kc, err = MakeKeepClient(&s.arv)
+       s.kc, err = MakeKeepClient(s.arv)
        c.Assert(err, check.IsNil)
 
        s.handler = SuccessHandler{
index f039c2181055543bfb0a85ee14710ddd0cf5d1ad..2892031817f62d8069742e7463d947a3acb97fbc 100644 (file)
@@ -12,16 +12,32 @@ import (
        "time"
 )
 
-// DiscoverKeepServers gets list of available keep services from api server
+// DiscoverKeepServers gets the list of available keep services from the
+// API server.
+//
+// If a list of services is provided in the arvadosclient (e.g., from
+// an environment variable or local config), that list is used
+// instead.
 func (this *KeepClient) DiscoverKeepServers() error {
-       var list svcList
+       if this.Arvados.KeepServiceURIs != nil {
+               this.foundNonDiskSvc = true
+               this.replicasPerService = 0
+               this.setClientSettingsNonDisk()
+               roots := make(map[string]string)
+               for i, uri := range this.Arvados.KeepServiceURIs {
+                       roots[fmt.Sprintf("00000-bi6l4-%015d", i)] = uri
+               }
+               this.SetServiceRoots(roots, roots, roots)
+               return nil
+       }
 
-       // Get keep services from api server
+       // ArvadosClient did not provide a services list. Ask API
+       // server for a list of accessible services.
+       var list svcList
        err := this.Arvados.Call("GET", "keep_services", "", "accessible", nil, &list)
        if err != nil {
                return err
        }
-
        return this.loadKeepServers(list)
 }
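
When KeepServiceURIs is set (typically via ARVADOS_KEEP_SERVICES), discovery is skipped entirely and each configured URI is keyed by a synthesized proxy UUID. A self-contained sketch of just that root-building step; the helper name is illustrative:

    package main

    import "fmt"

    // buildStaticRoots mimics the loop above: each configured URI is
    // keyed by a synthetic "00000-bi6l4-..." service UUID. The real
    // method also switches the client into non-disk-service mode.
    func buildStaticRoots(uris []string) map[string]string {
        roots := make(map[string]string)
        for i, uri := range uris {
            roots[fmt.Sprintf("00000-bi6l4-%015d", i)] = uri
        }
        return roots
    }

    func main() {
        for uuid, uri := range buildStaticRoots([]string{
            "https://keep0.example:25107",
            "https://keep1.example:25107",
        }) {
            fmt.Println(uuid, "=>", uri)
        }
    }
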
 
index 43a984e8c42f8696e116c15a2471325d20d6e332..379d44c820aec0e0b88d84b3c52e5fd316480844 100644 (file)
@@ -1,21 +1,65 @@
 package keepclient
 
 import (
+       "crypto/md5"
        "fmt"
+       "gopkg.in/check.v1"
+       "net/http"
+       "os"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
 )
 
-func ExampleRefreshServices() {
+func ExampleKeepClient_RefreshServices() {
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                panic(err)
        }
-       kc, err := MakeKeepClient(&arv)
+       kc, err := MakeKeepClient(arv)
        if err != nil {
                panic(err)
        }
        go kc.RefreshServices(5*time.Minute, 3*time.Second)
        fmt.Printf("LocalRoots: %#v\n", kc.LocalRoots())
 }
+
+func (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {
+       defer os.Setenv("ARVADOS_KEEP_SERVICES", "")
+
+       hash := fmt.Sprintf("%x+3", md5.Sum([]byte("TestOverrideDiscovery")))
+       st := StubGetHandler{
+               c,
+               hash,
+               arvadostest.ActiveToken,
+               http.StatusOK,
+               []byte("TestOverrideDiscovery")}
+       ks := RunSomeFakeKeepServers(st, 2)
+
+       os.Setenv("ARVADOS_KEEP_SERVICES", "")
+       arv1, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       arv1.ApiToken = arvadostest.ActiveToken
+
+       os.Setenv("ARVADOS_KEEP_SERVICES", ks[0].url+"  "+ks[1].url+" ")
+       arv2, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       arv2.ApiToken = arvadostest.ActiveToken
+
+       // ARVADOS_KEEP_SERVICES was empty when we created arv1, but
+       // it pointed to our stub servers when we created
+       // arv2. Regardless of what it's set to now, a keepclient for
+       // arv2 should use our stub servers, but one created for arv1
+       // should not.
+
+       kc1, err := MakeKeepClient(arv1)
+       c.Assert(err, check.IsNil)
+       kc2, err := MakeKeepClient(arv2)
+       c.Assert(err, check.IsNil)
+
+       _, _, _, err = kc1.Get(hash)
+       c.Check(err, check.NotNil)
+       _, _, _, err = kc2.Get(hash)
+       c.Check(err, check.IsNil)
+}
index 26aa7177e05c3aa64d3e956f183379614093d341..58f3ffb8348ff7b5f9d9588e6455ae7c9e9ff18a 100644 (file)
@@ -352,7 +352,7 @@ func (kc *KeepClient) WritableLocalRoots() map[string]string {
 // caller can reuse/modify them after SetServiceRoots returns, but
 // they should not be modified by any other goroutine while
 // SetServiceRoots is running.
-func (kc *KeepClient) SetServiceRoots(newLocals, newWritableLocals map[string]string, newGateways map[string]string) {
+func (kc *KeepClient) SetServiceRoots(newLocals, newWritableLocals, newGateways map[string]string) {
        locals := make(map[string]string)
        for uuid, root := range newLocals {
                locals[uuid] = root
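
Two API-surface cleanups meet here: SetServiceRoots collapses consecutive parameters of the same type into one declaration, and, throughout this commit, MakeArvadosClient/MakeKeepClient traffic in *ArvadosClient, which is why every `MakeKeepClient(&arv)` call site below drops the `&`. The new calling convention, as a sketch that assumes the usual ARVADOS_* environment variables are set:

    package main

    import (
        "fmt"
        "log"

        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
    )

    func main() {
        // MakeArvadosClient now returns a *ArvadosClient, so it is
        // handed to MakeKeepClient directly rather than via &arv.
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
            log.Fatal(err)
        }
        kc, err := keepclient.MakeKeepClient(arv)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("LocalRoots: %v\n", kc.LocalRoots())
    }
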
index 4ba1d7c245c272ae5a017e513eca4d8adcd292ee..bd36d9d5e12f43b93567a5b30bd5a75c77d1e921 100644 (file)
@@ -62,7 +62,7 @@ func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, Equals, nil)
 
-       kc, err := MakeKeepClient(&arv)
+       kc, err := MakeKeepClient(arv)
 
        c.Assert(err, Equals, nil)
        c.Check(len(kc.LocalRoots()), Equals, 2)
@@ -75,15 +75,15 @@ func (s *ServerRequiredSuite) TestDefaultReplications(c *C) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, Equals, nil)
 
-       kc, err := MakeKeepClient(&arv)
+       kc, err := MakeKeepClient(arv)
        c.Assert(kc.Want_replicas, Equals, 2)
 
        arv.DiscoveryDoc["defaultCollectionReplication"] = 3.0
-       kc, err = MakeKeepClient(&arv)
+       kc, err = MakeKeepClient(arv)
        c.Assert(kc.Want_replicas, Equals, 3)
 
        arv.DiscoveryDoc["defaultCollectionReplication"] = 1.0
-       kc, err = MakeKeepClient(&arv)
+       kc, err = MakeKeepClient(arv)
        c.Assert(kc.Want_replicas, Equals, 1)
 }
 
@@ -125,7 +125,7 @@ func UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,
        arv, _ := arvadosclient.MakeArvadosClient()
        arv.ApiToken = "abc123"
 
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        reader, writer := io.Pipe()
        upload_status := make(chan uploadStatus)
@@ -269,7 +269,7 @@ func (s *StandaloneSuite) TestPutB(c *C) {
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -310,7 +310,7 @@ func (s *StandaloneSuite) TestPutHR(c *C) {
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -361,7 +361,7 @@ func (s *StandaloneSuite) TestPutWithFail(c *C) {
                make(chan string, 1)}
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -419,7 +419,7 @@ func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
                make(chan string, 4)}
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        kc.Retries = 0
@@ -480,7 +480,7 @@ func (s *StandaloneSuite) TestGet(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -504,7 +504,7 @@ func (s *StandaloneSuite) TestGet404(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -524,7 +524,7 @@ func (s *StandaloneSuite) TestGetFail(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
        kc.Retries = 0
@@ -554,7 +554,7 @@ func (s *StandaloneSuite) TestGetFailRetry(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -573,7 +573,7 @@ func (s *StandaloneSuite) TestGetNetError(c *C) {
        hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": "http://localhost:62222"}, nil, nil)
 
@@ -609,7 +609,7 @@ func (s *StandaloneSuite) TestGetWithServiceHint(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(
                map[string]string{"x": ks0.url},
@@ -652,7 +652,7 @@ func (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(
                map[string]string{
@@ -699,7 +699,7 @@ func (s *StandaloneSuite) TestGetWithServiceHintFailoverToLocals(c *C) {
        defer ksGateway.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(
                map[string]string{"zzzzz-bi6l4-keepdisk0000000": ksLocal.url},
@@ -736,7 +736,7 @@ func (s *StandaloneSuite) TestChecksum(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -770,7 +770,7 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
                content}
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        localRoots := make(map[string]string)
        writableLocalRoots := make(map[string]string)
@@ -816,7 +816,7 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
        content := []byte("TestPutGetHead")
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, err := MakeKeepClient(&arv)
+       kc, err := MakeKeepClient(arv)
        c.Assert(err, Equals, nil)
 
        hash := fmt.Sprintf("%x", md5.Sum(content))
@@ -863,7 +863,7 @@ func (s *StandaloneSuite) TestPutProxy(c *C) {
        st := StubProxyHandler{make(chan string, 1)}
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -891,7 +891,7 @@ func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
        st := StubProxyHandler{make(chan string, 1)}
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 3
        arv.ApiToken = "abc123"
@@ -964,7 +964,7 @@ func (s *StandaloneSuite) TestPutBWant2ReplicasWithOnlyOneWritableLocalRoot(c *C
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -1002,7 +1002,7 @@ func (s *StandaloneSuite) TestPutBWithNoWritableLocalRoots(c *C) {
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -1054,7 +1054,7 @@ func (s *StandaloneSuite) TestGetIndexWithNoPrefix(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -1080,7 +1080,7 @@ func (s *StandaloneSuite) TestGetIndexWithPrefix(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -1106,7 +1106,7 @@ func (s *StandaloneSuite) TestGetIndexIncomplete(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -1128,7 +1128,7 @@ func (s *StandaloneSuite) TestGetIndexWithNoSuchServer(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -1148,7 +1148,7 @@ func (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
 
@@ -1186,7 +1186,7 @@ func (s *StandaloneSuite) TestPutBRetry(c *C) {
                        make(chan string, 5)}}
 
        arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(&arv)
+       kc, _ := MakeKeepClient(arv)
 
        kc.Want_replicas = 2
        arv.ApiToken = "abc123"
@@ -1226,7 +1226,7 @@ func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
        defer func() { arv.Delete("keep_services", blobKeepService["uuid"].(string), nil, nil) }()
 
        // Make a keepclient and ensure that the testblobstore is included
-       kc, err := MakeKeepClient(&arv)
+       kc, err := MakeKeepClient(arv)
        c.Assert(err, Equals, nil)
 
        // verify kc.LocalRoots
index b12f512507a71d8b07c08da17d7db29f93228fd2..22447794f1c7f93c4eca6fcdec83d254cbc1740b 100644 (file)
@@ -184,13 +184,13 @@ func (this *KeepClient) putReplicas(
                }()
        }()
 
-       // Desired number of replicas
-       remaining_replicas := this.Want_replicas
+       replicasDone := 0
+       replicasTodo := this.Want_replicas
 
        replicasPerThread := this.replicasPerService
        if replicasPerThread < 1 {
                // unlimited or unknown
-               replicasPerThread = remaining_replicas
+               replicasPerThread = replicasTodo
        }
 
        retriesRemaining := 1 + this.Retries
@@ -200,8 +200,8 @@ func (this *KeepClient) putReplicas(
                retriesRemaining -= 1
                next_server = 0
                retryServers = []string{}
-               for remaining_replicas > 0 {
-                       for active*replicasPerThread < remaining_replicas {
+               for replicasTodo > 0 {
+                       for active*replicasPerThread < replicasTodo {
                                // Start some upload requests
                                if next_server < len(sv) {
                                        DebugPrintf("DEBUG: [%08x] Begin upload %s to %s", requestID, hash, sv[next_server])
@@ -210,14 +210,14 @@ func (this *KeepClient) putReplicas(
                                        active += 1
                                } else {
                                        if active == 0 && retriesRemaining == 0 {
-                                               return locator, (this.Want_replicas - remaining_replicas), InsufficientReplicasError
+                                               return locator, replicasDone, InsufficientReplicasError
                                        } else {
                                                break
                                        }
                                }
                        }
                        DebugPrintf("DEBUG: [%08x] Replicas remaining to write: %v active uploads: %v",
-                               requestID, remaining_replicas, active)
+                               requestID, replicasTodo, active)
 
                        // Now wait for something to happen.
                        if active > 0 {
@@ -226,7 +226,8 @@ func (this *KeepClient) putReplicas(
 
                                if status.statusCode == 200 {
                                        // good news!
-                                       remaining_replicas -= status.replicas_stored
+                                       replicasDone += status.replicas_stored
+                                       replicasTodo -= status.replicas_stored
                                        locator = status.response
                                } else if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
                                        (status.statusCode >= 500 && status.statusCode != 503) {
@@ -242,5 +243,5 @@ func (this *KeepClient) putReplicas(
                sv = retryServers
        }
 
-       return locator, this.Want_replicas, nil
+       return locator, replicasDone, nil
 }
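
The putReplicas fix stops deriving the result from `Want_replicas - remaining`: a dedicated `replicasDone` counter is incremented by whatever each successful PUT reports, so the caller learns how many replicas were actually written, on failure and success alike. A toy model of the two counters (hedged; uploads are simulated, not real PUTs):

    package main

    import "fmt"

    // putReplicas models the bookkeeping above: each successful upload
    // may store more than one replica (e.g. via a proxy), and the
    // caller is told exactly how many were stored.
    func putReplicas(want int, uploads []int) (done int) {
        todo := want
        for _, stored := range uploads {
            if todo <= 0 {
                break
            }
            done += stored
            todo -= stored
        }
        return done
    }

    func main() {
        // A proxy storing two replicas per request satisfies want=3
        // in two uploads and reports four replicas written.
        fmt.Println(putReplicas(3, []int{2, 2})) // 4
    }
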
index 3b2db3a321271495a3965759363e2d723ea9e082..6dd7fb3723ca6fa78f29141b754e52448055a15e 100644 (file)
@@ -37,9 +37,9 @@ const (
 )
 
 type LoggerParams struct {
-       Client          arvadosclient.ArvadosClient // The client we use to write log entries
-       EventTypePrefix string                      // The prefix we use for the event type in the log entry
-       WriteInterval   time.Duration               // Wait at least this long between log writes
+       Client          *arvadosclient.ArvadosClient // The client we use to write log entries
+       EventTypePrefix string                       // The prefix we use for the event type in the log entry
+       WriteInterval   time.Duration                // Wait at least this long between log writes
 }
 
 // A LogMutator is a function which modifies the log entry.
index 6bc86250b4e369548d71e77ed38d07413d0d56a7..ac510de02a65005108661a232b36cf293c34b920 100644 (file)
@@ -6,7 +6,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
 )
 
-func UserIsAdmin(arv arvadosclient.ArvadosClient) (is_admin bool, err error) {
+func UserIsAdmin(arv *arvadosclient.ArvadosClient) (is_admin bool, err error) {
        type user struct {
                IsAdmin bool `json:"is_admin"`
        }
@@ -21,7 +21,7 @@ func UserIsAdmin(arv arvadosclient.ArvadosClient) (is_admin bool, err error) {
 // return
 //   count - the number of items of type resource the api server reports, if no error
 //   err - error accessing the resource, or nil if no error
-func NumberItemsAvailable(client arvadosclient.ArvadosClient, resource string) (count int, err error) {
+func NumberItemsAvailable(client *arvadosclient.ArvadosClient, resource string) (count int, err error) {
        var response struct {
                ItemsAvailable int `json:"items_available"`
        }
index 7c9c0138eae86206bb75a1bd5523ecfdee92f040..33c5755eec7c4eed7a9176175fa151f9a1b07462 100644 (file)
@@ -4,7 +4,7 @@
  *
  */
 
-import org.arvados.sdk.java.Arvados;
+import org.arvados.sdk.Arvados;
 
 import java.io.File;
 import java.util.HashMap;
@@ -77,4 +77,4 @@ public class ArvadosSDKJavaExample {
       }
     }
   }
-}
\ No newline at end of file
+}
index 93ba3aa54040c4d4334be2aa91f55eebae15e9c8..1c928aa4f8a3426a398124f84012e6ef37be990e 100644 (file)
@@ -7,7 +7,7 @@
  * @author radhika
  */
 
-import org.arvados.sdk.java.Arvados;
+import org.arvados.sdk.Arvados;
 
 import java.io.File;
 import java.util.HashMap;
index 5176e8c7d2fcea75f1ae571a12b303c116e71222..ba11c988ce8598fb1dba327b596156f83b554d31 100644 (file)
@@ -1,4 +1,4 @@
-package org.arvados.sdk.java;
+package org.arvados.sdk;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -257,7 +257,6 @@ public class ArvadosTest {
     Map<String, Object> params = new HashMap<String, Object>();
     params.put("pipeline_template", new String(data));
     Map response = arv.call("pipeline_templates", "create", params);
-
     assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
     String uuid = (String)response.get("uuid");
     assertNotNull("Expected uuid for pipeline template", uuid);
@@ -461,4 +460,4 @@ public class ArvadosTest {
     assertTrue("Excected some optional parameters for list method for users", parameters.get("optional").contains("filters"));
   }
 
-}
\ No newline at end of file
+}
index 3caa97246624dd1e615fc6e745294b0c553ec9b8..dc3b080e13c85d62612dd94b9e515e9016bde8e9 100644 (file)
@@ -1,5 +1,4 @@
 {
-  "name":"first pipeline",
   "components":{
     "do_hash":{
       "script":"hash.py",
index c194013d49074e3652216b2070e1eaa59bd0e055..e1046ac9c75c8f3368cde784fac38bfa3feab7bf 100755 (executable)
@@ -37,6 +37,13 @@ setup(name='arvados-pam',
           ('share/pam-configs', ['pam-configs/arvados']),
           ('share/doc/arvados-pam', ['LICENSE-2.0.txt', 'README.rst']),
           ('share/doc/arvados-pam/examples', glob.glob('examples/*')),
+
+          # The arvados build scripts used to install data files to
+          # "/usr/data/*" but now install them to "/usr/*". Here, we
+          # install an extra copy in the old location so existing pam
+          # configs can still work. When old systems have had a chance
+          # to update to the new paths, this line can be removed.
+          ('data/lib/security', ['lib/libpam_arvados.py']),
       ],
       install_requires=[
           'arvados-python-client>=0.1.20150801000000',
index 56d8b239331a8f65e8b5781376719244c03b2905..27aad033ae55523de4fd14ae7bf9cf8a7d2b654f 100644 (file)
@@ -307,7 +307,8 @@ class CollectionWriter(CollectionBase):
     def set_current_stream_name(self, newstreamname):
         if re.search(r'[\t\n]', newstreamname):
             raise errors.AssertionError(
-                "Manifest stream names cannot contain whitespace")
+                "Manifest stream names cannot contain whitespace: '%s'" %
+                (newstreamname))
         self._current_stream_name = '.' if newstreamname=='' else newstreamname
 
     def current_stream_name(self):
@@ -556,7 +557,7 @@ class RichCollectionBase(CollectionBase):
                 if isinstance(item, RichCollectionBase):
                     return item.find_or_create(pathcomponents[1], create_type)
                 else:
-                    raise IOError(errno.ENOTDIR, "Not a directory: '%s'" % pathcomponents[0])
+                    raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
         else:
             return self
 
@@ -582,7 +583,7 @@ class RichCollectionBase(CollectionBase):
                 else:
                     return item
             else:
-                raise IOError(errno.ENOTDIR, "Is not a directory: %s" % pathcomponents[0])
+                raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
 
     @synchronized
     def mkdirs(self, path):
@@ -594,7 +595,7 @@ class RichCollectionBase(CollectionBase):
         """
 
         if self.find(path) != None:
-            raise IOError(errno.EEXIST, "Directory or file exists: '%s'" % path)
+            raise IOError(errno.EEXIST, "Directory or file exists", path)
 
         return self.find_or_create(path, COLLECTION)
 
@@ -630,9 +631,9 @@ class RichCollectionBase(CollectionBase):
             arvfile = self.find(path)
 
         if arvfile is None:
-            raise IOError(errno.ENOENT, "File not found")
+            raise IOError(errno.ENOENT, "File not found", path)
         if not isinstance(arvfile, ArvadosFile):
-            raise IOError(errno.EISDIR, "Is a directory: %s" % path)
+            raise IOError(errno.EISDIR, "Is a directory", path)
 
         if mode[0] == "w":
             arvfile.truncate(0)
@@ -732,10 +733,10 @@ class RichCollectionBase(CollectionBase):
         pathcomponents = path.split("/", 1)
         item = self._items.get(pathcomponents[0])
         if item is None:
-            raise IOError(errno.ENOENT, "File not found")
+            raise IOError(errno.ENOENT, "File not found", path)
         if len(pathcomponents) == 1:
             if isinstance(self._items[pathcomponents[0]], RichCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:
-                raise IOError(errno.ENOTEMPTY, "Subcollection not empty")
+                raise IOError(errno.ENOTEMPTY, "Directory not empty", path)
             deleteditem = self._items[pathcomponents[0]]
             del self._items[pathcomponents[0]]
             self._committed = False
@@ -773,7 +774,7 @@ class RichCollectionBase(CollectionBase):
         """
 
         if target_name in self and not overwrite:
-            raise IOError(errno.EEXIST, "File already exists")
+            raise IOError(errno.EEXIST, "File already exists", target_name)
 
         modified_from = None
         if target_name in self:
@@ -802,7 +803,7 @@ class RichCollectionBase(CollectionBase):
         if isinstance(source, basestring):
             source_obj = source_collection.find(source)
             if source_obj is None:
-                raise IOError(errno.ENOENT, "File not found")
+                raise IOError(errno.ENOENT, "File not found", source)
             sourcecomponents = source.split("/")
         else:
             source_obj = source
@@ -826,7 +827,7 @@ class RichCollectionBase(CollectionBase):
                 target_dir = self
 
         if target_dir is None:
-            raise IOError(errno.ENOENT, "Target directory not found.")
+            raise IOError(errno.ENOENT, "Target directory not found", target_name)
 
         if target_name in target_dir and isinstance(self[target_name], RichCollectionBase) and sourcecomponents:
             target_dir = target_dir[target_name]
@@ -881,7 +882,7 @@ class RichCollectionBase(CollectionBase):
 
         source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, False)
         if not source_obj.writable():
-            raise IOError(errno.EROFS, "Source collection is read only.")
+            raise IOError(errno.EROFS, "Source collection is read only", source)
         target_dir.add(source_obj, target_name, overwrite, True)
 
     def portable_manifest_text(self, stream_name="."):
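
These collection.py changes switch to the three-argument IOError form, so the offending path travels as structured data (`IOError(errno, message, filename)` renders as `[Errno 20] Not a directory: 'foo'`) instead of being formatted into the message. Go's standard library carries the same idea in os.PathError; a hedged parallel:

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    // openDirAsFile attaches the path structurally, like the
    // three-argument IOError, instead of baking it into the text.
    func openDirAsFile(path string) error {
        return &os.PathError{Op: "open", Path: path, Err: syscall.EISDIR}
    }

    func main() {
        fmt.Println(openDirAsFile("a/b")) // open a/b: is a directory
    }
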
index e8ce2ee21d6299dfb9cf4082d105b2d9a439757f..9310f066219ae3063153e4a4393ecba771b7c6ff 100644 (file)
@@ -399,7 +399,12 @@ def main(arguments=None, stdout=sys.stdout):
     # Read the image metadata and make Arvados links from it.
     image_file.seek(0)
     image_tar = tarfile.open(fileobj=image_file)
-    json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
+    image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+    if image_hash_type:
+        json_filename = raw_image_hash + '.json'
+    else:
+        json_filename = raw_image_hash + '/json'
+    json_file = image_tar.extractfile(image_tar.getmember(json_filename))
     image_metadata = json.load(json_file)
     json_file.close()
     image_tar.close()
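
The arv-keepdocker fix accepts both docker archive layouts: legacy images keep their metadata at `<hash>/json`, while content-addressed hashes of the form `algo:<hash>` keep it at `<hash>.json`. A hedged Go rendering of the same filename selection (helper name illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    // jsonMember mirrors the rpartition(':') logic above: an algorithm
    // prefix on the hash signals the content-addressed layout.
    func jsonMember(imageHash string) string {
        if i := strings.LastIndex(imageHash, ":"); i > 0 {
            return imageHash[i+1:] + ".json"
        }
        return imageHash + "/json"
    }

    func main() {
        fmt.Println(jsonMember("sha256:deadbeef")) // deadbeef.json
        fmt.Println(jsonMember("deadbeef"))        // deadbeef/json
    }
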
index 5d29c45117acd71e924838bb9b758af77d8e9b91..54df452394e47bc7b44437bf580a3af2dc17b36e 100644 (file)
@@ -171,6 +171,7 @@ def uploadfiles(files, api, dry_run=False, num_retries=0, project=None, fnPatter
         pdh = item["portable_data_hash"]
 
     for c in files:
+        c.keepref = "%s/%s" % (pdh, c.fn)
         c.fn = fnPattern % (pdh, c.fn)
 
     os.chdir(orgdir)
index 62cc6e1a817d2261bbfcf7a5fe974640aae47b5b..db7835be3746f8f67eddd61d2aac505356e601f4 100644 (file)
@@ -9,8 +9,10 @@ import Queue
 import re
 import socket
 import ssl
+import sys
 import threading
 import timer
+import urlparse
 
 import arvados
 import arvados.config as config
@@ -22,6 +24,17 @@ _logger = logging.getLogger('arvados.keep')
 global_client_object = None
 
 
+# Monkey-patch TCP constants when not available (Apple/macOS). Values sourced from:
+# http://www.opensource.apple.com/source/xnu/xnu-2422.115.4/bsd/netinet/tcp.h
+if sys.platform == 'darwin':
+    if not hasattr(socket, 'TCP_KEEPALIVE'):
+        socket.TCP_KEEPALIVE = 0x010
+    if not hasattr(socket, 'TCP_KEEPINTVL'):
+        socket.TCP_KEEPINTVL = 0x101
+    if not hasattr(socket, 'TCP_KEEPCNT'):
+        socket.TCP_KEEPCNT = 0x102
+
+
 class KeepLocator(object):
     EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
     HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')
@@ -237,90 +250,6 @@ class KeepClient(object):
     DEFAULT_TIMEOUT = (2, 256, 32768)
     DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)
 
-    class ThreadLimiter(object):
-        """Limit the number of threads writing to Keep at once.
-
-        This ensures that only a number of writer threads that could
-        potentially achieve the desired replication level run at once.
-        Once the desired replication level is achieved, queued threads
-        are instructed not to run.
-
-        Should be used in a "with" block.
-        """
-        def __init__(self, want_copies, max_service_replicas):
-            self._started = 0
-            self._want_copies = want_copies
-            self._done = 0
-            self._thread_failures = 0
-            self._response = None
-            self._start_lock = threading.Condition()
-            if (not max_service_replicas) or (max_service_replicas >= want_copies):
-                max_threads = 1
-            else:
-                max_threads = math.ceil(float(want_copies) / max_service_replicas)
-            _logger.debug("Limiter max threads is %d", max_threads)
-            self._todo_lock = threading.Semaphore(max_threads)
-            self._done_lock = threading.Lock()
-            self._thread_failures_lock = threading.Lock()
-            self._local = threading.local()
-
-        def __enter__(self):
-            self._start_lock.acquire()
-            if getattr(self._local, 'sequence', None) is not None:
-                # If the calling thread has used set_sequence(N), then
-                # we wait here until N other threads have started.
-                while self._started < self._local.sequence:
-                    self._start_lock.wait()
-            self._todo_lock.acquire()
-            self._started += 1
-            self._start_lock.notifyAll()
-            self._start_lock.release()
-            return self
-
-        def __exit__(self, type, value, traceback):
-            with self._thread_failures_lock:
-                if self._thread_failures > 0:
-                    self._thread_failures -= 1
-                    self._todo_lock.release()
-
-            # If work is finished, release al pending threads
-            if not self.shall_i_proceed():
-                self._todo_lock.release()
-
-        def set_sequence(self, sequence):
-            self._local.sequence = sequence
-
-        def shall_i_proceed(self):
-            """
-            Return true if the current thread should write to Keep.
-            Return false otherwise.
-            """
-            with self._done_lock:
-                return (self._done < self._want_copies)
-
-        def save_response(self, response_body, replicas_stored):
-            """
-            Records a response body (a locator, possibly signed) returned by
-            the Keep server, and the number of replicas it stored.
-            """
-            if replicas_stored == 0:
-                # Failure notification, should start a new thread to try to reach full replication
-                with self._thread_failures_lock:
-                    self._thread_failures += 1
-            else:
-                with self._done_lock:
-                    self._done += replicas_stored
-                    self._response = response_body
-
-        def response(self):
-            """Return the body from the response to a PUT request."""
-            with self._done_lock:
-                return self._response
-
-        def done(self):
-            """Return the total number of replicas successfully stored."""
-            with self._done_lock:
-                return self._done
 
     class KeepService(object):
         """Make requests to a single Keep service, and track results.
@@ -383,7 +312,9 @@ class KeepClient(object):
             """Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
             s = socket.socket(family, socktype, protocol)
             s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
+            # Will throw an invalid protocol error on macOS; this check prevents that.
+            if hasattr(socket, 'TCP_KEEPIDLE'):
+                s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
             s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
             return s
 
@@ -564,72 +495,130 @@ class KeepClient(object):
             self._lastheadername = name
             self._headers[name] = value
             # Returning None implies all bytes were written
-
-
+    
+
+    class KeepWriterQueue(Queue.Queue):
+        def __init__(self, copies):
+            Queue.Queue.__init__(self) # Old-style superclass
+            self.wanted_copies = copies
+            self.successful_copies = 0
+            self.response = None
+            self.successful_copies_lock = threading.Lock()
+            self.pending_tries = copies
+            self.pending_tries_notification = threading.Condition()
+        
+        def write_success(self, response, replicas_nr):
+            with self.successful_copies_lock:
+                self.successful_copies += replicas_nr
+                self.response = response
+        
+        def write_fail(self, ks, status_code):
+            with self.pending_tries_notification:
+                self.pending_tries += 1
+                self.pending_tries_notification.notify()
+        
+        def pending_copies(self):
+            with self.successful_copies_lock:
+                return self.wanted_copies - self.successful_copies
+    
+    
+    class KeepWriterThreadPool(object):
+        def __init__(self, data, data_hash, copies, max_service_replicas, timeout=None):
+            self.total_task_nr = 0
+            self.wanted_copies = copies
+            if (not max_service_replicas) or (max_service_replicas >= copies):
+                num_threads = 1
+            else:
+                num_threads = int(math.ceil(float(copies) / max_service_replicas))
+            _logger.debug("Pool max threads is %d", num_threads)
+            self.workers = []
+            self.queue = KeepClient.KeepWriterQueue(copies)
+            # Create workers
+            for _ in range(num_threads):
+                w = KeepClient.KeepWriterThread(self.queue, data, data_hash, timeout)
+                self.workers.append(w)
+        
+        def add_task(self, ks, service_root):
+            self.queue.put((ks, service_root))
+            self.total_task_nr += 1
+        
+        def done(self):
+            return self.queue.successful_copies
+        
+        def join(self):
+            # Start workers
+            for worker in self.workers:
+                worker.start()
+            # Wait for finished work
+            self.queue.join()
+            with self.queue.pending_tries_notification:
+                self.queue.pending_tries_notification.notify_all()
+            for worker in self.workers:
+                worker.join()
+        
+        def response(self):
+            return self.queue.response
+    
+    
     class KeepWriterThread(threading.Thread):
-        """
-        Write a blob of data to the given Keep server. On success, call
-        save_response() of the given ThreadLimiter to save the returned
-        locator.
-        """
-        def __init__(self, keep_service, **kwargs):
+        def __init__(self, queue, data, data_hash, timeout=None):
             super(KeepClient.KeepWriterThread, self).__init__()
-            self.service = keep_service
-            self.args = kwargs
-            self._success = False
-
-        def success(self):
-            return self._success
-
+            self.timeout = timeout
+            self.queue = queue
+            self.data = data
+            self.data_hash = data_hash
+        
         def run(self):
-            limiter = self.args['thread_limiter']
-            sequence = self.args['thread_sequence']
-            if sequence is not None:
-                limiter.set_sequence(sequence)
-            with limiter:
-                if not limiter.shall_i_proceed():
-                    # My turn arrived, but the job has been done without
-                    # me.
-                    return
-                self.run_with_limiter(limiter)
-
-        def run_with_limiter(self, limiter):
-            if self.service.finished():
-                return
-            _logger.debug("KeepWriterThread %s proceeding %s+%i %s",
-                          str(threading.current_thread()),
-                          self.args['data_hash'],
-                          len(self.args['data']),
-                          self.args['service_root'])
-            self._success = bool(self.service.put(
-                self.args['data_hash'],
-                self.args['data'],
-                timeout=self.args.get('timeout', None)))
-            result = self.service.last_result()
-            if self._success:
-                _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
-                              str(threading.current_thread()),
-                              self.args['data_hash'],
-                              len(self.args['data']),
-                              self.args['service_root'])
-                # Tick the 'done' counter for the number of replica
-                # reported stored by the server, for the case that
-                # we're talking to a proxy or other backend that
-                # stores to multiple copies for us.
-                try:
-                    replicas_stored = int(result['headers']['x-keep-replicas-stored'])
-                except (KeyError, ValueError):
-                    replicas_stored = 1
-                limiter.save_response(result['body'].strip(), replicas_stored)
-            elif result.get('status_code', None):
-                _logger.debug("Request fail: PUT %s => %s %s",
-                              self.args['data_hash'],
-                              result['status_code'],
-                              result['body'])
-            if not self._success:
-                # Notify the failure so that the Thread limiter allows
-                # a new one to run.
-                limiter.save_response(None, 0)
+            while not self.queue.empty():
+                if self.queue.pending_copies() > 0:
+                    # Avoid over-replication: wait until another attempt is needed
+                    with self.queue.pending_tries_notification:
+                        if self.queue.pending_tries <= 0:
+                            self.queue.pending_tries_notification.wait()
+                            continue # try again when awake
+                        self.queue.pending_tries -= 1
+
+                    # Get to work
+                    try:
+                        service, service_root = self.queue.get_nowait()
+                    except Queue.Empty:
+                        continue
+                    if service.finished():
+                        self.queue.task_done()
+                        continue
+                    success = bool(service.put(self.data_hash,
+                                                self.data,
+                                                timeout=self.timeout))
+                    result = service.last_result()
+                    if success:
+                        _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
+                                      str(threading.current_thread()),
+                                      self.data_hash,
+                                      len(self.data),
+                                      service_root)
+                        try:
+                            replicas_stored = int(result['headers']['x-keep-replicas-stored'])
+                        except (KeyError, ValueError):
+                            replicas_stored = 1
+                        
+                        self.queue.write_success(result['body'].strip(), replicas_stored)
+                    else:
+                        if result.get('status_code', None):
+                            _logger.debug("Request fail: PUT %s => %s %s",
+                                          self.data_hash,
+                                          result['status_code'],
+                                          result['body'])
+                        self.queue.write_fail(service, result.get('status_code', None)) # Schedule a re-attempt with next service
+                    # Mark as done so the queue can be join()ed
+                    self.queue.task_done()
+                else:
+                    # Remove the task from the queue anyway
+                    try:
+                        self.queue.get_nowait()
+                        # Mark as done so the queue can be join()ed
+                        self.queue.task_done()
+                    except Queue.Empty:
+                        continue
 
 
     def __init__(self, api_client=None, proxy=None,
@@ -647,8 +636,9 @@ class KeepClient(object):
         :proxy:
           If specified, this KeepClient will send requests to this Keep
           proxy.  Otherwise, KeepClient will fall back to the setting of the
-          ARVADOS_KEEP_PROXY configuration setting.  If you want to ensure
-          KeepClient does not use a proxy, pass in an empty string.
+          ARVADOS_KEEP_SERVICES or ARVADOS_KEEP_PROXY configuration settings.
+          If you want to ensure KeepClient does not use a proxy, pass in an
+          empty string.
 
         :timeout:
           The initial timeout (in seconds) for HTTP requests to Keep
@@ -691,7 +681,10 @@ class KeepClient(object):
         """
         self.lock = threading.Lock()
         if proxy is None:
-            proxy = config.get('ARVADOS_KEEP_PROXY')
+            if config.get('ARVADOS_KEEP_SERVICES'):
+                proxy = config.get('ARVADOS_KEEP_SERVICES')
+            else:
+                proxy = config.get('ARVADOS_KEEP_PROXY')
         if api_token is None:
             if api_client is None:
                 api_token = config.get('ARVADOS_API_TOKEN')
@@ -722,15 +715,21 @@ class KeepClient(object):
             self.num_retries = num_retries
             self.max_replicas_per_service = None
             if proxy:
-                if not proxy.endswith('/'):
-                    proxy += '/'
+                proxy_uris = proxy.split()
+                for i in range(len(proxy_uris)):
+                    if not proxy_uris[i].endswith('/'):
+                        proxy_uris[i] += '/'
+                    # URL validation
+                    url = urlparse.urlparse(proxy_uris[i])
+                    if not (url.scheme and url.netloc):
+                        raise arvados.errors.ArgumentError("Invalid proxy URI: {}".format(proxy_uris[i]))
                 self.api_token = api_token
                 self._gateway_services = {}
                 self._keep_services = [{
-                    'uuid': 'proxy',
+                    'uuid': "00000-bi6l4-%015d" % idx,
                     'service_type': 'proxy',
-                    '_service_root': proxy,
-                    }]
+                    '_service_root': uri,
+                    } for idx, uri in enumerate(proxy_uris)]
                 self._writable_services = self._keep_services
                 self.using_proxy = True
                 self._static_services_list = True
@@ -1068,30 +1067,22 @@ class KeepClient(object):
                 loop.save_result(error)
                 continue
 
-            thread_limiter = KeepClient.ThreadLimiter(
-                copies - done, self.max_replicas_per_service)
-            threads = []
+            writer_pool = KeepClient.KeepWriterThreadPool(data=data, 
+                                                        data_hash=data_hash,
+                                                        copies=copies - done,
+                                                        max_service_replicas=self.max_replicas_per_service,
+                                                        timeout=self.current_timeout(num_retries - tries_left))
             for service_root, ks in [(root, roots_map[root])
                                      for root in sorted_roots]:
                 if ks.finished():
                     continue
-                t = KeepClient.KeepWriterThread(
-                    ks,
-                    data=data,
-                    data_hash=data_hash,
-                    service_root=service_root,
-                    thread_limiter=thread_limiter,
-                    timeout=self.current_timeout(num_retries-tries_left),
-                    thread_sequence=len(threads))
-                t.start()
-                threads.append(t)
-            for t in threads:
-                t.join()
-            done += thread_limiter.done()
-            loop.save_result((done >= copies, len(threads)))
+                writer_pool.add_task(ks, service_root)
+            writer_pool.join()
+            done += writer_pool.done()
+            loop.save_result((done >= copies, writer_pool.total_task_nr))
 
         if loop.success():
-            return thread_limiter.response()
+            return writer_pool.response()
         if not roots_map:
             raise arvados.errors.KeepWriteError(
                 "failed to write {}: no Keep services available ({})".format(
@@ -1102,7 +1093,7 @@ class KeepClient(object):
                               if roots_map[key].last_result()['error'])
             raise arvados.errors.KeepWriteError(
                 "failed to write {} (wanted {} copies but wrote {})".format(
-                    data_hash, copies, thread_limiter.done()), service_errors, label="service")
+                    data_hash, copies, writer_pool.done()), service_errors, label="service")
 
     def local_store_put(self, data, copies=1, num_retries=None):
         """A stub for put().
index 155bcedc62c4dbcbfb03f4be8488bd5993f4efbc..e72f67dce49049f37c9b2e68794eb62cc780297c 100644 (file)
@@ -4,6 +4,7 @@ from __future__ import print_function
 import argparse
 import atexit
 import errno
+import glob
 import httplib2
 import os
 import pipes
@@ -12,8 +13,8 @@ import re
 import shutil
 import signal
 import socket
-import subprocess
 import string
+import subprocess
 import sys
 import tempfile
 import time
@@ -192,7 +193,7 @@ def _fifo2stderr(label):
             raise
     os.mkfifo(fifo, 0700)
     subprocess.Popen(
-        ['sed', '-e', 's/^/['+label+'] /', fifo],
+        ['stdbuf', '-i0', '-oL', '-eL', 'sed', '-e', 's/^/['+label+'] /', fifo],
         stdout=sys.stderr)
     return fifo
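
For context: stdbuf -i0 -oL -eL runs sed with unbuffered stdin and
line-buffered stdout/stderr, so each labeled log line reaches the shared
stderr as soon as it is written, instead of sitting in a full block buffer
(the glibc default when stdout is not a terminal). A hedged pure-Python
equivalent of the relay, for illustration only:

    import sys

    def relay(fifo_path, label):
        # Prefix each FIFO line with [label] and flush immediately.
        with open(fifo_path) as f:
            for line in f:
                sys.stderr.write('[%s] %s' % (label, line))
                sys.stderr.flush()
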
 
@@ -213,8 +214,14 @@ def run(leave_running_atexit=False):
     """
     global my_api_host
 
-    # Delete cached discovery document.
-    shutil.rmtree(arvados.http_cache('discovery'))
+    # Delete cached discovery documents.
+    #
+    # This will clear cached docs that belong to other processes (like
+    # concurrent test suites) even if they're still running. They should
+    # be able to tolerate that.
+    for fn in glob.glob(os.path.join(arvados.http_cache('discovery'),
+                                     '*,arvados,v1,rest,*')):
+        os.unlink(fn)
 
     pid_file = _pidfile('api')
     pid_file_ok = find_server_pid(pid_file, 0)
@@ -469,7 +476,7 @@ def run_keep_proxy():
         'service_type': 'proxy',
         'service_ssl_flag': False,
     }}).execute()
-    os.environ["ARVADOS_KEEP_PROXY"] = "http://localhost:{}".format(port)
+    os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(port)
     _setport('keepproxy', port)
     _wait_until_port_listens(port)
 
@@ -657,7 +664,7 @@ class TestCaseWithServers(unittest.TestCase):
         cls._orig_environ = os.environ.copy()
         cls._orig_config = arvados.config.settings().copy()
         cls._cleanup_funcs = []
-        os.environ.pop('ARVADOS_KEEP_PROXY', None)
+        os.environ.pop('ARVADOS_KEEP_SERVICES', None)
         os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
         for server_kwargs, start_func, stop_func in (
                 (cls.MAIN_SERVER, run, reset),
index 977f05ed8e245100eedda09d9229840e47c4f470..908539b8cae010f1cf0f23046bdcaf1f15f136b0 100644 (file)
@@ -240,8 +240,8 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
         super(KeepProxyTestCase, self).tearDown()
 
     def test_KeepProxyTest1(self):
-        # Will use ARVADOS_KEEP_PROXY environment variable that is set by
-        # setUpClass().
+        # Will use ARVADOS_KEEP_SERVICES environment variable that
+        # is set by setUpClass().
         keep_client = arvados.KeepClient(api_client=self.api_client,
                                          local_store='')
         baz_locator = keep_client.put('baz')
@@ -270,6 +270,22 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
                          'wrong content from Keep.get(md5("baz2"))')
         self.assertTrue(keep_client.using_proxy)
 
+    def test_KeepProxyTestMultipleURIs(self):
+        # Test that the ARVADOS_KEEP_SERVICES setting overrides any
+        # existing proxy setting and can specify multiple proxies.
+        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'http://10.0.0.1 https://foo.example.org:1234/'
+        keep_client = arvados.KeepClient(api_client=self.api_client,
+                                         local_store='')
+        uris = [x['_service_root'] for x in keep_client._keep_services]
+        self.assertEqual(uris, ['http://10.0.0.1/',
+                                'https://foo.example.org:1234/'])
+
+    def test_KeepProxyTestInvalidURI(self):
+        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'bad.uri.org'
+        with self.assertRaises(arvados.errors.ArgumentError):
+            keep_client = arvados.KeepClient(api_client=self.api_client,
+                                             local_store='')
+
 
 class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
     def get_service_roots(self, api_client):
@@ -793,10 +809,10 @@ class KeepClientTimeout(unittest.TestCase, tutil.ApiClientMock):
         # Allow 10s to connect, then 1s for response. Nothing should
         # work, and everything should take at least 1s to return.
         kc = self.keepClient(timeouts=(10, 1))
-        with self.assertTakesBetween(1, 1.9):
+        with self.assertTakesBetween(1, 9):
             with self.assertRaises(arvados.errors.KeepReadError):
                 kc.get(loc, num_retries=0)
-        with self.assertTakesBetween(1, 1.9):
+        with self.assertTakesBetween(1, 9):
             with self.assertRaises(arvados.errors.KeepWriteError):
                 kc.put(self.DATA, copies=1, num_retries=0)
 
@@ -1066,65 +1082,58 @@ class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase):
 
 
 class KeepClientAvoidClientOverreplicationTestCase(unittest.TestCase, tutil.ApiClientMock):
-
-
-    class KeepFakeWriterThread(threading.Thread):
-        """
-        Just Simulating the real KeepClient.KeepWriterThread, to test the ThreadLimiter.
-        """
-        def __init__(self, delay, will_succeed, thread_limiter):
-            super(KeepClientAvoidClientOverreplicationTestCase.KeepFakeWriterThread, self).__init__()
-            self.delay = delay # in seconds
+
+
+    class FakeKeepService(object):
+        def __init__(self, delay, will_succeed, replicas=1):
+            self.delay = delay
             self.success = will_succeed
-            self.limiter = thread_limiter
-
-        def run(self):
-            with self.limiter:
-                if not self.limiter.shall_i_proceed():
-                    return
-                time.sleep(self.delay)
-                if self.success:
-                    self.limiter.save_response('foo', 1)
-                else:
-                    self.limiter.save_response(None, 0)
-
+            self._result = {}
+            self._result['headers'] = {}
+            self._result['headers']['x-keep-replicas-stored'] = str(replicas)
+            self._result['body'] = 'foobar'
+
+        def put(self, data_hash, data, timeout):
+            time.sleep(self.delay)
+            return self.success
+
+        def last_result(self):
+            return self._result
+
+        def finished(self):
+            return False
+
+
     def test_only_write_enough_on_success(self):
         copies = 3
-        threads = []
-        limiter = arvados.KeepClient.ThreadLimiter(want_copies=copies, max_service_replicas=1)
-        # Setting up fake writer threads with different delays so that the bug is revealed
-        for i in range(copies*2):
-            t = self.KeepFakeWriterThread(
-                    delay=i/10.0,
-                    will_succeed=True,
-                    thread_limiter=limiter)
-            t.start()
-            threads.append(t)
-        for t in threads:
-            t.join()
-        self.assertEqual(limiter.done(), copies)
-
-    def test_only_write_enough_on_partial_failure(self):
-        copies = 3
-        threads = []
-        limiter = arvados.KeepClient.ThreadLimiter(want_copies=copies, max_service_replicas=1)
-        for i in range(copies):
-            t = self.KeepFakeWriterThread(
-                    delay=i/10.0,
-                    will_succeed=False,
-                    thread_limiter=limiter)
-            t.start()
-            threads.append(t)
-            t = self.KeepFakeWriterThread(
-                    delay=i/10.0,
-                    will_succeed=True,
-                    thread_limiter=limiter)
-            t.start()
-            threads.append(t)
-        for t in threads:
-            t.join()
-        self.assertEqual(limiter.done(), copies)
+        pool = arvados.KeepClient.KeepWriterThreadPool(
+            data = 'foo',
+            data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+            max_service_replicas = copies,
+            copies = copies
+        )
+        for i in range(10):
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            pool.add_task(ks, None)
+        pool.join()
+        self.assertEqual(pool.done(), copies)
 
+    def test_only_write_enough_on_partial_success(self):
+        copies = 3
+        pool = arvados.KeepClient.KeepWriterThreadPool(
+            data = 'foo',
+            data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+            max_service_replicas = copies,
+            copies = copies
+        )
+        for i in range(5):
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)
+            pool.add_task(ks, None)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            pool.add_task(ks, None)
+        pool.join()
+        self.assertEqual(pool.done(), copies)
+
 
 @tutil.skip_sleep
 class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock):
index 1e25467e7485ed05337e2b212f4cf4fc4bc44d1e..5134fc4ce82d68bf4b00da91a4bedaa10e7f7e1e 100644 (file)
@@ -80,3 +80,6 @@ gem 'pg_power'
 
 gem 'puma'
 gem 'sshkey'
+gem 'safe_yaml'
+gem 'lograge'
+gem 'logstash-event'
index 3715718717b7c85495717739bce5b2d8e58261d7..1fb4369c0eca56815d189b4a9c5bf7f87ab9e4ea 100644 (file)
@@ -112,6 +112,11 @@ GEM
     launchy (2.4.3)
       addressable (~> 2.3)
     libv8 (3.16.14.3)
+    lograge (0.3.6)
+      actionpack (>= 3)
+      activesupport (>= 3)
+      railties (>= 3)
+    logstash-event (1.2.02)
     mail (2.5.4)
       mime-types (~> 1.16)
       treetop (~> 1.4.8)
@@ -182,6 +187,7 @@ GEM
     ruby-prof (0.15.2)
     rvm-capistrano (1.5.1)
       capistrano (~> 2.15.4)
+    safe_yaml (1.0.4)
     sass (3.3.4)
     sass-rails (3.2.6)
       railties (~> 3.2.0)
@@ -236,6 +242,8 @@ DEPENDENCIES
   factory_girl_rails
   faye-websocket
   jquery-rails
+  lograge
+  logstash-event
   mocha
   multi_json
   oj
@@ -248,6 +256,7 @@ DEPENDENCIES
   rails (~> 3.2.0)
   ruby-prof
   rvm-capistrano
+  safe_yaml
   sass-rails (>= 3.2.0)
   simplecov (~> 0.7.1)
   simplecov-rcov
index 3a888184f8a32dd37734228a7f8dacd51c3105f2..3c5bf94d2c4b06f8d1a1e301971cdf39673d8a44 100644 (file)
@@ -14,13 +14,11 @@ class ActsAsApi::ApiTemplate
 end
 
 require 'load_param'
-require 'record_filters'
 
 class ApplicationController < ActionController::Base
   include CurrentApiClient
   include ThemesForRails::ActionController
   include LoadParam
-  include RecordFilters
 
   respond_to :json
   protect_from_forgery
@@ -207,11 +205,7 @@ class ApplicationController < ActionController::Base
 
   def apply_filters model_class=nil
     model_class ||= self.model_class
-    ft = record_filters @filters, model_class
-    if ft[:cond_out].any?
-      @objects = @objects.where('(' + ft[:cond_out].join(') AND (') + ')',
-                                *ft[:param_out])
-    end
+    @objects = model_class.apply_filters(@objects, @filters)
   end
 
   def apply_where_limit_order_params model_class=nil
index fe4696e300738141e696ec968620f4ecd2c2dd73..6e2848ceb53f34165379e3f1afa539d67dbb5651 100644 (file)
@@ -3,4 +3,5 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
   accept_attribute_as_json :mounts, Hash
   accept_attribute_as_json :runtime_constraints, Hash
   accept_attribute_as_json :command, Array
+  accept_attribute_as_json :filters, Array
 end
index 21ee7efa53b5d4008c0f42717095194b9b0c39c6..fb748e9350d01273fbd09f6709a733815b745d78 100644 (file)
@@ -19,4 +19,14 @@ class Arvados::V1::ContainersController < ApplicationController
       super
     end
   end
+
+  def lock
+    @object.lock
+    show
+  end
+
+  def unlock
+    @object.unlock
+    show
+  end
 end
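
A hedged sketch of exercising the new endpoints from the Python SDK,
assuming the cluster's discovery document already advertises them (the
container UUID is made up):

    import arvados

    api = arvados.api('v1')
    uuid = 'zzzzz-dz642-0123456789abcde'  # hypothetical container
    # A dispatcher locks a queued container before running it...
    api.containers().lock(uuid=uuid).execute()
    # ...and unlocks it to requeue the work if dispatch fails. Locking
    # an already-locked container now yields 422 (AlreadyLockedError).
    api.containers().unlock(uuid=uuid).execute()
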
index eae6dca8c0332ae820fbedbb3965f3112453dfb9..7a5713a03c59651a3bc0050674ea38171d8f9c34 100644 (file)
@@ -61,10 +61,21 @@ class Arvados::V1::GroupsController < ApplicationController
     request_orders = @orders.clone
     @orders = []
 
-    [Group,
-     Job, PipelineInstance, PipelineTemplate,
+    request_filters = @filters
+
+    klasses = [Group,
+     Job, PipelineInstance, PipelineTemplate, ContainerRequest, Workflow,
      Collection,
-     Human, Specimen, Trait].each do |klass|
+     Human, Specimen, Trait]
+
+    table_names = klasses.map(&:table_name)
+    request_filters.each do |col, op, val|
+      if col.index('.') && !table_names.include?(col.split('.', 2)[0])
+        raise ArgumentError.new("Invalid attribute '#{col}' in filter")
+      end
+    end
+
+    klasses.each do |klass|
       # If the currently requested orders specifically match the
       # table_name for the current klass, apply that order.
       # Otherwise, order by recency.
@@ -81,6 +92,16 @@ class Arvados::V1::GroupsController < ApplicationController
         where_conds[:group_class] = "project"
       end
 
+      @filters = request_filters.map do |col, op, val|
+        if !col.index('.')
+          [col, op, val]
+        elsif (col = col.split('.', 2))[0] == klass.table_name
+          [col[1], op, val]
+        else
+          nil
+        end
+      end.compact
+
       @objects = klass.readable_by(*@read_users).
         order(request_order).where(where_conds)
       @limit = limit_all - all_objects.count
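
Illustrative use of the now-validated table-qualified filters when
listing project contents (Python SDK; the project UUID is made up):

    import arvados

    api = arvados.api('v1')
    # A filter qualified with a known table name is applied only to that
    # object type; an unknown prefix now raises ArgumentError.
    api.groups().contents(
        uuid='zzzzz-j7d0g-0123456789abcde',
        filters=[['container_requests.state', '=', 'Committed']],
    ).execute()
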
index 67963388639f9fb352ca42b0abff2a05e86d140c..243f38b78cd74740e57a64f1cefe070eb0be6686 100644 (file)
@@ -28,83 +28,20 @@ class Arvados::V1::JobsController < ApplicationController
       params[:find_or_create] = !resource_attrs.delete(:no_reuse)
     end
 
-    if params[:find_or_create]
-      return if false.equal?(load_filters_param)
-      if @filters.empty?  # Translate older creation parameters into filters.
-        @filters =
-          [["repository", "=", resource_attrs[:repository]],
-           ["script", "=", resource_attrs[:script]],
-           ["script_version", "not in git", params[:exclude_script_versions]],
-          ].reject { |filter| filter.last.nil? or filter.last.empty? }
-        if !params[:minimum_script_version].blank?
-          @filters << ["script_version", "in git",
-                       params[:minimum_script_version]]
-        else
-          add_default_git_filter("script_version", resource_attrs[:repository],
-                                 resource_attrs[:script_version])
-        end
-        if image_search = resource_attrs[:runtime_constraints].andand["docker_image"]
-          if image_tag = resource_attrs[:runtime_constraints]["docker_image_tag"]
-            image_search += ":#{image_tag}"
-          end
-          image_locator = Collection.
-            for_latest_docker_image(image_search).andand.portable_data_hash
-        else
-          image_locator = nil
-        end
-        @filters << ["docker_image_locator", "=", image_locator]
-        if sdk_version = resource_attrs[:runtime_constraints].andand["arvados_sdk_version"]
-          add_default_git_filter("arvados_sdk_version", "arvados", sdk_version)
-        end
-        begin
-          load_job_specific_filters
-        rescue ArgumentError => error
-          return send_error(error.message)
-        end
-      end
+    return super if !params[:find_or_create]
+    return if !load_filters_param
 
-      # Check specified filters for some reasonableness.
-      filter_names = @filters.map { |f| f.first }.uniq
-      ["repository", "script"].each do |req_filter|
-        if not filter_names.include?(req_filter)
-          return send_error("#{req_filter} filter required")
-        end
-      end
-
-      # Search for a reusable Job, and return it if found.
-      @objects = Job.readable_by(current_user)
-      apply_filters
-      @object = nil
-      incomplete_job = nil
-      @objects.each do |j|
-        if j.nondeterministic != true and
-            ["Queued", "Running", "Complete"].include?(j.state) and
-            j.script_parameters == resource_attrs[:script_parameters]
-          if j.state != "Complete" && j.owner_uuid == current_user.uuid
-            # We'll use this if we don't find a job that has completed
-            incomplete_job ||= j
-          else
-            if Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
-              # Record the first job in the list
-              if !@object
-                @object = j
-              end
-              # Ensure that all candidate jobs actually did produce the same output
-              if @object.output != j.output
-                @object = nil
-                break
-              end
-            end
-          end
-        end
-        @object ||= incomplete_job
-        if @object
-          return show
-        end
-      end
+    begin
+      @object = Job.find_reusable(resource_attrs, params, @filters, @read_users)
+    rescue ArgumentError => error
+      return send_error(error.message)
     end
 
-    super
+    if @object
+      show
+    else
+      super
+    end
   end
 
   def cancel
@@ -171,7 +108,7 @@ class Arvados::V1::JobsController < ApplicationController
     load_limit_offset_order_params
     load_where_param
     @where.merge!({state: Job::Queued})
-    return if false.equal?(load_filters_param)
+    return if !load_filters_param
     find_objects_for_index
     index
   end
@@ -207,92 +144,16 @@ class Arvados::V1::JobsController < ApplicationController
 
   protected
 
-  def add_default_git_filter(attr_name, repo_name, refspec)
-    # Add a filter to @filters for `attr_name` = the latest commit available
-    # in `repo_name` at `refspec`.  No filter is added if refspec can't be
-    # resolved.
-    commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
-    if commit_hash = commits.first
-      @filters << [attr_name, "=", commit_hash]
-    end
-  end
-
-  def load_job_specific_filters
-    # Convert Job-specific @filters entries into general SQL filters.
-    script_info = {"repository" => nil, "script" => nil}
-    git_filters = Hash.new do |hash, key|
-      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
-    end
-    @filters.select! do |(attr, operator, operand)|
-      if (script_info.has_key? attr) and (operator == "=")
-        if script_info[attr].nil?
-          script_info[attr] = operand
-        elsif script_info[attr] != operand
-          raise ArgumentError.new("incompatible #{attr} filters")
-        end
-      end
-      case operator
-      when "in git"
-        git_filters[attr]["min_version"] = operand
-        false
-      when "not in git"
-        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
-        false
-      when "in docker", "not in docker"
-        image_hashes = Array.wrap(operand).flat_map do |search_term|
-          image_search, image_tag = search_term.split(':', 2)
-          Collection.
-            find_all_for_docker_image(image_search, image_tag, @read_users).
-            map(&:portable_data_hash)
-        end
-        @filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
-        false
-      else
-        true
-      end
-    end
-
-    # Build a real script_version filter from any "not? in git" filters.
-    git_filters.each_pair do |attr, filter|
-      case attr
-      when "script_version"
-        script_info.each_pair do |key, value|
-          if value.nil?
-            raise ArgumentError.new("script_version filter needs #{key} filter")
-          end
-        end
-        filter["repository"] = script_info["repository"]
-        begin
-          filter["max_version"] = resource_attrs[:script_version]
-        rescue
-          # Using HEAD, set earlier by the hash default, is fine.
-        end
-      when "arvados_sdk_version"
-        filter["repository"] = "arvados"
-      else
-        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
-      end
-      revisions = Commit.find_commit_range(filter["repository"],
-                                           filter["min_version"],
-                                           filter["max_version"],
-                                           filter["exclude_versions"])
-      if revisions.empty?
-        raise ArgumentError.
-          new("error searching #{filter['repository']} from " +
-              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
-              "excluding #{filter['exclude_versions']}")
-      end
-      @filters.append([attr, "in", revisions])
-    end
-  end
-
   def load_filters_param
     begin
       super
-      load_job_specific_filters
+      attrs = resource_attrs rescue {}
+      @filters = Job.load_job_specific_filters attrs, @filters, @read_users
     rescue ArgumentError => error
       send_error(error.message)
       false
+    else
+      true
     end
   end
 end
diff --git a/services/api/app/controllers/arvados/v1/workflows_controller.rb b/services/api/app/controllers/arvados/v1/workflows_controller.rb
new file mode 100644 (file)
index 0000000..5177d0a
--- /dev/null
@@ -0,0 +1,2 @@
+class Arvados::V1::WorkflowsController < ApplicationController
+end
index 499a61b7d3e93116b50f3e96beffbe846466c676..f7985a986afa304806cac1a551c1603615e92196 100644 (file)
@@ -65,9 +65,7 @@ class ApiClientAuthorization < ArvadosModel
   end
 
   def logged_attributes
-    attrs = attributes.dup
-    attrs.delete('api_token')
-    attrs
+    super.except 'api_token'
   end
 
   def self.default_orders
index 6cd40a44585c6805278dd9d421c8495d5d66c1c7..672374bc6c768f4f7bd8be0ba81daf2fbefa1629 100644 (file)
@@ -1,10 +1,12 @@
 require 'has_uuid'
+require 'record_filters'
 
 class ArvadosModel < ActiveRecord::Base
   self.abstract_class = true
 
   include CurrentApiClient      # current_user, current_api_client, etc.
   include DbCurrentTime
+  extend RecordFilters
 
   attr_protected :created_at
   attr_protected :modified_by_user_uuid
@@ -40,7 +42,13 @@ class ArvadosModel < ActiveRecord::Base
 
   class AlreadyLockedError < StandardError
     def http_status
-      403
+      422
+    end
+  end
+
+  class InvalidStateTransitionError < StandardError
+    def http_status
+      422
     end
   end
 
@@ -50,6 +58,12 @@ class ArvadosModel < ActiveRecord::Base
     end
   end
 
+  class UnresolvableContainerError < StandardError
+    def http_status
+      422
+    end
+  end
+
   def self.kind_class(kind)
     kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
   end
@@ -104,10 +118,27 @@ class ArvadosModel < ActiveRecord::Base
     api_column_map
   end
 
+  def self.ignored_select_attributes
+    ["href", "kind", "etag"]
+  end
+
   def self.columns_for_attributes(select_attributes)
+    if select_attributes.empty?
+      raise ArgumentError.new("Attribute selection list cannot be empty")
+    end
+    api_column_map = attributes_required_columns
+    invalid_attrs = []
+    select_attributes.each do |s|
+      next if ignored_select_attributes.include? s
+      if not s.is_a? String or not api_column_map.include? s
+        invalid_attrs << s
+      end
+    end
+    if not invalid_attrs.empty?
+      raise ArgumentError.new("Invalid attribute(s): #{invalid_attrs.inspect}")
+    end
     # Given an array of attribute names to select, return an array of column
     # names that must be fetched from the database to satisfy the request.
-    api_column_map = attributes_required_columns
     select_attributes.flat_map { |attr| api_column_map[attr] }.uniq
   end
 
@@ -168,65 +199,47 @@ class ArvadosModel < ActiveRecord::Base
       return self
     end
 
-    # Collect the uuids for each user and any groups readable by each user.
+    # Collect the UUIDs of the authorized users.
     user_uuids = users_list.map { |u| u.uuid }
-    uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
-    sql_conds = []
-    sql_params = []
-    sql_table = kwargs.fetch(:table_name, table_name)
-    or_object_uuid = ''
 
-    # This row is owned by a member of users_list, or owned by a group
-    # readable by a member of users_list
-    # or
-    # This row uuid is the uuid of a member of users_list
-    # or
-    # A permission link exists ('write' and 'manage' implicitly include
-    # 'read') from a member of users_list, or a group readable by users_list,
-    # to this row, or to the owner of this row (see join() below).
-    sql_conds += ["#{sql_table}.uuid in (?)"]
-    sql_params += [user_uuids]
+    # Collect the UUIDs of all groups readable by any of the
+    # authorized users. If one of these (or the UUID of one of the
+    # authorized users themselves) is an object's owner_uuid, that
+    # object is readable.
+    owner_uuids = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
+    owner_uuids.uniq!
 
-    if uuid_list.any?
-      sql_conds += ["#{sql_table}.owner_uuid in (?)"]
-      sql_params += [uuid_list]
+    sql_conds = []
+    sql_table = kwargs.fetch(:table_name, table_name)
 
-      sanitized_uuid_list = uuid_list.
-        collect { |uuid| sanitize(uuid) }.join(', ')
-      permitted_uuids = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (#{sanitized_uuid_list}))"
-      sql_conds += ["#{sql_table}.uuid IN #{permitted_uuids}"]
-    end
+    # Match any object (evidently a group or user) whose UUID is
+    # listed explicitly in owner_uuids.
+    sql_conds += ["#{sql_table}.uuid in (:owner_uuids)"]
 
-    if sql_table == "links" and users_list.any?
-      # This row is a 'permission' or 'resources' link class
-      # The uuid for a member of users_list is referenced in either the head
-      # or tail of the link
-      sql_conds += ["(#{sql_table}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{sql_table}.head_uuid IN (?) OR #{sql_table}.tail_uuid IN (?)))"]
-      sql_params += [user_uuids, user_uuids]
-    end
+    # Match any object whose owner is listed explicitly in
+    # owner_uuids.
+    sql_conds += ["#{sql_table}.owner_uuid IN (:owner_uuids)"]
 
-    if sql_table == "logs" and users_list.any?
-      # Link head points to the object described by this row
-      sql_conds += ["#{sql_table}.object_uuid IN #{permitted_uuids}"]
+    # Match the head of any permission link whose tail is listed
+    # explicitly in owner_uuids.
+    sql_conds += ["#{sql_table}.uuid IN (SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (:owner_uuids))"]
 
-      # This object described by this row is owned by this user, or owned by a group readable by this user
-      sql_conds += ["#{sql_table}.object_owner_uuid in (?)"]
-      sql_params += [uuid_list]
+    if sql_table == "links"
+      # Match any permission link that gives one of the authorized
+      # users some permission _or_ gives anyone else permission to
+      # view one of the authorized users.
+      sql_conds += ["(#{sql_table}.link_class in (:permission_link_classes) AND "+
+                    "(#{sql_table}.head_uuid IN (:user_uuids) OR #{sql_table}.tail_uuid IN (:user_uuids)))"]
     end
 
-    # Link head points to this row, or to the owner of this row (the
-    # thing to be read)
-    #
-    # Link tail originates from this user, or a group that is readable
-    # by this user (the identity with authorization to read)
-    #
-    # Link class is 'permission' ('write' and 'manage' implicitly
-    # include 'read')
-    where(sql_conds.join(' OR '), *sql_params)
+    where(sql_conds.join(' OR '),
+          owner_uuids: owner_uuids,
+          user_uuids: user_uuids,
+          permission_link_classes: ['permission', 'resources'])
   end
 
   def logged_attributes
-    attributes
+    attributes.except *Rails.configuration.unlogged_attributes
   end
 
   def self.full_text_searchable_columns
@@ -247,8 +260,29 @@ class ArvadosModel < ActiveRecord::Base
     "to_tsvector('english', ' ' || #{parts.join(" || ' ' || ")})"
   end
 
+  def self.apply_filters query, filters
+    ft = record_filters filters, self
+    if not ft[:cond_out].any?
+      return query
+    end
+    query.where('(' + ft[:cond_out].join(') AND (') + ')',
+                          *ft[:param_out])
+  end
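
This moves filter translation from the controller (see the
ApplicationController hunk above) into the model layer, so any relation
can be filtered uniformly. Client-visible behavior is unchanged, e.g.:

    import arvados

    api = arvados.api('v1')
    # List filters are still translated to SQL conditions server-side.
    api.collections().list(
        filters=[['name', 'like', 'sample-%']],
        limit=10,
    ).execute()
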
+
   protected
 
+  def self.deep_sort_hash(x)
+    if x.is_a? Hash
+      x.sort.collect do |k, v|
+        [k, deep_sort_hash(v)]
+      end.to_h
+    elsif x.is_a? Array
+      x.collect { |v| deep_sort_hash(v) }
+    else
+      x
+    end
+  end
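
A rough Python analogue of deep_sort_hash, for intuition only:
recursively ordering mapping keys makes equivalent structures serialize
identically, which is what lets the reuse logic elsewhere in this merge
compare serialized environments, mounts, and script_parameters
byte-for-byte.

    import json
    from collections import OrderedDict

    def deep_sort(x):
        # Order dict keys recursively; lists keep their order, but
        # their elements are normalized too.
        if isinstance(x, dict):
            return OrderedDict((k, deep_sort(v)) for k, v in sorted(x.items()))
        if isinstance(x, list):
            return [deep_sort(v) for v in x]
        return x

    a = deep_sort({'b': 1, 'a': {'d': 2, 'c': 3}})
    b = deep_sort({'a': {'c': 3, 'd': 2}, 'b': 1})
    assert json.dumps(a) == json.dumps(b)
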
+
   def ensure_ownership_path_leads_to_user
     if new_record? or owner_uuid_changed?
       uuid_in_path = {owner_uuid => true, uuid => true}
@@ -391,15 +425,16 @@ class ArvadosModel < ActiveRecord::Base
       x.each do |k,v|
         return true if has_symbols?(k) or has_symbols?(v)
       end
-      false
     elsif x.is_a? Array
       x.each do |k|
         return true if has_symbols?(k)
       end
-      false
-    else
-      (x.class == Symbol)
+    elsif x.is_a? Symbol
+      return true
+    elsif x.is_a? String
+      return true if x.start_with?(':') && !x.start_with?('::')
     end
+    false
   end
 
   def self.recursive_stringify x
@@ -413,6 +448,8 @@ class ArvadosModel < ActiveRecord::Base
       end
     elsif x.is_a? Symbol
       x.to_s
+    elsif x.is_a? String and x.start_with?(':') and !x.start_with?('::')
+      x[1..-1]
     else
       x
     end
@@ -493,7 +530,7 @@ class ArvadosModel < ActiveRecord::Base
   end
 
   def self.uuid_like_pattern
-    "_____-#{uuid_prefix}-_______________"
+    "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
   end
 
   def self.uuid_regex
index 4a612924b617f9b9b1dc5c4d7507f8113fdb5553..4a054413ce21e0076e79100934f20d248c49198d 100644 (file)
@@ -46,6 +46,10 @@ class Collection < ArvadosModel
                 )
   end
 
+  def self.ignored_select_attributes
+    super + ["updated_at", "file_names"]
+  end
+
   FILE_TOKEN = /^[[:digit:]]+:[[:digit:]]+:/
   def check_signatures
     return false if self.manifest_text.nil?
index 4c770083786934abdafe9461f51ee03646396415..a60ea427b78649ae15977b69bea99060e9c465c9 100644 (file)
@@ -5,6 +5,7 @@ class Container < ArvadosModel
   include KindAndEtag
   include CommonApiTemplate
   include WhitelistUpdate
+  extend CurrentApiClient
 
   serialize :environment, Hash
   serialize :mounts, Hash
@@ -18,6 +19,7 @@ class Container < ArvadosModel
   validate :validate_change
   validate :validate_lock
   after_validation :assign_auth
+  before_save :sort_serialized_attrs
   after_save :handle_completed
 
   has_many :container_requests, :foreign_key => :container_uuid, :class_name => 'ContainerRequest', :primary_key => :uuid
@@ -76,6 +78,94 @@ class Container < ArvadosModel
     end
   end
 
+  def self.find_reusable(attrs)
+    candidates = Container.
+      where('command = ?', attrs[:command].to_yaml).
+      where('cwd = ?', attrs[:cwd]).
+      where('environment = ?', self.deep_sort_hash(attrs[:environment]).to_yaml).
+      where('output_path = ?', attrs[:output_path]).
+      where('container_image = ?', attrs[:container_image]).
+      where('mounts = ?', self.deep_sort_hash(attrs[:mounts]).to_yaml).
+      where('runtime_constraints = ?', self.deep_sort_hash(attrs[:runtime_constraints]).to_yaml)
+
+    # Check for Completed candidates that had consistent outputs.
+    completed = candidates.where(state: Complete).where(exit_code: 0)
+    outputs = completed.select('output').group('output').limit(2)
+    if outputs.count.count != 1
+      Rails.logger.debug("Found #{outputs.count.length} different outputs")
+    elsif Collection.
+        readable_by(current_user).
+        where(portable_data_hash: outputs.first.output).
+        count < 1
+      Rails.logger.info("Found reusable container(s) " +
+                        "but output #{outputs.first} is not readable " +
+                        "by user #{current_user.uuid}")
+    else
+      # Return the oldest eligible container whose log is still
+      # present and readable by current_user.
+      readable_pdh = Collection.
+        readable_by(current_user).
+        select('portable_data_hash')
+      completed = completed.
+        where("log in (#{readable_pdh.to_sql})").
+        order('finished_at asc').
+        limit(1)
+      if completed.first
+        return completed.first
+      else
+        Rails.logger.info("Found reusable container(s) but none with a log " +
+                          "readable by user #{current_user.uuid}")
+      end
+    end
+
+    # Check for Running candidates and return the most likely to finish sooner.
+    running = candidates.where(state: Running).
+      order('progress desc, started_at asc').limit(1).first
+    return running if not running.nil?
+
+    # Check for Locked or Queued ones and return the most likely to start first.
+    locked_or_queued = candidates.where("state IN (?)", [Locked, Queued]).
+      order('state asc, priority desc, created_at asc').limit(1).first
+    return locked_or_queued if not locked_or_queued.nil?
+
+    # No suitable candidate found.
+    nil
+  end
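
The client-side effect, sketched with made-up values: committing a
container request whose resolved attributes match an earlier successful
container reuses that container instead of queueing a new one.

    import arvados

    api = arvados.api('v1')
    cr = api.container_requests().create(body={'container_request': {
        'command': ['echo', 'hello'],
        'container_image': 'arvados/jobs',  # resolved to a PDH server-side
        'cwd': '/tmp',
        'output_path': '/tmp',
        'runtime_constraints': {'vcpus': 1, 'ram': 268435456},
        'state': 'Committed',
    }}).execute()
    # Points at a reused container when an equivalent one exists.
    print(cr['container_uuid'])
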
+
+  def lock
+    with_lock do
+      if self.state == Locked
+        raise AlreadyLockedError
+      end
+      self.state = Locked
+      self.save!
+    end
+  end
+
+  def unlock
+    with_lock do
+      if self.state == Queued
+        raise InvalidStateTransitionError
+      end
+      self.state = Queued
+      self.save!
+    end
+  end
+
+  def self.readable_by(*users_list)
+    if users_list.select { |u| u.is_admin }.any?
+      return self
+    end
+    user_uuids = users_list.map { |u| u.uuid }
+    uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
+    uuid_list.uniq!
+    permitted = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (:uuids))"
+    joins(:container_requests).
+      where("container_requests.uuid IN #{permitted} OR "+
+            "container_requests.owner_uuid IN (:uuids)",
+            uuids: uuid_list)
+  end
+
   protected
 
   def fill_field_defaults
@@ -200,6 +290,18 @@ class Container < ArvadosModel
               api_client_id: 0)
   end
 
+  def sort_serialized_attrs
+    if self.environment_changed?
+      self.environment = self.class.deep_sort_hash(self.environment)
+    end
+    if self.mounts_changed?
+      self.mounts = self.class.deep_sort_hash(self.mounts)
+    end
+    if self.runtime_constraints_changed?
+      self.runtime_constraints = self.class.deep_sort_hash(self.runtime_constraints)
+    end
+  end
+
   def handle_completed
     # This container is finished so finalize any associated container requests
     # that are associated with this container.
@@ -207,13 +309,13 @@ class Container < ArvadosModel
       act_as_system_user do
         # Notify container requests associated with this container
         ContainerRequest.where(container_uuid: uuid,
-                               :state => ContainerRequest::Committed).each do |cr|
+                               state: ContainerRequest::Committed).each do |cr|
           cr.container_completed!
         end
 
         # Try to cancel any outstanding container requests made by this container.
         ContainerRequest.where(requesting_container_uuid: uuid,
-                               :state => ContainerRequest::Committed).each do |cr|
+                               state: ContainerRequest::Committed).each do |cr|
           cr.priority = 0
           cr.save
         end
index 496a6b141db2d4f95536430b31edad1f90fba82f..1fe8365121054e75bc326df7ca83eff4e52bcba7 100644 (file)
@@ -17,6 +17,7 @@ class ContainerRequest < ArvadosModel
   validates :command, :container_image, :output_path, :cwd, :presence => true
   validate :validate_state_change
   validate :validate_change
+  validate :validate_runtime_constraints
   after_save :update_priority
   before_create :set_requesting_container_uuid
 
@@ -63,10 +64,24 @@ class ContainerRequest < ArvadosModel
     %w(modified_by_client_uuid container_uuid requesting_container_uuid)
   end
 
+  # Finalize the container request after the container has
+  # finished/cancelled.
   def container_completed!
-    # may implement retry logic here in the future.
-    self.state = ContainerRequest::Final
-    self.save!
+    update_attributes!(state: ContainerRequest::Final)
+    c = Container.find_by_uuid(container_uuid)
+    ['output', 'log'].each do |out_type|
+      pdh = c.send(out_type)
+      next if pdh.nil?
+      manifest = Collection.where(portable_data_hash: pdh).first.manifest_text
+      Collection.create!(owner_uuid: owner_uuid,
+                         manifest_text: manifest,
+                         portable_data_hash: pdh,
+                         name: "Container #{out_type} for request #{uuid}",
+                         properties: {
+                           'type' => out_type,
+                           'container_request' => uuid,
+                         })
+    end
   end
 
   protected
@@ -82,20 +97,88 @@ class ContainerRequest < ArvadosModel
   # Create a new container (or find an existing one) to satisfy this
   # request.
   def resolve
-    # TODO: resolve symbolic git and keep references to content
-    # addresses.
+    c_mounts = mounts_for_container
+    c_runtime_constraints = runtime_constraints_for_container
+    c_container_image = container_image_for_container
     c = act_as_system_user do
-      Container.create!(command: self.command,
-                        container_image: self.container_image,
-                        cwd: self.cwd,
-                        environment: self.environment,
-                        mounts: self.mounts,
-                        output_path: self.output_path,
-                        runtime_constraints: self.runtime_constraints)
+      c_attrs = {command: self.command,
+                 cwd: self.cwd,
+                 environment: self.environment,
+                 output_path: self.output_path,
+                 container_image: c_container_image,
+                 mounts: c_mounts,
+                 runtime_constraints: c_runtime_constraints}
+      reusable = Container.find_reusable(c_attrs)
+      if not reusable.nil?
+        reusable
+      else
+        Container.create!(c_attrs)
+      end
     end
     self.container_uuid = c.uuid
   end
 
+  # Return a runtime_constraints hash that complies with
+  # self.runtime_constraints but is suitable for saving in a container
+  # record, i.e., has specific values instead of ranges.
+  #
+  # Doing this as a step separate from other resolutions, like "git
+  # revision range to commit hash", makes sense only when there is no
+  # opportunity to reuse an existing container (e.g., container reuse
+  # is not implemented yet, or we have already found that no existing
+  # containers are suitable).
+  def runtime_constraints_for_container
+    rc = {}
+    runtime_constraints.each do |k, v|
+      if v.is_a? Array
+        rc[k] = v[0]
+      else
+        rc[k] = v
+      end
+    end
+    rc
+  end
+
+  # Return a mounts hash suitable for a Container, i.e., with every
+  # readonly collection UUID resolved to a PDH.
+  def mounts_for_container
+    c_mounts = {}
+    mounts.each do |k, mount|
+      mount = mount.dup
+      c_mounts[k] = mount
+      if mount['kind'] != 'collection'
+        next
+      end
+      if (uuid = mount.delete 'uuid')
+        c = Collection.
+          readable_by(current_user).
+          where(uuid: uuid).
+          select(:portable_data_hash).
+          first
+        if !c
+          raise ArvadosModel::UnresolvableContainerError.new "cannot mount collection #{uuid.inspect}: not found"
+        end
+        if mount['portable_data_hash'].nil?
+          # PDH not supplied by client
+          mount['portable_data_hash'] = c.portable_data_hash
+        elsif mount['portable_data_hash'] != c.portable_data_hash
+          # UUID and PDH supplied by client, but they don't agree
+          raise ArgumentError.new "cannot mount collection #{uuid.inspect}: current portable_data_hash #{c.portable_data_hash.inspect} does not match #{c['portable_data_hash'].inspect} in request"
+        end
+      end
+    end
+    return c_mounts
+  end
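
For example, a request-side mounts hash like the following (UUID made
up) is rewritten before the container is created: the collection UUID is
removed and replaced by its portable_data_hash, and a client-supplied
PDH that disagrees with the UUID's current PDH is rejected.

    mounts = {
        '/keep/input': {
            'kind': 'collection',
            'uuid': 'zzzzz-4zz18-0123456789abcde',
            # Optional; if supplied, it must match the UUID's current
            # portable_data_hash:
            # 'portable_data_hash': '99999999999999999999999999999999+99',
        },
    }
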
+
+  # Return a container_image PDH suitable for a Container.
+  def container_image_for_container
+    coll = Collection.for_latest_docker_image(container_image)
+    if !coll
+      raise ArvadosModel::UnresolvableContainerError.new "docker image #{container_image.inspect} not found"
+    end
+    return coll.portable_data_hash
+  end
+
   def set_container
     if (container_uuid_changed? and
         not current_user.andand.is_admin and
@@ -108,6 +191,19 @@ class ContainerRequest < ArvadosModel
     end
   end
 
+  def validate_runtime_constraints
+    case self.state
+    when Committed
+      ['vcpus', 'ram'].each do |k|
+        if not (runtime_constraints.include? k and
+                runtime_constraints[k].is_a? Integer and
+                runtime_constraints[k] > 0)
+          errors.add :runtime_constraints, "#{k} must be a positive integer"
+        end
+      end
+    end
+  end
+
   def validate_change
     permitted = [:owner_uuid]
 
@@ -172,7 +268,7 @@ class ContainerRequest < ArvadosModel
   end
 
   def set_requesting_container_uuid
-    return true if self.requesting_container_uuid   # already set
+    return !new_record? if self.requesting_container_uuid   # already set
 
     token_uuid = current_api_client_authorization.andand.uuid
     container = Container.where('auth_uuid=?', token_uuid).order('created_at desc').first
index 0ed53535778335d11b2b12d3007058e9ad76adfc..30ca7f8cb29581b0c65b0a70dd49c02f3b59f753 100644 (file)
@@ -2,6 +2,7 @@ class Job < ArvadosModel
   include HasUuid
   include KindAndEtag
   include CommonApiTemplate
+  extend CurrentApiClient
   serialize :components, Hash
   attr_protected :arvados_sdk_version, :docker_image_locator
   serialize :script_parameters, Hash
@@ -11,6 +12,7 @@ class Job < ArvadosModel
   after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
   before_validation :set_priority
   before_validation :update_state_from_old_state_attrs
+  before_validation :update_script_parameters_digest
   validate :ensure_script_version_is_commit
   validate :find_docker_image_locator
   validate :find_arvados_sdk_version
@@ -105,8 +107,181 @@ class Job < ArvadosModel
     end
   end
 
+  def update_script_parameters_digest
+    self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
+  end
+
+  def self.searchable_columns operator
+    super - ["script_parameters_digest"]
+  end
+
+  def self.load_job_specific_filters attrs, orig_filters, read_users
+    # Convert Job-specific filter entries into general SQL filters.
+    script_info = {"repository" => nil, "script" => nil}
+    git_filters = Hash.new do |hash, key|
+      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
+    end
+    filters = []
+    orig_filters.each do |attr, operator, operand|
+      if (script_info.has_key? attr) and (operator == "=")
+        if script_info[attr].nil?
+          script_info[attr] = operand
+        elsif script_info[attr] != operand
+          raise ArgumentError.new("incompatible #{attr} filters")
+        end
+      end
+      case operator
+      when "in git"
+        git_filters[attr]["min_version"] = operand
+      when "not in git"
+        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
+      when "in docker", "not in docker"
+        image_hashes = Array.wrap(operand).flat_map do |search_term|
+          image_search, image_tag = search_term.split(':', 2)
+          Collection.
+            find_all_for_docker_image(image_search, image_tag, read_users).
+            map(&:portable_data_hash)
+        end
+        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
+      else
+        filters << [attr, operator, operand]
+      end
+    end
+
+    # Build a real script_version filter from any "not? in git" filters.
+    git_filters.each_pair do |attr, filter|
+      case attr
+      when "script_version"
+        script_info.each_pair do |key, value|
+          if value.nil?
+            raise ArgumentError.new("script_version filter needs #{key} filter")
+          end
+        end
+        filter["repository"] = script_info["repository"]
+        if attrs[:script_version]
+          filter["max_version"] = attrs[:script_version]
+        else
+          # Using HEAD, set earlier by the hash default, is fine.
+        end
+      when "arvados_sdk_version"
+        filter["repository"] = "arvados"
+      else
+        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
+      end
+      revisions = Commit.find_commit_range(filter["repository"],
+                                           filter["min_version"],
+                                           filter["max_version"],
+                                           filter["exclude_versions"])
+      if revisions.empty?
+        raise ArgumentError.
+          new("error searching #{filter['repository']} from " +
+              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
+              "excluding #{filter['exclude_versions']}")
+      end
+      filters.append([attr, "in", revisions])
+    end
+
+    filters
+  end
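
For reference, a hedged sketch of supplying these Job-specific operators
from the Python SDK (all names are illustrative; note that explicit
filters must still include "repository" and "script"):

    import arvados

    api = arvados.api('v1')
    api.jobs().create(
        body={'job': {
            'script': 'hash',
            'repository': 'example/repo',
            'script_version': 'master',
            'script_parameters': {},
        }},
        find_or_create=True,
        # "in git" / "not in git" become commit-range filters; "in
        # docker" becomes a filter on docker_image_locator PDHs.
        filters=[['repository', '=', 'example/repo'],
                 ['script', '=', 'hash'],
                 ['script_version', 'in git', 'earliest-ok-tag'],
                 ['docker_image_locator', 'in docker', 'arvados/jobs']],
    ).execute()
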
+
+  def self.find_reusable attrs, params, filters, read_users
+    if filters.empty?  # Translate older creation parameters into filters.
+      filters =
+        [["repository", "=", attrs[:repository]],
+         ["script", "=", attrs[:script]],
+         ["script_version", "not in git", params[:exclude_script_versions]],
+        ].reject { |filter| filter.last.nil? or filter.last.empty? }
+      if !params[:minimum_script_version].blank?
+        filters << ["script_version", "in git",
+                     params[:minimum_script_version]]
+      else
+        filters += default_git_filters("script_version", attrs[:repository],
+                                       attrs[:script_version])
+      end
+      if image_search = attrs[:runtime_constraints].andand["docker_image"]
+        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
+          image_search += ":#{image_tag}"
+        end
+        image_locator = Collection.
+          for_latest_docker_image(image_search).andand.portable_data_hash
+      else
+        image_locator = nil
+      end
+      filters << ["docker_image_locator", "=", image_locator]
+      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
+        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
+      end
+      filters = load_job_specific_filters(attrs, filters, read_users)
+    end
+
+    # Check specified filters for some reasonableness.
+    filter_names = filters.map { |f| f.first }.uniq
+    ["repository", "script"].each do |req_filter|
+      if not filter_names.include?(req_filter)
+        raise ArgumentError.new("#{req_filter} filter required")
+      end
+    end
+
+    # Search for a reusable Job, and return it if found.
+    candidates = Job.
+      readable_by(current_user).
+      where('state = ? or (owner_uuid = ? and state in (?))',
+            Job::Complete, current_user.uuid, [Job::Queued, Job::Running]).
+      where('script_parameters_digest = ?', Job.sorted_hash_digest(attrs[:script_parameters])).
+      where('nondeterministic is distinct from ?', true).
+      order('state desc, created_at') # prefer Running jobs over Queued
+    candidates = apply_filters candidates, filters
+    chosen = nil
+    incomplete_job = nil
+    candidates.each do |j|
+      if j.state != Job::Complete
+        # We'll use this if we don't find a job that has completed
+        incomplete_job ||= j
+        next
+      end
+
+      if chosen == false
+        # We have already decided not to reuse any completed job
+        next
+      elsif chosen
+        if chosen.output != j.output
+          # If two matching jobs produced different outputs, run a new
+          # job (or use one that's already running/queued) instead of
+          # choosing one arbitrarily.
+          chosen = false
+        end
+        # ...and that's the only thing we need to do once we've chosen
+        # a job to reuse.
+      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
+        # As soon as the output we will end up returning (if any) is
+        # decided, check whether it will be visible to the user; if
+        # not, any further investigation of reusable jobs is futile.
+        chosen = false
+      else
+        chosen = j
+      end
+    end
+    chosen || incomplete_job
+  end
+
+  def self.default_git_filters(attr_name, repo_name, refspec)
+    # Return a one-element filter list for `attr_name` = the latest
+    # commit available in `repo_name` at `refspec`, or an empty list
+    # if refspec can't be resolved.
+    commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
+    if commit_hash = commits.first
+      [[attr_name, "=", commit_hash]]
+    else
+      []
+    end
+  end
+
   protected
 
+  def self.sorted_hash_digest h
+    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
+  end
+
   def foreign_key_attributes
     super + %w(output log)
   end
index b10a491163dc3c905c8ec52e120a6f263904457e..f8d624acb77c19261dcd16f5b2780653d774ac97 100644 (file)
@@ -53,6 +53,24 @@ class Log < ArvadosModel
     self
   end
 
+  def self.readable_by(*users_list)
+    if users_list.select { |u| u.is_admin }.any?
+      return self
+    end
+    user_uuids = users_list.map { |u| u.uuid }
+    uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
+    uuid_list.uniq!
+    permitted = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (:uuids))"
+    joins("LEFT JOIN container_requests ON container_requests.container_uuid=logs.object_uuid").
+      where("logs.object_uuid IN #{permitted} OR "+
+            "container_requests.uuid IN (:uuids) OR "+
+            "container_requests.owner_uuid IN (:uuids) OR "+
+            "logs.object_uuid IN (:uuids) OR "+
+            "logs.owner_uuid IN (:uuids) OR "+
+            "logs.object_owner_uuid IN (:uuids)",
+            uuids: uuid_list)
+  end
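
Client-visible effect (UUID made up): a non-admin can now list log
events for a container referenced by one of their container requests.

    import arvados

    api = arvados.api('v1')
    api.logs().list(
        filters=[['object_uuid', '=', 'zzzzz-dz642-0123456789abcde']],
        order=['created_at asc'],
    ).execute()
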
+
   protected
 
   def permission_to_create
index 553a3be5aebbd703690f54156a6c0efb22dbe528..18d33a6b0b0a4c2b69e27ec2e387e2be867398b4 100644 (file)
@@ -312,8 +312,8 @@ class User < ArvadosModel
       self.class.
           where("username like '#{pattern}'").
           select(:username).
-          order(username: :asc).
-          find_each do |other_user|
+          order('username asc').
+          each do |other_user|
         if other_user.username > next_username
           break
         elsif other_user.username == next_username
diff --git a/services/api/app/models/workflow.rb b/services/api/app/models/workflow.rb
new file mode 100644 (file)
index 0000000..f786914
--- /dev/null
@@ -0,0 +1,42 @@
+class Workflow < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  validate :validate_definition
+  before_save :set_name_and_description
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :description
+    t.add :definition
+  end
+
+  def validate_definition
+    begin
+      @definition_yaml = YAML.load self.definition if !definition.nil?
+    rescue => e
+      errors.add :definition, "is not valid yaml: #{e.message}"
+    end
+  end
+
+  def set_name_and_description
+    old_wf = {}
+    begin
+      old_wf = YAML.load self.definition_was if !self.definition_was.nil?
+    rescue => e
+      logger.warn "set_name_and_description error: #{e.message}"
+      return
+    end
+
+    ['name', 'description'].each do |a|
+      if !self.changes.include?(a)
+        v = self.read_attribute(a)
+        if !v.present? or v == old_wf[a]
+          val = @definition_yaml[a] if self.definition and @definition_yaml
+          self[a] = val
+        end
+      end
+    end
+  end
+end
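
Sketch of the resulting behavior from the Python SDK (assuming the new
workflows endpoint appears in the discovery document): leaving name and
description blank lets the before_save hook copy them from the YAML
definition.

    import arvados

    api = arvados.api('v1')
    wf = api.workflows().create(body={'workflow': {
        'definition': 'name: Example workflow\ndescription: Copied from YAML\n',
    }}).execute()
    print(wf['name'])         # "Example workflow"
    print(wf['description'])  # "Copied from YAML"
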
index ddc6eede835a8cf4ceb790a3056e313e6c1e91d0..96e7596e8bda000c65d25393fe70d91761b9c04e 100644 (file)
@@ -183,6 +183,12 @@ common:
   # Default lifetime for ephemeral collections: 2 weeks.
   default_trash_lifetime: 1209600
 
+  # Maximum characters of (JSON-encoded) query parameters to include
+  # in each request log entry. When params exceed this size, they will
+  # be JSON-encoded, truncated to this size, and logged as
+  # params_truncated.
+  max_request_log_params_size: 2000
+
   # Maximum size (in bytes) allowed for a single API request.  This
   # limit is published in the discovery document for use by clients.
   # Note: You must separately configure the upstream web server or
@@ -214,6 +220,12 @@ common:
   # stderr logs from the logs table.
   clean_job_log_rows_after: <%= 30.days %>
 
+  # When you run the db:delete_old_container_logs task, it will find
+  # containers that have been finished for at least this many seconds,
+  # and delete their stdout, stderr, arv-mount, crunch-run, and
+  # crunchstat logs from the logs table.
+  clean_container_log_rows_after: <%= 30.days %>
+
   # The maximum number of compute nodes that can be in use simultaneously
   # If this limit is reduced, any existing nodes with slot number >= new limit
   # will not be counted against the new limit. In other words, the new limit
@@ -243,6 +255,14 @@ common:
   # silenced by throttling are not counted against this total.
   crunch_limit_log_bytes_per_job: 67108864
 
+  # Attributes to suppress in events and audit logs.  Notably,
+  # specifying ["manifest_text"] here typically makes the database
+  # smaller and faster.
+  #
+  # Warning: Using any non-empty value here can have undesirable side
+  # effects for any client or component that relies on event logs.
+  # Use at your own risk.
+  unlogged_attributes: []
 
   ###
   ### Crunch, DNS & compute node management
index de9770d7b7d1b45d7466b87b93f1e2fee9e9afab..76234d3e4b0f6ab148f73cb7a1242af1eacefb6a 100644 (file)
@@ -28,7 +28,7 @@ $application_config = {}
   path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
   if File.exists? path
     yaml = ERB.new(IO.read path).result(binding)
-    confs = YAML.load(yaml)
+    confs = YAML.load(yaml, deserialize_symbols: true)
     # Ignore empty YAML file:
     next if confs == false
     $application_config.merge!(confs['common'] || {})
diff --git a/services/api/config/initializers/lograge.rb b/services/api/config/initializers/lograge.rb
new file mode 100644 (file)
index 0000000..4b1aea9
--- /dev/null
@@ -0,0 +1,14 @@
+Server::Application.configure do
+  config.lograge.enabled = true
+  config.lograge.formatter = Lograge::Formatters::Logstash.new
+  config.lograge.custom_options = lambda do |event|
+    exceptions = %w(controller action format id)
+    params = event.payload[:params].except(*exceptions)
+    params_s = Oj.dump(params)
+    if params_s.length > Rails.configuration.max_request_log_params_size
+      { params_truncated: params_s[0..Rails.configuration.max_request_log_params_size] + "[...]" }
+    else
+      { params: params }
+    end
+  end
+end
index ed8f8d89af9c2d429dfe661633df2ec36a0484e9..3638c726e9bf1118540243476169db913dbc7e58 100644 (file)
@@ -31,6 +31,8 @@ Server::Application.routes.draw do
       resources :job_tasks
       resources :containers do
         get 'auth', on: :member
+        post 'lock', on: :member
+        post 'unlock', on: :member
       end
       resources :container_requests
       resources :jobs do
@@ -52,6 +54,7 @@ Server::Application.routes.draw do
       end
       resources :pipeline_instances
       resources :pipeline_templates
+      resources :workflows
       resources :repositories do
         get 'get_all_permissions', on: :collection
       end
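Editor's note: the two new member routes map to POST /arvados/v1/containers/:uuid/lock and .../unlock. A hedged functional-test sketch of exercising them, reusing the :dispatch1 token and :queued container fixtures that appear in the controller tests at the end of this commit:

    authorize_with :dispatch1
    post :lock, id: containers(:queued).uuid
    assert_response :success
    post :unlock, id: containers(:queued).uuid
    assert_response :success
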
diff --git a/services/api/db/migrate/20160808151559_create_workflows.rb b/services/api/db/migrate/20160808151559_create_workflows.rb
new file mode 100644 (file)
index 0000000..23319b6
--- /dev/null
@@ -0,0 +1,30 @@
+class CreateWorkflows < ActiveRecord::Migration
+  def up
+    create_table :workflows do |t|
+      t.string :uuid
+      t.string :owner_uuid
+      t.datetime :created_at
+      t.datetime :modified_at
+      t.string :modified_by_client_uuid
+      t.string :modified_by_user_uuid
+      t.string :name
+      t.text :description
+      t.text :workflow
+
+      t.timestamps
+    end
+
+    add_index :workflows, :uuid, :unique => true
+    add_index :workflows, :owner_uuid
+    add_index :workflows, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"], name: 'workflows_search_idx'
+    execute "CREATE INDEX workflows_full_text_search_idx ON workflows USING gin(#{Workflow.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :workflows, :name => 'workflows_full_text_search_idx'
+    remove_index :workflows, :name => 'workflows_search_idx'
+    remove_index :workflows, :owner_uuid
+    remove_index :workflows, :uuid
+    drop_table :workflows
+  end
+end
diff --git a/services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb b/services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb
new file mode 100644 (file)
index 0000000..8ed3cfe
--- /dev/null
@@ -0,0 +1,6 @@
+class AddScriptParametersDigestToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :script_parameters_digest, :string
+    add_index :jobs, :script_parameters_digest
+  end
+end
diff --git a/services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb b/services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb
new file mode 100644 (file)
index 0000000..9f6c3ee
--- /dev/null
@@ -0,0 +1,21 @@
+class PopulateScriptParametersDigest < ActiveRecord::Migration
+  def up
+    done = false
+    while !done
+      done = true
+      Job.
+        where('script_parameters_digest is null').
+        select([:id, :script_parameters, :script_parameters_digest]).
+        limit(200).
+        each do |j|
+        done = false
+        Job.
+          where('id=? or script_parameters=?', j.id, j.script_parameters.to_yaml).
+          update_all(script_parameters_digest: j.update_script_parameters_digest)
+      end
+    end
+  end
+
+  def down
+  end
+end
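Editor's note: the inner update_all deliberately stamps every job whose script_parameters serialize identically, so each pass of the outer loop clears a whole equivalence class and the loop converges quickly. The digest values seeded in the job fixtures below are consistent with an MD5 of the canonically serialized parameter hash; a hedged sketch (the exact canonicalization lives in the Job model and may differ in detail):

    require 'digest/md5'

    Digest::MD5.hexdigest('{}')   # => "99914b932bd37a50b983c5e7c90ae93b"
    # i.e. the digest the fixtures record for jobs with empty script_parameters.
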
diff --git a/services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb b/services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb
new file mode 100644 (file)
index 0000000..18eed7a
--- /dev/null
@@ -0,0 +1,17 @@
+class RepairScriptParametersDigest < ActiveRecord::Migration
+  def up
+    Job.find_each do |j|
+      have = j.script_parameters_digest
+      want = j.update_script_parameters_digest
+      if have != want
+        # where().update_all() skips validations, event logging, and
+        # timestamp updates, and just runs SQL. (This change is
+        # invisible to clients.)
+        Job.where('id=?', j.id).update_all(script_parameters_digest: want)
+      end
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb b/services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb
new file mode 100644 (file)
index 0000000..a5471ac
--- /dev/null
@@ -0,0 +1,10 @@
+class RenameWorkflowToDefinition < ActiveRecord::Migration
+  def up
+    rename_column :workflows, :workflow, :definition
+  end
+
+  def down
+    rename_column :workflows, :definition, :workflow
+  end
+end
+
index 4bf4a173bd9d1c31e04d0f7517c1927baf9f3ff2..5f80fb2c707dc1b5a72162b5a16db88508b3a1c1 100644 (file)
@@ -539,7 +539,8 @@ CREATE TABLE jobs (
     description character varying(524288),
     state character varying(255),
     arvados_sdk_version character varying(255),
-    components text
+    components text,
+    script_parameters_digest character varying(255)
 );
 
 
@@ -1054,6 +1055,44 @@ CREATE SEQUENCE virtual_machines_id_seq
 ALTER SEQUENCE virtual_machines_id_seq OWNED BY virtual_machines.id;
 
 
+--
+-- Name: workflows; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE workflows (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    name character varying(255),
+    description text,
+    definition text,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE workflows_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE workflows_id_seq OWNED BY workflows.id;
+
+
 --
 -- Name: id; Type: DEFAULT; Schema: public; Owner: -
 --
@@ -1222,6 +1261,13 @@ ALTER TABLE ONLY users ALTER COLUMN id SET DEFAULT nextval('users_id_seq'::regcl
 ALTER TABLE ONLY virtual_machines ALTER COLUMN id SET DEFAULT nextval('virtual_machines_id_seq'::regclass);
 
 
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY workflows ALTER COLUMN id SET DEFAULT nextval('workflows_id_seq'::regclass);
+
+
 --
 -- Name: api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
 --
@@ -1414,6 +1460,14 @@ ALTER TABLE ONLY virtual_machines
     ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
 
 
+--
+-- Name: workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY workflows
+    ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -1799,6 +1853,13 @@ CREATE INDEX index_jobs_on_owner_uuid ON jobs USING btree (owner_uuid);
 CREATE INDEX index_jobs_on_script ON jobs USING btree (script);
 
 
+--
+-- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_script_parameters_digest ON jobs USING btree (script_parameters_digest);
+
+
 --
 -- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2191,6 +2252,20 @@ CREATE INDEX index_virtual_machines_on_owner_uuid ON virtual_machines USING btre
 CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btree (uuid);
 
 
+--
+-- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_workflows_on_owner_uuid ON workflows USING btree (owner_uuid);
+
+
+--
+-- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_workflows_on_uuid ON workflows USING btree (uuid);
+
+
 --
 -- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2331,6 +2406,20 @@ CREATE INDEX users_search_index ON users USING btree (uuid, owner_uuid, modified
 CREATE INDEX virtual_machines_search_index ON virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
 
 
+--
+-- Name: workflows_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX workflows_full_text_search_idx ON workflows USING gin (to_tsvector('english'::regconfig, (((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE(definition, ''::text))));
+
+
+--
+-- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX workflows_search_idx ON workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
 --
 -- PostgreSQL database dump complete
 --
@@ -2589,4 +2678,14 @@ INSERT INTO schema_migrations (version) VALUES ('20160324144017');
 
 INSERT INTO schema_migrations (version) VALUES ('20160506175108');
 
-INSERT INTO schema_migrations (version) VALUES ('20160509143250');
\ No newline at end of file
+INSERT INTO schema_migrations (version) VALUES ('20160509143250');
+
+INSERT INTO schema_migrations (version) VALUES ('20160808151559');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195557');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195725');
+
+INSERT INTO schema_migrations (version) VALUES ('20160901210110');
+
+INSERT INTO schema_migrations (version) VALUES ('20160909181442');
\ No newline at end of file
index aaeebdccf0cd52e8d0fb38aa38ce9d2bf436b626..16bb030941c3033ebf32cb972a645eb821a063d3 100644 (file)
@@ -93,8 +93,8 @@ class EventBus
     begin
       # Must have at least one filter set up to receive events
       if ws.filters.length > 0
-        # Start with log rows readable by user, sorted in ascending order
-        logs = Log.readable_by(ws.user).order("id asc")
+        # Start with log rows readable by user
+        logs = Log.readable_by(ws.user)
 
         cond_id = nil
         cond_out = []
@@ -132,11 +132,21 @@ class EventBus
           logs = logs.where(cond_id, *param_out)
         end
 
-        # Execute query and actually send the matching log rows
-        logs.each do |l|
+        # Execute query and actually send the matching log rows. Load
+        # the full log records only when we're ready to send them,
+        # though: otherwise, (1) postgres has to build the whole
+        # result set and return it to us before we can send the first
+        # event, and (2) we store lots of records in memory while
+        # waiting to spool them out to the client. Both of these are
+        # troublesome when log records are large (e.g., a collection
+        # update contains both old and new manifest_text).
+        #
+        # Note: find_each implies order('id asc'), which is what we
+        # want.
+        logs.select('logs.id').find_each do |l|
           if not ws.sent_ids.include?(l.id)
             # only send if not a duplicate
-            ws.send(l.as_api_response.to_json)
+            ws.send(Log.find(l.id).as_api_response.to_json)
           end
           if not ws.last_log_id.nil?
             # record ids only when sending "catchup" messages, not notifies
index 860513f2f5846dc2abc1e8e08134d802e42110b0..0ad543edbf857c8cfccd7934762fe5918374cef4 100644 (file)
@@ -1,4 +1,7 @@
+require 'current_api_client'
+
 module SimulateJobLog
+  include CurrentApiClient
   def replay(filename, multiplier = 1, simulated_job_uuid = nil)
     raise "Environment must be development or test" unless [ 'test', 'development' ].include? ENV['RAILS_ENV']
 
diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake
new file mode 100644 (file)
index 0000000..3421fb8
--- /dev/null
@@ -0,0 +1,14 @@
+# This task finds containers that have been finished for at least as long as
+# the duration specified in the `clean_container_log_rows_after` config setting,
+# and deletes their stdout, stderr, arv-mount, crunch-run, and crunchstat logs
+# from the logs table.
+
+namespace :db do
+  desc "Remove old container log entries from the logs table"
+
+  task delete_old_container_logs: :environment do
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND containers.finished_at < '#{Rails.configuration.clean_container_log_rows_after.ago}')"
+
+    ActiveRecord::Base.connection.execute(delete_sql)
+  end
+end
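Editor's note: a hedged minitest-style sketch of how this task behaves against the log fixtures added later in this commit (stderr_for_ancient_container belongs to a container finished two years ago; stderr_for_running_container's container has not finished):

    Rake::Task['db:delete_old_container_logs'].invoke
    # Logs whose containers finished longer ago than
    # clean_container_log_rows_after are deleted:
    refute Log.exists?(uuid: 'zzzzz-57u5n-containerlog001')
    # Logs for still-running containers are retained:
    assert Log.exists?(uuid: 'zzzzz-57u5n-containerlog005')
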
index 7f2b31e1d61ae26aa1b8cfd3bb91f0b74f38c8bc..18a5f02277a670e007ad15e008740e0d011c4d2e 100644 (file)
@@ -5,13 +5,8 @@
 namespace :db do
   desc "Remove old job stderr entries from the logs table"
   task delete_old_job_logs: :environment do
-    Log.select("logs.id").
-        joins("JOIN jobs ON object_uuid = jobs.uuid").
-        where("event_type = :etype AND jobs.log IS NOT NULL AND jobs.finished_at < :age",
-              etype: "stderr",
-              age: Rails.configuration.clean_job_log_rows_after.ago).
-        find_in_batches do |old_log_ids|
-      Log.where(id: old_log_ids.map(&:id)).delete_all
-    end
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND jobs.finished_at < '#{Rails.configuration.clean_job_log_rows_after.ago}')"
+
+    ActiveRecord::Base.connection.execute(delete_sql)
   end
 end
index 7921c35865a179b5bf7f0fcbb981003dc830d0f5..78c70fdaaccce6e9e47fe5b12c0a3ef98a982641 100644 (file)
@@ -2,7 +2,7 @@ FactoryGirl.define do
   factory :api_client do
     is_trusted false
     to_create do |instance|
-      act_as_system_user do
+      CurrentApiClientHelper.act_as_system_user do
         instance.save!
       end
     end
index 8bd569e8eb2562449227f96c33428e434c4045aa..c3883246eb622c91687f3ccac66683e6092491b6 100644 (file)
@@ -11,7 +11,7 @@ FactoryGirl.define do
     end
 
     to_create do |instance|
-      act_as_user instance.user do
+      CurrentApiClientHelper.act_as_user instance.user do
         instance.save!
       end
     end
index 56e91252171d4ab4d70c633dc69ceaebe95505b1..6ec9e9f05d5ad7cdeff29cda76c20abbe7a4eae1 100644 (file)
@@ -1,4 +1,6 @@
-include CurrentApiClient
+class CurrentApiClientHelper
+  extend CurrentApiClient
+end
 
 FactoryGirl.define do
   factory :user do
@@ -6,7 +8,7 @@ FactoryGirl.define do
       join_groups []
     end
     after :create do |user, evaluator|
-      act_as_system_user do
+      CurrentApiClientHelper.act_as_system_user do
         evaluator.join_groups.each do |g|
           Link.create!(tail_uuid: user.uuid,
                        head_uuid: g.uuid,
@@ -27,7 +29,7 @@ FactoryGirl.define do
     factory :active_user do
       is_active true
       after :create do |user|
-        act_as_system_user do
+        CurrentApiClientHelper.act_as_system_user do
           Link.create!(tail_uuid: user.uuid,
                        head_uuid: Group.where('uuid ~ ?', '-f+$').first.uuid,
                        link_class: 'permission',
@@ -36,7 +38,7 @@ FactoryGirl.define do
       end
     end
     to_create do |instance|
-      act_as_system_user do
+      CurrentApiClientHelper.act_as_system_user do
         instance.save!
       end
     end
index 1e3d773550579b03a188d2ea129928cd457cf291..1daccda55802ece4b18ca3d273bda7789ea5cab4 100644 (file)
@@ -4,15 +4,18 @@ queued:
   name: queued
   state: Committed
   priority: 1
-  created_at: 2016-01-11 11:11:11.111111111 Z
-  updated_at: 2016-01-11 11:11:11.111111111 Z
-  modified_at: 2016-01-11 11:11:11.111111111 Z
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
   modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   container_image: test
   cwd: test
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-queuedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
 running:
   uuid: zzzzz-xvhdp-cr4runningcntnr
@@ -20,17 +23,20 @@ running:
   name: running
   state: Committed
   priority: 1
-  created_at: 2016-01-11 11:11:11.111111111 Z
-  updated_at: 2016-01-11 11:11:11.111111111 Z
-  modified_at: 2016-01-11 11:11:11.111111111 Z
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
   modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   container_image: test
   cwd: test
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontainr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
-running-older:
+running_older:
   uuid: zzzzz-xvhdp-cr4runningcntn2
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   name: running
@@ -45,6 +51,9 @@ running-older:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontain2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
 completed:
   uuid: zzzzz-xvhdp-cr4completedctr
@@ -61,6 +70,9 @@ completed:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-compltcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
 completed-older:
   uuid: zzzzz-xvhdp-cr4completedcr2
@@ -77,6 +89,28 @@ completed-older:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-compltcontainr2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+requester:
+  uuid: zzzzz-xvhdp-9zacv3o1xw6sxz5
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: requester
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: /
+  output_path: /output
+  command: ["request-another-container", "echo", "hello"]
+  container_uuid: zzzzz-dz642-requestingcntnr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
 cr_for_requester:
   uuid: zzzzz-xvhdp-cr4requestercnt
@@ -94,6 +128,9 @@ cr_for_requester:
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-requestercntnr1
   requesting_container_uuid: zzzzz-dz642-requestingcntnr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
 
 cr_for_requester2:
   uuid: zzzzz-xvhdp-cr4requestercn2
@@ -110,3 +147,537 @@ cr_for_requester2:
   output_path: test
   command: ["echo", "hello"]
   requesting_container_uuid: zzzzz-dz642-requestercntnr1
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+running_anonymous_accessible:
+  uuid: zzzzz-xvhdp-runninganonaccs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: running anonymously accessible cr
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontain2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+cr_for_failed:
+  uuid: zzzzz-xvhdp-cr4failedcontnr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: cr for container exit code not 0
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-failedcontainr1
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_queued_container:
+  uuid: zzzzz-xvhdp-canceledqueuedc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with queued container
+  state: Committed
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-queuedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_locked_container:
+  uuid: zzzzz-xvhdp-canceledlocekdc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with locked container
+  state: Committed
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-lockedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_running_container:
+  uuid: zzzzz-xvhdp-canceledrunning
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with running container
+  state: Committed
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontainr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+uncommitted:
+  uuid: zzzzz-xvhdp-cr4uncommittedc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: "Uncommitted"
+  container_image: arvados/jobs
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "$graph": [{
+                "id": "#main",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "doc": "a longer documentation string for this parameter (optional)",
+                        "type": "boolean",
+                        "id": "ex_boolean",
+                        "label": "a short label for this parameter (optional)",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "boolean"],
+                        "id": "ex_boolean_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "directory selection should present the workbench collection picker",
+                        "type": "Directory",
+                        "id": "ex_dir",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "double",
+                        "id": "ex_double",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "file selection should present the workbench file picker",
+                        "type": "File",
+                        "id": "ex_file",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "float",
+                        "id": "ex_float",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "int",
+                        "id": "ex_int",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "ex_int_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "long",
+                        "id": "ex_long",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "string",
+                        "id": "ex_string",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "string"],
+                        "id": "ex_string_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        },
+                        "id": "ex_enum",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        }],
+                        "id": "ex_enum_opt",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }]
+          }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted_ready_to_run:
+  uuid: zzzzz-xvhdp-cr4uncommittedd
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted_ready_to_run
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: "Uncommitted"
+  container_image: arvados/jobs
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "doc": "a longer documentation string for this parameter (optional)",
+                        "type": "boolean",
+                        "id": "ex_boolean",
+                        "label": "a short label for this parameter (optional)",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "boolean"],
+                        "id": "ex_boolean_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "directory selection should present the workbench collection picker",
+                        "type": "Directory",
+                        "id": "ex_dir",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "double",
+                        "id": "ex_double",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "file selection should present the workbench file picker",
+                        "type": "File",
+                        "id": "ex_file",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "float",
+                        "id": "ex_float",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "int",
+                        "id": "ex_int",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "ex_int_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "long",
+                        "id": "ex_long",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "string",
+                        "id": "ex_string",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "string"],
+                        "id": "ex_string_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        },
+                        "id": "ex_enum",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        }],
+                        "id": "ex_enum_opt",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {
+              "ex_string_opt": null,
+              "ex_int_opt": null,
+              "ex_boolean": false,
+              "ex_boolean_opt": true,
+              "ex_dir": {
+                "class": "Directory",
+                "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+                "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w"
+              },
+              "ex_double": 66.0,
+              "ex_file": {
+                "class": "File",
+                "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45/foo",
+                "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w/foo"
+              },
+              "ex_float": 55.0,
+              "ex_int": 55,
+              "ex_long": 22,
+              "ex_string": "qq",
+              "ex_enum": "a"
+            }
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-directory-input:
+  uuid: zzzzz-xvhdp-cr4uncommitted2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with directory input
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "Directory",
+                        "id": "directory_type",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-file-input:
+  uuid: zzzzz-xvhdp-cr4uncommittedf
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with file input
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "File",
+                        "id": "file_type",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-required-and-optional-inputs:
+  uuid: zzzzz-xvhdp-cr4uncommitted3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with required and optional inputs
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "int",
+                        "id": "int_required",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "int_optional",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# container requests in project_with_2_pipelines_and_60_crs
+<% for i in 1..60 do %>
+cr_<%=i%>_of_60:
+  uuid: zzzzz-xvhdp-oneof60crs<%= i.to_s.rjust(5, '0') %>
+  created_at: <%= ((i+5)/5).hour.ago.to_s(:db) %>
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
+  name: cr-<%= i.to_s %>
+  output_path: test
+  command: ["echo", "hello"]
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
index 1796e498f4c04a4bad2c0d7796c8d4e9838aca84..29266d3ab8f50f87586086c8702c676c2d7a7cb1 100644 (file)
@@ -1,13 +1,12 @@
 queued:
   uuid: zzzzz-dz642-queuedcontainer
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Queued
   priority: 1
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
   container_image: test
   cwd: test
-  output: test
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -16,7 +15,7 @@ queued:
 
 running:
   uuid: zzzzz-dz642-runningcontainr
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Running
   priority: 1
   created_at: <%= 1.minute.ago.to_s(:db) %>
@@ -24,7 +23,6 @@ running:
   started_at: <%= 1.minute.ago.to_s(:db) %>
   container_image: test
   cwd: test
-  output: test
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -32,9 +30,9 @@ running:
     vcpus: 4
   auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
 
-running-older:
+running_older:
   uuid: zzzzz-dz642-runningcontain2
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Running
   priority: 1
   created_at: <%= 2.minute.ago.to_s(:db) %>
@@ -42,7 +40,6 @@ running-older:
   started_at: <%= 2.minute.ago.to_s(:db) %>
   container_image: test
   cwd: test
-  output: test
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -51,14 +48,13 @@ running-older:
 
 locked:
   uuid: zzzzz-dz642-lockedcontainer
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Locked
   priority: 2
   created_at: <%= 2.minute.ago.to_s(:db) %>
   updated_at: <%= 2.minute.ago.to_s(:db) %>
   container_image: test
   cwd: test
-  output: test
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -67,8 +63,9 @@ locked:
 
 completed:
   uuid: zzzzz-dz642-compltcontainer
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Complete
+  exit_code: 0
   priority: 1
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
@@ -76,7 +73,8 @@ completed:
   finished_at: 2016-01-12 11:12:13.111111111 Z
   container_image: test
   cwd: test
-  output: zzzzz-4zz18-znfnqtbbv4spc3w
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -85,8 +83,9 @@ completed:
 
 completed_older:
   uuid: zzzzz-dz642-compltcontainr2
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Complete
+  exit_code: 0
   priority: 1
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
@@ -94,7 +93,7 @@ completed_older:
   finished_at: 2016-01-14 11:12:13.111111111 Z
   container_image: test
   cwd: test
-  output: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -103,14 +102,15 @@ completed_older:
 
 requester:
   uuid: zzzzz-dz642-requestingcntnr
-  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  owner_uuid: zzzzz-tpzed-000000000000000
   state: Complete
+  exit_code: 0
   priority: 1
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
   container_image: test
   cwd: test
-  output: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
@@ -119,17 +119,88 @@ requester:
 
 requester_container:
   uuid: zzzzz-dz642-requestercntnr1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
+
+failed_container:
+  uuid: zzzzz-dz642-failedcontainr1
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   state: Complete
+  exit_code: 33
   priority: 1
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
   container_image: test
   cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+
+ancient_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer01
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  finished_at: <%= 2.year.ago.to_s(:db) %>
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  output: test
+
+previous_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer02
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  finished_at: <%= 1.month.ago.to_s(:db) %>
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
   output: test
+
+running_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer03
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Running
+  priority: 1
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
   output_path: test
   command: ["echo", "hello"]
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
-  auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
index 4029846484d41a79acc2443246bae76a8c526fa3..b90a25ced816e2a19fdeb5d70e5b5fa6c4a2f7a7 100644 (file)
@@ -196,15 +196,15 @@ project_with_10_pipelines:
   description: project with 10 pipelines
   group_class: project
 
-project_with_2_pipelines_and_60_jobs:
-  uuid: zzzzz-j7d0g-nnjobspipelines
+project_with_2_pipelines_and_60_crs:
+  uuid: zzzzz-j7d0g-nnncrspipelines
   owner_uuid: zzzzz-tpzed-user1withloadab
   created_at: 2014-04-21 15:37:48 -0400
   modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
   modified_by_user_uuid: zzzzz-tpzed-user1withloadab
   modified_at: 2014-04-21 15:37:48 -0400
   updated_at: 2014-04-21 15:37:48 -0400
-  name: project with 2 pipelines and 60 jobs
+  name: project with 2 pipelines and 60 crs
   description: This will result in two pages in the display
   group_class: project
 
index d0c22d305954a2e832d3e8c4dac43725a982db26..1a06d573d914462bdcabd8a1a0bcb25008bc36ff 100644 (file)
@@ -23,6 +23,7 @@ running:
     done: 1
   runtime_constraints: {}
   state: Running
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 running_cancelled:
   uuid: zzzzz-8i9sb-4cf0nhn6xte809j
@@ -49,6 +50,7 @@ running_cancelled:
     done: 1
   runtime_constraints: {}
   state: Cancelled
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 uses_nonexistent_script_version:
   uuid: zzzzz-8i9sb-7m339pu0x9mla88
@@ -75,6 +77,7 @@ uses_nonexistent_script_version:
     done: 1
   runtime_constraints: {}
   state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 foobar:
   uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
@@ -103,6 +106,7 @@ foobar:
     done: 1
   runtime_constraints: {}
   state: Complete
+  script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
 
 barbaz:
   uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
@@ -131,6 +135,7 @@ barbaz:
     done: 1
   runtime_constraints: {}
   state: Complete
+  script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
 
 runningbarbaz:
   uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
@@ -159,6 +164,7 @@ runningbarbaz:
     done: 0
   runtime_constraints: {}
   state: Running
+  script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
 
 previous_job_run:
   uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
@@ -175,6 +181,7 @@ previous_job_run:
   log: d41d8cd98f00b204e9800998ecf8427e+0
   output: ea10d51bcf88862dbcc36eb292017dfd+45
   state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
 
 previous_ancient_job_run:
   uuid: zzzzz-8i9sb-ahd7cie8jah9qui
@@ -191,6 +198,7 @@ previous_ancient_job_run:
   log: d41d8cd98f00b204e9800998ecf8427e+0
   output: ea10d51bcf88862dbcc36eb292017dfd+45
   state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
 
 previous_docker_job_run:
   uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
@@ -208,6 +216,7 @@ previous_docker_job_run:
   output: ea10d51bcf88862dbcc36eb292017dfd+45
   docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
   state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
 
 previous_ancient_docker_image_job_run:
   uuid: zzzzz-8i9sb-t3b460aolxxuldl
@@ -225,6 +234,7 @@ previous_ancient_docker_image_job_run:
   output: ea10d51bcf88862dbcc36eb292017dfd+45
   docker_image_locator: b519d9cb706a29fc7ea24dbea2f05851+93
   state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
 
 previous_job_run_with_arvados_sdk_version:
   uuid: zzzzz-8i9sb-eoo0321or2dw2jg
@@ -244,6 +254,7 @@ previous_job_run_with_arvados_sdk_version:
   success: true
   output: ea10d51bcf88862dbcc36eb292017dfd+45
   state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
 
 previous_job_run_no_output:
   uuid: zzzzz-8i9sb-cjs4pklxxjykppp
@@ -258,6 +269,7 @@ previous_job_run_no_output:
   success: true
   output: ~
   state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
 
 previous_job_run_superseded_by_hash_branch:
   # This supplied_script_version is a branch name with later commits.
@@ -272,6 +284,7 @@ previous_job_run_superseded_by_hash_branch:
   success: true
   output: d41d8cd98f00b204e9800998ecf8427e+0
   state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 nondeterminisic_job_run:
   uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
@@ -286,6 +299,7 @@ nondeterminisic_job_run:
   success: true
   nondeterministic: true
   state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
 
 nearly_finished_job:
   uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
@@ -307,6 +321,7 @@ nearly_finished_job:
     done: 0
   runtime_constraints: {}
   state: Complete
+  script_parameters_digest: 7ea26d58a79b7f5db9f90fb1e33d3006
 
 queued:
   uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
@@ -329,6 +344,7 @@ queued:
   tasks_summary: {}
   runtime_constraints: {}
   state: Queued
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 # A job with a log collection that can be parsed by the log viewer.
 job_with_real_log:
@@ -338,6 +354,7 @@ job_with_real_log:
   log: 0b9a7787660e1fce4a93f33e01376ba6+81
   script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
   state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 cancelled:
   uuid: zzzzz-8i9sb-4cf0abc123e809j
@@ -362,6 +379,7 @@ cancelled:
     done: 1
   runtime_constraints: {}
   state: Cancelled
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 job_in_subproject:
   uuid: zzzzz-8i9sb-subprojectjob01
@@ -372,6 +390,7 @@ job_in_subproject:
   script: hash
   script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
   state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 running_will_be_completed:
   uuid: zzzzz-8i9sb-rshmckwoma9pjh8
@@ -396,6 +415,7 @@ running_will_be_completed:
     done: 1
   runtime_constraints: {}
   state: Running
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 graph_stage1:
   uuid: zzzzz-8i9sb-graphstage10000
@@ -405,6 +425,7 @@ graph_stage1:
   script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
   state: Complete
   output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
 
 graph_stage2:
   uuid: zzzzz-8i9sb-graphstage20000
@@ -417,6 +438,7 @@ graph_stage2:
     input: fa7aeb5140e2848d39b416daeef4ffc5+45
     input2: "stuff"
   output: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+  script_parameters_digest: 4900033ec5cfaf8a63566f3664aeaa70
 
 graph_stage3:
   uuid: zzzzz-8i9sb-graphstage30000
@@ -429,6 +451,7 @@ graph_stage3:
     input: fa7aeb5140e2848d39b416daeef4ffc5+45
     input2: "stuff2"
   output: ea10d51bcf88862dbcc36eb292017dfd+45
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
 
 job_with_latest_version:
   uuid: zzzzz-8i9sb-nj8ioxnrvjtyk2b
@@ -442,9 +465,9 @@ job_with_latest_version:
   supplied_script_version: master
   script_parameters:
     input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
-  created_at: <%= 4.minute.ago.to_s(:db) %>
-  started_at: <%= 3.minute.ago.to_s(:db) %>
-  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 2.minute.ago.to_s(:db) %>
+  finished_at: <%= 1.minute.ago.to_s(:db) %>
   running: false
   success: true
   output: fa7aeb5140e2848d39b416daeef4ffc5+45
@@ -458,6 +481,7 @@ job_with_latest_version:
     done: 1
   runtime_constraints: {}
   state: Complete
+  script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
 
 running_job_in_publicly_accessible_project:
   uuid: zzzzz-8i9sb-n7omg50bvt0m1nf
@@ -470,6 +494,7 @@ running_job_in_publicly_accessible_project:
   script_parameters:
     input: fa7aeb5140e2848d39b416daeef4ffc5+45
     input2: "stuff2"
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
 
 completed_job_in_publicly_accessible_project:
   uuid: zzzzz-8i9sb-jyq01m7in1jlofj
@@ -484,6 +509,7 @@ completed_job_in_publicly_accessible_project:
     input2: "stuff2"
   log: zzzzz-4zz18-4en62shvi99lxd4
   output: b519d9cb706a29fc7ea24dbea2f05851+93
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
 
 job_in_publicly_accessible_project_but_other_objects_elsewhere:
   uuid: zzzzz-8i9sb-jyq01muyhgr4ofj
@@ -498,6 +524,7 @@ job_in_publicly_accessible_project_but_other_objects_elsewhere:
     input2: "stuff2"
   log: zzzzz-4zz18-fy296fx3hot09f7
   output: zzzzz-4zz18-bv31uwvy3neko21
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
 
 running_job_with_components:
   uuid: zzzzz-8i9sb-with2components
@@ -527,19 +554,4 @@ running_job_with_components:
   components:
     component1: zzzzz-8i9sb-jyq01m7in1jlofj
     component2: zzzzz-d1hrv-partdonepipelin
-
-# Test Helper trims the rest of the file
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
-
-# jobs in project_with_2_pipelines_and_60_jobs
-<% for i in 1..60 do %>
-job_<%=i%>_of_60:
-  uuid: zzzzz-8i9sb-oneof100jobs<%= i.to_s.rjust(3, '0') %>
-  created_at: <%= ((i+5)/5).minute.ago.to_s(:db) %>
-  owner_uuid: zzzzz-j7d0g-nnjobspipelines
-  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
-  state: Complete
-<% end %>
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
index 9179e6dff92a4c62a0271dd78786b98dc726fef4..d83cf967e5b4c46c42b3713b581c768612443a75 100644 (file)
@@ -1,7 +1,9 @@
-noop:
+noop: # nothing happened ...to the 'spectator' user
   id: 1
   uuid: zzzzz-xxxxx-pshmckwoma9plh7
+  owner_uuid: zzzzz-tpzed-000000000000000
   object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  object_owner_uuid: zzzzz-tpzed-000000000000000
   event_at: <%= 1.minute.ago.to_s(:db) %>
 
 admin_changes_repository2: # admin changes repository2, which is owned by active user
@@ -139,3 +141,117 @@ crunchstat_for_ancient_job:
   updated_at: 2013-11-07 23:33:42.347455000 Z
   modified_at: 2013-11-07 23:33:42.347455000 Z
   object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_ancient_container:
+  id: 12
+  uuid: zzzzz-57u5n-containerlog001
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer01
+  event_at: <%= 2.year.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  modified_at: <%= 2.year.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_ancient_container:
+  id: 13
+  uuid: zzzzz-57u5n-containerlog002
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer01
+  event_at: <%= 2.year.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  modified_at: <%= 2.year.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_previous_container:
+  id: 14
+  uuid: zzzzz-57u5n-containerlog003
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer02
+  event_at: <%= 1.month.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  modified_at: <%= 1.month.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_previous_container:
+  id: 15
+  uuid: zzzzz-57u5n-containerlog004
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer02
+  event_at: <%= 1.month.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  modified_at: <%= 1.month.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_running_container:
+  id: 16
+  uuid: zzzzz-57u5n-containerlog005
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer03
+  event_at: <%= 1.hour.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  modified_at: <%= 1.hour.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_running_container:
+  id: 17
+  uuid: zzzzz-57u5n-containerlog006
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer03
+  event_at: <%= 1.hour.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  modified_at: <%= 1.hour.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
index 04a200ddb08d38304926d8babeafe181f7d1752e..34dbe9603bcc9c53ec6fd15f7c78a4a082643dfc 100644 (file)
@@ -445,13 +445,13 @@ pipeline_<%=i%>_of_10:
           title: foo instance input
 <% end %>
 
-# pipelines in project_with_2_pipelines_and_100_jobs
+# pipelines in project_with_2_pipelines_and_60_crs
 <% for i in 1..2 do %>
-pipeline_<%=i%>_of_2_pipelines_and_100_jobs:
+pipeline_<%=i%>_of_2_pipelines_and_60_crs:
   name: pipeline_<%= i %>
   state: New
   uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
-  owner_uuid: zzzzz-j7d0g-nnjobspipelines
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
   created_at: <%= i.minute.ago.to_s(:db) %>
   components:
     foo:
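
[Editorial note] These fixtures are generated by an ERB loop; each pipeline gets a distinct uuid by zero-padding the loop counter into the uuid suffix. A minimal sketch of that padding, using the prefix from the fixture above:

    prefix = 'zzzzz-d1hrv-abcgneyn6brx'
    (1..2).each do |i|
      # rjust pads the counter to three digits with leading zeros
      puts "#{prefix}#{i.to_s.rjust(3, '0')}"
    end
    # => zzzzz-d1hrv-abcgneyn6brx001
    # => zzzzz-d1hrv-abcgneyn6brx002
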
diff --git a/services/api/test/fixtures/workflows.yml b/services/api/test/fixtures/workflows.yml
new file mode 100644 (file)
index 0000000..4badf9e
--- /dev/null
@@ -0,0 +1,46 @@
+workflow_with_definition_yml:
+  uuid: zzzzz-7fd4e-validworkfloyml
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with name and desc
+  description: this workflow has a valid definition yaml
+  definition: "name: foo\ndesc: bar"
+  created_at: 2016-08-15 12:00:00
+
+workflow_with_no_definition_yml:
+  uuid: zzzzz-7fd4e-validbutnoyml00
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with no definition yaml
+  description: this workflow does not have a definition yaml
+  created_at: 2016-08-15 12:00:00
+
+workflow_with_no_name_and_desc:
+  uuid: zzzzz-7fd4e-validnonamedesc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  definition: this is valid yaml
+  created_at: 2016-08-15 12:00:01
+
+workflow_with_input_specifications:
+  uuid: zzzzz-7fd4e-validwithinputs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Workflow with input specifications
+  description: this workflow has inputs specified
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  definition: |
+    cwlVersion: v1.0
+    class: CommandLineTool
+    baseCommand:
+    - echo
+    inputs:
+    - doc: a longer documentation string for this parameter (optional)
+      type: boolean
+      id: ex_boolean
+      label: a short label for this parameter (optional)
+      inputBinding:
+        position: 1
+    - type:
+      - 'null'
+      - boolean
+      id: ex_boolean_opt
+      inputBinding:
+        position: 1
+    outputs: []
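
[Editorial note] A workflow's definition field is stored as plain YAML text, as these fixtures show. A minimal sketch, not part of the diff, of loading such a definition with Ruby's stdlib to enumerate the declared inputs:

    require 'yaml'

    definition = "cwlVersion: v1.0\nclass: CommandLineTool\n" \
                 "inputs:\n- id: ex_boolean\n  type: boolean\noutputs: []"
    doc = YAML.load(definition)
    # Collect the ids of the declared inputs.
    puts doc['inputs'].map { |input| input['id'] }.inspect  # => ["ex_boolean"]
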
index d9f7d96225a651c825544c4ded685d57fcad6777..cf1f5765b4d2460ae854356088d15bc7c8061add 100644 (file)
@@ -24,7 +24,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
   test 'cannot get auth with wrong token' do
     authorize_with :dispatch1
     c = containers(:queued)
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
 
     authorize_with :system_user
     get :auth, id: c.uuid
@@ -34,7 +34,7 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
   test 'get auth' do
     authorize_with :dispatch1
     c = containers(:queued)
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
     get :auth, id: c.uuid
     assert_response :success
     assert_operator 32, :<, json_response['api_token'].length
@@ -44,9 +44,47 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
   test 'no auth in container response' do
     authorize_with :dispatch1
     c = containers(:queued)
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
     get :show, id: c.uuid
     assert_response :success
     assert_nil json_response['auth']
   end
+
+  test "lock container" do
+    authorize_with :dispatch1
+    uuid = containers(:queued).uuid
+    post :lock, {id: uuid}
+    assert_response :success
+    container = Container.where(uuid: uuid).first
+    assert_equal 'Locked', container.state
+    assert_not_nil container.locked_by_uuid
+    assert_not_nil container.auth_uuid
+  end
+
+  test "unlock container" do
+    authorize_with :dispatch1
+    uuid = containers(:locked).uuid
+    post :unlock, {id: uuid}
+    assert_response :success
+    container = Container.where(uuid: uuid).first
+    assert_equal 'Queued', container.state
+    assert_nil container.locked_by_uuid
+    assert_nil container.auth_uuid
+  end
+
+  [
+    [:queued, :lock, :success, 'Locked'],
+    [:queued, :unlock, 422, 'Queued'],
+    [:locked, :lock, 422, 'Locked'],
+    [:running, :lock, 422, 'Running'],
+    [:running, :unlock, 422, 'Running'],
+  ].each do |fixture, action, response, state|
+    test "state transitions from #{fixture } to #{action}" do
+      authorize_with :dispatch1
+      uuid = containers(fixture).uuid
+      post action, {id: uuid}
+      assert_response response
+      assert_equal state, Container.where(uuid: uuid).first.state
+    end
+  end
 end
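
[Editorial note] The lock/unlock tests above pin down a small state machine: lock is legal only from Queued, unlock only from Locked, and every other combination gets a 422. A hedged sketch of that guard (method and constant names here are hypothetical, not the model's API):

    VALID_TRANSITIONS = {
      lock:   {'Queued' => 'Locked'},
      unlock: {'Locked' => 'Queued'},
    }

    def next_state(action, current_state)
      VALID_TRANSITIONS.fetch(action).fetch(current_state) do
        # The controller surfaces this as an HTTP 422 response.
        raise ArgumentError, "cannot #{action} a #{current_state} container"
      end
    end

    next_state(:lock, 'Queued')   # => "Locked"
    next_state(:lock, 'Running')  # => ArgumentError (422 at the API)
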
index 00846795b4d7f7501964d0b888ba87739ce6c9d7..10534a70610a8188d35863992f2810ac29195937 100644 (file)
@@ -423,4 +423,29 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     end
     assert_equal true, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)
   end
+
+  [
+    [['owner_uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 200,
+        'zzzzz-d1hrv-subprojpipeline', 'zzzzz-d1hrv-1xfj6xkicf2muk2'],
+    [["pipeline_instances.state", "not in", ["Complete", "Failed"]], 200,
+        'zzzzz-d1hrv-1xfj6xkicf2muk2', 'zzzzz-d1hrv-i3e77t9z5y8j9cc'],
+    [['container_requests.requesting_container_uuid', '=', nil], 200,
+        'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4requestercn2'],
+    [['container_requests.no_such_column', '=', nil], 422],
+    [['container_requests.', '=', nil], 422],
+    [['.requesting_container_uuid', '=', nil], 422],
+    [['no_such_table.uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 422],
+  ].each do |filter, expect_code, expect_uuid, not_expect_uuid|
+    test "get contents with '#{filter}' filter" do
+      authorize_with :active
+      get :contents, filters: [filter], format: :json
+      assert_response expect_code
+      if expect_code == 200
+        assert_not_empty json_response['items']
+        item_uuids = json_response['items'].collect {|item| item['uuid']}
+        assert_includes(item_uuids, expect_uuid)
+        assert_not_includes(item_uuids, not_expect_uuid)
+      end
+    end
+  end
 end
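
[Editorial note] The new contents-filter tests rely on "table_name.column_name" filter keys; an unknown table or column must fail with 422 rather than leak into SQL. A hedged sketch of that validation (the whitelist below is a stand-in, not the API server's actual schema lookup):

    KNOWN_COLUMNS = {
      'container_requests' => ['requesting_container_uuid', 'state'],
      'pipeline_instances' => ['state'],
    }

    def valid_joined_filter?(attr)
      table, column = attr.split('.', 2)
      KNOWN_COLUMNS.fetch(table, []).include?(column)
    end

    valid_joined_filter?('container_requests.requesting_container_uuid')  # => true
    valid_joined_filter?('container_requests.no_such_column')             # => false (422)
    valid_joined_filter?('.requesting_container_uuid')                    # => false (422)
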
index 64d559107c19257ca7a954d323f42cba60c7a9c2..8007fd26f8c8b64bf1295d8c7be091fed42cc1d6 100644 (file)
@@ -19,8 +19,8 @@ class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
       script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
       repository: "active/foo",
       script_parameters: {
-        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
-        an_integer: '1'
+        an_integer: '1',
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
       }
     }
     assert_response :success
@@ -669,7 +669,7 @@ class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
     errors = json_response.fetch("errors", [])
     assert(errors.any?, "no errors assigned from #{params}")
     refute(errors.any? { |msg| msg =~ /^#<[A-Za-z]+: / },
-           "errors include raw exception")
+           "errors include raw exception: #{errors.inspect}")
     errors
   end
 
index 601f9a7af56f3f4260724eb8eae3bc28f5014ea7..b84c93df08a5993da973da4ebc05683ff08ad573 100644 (file)
@@ -390,7 +390,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
   test "job lock conflict" do
     authorize_with :active
     post :lock, {id: jobs(:running).uuid}
-    assert_response 403 # forbidden
+    assert_response 422 # invalid state transition
   end
 
   test 'reject invalid commit in remote repository' do
index 4251047cea6b74ece4d8e4b1473d554e59daeb7e..0bedc0726a08549711f3455f509778b1f9901de3 100644 (file)
@@ -57,6 +57,34 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
     assert_equal "arvados#collectionList", json_response['kind']
   end
 
+  test "get index with select= (valid attribute)" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => ['portable_data_hash'].to_json
+        }, auth(:active)
+    assert_response :success
+    assert_includes json_response['items'][0].keys, 'portable_data_hash'
+    refute_includes json_response['items'][0].keys, 'uuid'
+  end
+
+  test "get index with select= (invalid attribute) responds 422" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => ['bogus'].to_json
+        }, auth(:active)
+    assert_response 422
+    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
+  end
+
+  test "get index with select= (invalid attribute type) responds 422" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => [['bogus']].to_json
+        }, auth(:active)
+    assert_response 422
+    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
+  end
+
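
[Editorial note] The select= parameter is a JSON-encoded array of attribute names; anything that is not a known column name (including a nested array, per the last test) must produce a 422. A hedged sketch of that server-side check; the column list is a stand-in for the model's attribute names:

    require 'json'

    COLLECTION_COLUMNS = ['uuid', 'portable_data_hash', 'name']

    def parse_select(raw)
      selected = JSON.parse(raw)
      unless selected.is_a?(Array) &&
             selected.all? { |attr| COLLECTION_COLUMNS.include?(attr) }
        raise ArgumentError, "Invalid attribute(s) in select parameter: #{raw}"
      end
      selected
    end

    parse_select('["portable_data_hash"]')  # => ["portable_data_hash"]
    parse_select('["bogus"]')               # raises (the API responds 422)
    parse_select('[["bogus"]]')             # raises (the API responds 422)
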
   test "controller 404 response is json" do
     get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, auth(:active)
     assert_response 404
index 44b5e6e377b01d7a337f748804b0e95a8de31a2d..e4db862415835be5a430837a999d9e2aa75531ea 100644 (file)
@@ -1,9 +1,14 @@
 require 'test_helper'
 
 class PermissionsTest < ActionDispatch::IntegrationTest
+  include DbCurrentTime
   include CurrentApiClient  # for empty_collection
   fixtures :users, :groups, :api_client_authorizations, :collections
 
+  teardown do
+    User.invalidate_permissions_cache db_current_time.to_i
+  end
+
   test "adding and removing direct can_read links" do
     # try to read collection as spectator
     get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
@@ -341,11 +346,6 @@ class PermissionsTest < ActionDispatch::IntegrationTest
     assert_response 404
   end
 
-  test "get_permissions returns 404 for unreadable uuid" do
-    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
-    assert_response 404
-  end
-
   test "get_permissions returns 403 if user can read but not manage" do
     post "/arvados/v1/links", {
       :link => {
index 98ae103d1a464fbb550ba46f8c3669736eae3981..99ca7ac960b3dac2fc4e0f9b82d89949afd6e76c 100644 (file)
@@ -23,9 +23,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
 
     EM.run {
       if token
-        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket?api_token=#{api_client_authorizations(token).api_token}")
+        ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket?api_token=#{api_client_authorizations(token).api_token}")
       else
-        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket")
+        ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket")
       end
 
       ws.on :open do |event|
@@ -69,7 +69,7 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   test "connect, subscribe and get response" do
     status = nil
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe'}.to_json)
       end
@@ -89,9 +89,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     spec = nil
     ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe'}.to_json)
       end
@@ -126,9 +126,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     spec_ev_uuid = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe'}.to_json)
       end
@@ -166,9 +166,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     human = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
       end
@@ -204,9 +204,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     spec_ev_uuid = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
         ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#specimen']]}.to_json)
@@ -249,9 +249,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     state = 1
     t1 = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#trait'], ['event_type', '=', 'update']]}.to_json)
       end
@@ -285,13 +285,13 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     human = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
     lastid = logs(:admin_changes_specimen).id
     l1 = nil
     l2 = nil
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe', last_log_id: lastid}.to_json)
       end
@@ -329,9 +329,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     spec_ev_uuid = nil
     filter_id = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin, false do |ws|
+    ws_helper :active, false do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe'}.to_json)
         EM::Timer.new 3 do
@@ -378,9 +378,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     spec = nil
     spec_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin, false do |ws|
+    ws_helper :active, false do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
         EM::Timer.new 6 do
@@ -430,9 +430,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     human = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'subscribe'}.to_json)
       end
@@ -477,9 +477,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
 
   test "connected, not subscribed, no event" do
     slow_test
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin, false do |ws|
+    ws_helper :active, false do |ws|
       ws.on :open do |event|
         EM::Timer.new 1 do
           Specimen.create
@@ -530,7 +530,7 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   test "connect, try bogus method" do
     status = nil
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({method: 'frobnabble'}.to_json)
       end
@@ -548,7 +548,7 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   test "connect, missing method" do
     status = nil
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send ({fizzbuzz: 'frobnabble'}.to_json)
       end
@@ -566,7 +566,7 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   test "connect, send malformed request" do
     status = nil
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         ws.send '<XML4EVER></XML4EVER>'
       end
@@ -585,9 +585,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
   test "connect, try subscribe too many filters" do
     state = 1
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         (1..17).each do |i|
           ws.send ({method: 'subscribe', filters: [['object_uuid', '=', i]]}.to_json)
@@ -618,9 +618,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     event_count = 0
     log_start = Log.order(:id).last.id
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin, false do |ws|
+    ws_helper :active, false do |ws|
       EM::Timer.new 45 do
         # Needs a longer timeout than the default
         ws.close
@@ -661,9 +661,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     human = nil
     human_ev_uuid = nil
 
-    authorize_with :admin
+    authorize_with :active
 
-    ws_helper :admin do |ws|
+    ws_helper :active do |ws|
       ws.on :open do |event|
         # test that #6451 is fixed (invalid filter crashes websockets)
         ws.send ({method: 'subscribe', filters: [['object_blarg', 'is_a', 'arvados#human']]}.to_json)
diff --git a/services/api/test/tasks/delete_old_container_logs_test.rb b/services/api/test/tasks/delete_old_container_logs_test.rb
new file mode 100644 (file)
index 0000000..82a5752
--- /dev/null
@@ -0,0 +1,50 @@
+require 'test_helper'
+require 'rake'
+
+Rake.application.rake_require "tasks/delete_old_container_logs"
+Rake::Task.define_task(:environment)
+
+class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase
+  TASK_NAME = "db:delete_old_container_logs"
+
+  def log_uuids(*fixture_names)
+    fixture_names.map { |name| logs(name).uuid }
+  end
+
+  def run_with_expiry(clean_after)
+    Rails.configuration.clean_container_log_rows_after = clean_after
+    Rake::Task[TASK_NAME].reenable
+    Rake.application.invoke_task TASK_NAME
+  end
+
+  def check_log_existence(test_method, fixture_uuids)
+    uuids_now = Log.where("object_uuid LIKE :pattern AND event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat')", pattern: "%-dz642-%").map(&:uuid)
+    fixture_uuids.each do |expect_uuid|
+      send(test_method, uuids_now, expect_uuid)
+    end
+  end
+
+  test "delete all finished logs" do
+    uuids_to_keep = log_uuids(:stderr_for_running_container,
+                              :crunchstat_for_running_container)
+    uuids_to_clean = log_uuids(:stderr_for_previous_container,
+                               :crunchstat_for_previous_container,
+                               :stderr_for_ancient_container,
+                               :crunchstat_for_ancient_container)
+    run_with_expiry(1)
+    check_log_existence(:assert_includes, uuids_to_keep)
+    check_log_existence(:refute_includes, uuids_to_clean)
+  end
+
+  test "delete old finished logs" do
+    uuids_to_keep = log_uuids(:stderr_for_running_container,
+                              :crunchstat_for_running_container,
+                              :stderr_for_previous_container,
+                              :crunchstat_for_previous_container)
+    uuids_to_clean = log_uuids(:stderr_for_ancient_container,
+                               :crunchstat_for_ancient_container)
+    run_with_expiry(360.days)
+    check_log_existence(:assert_includes, uuids_to_keep)
+    check_log_existence(:refute_includes, uuids_to_clean)
+  end
+end
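
[Editorial note] Based on the assertions above, the db:delete_old_container_logs task is expected to remove stdout/stderr/crunchstat-style log rows for containers that finished before the configured cutoff. A hedged sketch of that deletion; the real task's join against container finish times may differ:

    cutoff = Time.now - Rails.configuration.clean_container_log_rows_after
    Log.where("event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat')").
        where("object_uuid LIKE '%-dz642-%'").
        where("event_at < ?", cutoff).
        delete_all
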
index ef08c726ae2432481409fd054b12122fb4b7ce25..417ddf6bee8eeee96d8e960099ccc227cee4950a 100644 (file)
@@ -47,6 +47,7 @@ class ActiveSupport::TestCase
   fixtures :all
 
   include ArvadosTestSupport
+  include CurrentApiClient
 
   setup do
     Rails.logger.warn "\n\n#{'=' * 70}\n#{self.class}\##{method_name}\n#{'-' * 70}\n\n"
index df89b93bf4bba59b90bd11e0cff3afd41b921739..372d94a3bbe2ba9355cc9a6b4f8cec8a5548a051 100644 (file)
@@ -1,97 +1,43 @@
 require 'test_helper'
 
 class ContainerRequestTest < ActiveSupport::TestCase
-  def check_illegal_modify c
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.command = ["echo", "bar"]
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.container_image = "img2"
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.cwd = "/tmp2"
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.environment = {"FOO" => "BAR"}
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.mounts = {"FOO" => "BAR"}
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.output_path = "/tmp3"
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.runtime_constraints = {"FOO" => "BAR"}
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.name = "baz"
-        c.save!
-      end
-
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.description = "baz"
-        c.save!
-      end
-
+  def create_minimal_req! attrs={}
+    defaults = {
+      command: ["echo", "foo"],
+      container_image: links(:docker_image_collection_tag).name,
+      cwd: "/tmp",
+      environment: {},
+      mounts: {"/out" => {"kind" => "tmp", "capacity" => 1000000}},
+      output_path: "/out",
+      runtime_constraints: {"vcpus" => 1, "ram" => 2},
+      name: "foo",
+      description: "bar",
+    }
+    cr = ContainerRequest.create!(defaults.merge(attrs))
+    cr.reload
   end
 
-  def check_bogus_states c
-      assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.state = nil
-        c.save!
-      end
-
+  def check_bogus_states cr
+    [nil, "Flubber"].each do |state|
       assert_raises(ActiveRecord::RecordInvalid) do
-        c.reload
-        c.state = "Flubber"
-        c.save!
+        cr.state = state
+        cr.save!
       end
+      cr.reload
+    end
   end
 
   test "Container request create" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.command = ["echo", "foo"]
-    cr.container_image = "img"
-    cr.cwd = "/tmp"
-    cr.environment = {}
-    cr.mounts = {"BAR" => "FOO"}
-    cr.output_path = "/tmpout"
-    cr.runtime_constraints = {}
-    cr.name = "foo"
-    cr.description = "bar"
-    cr.save!
+    set_user_from_auth :active
+    cr = create_minimal_req!
 
     assert_nil cr.container_uuid
     assert_nil cr.priority
 
     check_bogus_states cr
 
-    cr.reload
+    # In the Uncommitted state, all attributes are still modifiable
     cr.command = ["echo", "foo3"]
     cr.container_image = "img3"
     cr.cwd = "/tmp3"
@@ -99,7 +45,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     cr.mounts = {"BAR" => "BAZ"}
     cr.output_path = "/tmp4"
     cr.priority = 2
-    cr.runtime_constraints = {"X" => "Y"}
+    cr.runtime_constraints = {"vcpus" => 4}
     cr.name = "foo3"
     cr.description = "bar3"
     cr.save!
@@ -107,21 +53,54 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_nil cr.container_uuid
   end
 
-  test "Container request priority must be non-nil" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.command = ["echo", "foo"]
-    cr.container_image = "img"
-    cr.cwd = "/tmp"
-    cr.environment = {}
-    cr.mounts = {"BAR" => "FOO"}
-    cr.output_path = "/tmpout"
-    cr.runtime_constraints = {}
-    cr.name = "foo"
-    cr.description = "bar"
-    cr.save!
+  [
+    {"vcpus" => 1},
+    {"vcpus" => 1, "ram" => nil},
+    {"vcpus" => 0, "ram" => 123},
+    {"vcpus" => "1", "ram" => "123"}
+  ].each do |invalid_constraints|
+    test "Create with #{invalid_constraints}" do
+      set_user_from_auth :active
+      assert_raises(ActiveRecord::RecordInvalid) do
+        cr = create_minimal_req!(state: "Committed",
+                                 priority: 1,
+                                 runtime_constraints: invalid_constraints)
+        cr.save!
+      end
+    end
 
-    cr.reload
+    test "Update with #{invalid_constraints}" do
+      set_user_from_auth :active
+      cr = create_minimal_req!(state: "Uncommitted", priority: 1)
+      cr.save!
+      assert_raises(ActiveRecord::RecordInvalid) do
+        cr = ContainerRequest.find_by_uuid cr.uuid
+        cr.update_attributes!(state: "Committed",
+                              runtime_constraints: invalid_constraints)
+      end
+    end
+  end
+
+  test "Update from fixture" do
+    set_user_from_auth :active
+    cr = ContainerRequest.find_by_uuid(container_requests(:running).uuid)
+    cr.update_attributes!(description: "New description")
+    assert_equal "New description", cr.description
+  end
+
+  test "Update with valid runtime constraints" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(state: "Uncommitted", priority: 1)
+    cr.save!
+    cr = ContainerRequest.find_by_uuid cr.uuid
+    cr.update_attributes!(state: "Committed",
+                          runtime_constraints: {"vcpus" => 1, "ram" => 23})
+    assert_not_nil cr.container_uuid
+  end
+
+  test "Container request priority must be non-nil" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: nil)
     cr.state = "Committed"
     assert_raises(ActiveRecord::RecordInvalid) do
       cr.save!
@@ -129,37 +108,28 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "Container request commit" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.command = ["echo", "foo"]
-    cr.container_image = "img"
-    cr.cwd = "/tmp"
-    cr.environment = {}
-    cr.mounts = {"BAR" => "FOO"}
-    cr.output_path = "/tmpout"
-    cr.priority = 1
-    cr.runtime_constraints = {}
-    cr.name = "foo"
-    cr.description = "bar"
-    cr.save!
+    set_user_from_auth :active
+    cr = create_minimal_req!(runtime_constraints: {"vcpus" => 2, "ram" => 30})
 
-    cr.reload
     assert_nil cr.container_uuid
 
     cr.reload
     cr.state = "Committed"
+    cr.priority = 1
     cr.save!
 
     cr.reload
 
+    assert_not_nil cr.container_uuid
     c = Container.find_by_uuid cr.container_uuid
+    assert_not_nil c
     assert_equal ["echo", "foo"], c.command
-    assert_equal "img", c.container_image
+    assert_equal collections(:docker_image).portable_data_hash, c.container_image
     assert_equal "/tmp", c.cwd
     assert_equal({}, c.environment)
-    assert_equal({"BAR" => "FOO"}, c.mounts)
-    assert_equal "/tmpout", c.output_path
-    assert_equal({}, c.runtime_constraints)
+    assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
+    assert_equal "/out", c.output_path
+    assert_equal({"vcpus" => 2, "ram" => 30}, c.runtime_constraints)
     assert_equal 1, c.priority
 
     assert_raises(ActiveRecord::RecordInvalid) do
@@ -174,181 +144,118 @@ class ContainerRequestTest < ActiveSupport::TestCase
     c.reload
     assert_equal 0, cr.priority
     assert_equal 0, c.priority
-
   end
 
 
   test "Container request max priority" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.state = "Committed"
-    cr.container_image = "img"
-    cr.command = ["foo", "bar"]
-    cr.output_path = "/tmp"
-    cr.cwd = "/tmp"
-    cr.priority = 5
-    cr.save!
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 5, state: "Committed")
 
     c = Container.find_by_uuid cr.container_uuid
     assert_equal 5, c.priority
 
-    cr2 = ContainerRequest.new
-    cr2.container_image = "img"
-    cr2.command = ["foo", "bar"]
-    cr2.output_path = "/tmp"
-    cr2.cwd = "/tmp"
+    cr2 = create_minimal_req!
     cr2.priority = 10
-    cr2.save!
-
+    cr2.state = "Committed"
+    cr2.container_uuid = cr.container_uuid
     act_as_system_user do
-      cr2.state = "Committed"
-      cr2.container_uuid = cr.container_uuid
       cr2.save!
     end
 
+    # cr and cr2 have priority 5 and 10, and are being satisfied by
+    # the same container c, so c's priority should be
+    # max(priority)=10.
     c.reload
     assert_equal 10, c.priority
 
-    cr2.reload
-    cr2.priority = 0
-    cr2.save!
+    cr2.update_attributes!(priority: 0)
 
     c.reload
     assert_equal 5, c.priority
 
-    cr.reload
-    cr.priority = 0
-    cr.save!
+    cr.update_attributes!(priority: 0)
 
     c.reload
     assert_equal 0, c.priority
-
   end
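
[Editorial note] The test above encodes the priority rule: when several committed requests share one container, the container's priority is the maximum of theirs, dropping to 0 when none remain at nonzero priority. A hedged sketch of that rule over plain hashes (attribute names are assumptions, not the model's API):

    def container_priority(requests)
      requests.select { |r| r[:state] == 'Committed' }.
        map { |r| r[:priority] }.max || 0
    end

    container_priority([{state: 'Committed', priority: 5},
                        {state: 'Committed', priority: 10}])  # => 10
    container_priority([{state: 'Committed', priority: 5},
                        {state: 'Committed', priority: 0}])   # => 5
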
 
 
   test "Independent container requests" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.state = "Committed"
-    cr.container_image = "img"
-    cr.command = ["foo", "bar"]
-    cr.output_path = "/tmp"
-    cr.cwd = "/tmp"
-    cr.priority = 5
-    cr.save!
+    set_user_from_auth :active
+    cr1 = create_minimal_req!(command: ["foo", "1"], priority: 5, state: "Committed")
+    cr2 = create_minimal_req!(command: ["foo", "2"], priority: 10, state: "Committed")
 
-    cr2 = ContainerRequest.new
-    cr2.state = "Committed"
-    cr2.container_image = "img"
-    cr2.command = ["foo", "bar"]
-    cr2.output_path = "/tmp"
-    cr2.cwd = "/tmp"
-    cr2.priority = 10
-    cr2.save!
-
-    c = Container.find_by_uuid cr.container_uuid
-    assert_equal 5, c.priority
+    c1 = Container.find_by_uuid cr1.container_uuid
+    assert_equal 5, c1.priority
 
     c2 = Container.find_by_uuid cr2.container_uuid
     assert_equal 10, c2.priority
 
-    cr.priority = 0
-    cr.save!
+    cr1.update_attributes!(priority: 0)
 
-    c.reload
-    assert_equal 0, c.priority
+    c1.reload
+    assert_equal 0, c1.priority
 
     c2.reload
     assert_equal 10, c2.priority
   end
 
-
-  test "Container cancelled finalizes request" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.state = "Committed"
-    cr.container_image = "img"
-    cr.command = ["foo", "bar"]
-    cr.output_path = "/tmp"
-    cr.cwd = "/tmp"
-    cr.priority = 5
-    cr.save!
-
-    cr.reload
-    assert_equal "Committed", cr.state
-
-    c = Container.find_by_uuid cr.container_uuid
-    assert_equal "Queued", c.state
+  test "Request is finalized when its container is cancelled" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 1, state: "Committed")
 
     act_as_system_user do
-      c.state = "Cancelled"
-      c.save!
+      Container.find_by_uuid(cr.container_uuid).
+        update_attributes!(state: Container::Cancelled)
     end
 
     cr.reload
     assert_equal "Final", cr.state
-
   end
 
-
-  test "Container complete finalizes request" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.state = "Committed"
-    cr.container_image = "img"
-    cr.command = ["foo", "bar"]
-    cr.output_path = "/tmp"
-    cr.cwd = "/tmp"
-    cr.priority = 5
-    cr.save!
-
-    cr.reload
-    assert_equal "Committed", cr.state
-
-    c = Container.find_by_uuid cr.container_uuid
-    assert_equal Container::Queued, c.state
-
-    act_as_system_user do
-      c.update_attributes! state: Container::Locked
-      c.update_attributes! state: Container::Running
+  test "Request is finalized when its container is completed" do
+    set_user_from_auth :active
+    project = groups(:private)
+    cr = create_minimal_req!(owner_uuid: project.uuid,
+                             priority: 1,
+                             state: "Committed")
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c
     end
 
     cr.reload
     assert_equal "Committed", cr.state
 
     act_as_system_user do
-      c.update_attributes! state: Container::Complete
-      c.save!
+      c.update_attributes!(state: Container::Complete,
+                           output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
+                           log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')
     end
 
     cr.reload
     assert_equal "Final", cr.state
-
+    ['output', 'log'].each do |out_type|
+      pdh = Container.find_by_uuid(cr.container_uuid).send(out_type)
+      assert_equal(1, Collection.where(portable_data_hash: pdh,
+                                       owner_uuid: project.uuid).count,
+                   "Container #{out_type} should be copied to #{project.uuid}")
+    end
   end
 
   test "Container makes container request, then is cancelled" do
-    set_user_from_auth :active_trustedclient
-    cr = ContainerRequest.new
-    cr.state = "Committed"
-    cr.container_image = "img"
-    cr.command = ["foo", "bar"]
-    cr.output_path = "/tmp"
-    cr.cwd = "/tmp"
-    cr.priority = 5
-    cr.save!
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 5, state: "Committed")
 
     c = Container.find_by_uuid cr.container_uuid
     assert_equal 5, c.priority
 
-    cr2 = ContainerRequest.new
-    cr2.state = "Committed"
-    cr2.container_image = "img"
-    cr2.command = ["foo", "bar"]
-    cr2.output_path = "/tmp"
-    cr2.cwd = "/tmp"
-    cr2.priority = 10
-    cr2.requesting_container_uuid = c.uuid
-    cr2.save!
+    cr2 = create_minimal_req!
+    cr2.update_attributes!(priority: 10, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"])
+    cr2.reload
 
     c2 = Container.find_by_uuid cr2.container_uuid
     assert_equal 10, c2.priority
@@ -379,4 +286,161 @@ class ContainerRequestTest < ActiveSupport::TestCase
       assert_equal expected, cr.requesting_container_uuid
     end
   end
+
+  [[{"vcpus" => [2, nil]},
+    lambda { |resolved| resolved["vcpus"] == 2 }],
+   [{"vcpus" => [3, 7]},
+    lambda { |resolved| resolved["vcpus"] == 3 }],
+   [{"vcpus" => 4},
+    lambda { |resolved| resolved["vcpus"] == 4 }],
+   [{"ram" => [1000000000, 2000000000]},
+    lambda { |resolved| resolved["ram"] == 1000000000 }],
+   [{"ram" => [1234234234]},
+    lambda { |resolved| resolved["ram"] == 1234234234 }],
+  ].each do |rc, okfunc|
+    test "resolve runtime constraint range #{rc} to values" do
+      cr = ContainerRequest.new(runtime_constraints: rc)
+      resolved = cr.send :runtime_constraints_for_container
+      assert(okfunc.call(resolved),
+             "container runtime_constraints was #{resolved.inspect}")
+    end
+  end
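
[Editorial note] These cases describe how runtime constraint ranges resolve: [min, max] (or [min, nil]) collapses to the minimum, while a scalar passes through unchanged. A hedged sketch of that resolution; the real private method runtime_constraints_for_container may differ in detail:

    def resolve_runtime_constraints(constraints)
      constraints.each_with_object({}) do |(key, value), resolved|
        # A range is expressed as an array; take its minimum.
        resolved[key] = value.is_a?(Array) ? value.first : value
      end
    end

    resolve_runtime_constraints({"vcpus" => [3, 7], "ram" => 1234})
    # => {"vcpus"=>3, "ram"=>1234}
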
+
+  [[{"/out" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+   [{"/out" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+  ].each do |mounts, okfunc|
+    test "resolve mounts #{mounts.inspect} to values" do
+      set_user_from_auth :active
+      cr = ContainerRequest.new(mounts: mounts)
+      resolved = cr.send :mounts_for_container
+      assert(okfunc.call(resolved),
+             "mounts_for_container returned #{resolved.inspect}")
+    end
+  end
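
[Editorial note] The mount-resolution cases show collection mounts being rewritten from a uuid reference to the collection's portable_data_hash, so the container spec becomes content-addressed. A hedged sketch; the lookup argument stands in for a readable-collections query:

    def resolve_collection_mount(mount, lookup)
      pdh = lookup.fetch(mount['uuid'])
      mount.reject { |k, _| k == 'uuid' }.merge('portable_data_hash' => pdh)
    end

    lookup = {'zzzzz-4zz18-znfnqtbbv4spc3w' => '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}
    resolve_collection_mount({'kind' => 'collection',
                              'uuid' => 'zzzzz-4zz18-znfnqtbbv4spc3w',
                              'path' => '/foo'}, lookup)
    # => {"kind"=>"collection", "path"=>"/foo",
    #     "portable_data_hash"=>"1f4b0bc7583c2a7f9102c395f4ffc5e3+45"}
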
+
+  test 'mount unreadable collection' do
+    set_user_from_auth :spectator
+    m = {
+      "/foo" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "path" => "/foo",
+      },
+    }
+    cr = ContainerRequest.new(mounts: m)
+    assert_raises(ArvadosModel::UnresolvableContainerError) do
+      cr.send :mounts_for_container
+    end
+  end
+
+  test 'mount collection with mismatched UUID and PDH' do
+    set_user_from_auth :active
+    m = {
+      "/foo" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "portable_data_hash" => "fa7aeb5140e2848d39b416daeef4ffc5+45",
+        "path" => "/foo",
+      },
+    }
+    cr = ContainerRequest.new(mounts: m)
+    assert_raises(ArgumentError) do
+      cr.send :mounts_for_container
+    end
+  end
+
+  ['arvados/apitestfixture:latest',
+   'arvados/apitestfixture',
+   'd8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678',
+  ].each do |tag|
+    test "container_image_for_container(#{tag.inspect})" do
+      set_user_from_auth :active
+      cr = ContainerRequest.new(container_image: tag)
+      resolved = cr.send :container_image_for_container
+      assert_equal resolved, collections(:docker_image).portable_data_hash
+    end
+  end
+
+  test "container_image_for_container(pdh)" do
+    set_user_from_auth :active
+    pdh = collections(:docker_image).portable_data_hash
+    cr = ContainerRequest.new(container_image: pdh)
+    resolved = cr.send :container_image_for_container
+    assert_equal resolved, pdh
+  end
+
+  ['acbd18db4cc2f85cedef654fccc4a4d8+3',
+   'ENOEXIST',
+   'arvados/apitestfixture:ENOEXIST',
+  ].each do |img|
+    test "container_image_for_container(#{img.inspect}) => 422" do
+      set_user_from_auth :active
+      cr = ContainerRequest.new(container_image: img)
+      assert_raises(ArvadosModel::UnresolvableContainerError) do
+        cr.send :container_image_for_container
+      end
+    end
+  end
+
+  test "requestor can retrieve container owned by dispatch" do
+    assert_not_empty Container.readable_by(users(:admin)).where(uuid: containers(:running).uuid)
+    assert_not_empty Container.readable_by(users(:active)).where(uuid: containers(:running).uuid)
+    assert_empty Container.readable_by(users(:spectator)).where(uuid: containers(:running).uuid)
+  end
+
+  [
+    [{"var" => "value1"}, {"var" => "value1"}],
+    [{"var" => "value1"}, {"var" => "value2"}]
+  ].each do |env1, env2|
+    test "Container request #{(env1 == env2) ? 'does' : 'does not'} reuse container when committed" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      runtime_constraints: {"vcpus" => 4,
+                                            "ram" => 12000000000},
+                      mounts: {"test" => {"kind" => "json"}}}
+      set_user_from_auth :active
+      cr1 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Committed,
+                                                    environment: env1}))
+      cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,
+                                                    environment: env2}))
+      assert_not_nil cr1.container_uuid
+      assert_nil cr2.container_uuid
+
+      # Update cr2 to the Committed state and check container equality in
+      # both cases: when env1 and env2 are equal, the same container should
+      # be assigned; when they differ, cr2 should get a different container.
+      cr2.update_attributes!({state: ContainerRequest::Committed})
+      assert_equal(env1 == env2, cr1.container_uuid == cr2.container_uuid)
+    end
+  end
+
+  test "requesting_container_uuid at create is not allowed" do
+    set_user_from_auth :active
+    assert_raises(ActiveRecord::RecordNotSaved) do
+      create_minimal_req!(state: "Uncommitted", priority: 1, requesting_container_uuid: 'youcantdothat')
+    end
+  end
 end
index 9cc098117f68f3434b80ca7ee557f1ecfa89fa51..8894ed9d4c0e16dc32a8b5bbc95d0a1371d3f41f 100644 (file)
@@ -8,8 +8,18 @@ class ContainerTest < ActiveSupport::TestCase
     container_image: 'img',
     output_path: '/tmp',
     priority: 1,
+    runtime_constraints: {"vcpus" => 1, "ram" => 1},
   }
 
+  REUSABLE_COMMON_ATTRS = {container_image: "test",
+                           cwd: "test",
+                           command: ["echo", "hello"],
+                           output_path: "test",
+                           runtime_constraints: {"vcpus" => 4,
+                                                 "ram" => 12000000000},
+                           mounts: {"test" => {"kind" => "json"}},
+                           environment: {"var" => 'val'}}
+
   def minimal_new attrs={}
     cr = ContainerRequest.new DEFAULT_ATTRS.merge(attrs)
     act_as_user users(:active) do
@@ -66,7 +76,7 @@ class ContainerTest < ActiveSupport::TestCase
                       mounts: {"BAR" => "FOO"},
                       output_path: "/tmp",
                       priority: 1,
-                      runtime_constraints: {})
+                      runtime_constraints: {"vcpus" => 1, "ram" => 1})
 
       check_illegal_modify c
       check_bogus_states c
@@ -77,6 +87,228 @@ class ContainerTest < ActiveSupport::TestCase
     end
   end
 
+  test "Container serialized hash attributes sorted before save" do
+    env = {"C" => 3, "B" => 2, "A" => 1}
+    m = {"F" => 3, "E" => 2, "D" => 1}
+    rc = {"vcpus" => 1, "ram" => 1}
+    c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
+    assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
+    assert_equal c.mounts.to_json, Container.deep_sort_hash(m).to_json
+    assert_equal c.runtime_constraints.to_json, Container.deep_sort_hash(rc).to_json
+  end
+
+  test 'deep_sort_hash on array of hashes' do
+    a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}
+    b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}
+    assert_equal Container.deep_sort_hash(a).to_json, Container.deep_sort_hash(b).to_json
+  end
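
[Editorial note] deep_sort_hash exists so that serialized hash attributes compare equal regardless of key insertion order, recursing through nested hashes and arrays (which is what the two tests above assert). A hedged sketch of such a function; the real Container.deep_sort_hash may differ in detail:

    def deep_sort_hash(value)
      case value
      when Hash
        # Sort the pairs by key, then rebuild the hash in sorted order.
        value.sort.each_with_object({}) { |(k, v), h| h[k] = deep_sort_hash(v) }
      when Array
        value.map { |v| deep_sort_hash(v) }
      else
        value
      end
    end

    deep_sort_hash({'z' => [[{'b' => 'b', 'a' => 'a'}]]})
    # => {"z"=>[[{"a"=>"a", "b"=>"b"}]]}
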
+
+  test "find_reusable method should select higher priority queued container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
+    c_low_priority, _ = minimal_new(common_attrs.merge({priority:1}))
+    c_high_priority, _ = minimal_new(common_attrs.merge({priority:2}))
+    assert_equal Container::Queued, c_low_priority.state
+    assert_equal Container::Queued, c_high_priority.state
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_high_priority.uuid
+  end
+
+  test "find_reusable method should select latest completed container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}})
+    completed_attrs = {
+      state: Container::Complete,
+      exit_code: 0,
+      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+      output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    }
+
+    c_older, _ = minimal_new(common_attrs)
+    c_recent, _ = minimal_new(common_attrs)
+
+    set_user_from_auth :dispatch1
+    c_older.update_attributes!({state: Container::Locked})
+    c_older.update_attributes!({state: Container::Running})
+    c_older.update_attributes!(completed_attrs)
+
+    c_recent.update_attributes!({state: Container::Locked})
+    c_recent.update_attributes!({state: Container::Running})
+    c_recent.update_attributes!(completed_attrs)
+
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_older.uuid
+  end
+
+  test "find_reusable method should not select completed container when inconsistent outputs exist" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}})
+    completed_attrs = {
+      state: Container::Complete,
+      exit_code: 0,
+      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+    }
+
+    c_output1, _ = minimal_new(common_attrs)
+    c_output2, _ = minimal_new(common_attrs)
+
+    set_user_from_auth :dispatch1
+    c_output1.update_attributes!({state: Container::Locked})
+    c_output1.update_attributes!({state: Container::Running})
+    c_output1.update_attributes!(completed_attrs.merge({output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}))
+
+    c_output2.update_attributes!({state: Container::Locked})
+    c_output2.update_attributes!({state: Container::Running})
+    c_output2.update_attributes!(completed_attrs.merge({output: 'fa7aeb5140e2848d39b416daeef4ffc5+45'}))
+
+    reused = Container.find_reusable(common_attrs)
+    assert_nil reused
+  end
+
+  test "find_reusable method should select running container by start date" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running"}})
+    c_slower, _ = minimal_new(common_attrs)
+    c_faster_started_first, _ = minimal_new(common_attrs)
+    c_faster_started_second, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_slower.update_attributes!({state: Container::Locked})
+    c_slower.update_attributes!({state: Container::Running,
+                                 progress: 0.1})
+    c_faster_started_first.update_attributes!({state: Container::Locked})
+    c_faster_started_first.update_attributes!({state: Container::Running,
+                                               progress: 0.15})
+    c_faster_started_second.update_attributes!({state: Container::Locked})
+    c_faster_started_second.update_attributes!({state: Container::Running,
+                                                progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    # Selected container is the one that started first
+    assert_equal reused.uuid, c_faster_started_first.uuid
+  end
+
+  test "find_reusable method should select running container by progress" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running2"}})
+    c_slower, _ = minimal_new(common_attrs)
+    c_faster_started_first, _ = minimal_new(common_attrs)
+    c_faster_started_second, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_slower.update_attributes!({state: Container::Locked})
+    c_slower.update_attributes!({state: Container::Running,
+                                 progress: 0.1})
+    c_faster_started_first.update_attributes!({state: Container::Locked})
+    c_faster_started_first.update_attributes!({state: Container::Running,
+                                               progress: 0.15})
+    c_faster_started_second.update_attributes!({state: Container::Locked})
+    c_faster_started_second.update_attributes!({state: Container::Running,
+                                                progress: 0.2})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    # Selected container is the one with most progress done
+    assert_equal reused.uuid, c_faster_started_second.uuid
+  end
+
+  test "find_reusable method should select locked container most likely to start sooner" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "locked"}})
+    c_low_priority, _ = minimal_new(common_attrs)
+    c_high_priority_older, _ = minimal_new(common_attrs)
+    c_high_priority_newer, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_low_priority.update_attributes!({state: Container::Locked,
+                                       priority: 1})
+    c_high_priority_older.update_attributes!({state: Container::Locked,
+                                              priority: 2})
+    c_high_priority_newer.update_attributes!({state: Container::Locked,
+                                              priority: 2})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_high_priority_older.uuid
+  end
+
+  test "find_reusable method should select running over failed container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed_vs_running"}})
+    c_failed, _ = minimal_new(common_attrs)
+    c_running, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_failed.update_attributes!({state: Container::Locked})
+    c_failed.update_attributes!({state: Container::Running})
+    c_failed.update_attributes!({state: Container::Complete,
+                                 exit_code: 42,
+                                 log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+                                 output: 'ea10d51bcf88862dbcc36eb292017dfd+45'})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_running.uuid
+  end
+
+  test "find_reusable method should select complete over running container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "completed_vs_running"}})
+    c_completed, _ = minimal_new(common_attrs)
+    c_running, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_completed.update_attributes!({state: Container::Locked})
+    c_completed.update_attributes!({state: Container::Running})
+    c_completed.update_attributes!({state: Container::Complete,
+                                    exit_code: 0,
+                                    log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+                                    output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal c_completed.uuid, reused.uuid
+  end
+
+  test "find_reusable method should select running over locked container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running_vs_locked"}})
+    c_locked, _ = minimal_new(common_attrs)
+    c_running, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_locked.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_running.uuid
+  end
+
+  test "find_reusable method should select locked over queued container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "locked_vs_queued"}})
+    c_locked, _ = minimal_new(common_attrs)
+    c_queued, _ = minimal_new(common_attrs)
+    set_user_from_auth :dispatch1
+    c_locked.update_attributes!({state: Container::Locked})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_locked.uuid
+  end
+
+  test "find_reusable method should not select failed container" do
+    set_user_from_auth :active
+    attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed"}})
+    c, _ = minimal_new(attrs)
+    set_user_from_auth :dispatch1
+    c.update_attributes!({state: Container::Locked})
+    c.update_attributes!({state: Container::Running})
+    c.update_attributes!({state: Container::Complete,
+                          exit_code: 33})
+    reused = Container.find_reusable(attrs)
+    assert_nil reused
+  end
+
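
[Editorial note] Taken together, the find_reusable tests map out a preference order: a consistent Complete container (oldest first) beats Running (most progress), which beats Locked and Queued (highest priority); failed runs are never reused. A hedged sketch of that ranking over plain hashes (the real implementation builds a SQL ordering, and the names here are assumptions):

    STATE_RANK = {'Complete' => 0, 'Running' => 1, 'Locked' => 2, 'Queued' => 3}

    def pick_reusable(candidates)
      # A Complete container with a nonzero exit code is never reused.
      usable = candidates.reject do |c|
        c[:state] == 'Complete' && c[:exit_code] != 0
      end
      usable.min_by do |c|
        tiebreak = case c[:state]
                   when 'Complete' then c[:finished_at].to_f  # oldest finish wins
                   when 'Running'  then -c[:progress].to_f    # most progress wins
                   else                 -c[:priority].to_i    # highest priority wins
                   end
        [STATE_RANK.fetch(c[:state], 99), tiebreak]
      end
    end
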
   test "Container running" do
     c, _ = minimal_new priority: 1
 
@@ -84,7 +316,7 @@ class ContainerTest < ActiveSupport::TestCase
     check_illegal_updates c, [{state: Container::Running},
                               {state: Container::Complete}]
 
-    c.update_attributes! state: Container::Locked
+    c.lock
     c.update_attributes! state: Container::Running
 
     check_illegal_modify c
@@ -102,7 +334,7 @@ class ContainerTest < ActiveSupport::TestCase
     set_user_from_auth :dispatch1
     assert_equal Container::Queued, c.state
 
-    refute c.update_attributes(state: Container::Locked), "no priority"
+    assert_raise(ActiveRecord::RecordInvalid) {c.lock} # "no priority"
     c.reload
     assert cr.update_attributes priority: 1
 
@@ -111,11 +343,14 @@ class ContainerTest < ActiveSupport::TestCase
     refute c.update_attributes(state: Container::Complete), "not locked"
     c.reload
 
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
     assert c.locked_by_uuid
     assert c.auth_uuid
 
-    assert c.update_attributes(state: Container::Queued), show_errors(c)
+    assert_raise(ArvadosModel::AlreadyLockedError) {c.lock}
+    c.reload
+
+    assert c.unlock, show_errors(c)
     refute c.locked_by_uuid
     refute c.auth_uuid
 
@@ -124,16 +359,16 @@ class ContainerTest < ActiveSupport::TestCase
     refute c.locked_by_uuid
     refute c.auth_uuid
 
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
     assert c.update_attributes(state: Container::Running), show_errors(c)
     assert c.locked_by_uuid
     assert c.auth_uuid
 
     auth_uuid_was = c.auth_uuid
 
-    refute c.update_attributes(state: Container::Locked), "already running"
+    assert_raise(ActiveRecord::RecordInvalid) {c.lock} # Running to Locked is not allowed
     c.reload
-    refute c.update_attributes(state: Container::Queued), "already running"
+    assert_raise(ActiveRecord::RecordInvalid) {c.unlock} # Running to Queued is not allowed
     c.reload
 
     assert c.update_attributes(state: Container::Complete), show_errors(c)
@@ -154,7 +389,7 @@ class ContainerTest < ActiveSupport::TestCase
   test "Container locked cancel" do
     c, _ = minimal_new
     set_user_from_auth :dispatch1
-    assert c.update_attributes(state: Container::Locked), show_errors(c)
+    assert c.lock, show_errors(c)
     assert c.update_attributes(state: Container::Cancelled), show_errors(c)
     check_no_change_from_cancelled c
   end
@@ -162,8 +397,7 @@ class ContainerTest < ActiveSupport::TestCase
   test "Container running cancel" do
     c, _ = minimal_new
     set_user_from_auth :dispatch1
-    c.update_attributes! state: Container::Queued
-    c.update_attributes! state: Container::Locked
+    c.lock
     c.update_attributes! state: Container::Running
     c.update_attributes! state: Container::Cancelled
     check_no_change_from_cancelled c
@@ -185,7 +419,7 @@ class ContainerTest < ActiveSupport::TestCase
   test "Container only set exit code on complete" do
     c, _ = minimal_new
     set_user_from_auth :dispatch1
-    c.update_attributes! state: Container::Locked
+    c.lock
     c.update_attributes! state: Container::Running
 
     check_illegal_updates c, [{exit_code: 1},
index 832338a3cc5de1ce742eb5088992ba91d4fe5fdc..3da2c836ed61579fe1d5058e4c367db2c9dd3eab 100644 (file)
@@ -440,4 +440,48 @@ class JobTest < ActiveSupport::TestCase
     assert_equal('077ba2ad3ea24a929091a9e6ce545c93199b8e57',
                  internal_tag(j.uuid))
   end
+
+  test 'script_parameters_digest is independent of key order' do
+    j1 = Job.new(job_attrs(script_parameters: {'a' => 'a', 'ddee' => {'d' => 'd', 'e' => 'e'}}))
+    j2 = Job.new(job_attrs(script_parameters: {'ddee' => {'e' => 'e', 'd' => 'd'}, 'a' => 'a'}))
+    assert j1.valid?
+    assert j2.valid?
+    assert_equal(j1.script_parameters_digest, j2.script_parameters_digest)
+  end
+
+  test 'job fixtures have correct script_parameters_digest' do
+    Job.all.each do |j|
+      d = j.script_parameters_digest
+      assert_equal(j.update_script_parameters_digest, d,
+                   "wrong script_parameters_digest for #{j.uuid}")
+    end
+  end
+
+  test 'deep_sort_hash on array of hashes' do
+    a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}
+    b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}
+    assert_equal Job.deep_sort_hash(a).to_json, Job.deep_sort_hash(b).to_json
+  end
+
+  test 'find_reusable' do
+    foobar = jobs(:foobar)
+    example_attrs = {
+      script_version: foobar.script_version,
+      script: foobar.script,
+      script_parameters: foobar.script_parameters,
+      repository: foobar.repository,
+    }
+
+    # Two matching jobs exist with identical outputs. The older one
+    # should be reused.
+    j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
+    assert j
+    assert_equal foobar.uuid, j.uuid
+
+    # Two matching jobs exist with different outputs. Neither should
+    # be reused.
+    Job.where(uuid: jobs(:job_with_latest_version).uuid).
+      update_all(output: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1')
+    assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
+  end
 end
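
[Editorial note] The job find_reusable test shows the output-consensus rule: among matching completed jobs, reuse the oldest only when every match produced the same output; any disagreement means nothing is reused. A hedged sketch over plain hashes:

    def pick_reusable_job(matches)
      return nil if matches.empty?
      # Inconsistent outputs disqualify the whole set of matches.
      return nil if matches.map { |j| j[:output] }.uniq.length > 1
      matches.min_by { |j| j[:created_at] }
    end

    a = {output: 'x', created_at: 1}
    b = {output: 'x', created_at: 2}
    pick_reusable_job([a, b])                     # => a (older, same output)
    pick_reusable_job([a, b.merge(output: 'y')])  # => nil (inconsistent outputs)
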
index 22808c5ed6f8718d1f0b4cfb117562822e556c1f..632271e98c263efad7a5869e1831da69ceaf3b97 100644 (file)
@@ -9,7 +9,7 @@ class LogTest < ActiveSupport::TestCase
     :destroy => [nil, :assert_not_nil, :assert_nil],
   }
 
-  def setup
+  setup do
     @start_time = Time.now
     @log_count = 1
   end
@@ -54,12 +54,13 @@ class LogTest < ActiveSupport::TestCase
     yield props if block_given?
   end
 
-  def assert_auth_logged_with_clean_properties(auth, event_type)
-    assert_logged(auth, event_type) do |props|
-      ['old_attributes', 'new_attributes'].map { |k| props[k] }.compact
-        .each do |attributes|
-        refute_includes(attributes, 'api_token',
-                        "auth log properties include sensitive API token")
+  def assert_logged_with_clean_properties(obj, event_type, excluded_attr)
+    assert_logged(obj, event_type) do |props|
+      ['old_attributes', 'new_attributes'].map do |logattr|
+        attributes = props[logattr]
+        next if attributes.nil?
+        refute_includes(attributes, excluded_attr,
+                        "log #{logattr} includes #{excluded_attr}")
       end
       yield props if block_given?
     end
@@ -224,12 +225,12 @@ class LogTest < ActiveSupport::TestCase
     auth.user = users(:spectator)
     auth.api_client = api_clients(:untrusted)
     auth.save!
-    assert_auth_logged_with_clean_properties(auth, :create)
+    assert_logged_with_clean_properties(auth, :create, 'api_token')
     auth.expires_at = Time.now
     auth.save!
-    assert_auth_logged_with_clean_properties(auth, :update)
+    assert_logged_with_clean_properties(auth, :update, 'api_token')
     auth.destroy
-    assert_auth_logged_with_clean_properties(auth, :destroy)
+    assert_logged_with_clean_properties(auth, :destroy, 'api_token')
   end
 
   test "use ownership and permission links to determine which logs a user can see" do
@@ -252,7 +253,8 @@ class LogTest < ActiveSupport::TestCase
                                       :crunchstat_for_running_job] # log & job owned by active
 
     c = Log.readable_by(users(:spectator)).order("id asc").each.to_a
-    assert_log_result c, known_logs, [:admin_changes_specimen, # owned by spectator
+    assert_log_result c, known_logs, [:noop,                   # object_uuid is spectator
+                                      :admin_changes_specimen, # object_uuid is a specimen owned by spectator
                                       :system_adds_baz] # readable via 'all users' group
   end
 
@@ -269,4 +271,40 @@ class LogTest < ActiveSupport::TestCase
       refute_includes result_ids, logs(notwant).id
     end
   end
+
+  test "non-empty configuration.unlogged_attributes" do
+    Rails.configuration.unlogged_attributes = ["manifest_text"]
+    txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+    act_as_system_user do
+      coll = Collection.create(manifest_text: txt)
+      assert_logged_with_clean_properties(coll, :create, 'manifest_text')
+      coll.name = "testing"
+      coll.save!
+      assert_logged_with_clean_properties(coll, :update, 'manifest_text')
+      coll.destroy
+      assert_logged_with_clean_properties(coll, :destroy, 'manifest_text')
+    end
+  end
+
+  test "empty configuration.unlogged_attributes" do
+    Rails.configuration.unlogged_attributes = []
+    txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+    act_as_system_user do
+      coll = Collection.create(manifest_text: txt)
+      assert_logged(coll, :create) do |props|
+        assert_equal(txt, props['new_attributes']['manifest_text'])
+      end
+      coll.update_attributes!(name: "testing")
+      assert_logged(coll, :update) do |props|
+        assert_equal(txt, props['old_attributes']['manifest_text'])
+        assert_equal(txt, props['new_attributes']['manifest_text'])
+      end
+      coll.destroy
+      assert_logged(coll, :destroy) do |props|
+        assert_equal(txt, props['old_attributes']['manifest_text'])
+      end
+    end
+  end
 end
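
The unlogged_attributes tests above verify that configured attributes are scrubbed from both old_attributes and new_attributes. A sketch of that filtering in Go (a hypothetical helper; the real work happens in the API server's Log model):

    package main

    import "fmt"

    // scrubAttributes returns a copy of a logged attribute set with the
    // configured unlogged attributes removed, leaving the original map
    // untouched.
    func scrubAttributes(attrs map[string]interface{}, unlogged []string) map[string]interface{} {
        clean := make(map[string]interface{}, len(attrs))
        for k, v := range attrs {
            clean[k] = v
        }
        for _, k := range unlogged {
            delete(clean, k)
        }
        return clean
    }

    func main() {
        props := map[string]interface{}{"name": "testing", "manifest_text": ". acbd...+3 0:3:foo\n"}
        fmt.Println(scrubAttributes(props, []string{"manifest_text"})) // map[name:testing]
    }
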
index 4a6ddc69fbcb1703c3234e4e6e1360a779b2f4b3..79fc1f29c7bf46a2f1efb3ae8f9dd298f0222015 100644 (file)
@@ -353,4 +353,27 @@ class PermissionTest < ActiveSupport::TestCase
       ob.update_attributes!(owner_uuid: groups(:aproject).uuid)
     end
   end
+
+  def container_logs(container, user)
+    Log.readable_by(users(user)).
+      where(object_uuid: containers(container).uuid, event_type: "test")
+  end
+
+  test "container logs created by dispatch are visible to container requestor" do
+    set_user_from_auth :dispatch1
+    Log.create!(object_uuid: containers(:running).uuid,
+                event_type: "test")
+
+    assert_not_empty container_logs(:running, :admin)
+    assert_not_empty container_logs(:running, :active)
+    assert_empty container_logs(:running, :spectator)
+  end
+
+  test "container logs created by dispatch are public if container request is public" do
+    set_user_from_auth :dispatch1
+    Log.create!(object_uuid: containers(:running_older).uuid,
+                event_type: "test")
+
+    assert_not_empty container_logs(:running_older, :anonymous)
+  end
 end
index 288e1184fa2be2cd2ab955edeff6356f5d6e5cd3..3fb0cce60ad4011b1749b6cea84395a5a3e784d9 100644 (file)
@@ -35,8 +35,8 @@ class RepositoryTest < ActiveSupport::TestCase
 
   {active: "active/", admin: "admin/", system_user: ""}.
       each_pair do |user_sym, name_prefix|
-    %w(a aa a0 aA Aa AA A0).each do |name|
-      test "'#{name_prefix}#{name}' is a valid name for #{user_sym} repo" do
+    test "valid names for #{user_sym} repo" do
+      %w(a aa a0 aA Aa AA A0).each do |name|
         repo = new_repo(user_sym, name: name_prefix + name)
         assert(repo.valid?)
       end
@@ -51,8 +51,8 @@ class RepositoryTest < ActiveSupport::TestCase
       refute(repo.valid?)
     end
 
-    "\\.-_/!@#$%^&*()[]{}".each_char do |bad_char|
-      test "name containing #{bad_char.inspect} is invalid for #{user_sym}" do
+    test "name containing bad char is invalid for #{user_sym}" do
+      "\\.-_/!@#$%^&*()[]{}".each_char do |bad_char|
         repo = new_repo(user_sym, name: "#{name_prefix}bad#{bad_char}reponame")
         refute(repo.valid?)
       end
index b96645ce263d2cb97b6028693b88e2334df51819..4df6cc0b369a67cd99907cb128979a4cf1c21956 100644 (file)
@@ -93,6 +93,16 @@ class UserTest < ActiveSupport::TestCase
   test "new username set with deduplication" do
     name = users(:active).username
     check_new_username_setting(name, "#{name}2")
+    check_new_username_setting(name, "#{name}3")
+    # Insert some out-of-order conflicts, to ensure our "sort by
+    # username, stop when we see a hole" strategy doesn't depend on
+    # insert order.
+    check_new_username_setting("#{name}13", "#{name}13")
+    check_new_username_setting("#{name}5", "#{name}5")
+    check_new_username_setting(name, "#{name}4")
+    6.upto(12).each do |n|
+      check_new_username_setting(name, "#{name}#{n}")
+    end
   end
 
   test "new username set avoiding blacklist" do
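
The deduplication exercised above appends the smallest free integer suffix, scanning upward from 2 so that holes are filled before higher suffixes are considered. A hypothetical sketch of that strategy (the API server does this with a SQL query rather than an in-memory set):

    package main

    import (
        "fmt"
        "strconv"
    )

    // nextUsername picks the first available of "name", "name2", "name3", ...
    func nextUsername(name string, taken map[string]bool) string {
        if !taken[name] {
            return name
        }
        for n := 2; ; n++ {
            candidate := name + strconv.Itoa(n)
            if !taken[candidate] {
                return candidate
            }
        }
    }

    func main() {
        taken := map[string]bool{"active": true, "active2": true, "active5": true}
        fmt.Println(nextUsername("active", taken)) // "active3": the hole before 5 is filled first
    }
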
diff --git a/services/api/test/unit/workflow_test.rb b/services/api/test/unit/workflow_test.rb
new file mode 100644 (file)
index 0000000..c7c5288
--- /dev/null
@@ -0,0 +1,125 @@
+require 'test_helper'
+
+class WorkflowTest < ActiveSupport::TestCase
+  test "create workflow with no definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with valid definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "k1:\n v1: x\n v2: y"
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with simple string as definition" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "this is valid yaml"
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with invalid definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "k1:\n v1: x\n  v2: y"
+    }
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      Workflow.create! wf
+    end
+  end
+
+  test "update workflow with invalid definition yaml" do
+    set_user_from_auth :active
+
+    w = Workflow.find_by_uuid(workflows(:workflow_with_definition_yml).uuid)
+    definition = "k1:\n v1: x\n  v2: y"
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      w.update_attributes!(definition: definition)
+    end
+  end
+
+  test "update workflow and verify name and description" do
+    set_user_from_auth :active
+
+    # Workflow name and desc should be set with values from definition yaml
+    # when it does not already have custom values for these fields
+    w = Workflow.find_by_uuid(workflows(:workflow_with_no_name_and_desc).uuid)
+    definition = "name: test name 1\ndescription: test desc 1\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "test name 1", w.name
+    assert_equal "test desc 1", w.description
+
+    # Name and desc values previously set from definition yaml are not
+    # custom values, so they should be overwritten by the new yaml
+    definition = "name: test name 2\ndescription: test desc 2\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "test name 2", w.name
+    assert_equal "test desc 2", w.description
+
+    # Workflow name and desc should be set with values from definition yaml
+    # even if it means emptying them out
+    definition = "more: etc"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_nil w.name
+    assert_nil w.description
+
+    # Workflow name and desc set using definition yaml should be cleared
+    # if definition yaml is cleared
+    definition = "name: test name 2\ndescription: test desc 2\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    definition = nil
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_nil w.name
+    assert_nil w.description
+
+    # Workflow name and desc should be set to provided custom values
+    definition = "name: test name 3\ndescription: test desc 3\nother: some more"
+    w.update_attributes!(name: "remains", description: "remains", definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+
+    # Workflow name and desc should retain provided custom values
+    # and should not be overwritten by values from yaml
+    definition = "name: test name 4\ndescription: test desc 4\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+
+    # Workflow name and desc should retain provided custom values
+    # and not be affected by the clearing of the definition yaml
+    definition = nil
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+  end
+end
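
These tests hinge on the Workflow model pulling name and description out of the definition YAML whenever the record has no custom values of its own. A rough Go sketch of that extraction using gopkg.in/yaml.v2 (a hypothetical helper; the real logic lives in the Rails model):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    // nameAndDescription returns the top-level "name" and "description"
    // keys of a workflow definition, if any. Invalid YAML is an error;
    // valid YAML that is not a mapping (e.g. a bare string) simply
    // carries no name or description.
    func nameAndDescription(definition string) (name, desc string, err error) {
        var parsed interface{}
        if err = yaml.Unmarshal([]byte(definition), &parsed); err != nil {
            return "", "", err
        }
        m, ok := parsed.(map[interface{}]interface{})
        if !ok {
            return "", "", nil
        }
        name, _ = m["name"].(string)
        desc, _ = m["description"].(string)
        return name, desc, nil
    }

    func main() {
        n, d, _ := nameAndDescription("name: test name 1\ndescription: test desc 1\nother: some more")
        fmt.Printf("%q %q\n", n, d) // "test name 1" "test desc 1"
    }
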
index 65af8ce2bd9a732e1e7d2ace6772f618670cc5dc..be32a0f299d0b396b867c0ff9943fc3234da7ce3 100644 (file)
@@ -1,7 +1,12 @@
 require 'bundler'
+require 'socket'
 
 $ARV_API_SERVER_DIR = File.expand_path('../..', __FILE__)
-SERVER_PID_PATH = 'tmp/pids/passenger.3002.pid'
+
+s = TCPServer.new('0.0.0.0', 0)
+WEBSOCKET_PORT = s.addr[1]
+s.close
+SERVER_PID_PATH = "tmp/pids/passenger.#{WEBSOCKET_PORT}.pid"
 
 class WebsocketTestRunner < MiniTest::Unit
   def _system(*cmd)
@@ -15,7 +20,7 @@ class WebsocketTestRunner < MiniTest::Unit
   def _run(args=[])
     server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
       # Only passenger seems to be able to run the websockets server successfully.
-      _system('passenger', 'start', '-d', '-p3002')
+      _system('passenger', 'start', '-d', "-p#{WEBSOCKET_PORT}")
       timeout = Time.now.tv_sec + 10
       begin
         sleep 0.2
@@ -35,7 +40,7 @@ class WebsocketTestRunner < MiniTest::Unit
       super(args)
     ensure
       Dir.chdir($ARV_API_SERVER_DIR) do
-        _system('passenger', 'stop', '-p3002')
+        _system('passenger', 'stop', "-p#{WEBSOCKET_PORT}")
       end
       # DatabaseCleaner leaves the database empty. Prefer to leave it full.
       dc = DatabaseController.new
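
The test runner change above replaces the hard-coded port 3002 with one picked by binding TCPServer to port 0 and reading back the kernel-assigned port. The same trick in Go, for reference:

    package main

    import (
        "fmt"
        "net"
    )

    // freePort asks the kernel for an unused TCP port by listening on
    // port 0, then releasing it so the real server can bind it.
    func freePort() (int, error) {
        l, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            return 0, err
        }
        defer l.Close()
        return l.Addr().(*net.TCPAddr).Port, nil
    }

    func main() {
        p, err := freePort()
        if err != nil {
            panic(err)
        }
        fmt.Println("websocket test server port:", p)
    }

There is a small race between closing the probe socket and passenger binding the port, which is generally acceptable in a test harness.
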
diff --git a/services/arv-git-httpd/arv-git-httpd.service b/services/arv-git-httpd/arv-git-httpd.service
new file mode 100644 (file)
index 0000000..f71c2ff
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Arvados git server
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/arvados-git-httpd/arvados-git-httpd.yml
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/arvados-git-httpd
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
index fccb0c9576864634481a2e69b7237def54b6f0ec..9f92cd1b7213f5a720a771ae349fac7dd6558b39 100644 (file)
@@ -5,6 +5,7 @@ import (
        "net/http"
        "os"
        "strings"
+       "sync"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
@@ -12,13 +13,24 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
 )
 
-var clientPool = arvadosclient.MakeClientPool()
-
 type authHandler struct {
-       handler http.Handler
+       handler    http.Handler
+       clientPool *arvadosclient.ClientPool
+       setupOnce  sync.Once
+}
+
+func (h *authHandler) setup() {
+       ac, err := arvadosclient.New(&theConfig.Client)
+       if err != nil {
+               log.Fatal(err)
+       }
+       h.clientPool = &arvadosclient.ClientPool{Prototype: ac}
+       log.Printf("%+v", h.clientPool.Prototype)
 }
 
 func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
+       h.setupOnce.Do(h.setup)
+
        var statusCode int
        var statusText string
        var apiToken string
@@ -68,12 +80,12 @@ func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        repoName = pathParts[0]
        repoName = strings.TrimRight(repoName, "/")
 
-       arv := clientPool.Get()
+       arv := h.clientPool.Get()
        if arv == nil {
-               statusCode, statusText = http.StatusInternalServerError, "connection pool failed: "+clientPool.Err().Error()
+               statusCode, statusText = http.StatusInternalServerError, "connection pool failed: "+h.clientPool.Err().Error()
                return
        }
-       defer clientPool.Put(arv)
+       defer h.clientPool.Put(arv)
 
        // Ask API server whether the repository is readable using
        // this token (by trying to read it!)
@@ -129,7 +141,7 @@ func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                "/" + repoName + "/.git",
        }
        for _, dir := range tryDirs {
-               if fileInfo, err := os.Stat(theConfig.Root + dir); err != nil {
+               if fileInfo, err := os.Stat(theConfig.RepoRoot + dir); err != nil {
                        if !os.IsNotExist(err) {
                                statusCode, statusText = http.StatusInternalServerError, err.Error()
                                return
@@ -141,7 +153,7 @@ func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        }
        if rewrittenPath == "" {
                log.Println("WARNING:", repoUUID,
-                       "git directory not found in", theConfig.Root, tryDirs)
+                       "git directory not found in", theConfig.RepoRoot, tryDirs)
                // We say "content not found" to disambiguate from the
                // earlier "API says that repo does not exist" error.
                statusCode, statusText = http.StatusNotFound, "content not found"
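
The authHandler change above moves client-pool construction out of package init and into a sync.Once-guarded setup, so it runs after configuration is loaded and exactly once even under concurrent requests. A stripped-down sketch of the pattern (names here are hypothetical):

    package main

    import (
        "fmt"
        "sync"
    )

    type lazyHandler struct {
        setupOnce sync.Once
        pool      []string
    }

    func (h *lazyHandler) setup() {
        // Stands in for building a ClientPool from loaded config.
        h.pool = []string{"client-1", "client-2"}
    }

    func (h *lazyHandler) Serve() {
        h.setupOnce.Do(h.setup) // runs exactly once, even under concurrency
        fmt.Println("serving with", len(h.pool), "clients")
    }

    func main() {
        h := &lazyHandler{}
        h.Serve()
        h.Serve() // setup is not repeated
    }
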
index 0312b296fc938da2f4950de7ca9240a5c0550825..f0b98fab72382dfa02c2b12a144e2a6b9f5190c4 100644 (file)
@@ -19,11 +19,11 @@ func newGitHandler() http.Handler {
        return &gitHandler{
                Handler: cgi.Handler{
                        Path: theConfig.GitCommand,
-                       Dir:  theConfig.Root,
+                       Dir:  theConfig.RepoRoot,
                        Env: []string{
-                               "GIT_PROJECT_ROOT=" + theConfig.Root,
+                               "GIT_PROJECT_ROOT=" + theConfig.RepoRoot,
                                "GIT_HTTP_EXPORT_ALL=",
-                               "SERVER_ADDR=" + theConfig.Addr,
+                               "SERVER_ADDR=" + theConfig.Listen,
                        },
                        InheritEnv: []string{
                                "PATH",
index 35c2f4884f4f99e2894c5125776edfb0db32895c..d87162dca3aa6f80ac16411c4a138e6286fc40e2 100644 (file)
@@ -37,7 +37,7 @@ func (s *GitHandlerSuite) TestEnvVars(c *check.C) {
        c.Check(body, check.Matches, `(?ms).*^GL_BYPASS_ACCESS_CHECKS=yesplease$.*`)
        c.Check(body, check.Matches, `(?ms).*^REMOTE_HOST=::1$.*`)
        c.Check(body, check.Matches, `(?ms).*^REMOTE_PORT=12345$.*`)
-       c.Check(body, check.Matches, `(?ms).*^SERVER_ADDR=`+regexp.QuoteMeta(theConfig.Addr)+`$.*`)
+       c.Check(body, check.Matches, `(?ms).*^SERVER_ADDR=`+regexp.QuoteMeta(theConfig.Listen)+`$.*`)
 }
 
 func (s *GitHandlerSuite) TestCGIErrorOnSplitHostPortError(c *check.C) {
index 20bdae7ec13a5534ebd4f69248869d4980688fa7..74c2b8cf4d91a8ac3da2835b12e90a35e6dd0380 100644 (file)
@@ -6,6 +6,8 @@ import (
        "os/exec"
        "strings"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
 )
 
@@ -41,10 +43,14 @@ func (s *GitoliteSuite) SetUpTest(c *check.C) {
        runGitolite("gitolite", "setup", "--admin", "root")
 
        s.tmpRepoRoot = s.gitoliteHome + "/repositories"
-       s.Config = &config{
-               Addr:       ":0",
+       s.Config = &Config{
+               Client: arvados.Client{
+                       APIHost:  arvadostest.APIHost(),
+                       Insecure: true,
+               },
+               Listen:     ":0",
                GitCommand: "/usr/share/gitolite3/gitolite-shell",
-               Root:       s.tmpRepoRoot,
+               RepoRoot:   s.tmpRepoRoot,
        }
        s.IntegrationSuite.SetUpTest(c)
 
@@ -62,6 +68,10 @@ func (s *GitoliteSuite) TearDownTest(c *check.C) {
        // upgrade to Go 1.4.
        os.Setenv("GITOLITE_HTTP_HOME", "")
        os.Setenv("GL_BYPASS_ACCESS_CHECKS", "")
+       if s.gitoliteHome != "" {
+               err := os.RemoveAll(s.gitoliteHome)
+               c.Check(err, check.Equals, nil)
+       }
        s.IntegrationSuite.TearDownTest(c)
 }
 
index 61d83ff8e85a0da8b6579b8a86ffd8c50d5b2551..5e55eca754838d97d2aaa8888482c686306a42cf 100644 (file)
@@ -8,6 +8,7 @@ import (
        "strings"
        "testing"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
 )
@@ -23,7 +24,7 @@ type IntegrationSuite struct {
        tmpRepoRoot string
        tmpWorkdir  string
        testServer  *server
-       Config      *config
+       Config      *Config
 }
 
 func (s *IntegrationSuite) SetUpSuite(c *check.C) {
@@ -67,19 +68,27 @@ func (s *IntegrationSuite) SetUpTest(c *check.C) {
        c.Assert(err, check.Equals, nil)
 
        if s.Config == nil {
-               s.Config = &config{
-                       Addr:       ":0",
+               s.Config = &Config{
+                       Client: arvados.Client{
+                               APIHost:  arvadostest.APIHost(),
+                               Insecure: true,
+                       },
+                       Listen:     ":0",
                        GitCommand: "/usr/bin/git",
-                       Root:       s.tmpRepoRoot,
+                       RepoRoot:   s.tmpRepoRoot,
                }
        }
+
+       // Clear ARVADOS_API_* env vars before starting up the server,
+       // to make sure arv-git-httpd doesn't use them or complain
+       // about them being missing.
+       os.Unsetenv("ARVADOS_API_HOST")
+       os.Unsetenv("ARVADOS_API_HOST_INSECURE")
+       os.Unsetenv("ARVADOS_API_TOKEN")
+
        theConfig = s.Config
        err = s.testServer.Start()
        c.Assert(err, check.Equals, nil)
-
-       // Clear ARVADOS_API_TOKEN after starting up the server, to
-       // make sure arv-git-httpd doesn't use it.
-       os.Setenv("ARVADOS_API_TOKEN", "unused-token-placates-client-library")
 }
 
 func (s *IntegrationSuite) TearDownTest(c *check.C) {
index 98695c9a9df806164afc59c128e204cb52547cc9..dd281366b29ac886365056ad8c2c4e2250a0d739 100644 (file)
@@ -1,49 +1,80 @@
 package main
 
 import (
+       "encoding/json"
        "flag"
        "log"
        "os"
+       "regexp"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/coreos/go-systemd/daemon"
 )
 
-type config struct {
-       Addr       string
+// Server configuration
+type Config struct {
+       Client     arvados.Client
+       Listen     string
        GitCommand string
-       Root       string
+       RepoRoot   string
 }
 
-var theConfig *config
+var theConfig = defaultConfig()
 
-func init() {
-       theConfig = &config{}
-       flag.StringVar(&theConfig.Addr, "address", "0.0.0.0:80",
-               "Address to listen on, \"host:port\".")
-       flag.StringVar(&theConfig.GitCommand, "git-command", "/usr/bin/git",
-               "Path to git or gitolite-shell executable. Each authenticated request will execute this program with a single argument, \"http-backend\".")
+func defaultConfig() *Config {
        cwd, err := os.Getwd()
        if err != nil {
                log.Fatalln("Getwd():", err)
        }
-       flag.StringVar(&theConfig.Root, "repo-root", cwd,
-               "Path to git repositories.")
-
-       // MakeArvadosClient returns an error if token is unset (even
-       // though we don't need to do anything requiring
-       // authentication yet). We can't do this in newArvadosClient()
-       // just before calling MakeArvadosClient(), though, because
-       // that interferes with the env var needed by "run test
-       // servers".
-       os.Setenv("ARVADOS_API_TOKEN", "xxx")
+       return &Config{
+               Listen:     ":80",
+               GitCommand: "/usr/bin/git",
+               RepoRoot:   cwd,
+       }
 }
 
-func main() {
+func init() {
+       const defaultCfgPath = "/etc/arvados/git-httpd/git-httpd.yml"
+       const deprecated = " (DEPRECATED -- use config file instead)"
+       flag.StringVar(&theConfig.Listen, "address", theConfig.Listen,
+               "Address to listen on, \"host:port\" or \":port\"."+deprecated)
+       flag.StringVar(&theConfig.GitCommand, "git-command", theConfig.GitCommand,
+               "Path to git or gitolite-shell executable. Each authenticated request will execute this program with a single argument, \"http-backend\"."+deprecated)
+       flag.StringVar(&theConfig.RepoRoot, "repo-root", theConfig.RepoRoot,
+               "Path to git repositories."+deprecated)
+
+       cfgPath := flag.String("config", defaultCfgPath, "Configuration file `path`.")
+       flag.Usage = usage
        flag.Parse()
+
+       err := config.LoadFile(theConfig, *cfgPath)
+       if err != nil {
+               h := os.Getenv("ARVADOS_API_HOST")
+               if h == "" || !os.IsNotExist(err) || *cfgPath != defaultCfgPath {
+                       log.Fatal(err)
+               }
+               log.Print("DEPRECATED: No config file found, but ARVADOS_API_HOST environment variable is set. Please use a config file instead.")
+               theConfig.Client.APIHost = h
+               if regexp.MustCompile("^(?i:1|yes|true)$").MatchString(os.Getenv("ARVADOS_API_HOST_INSECURE")) {
+                       theConfig.Client.Insecure = true
+               }
+               if j, err := json.MarshalIndent(theConfig, "", "    "); err == nil {
+                       log.Print("Current configuration:\n", string(j))
+               }
+       }
+}
+
+func main() {
        srv := &server{}
        if err := srv.Start(); err != nil {
                log.Fatal(err)
        }
+       if _, err := daemon.SdNotify("READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
        log.Println("Listening at", srv.Addr)
-       log.Println("Repository root", theConfig.Root)
+       log.Println("Repository root", theConfig.RepoRoot)
        if err := srv.Wait(); err != nil {
                log.Fatal(err)
        }
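
The init() above prefers the config file and only falls back to the deprecated ARVADOS_API_* environment variables when the file is missing at the default path. A simplified sketch of that precedence (this is not the sdk/go/config package, and the real code also accepts the "1|yes|true" forms for the insecure flag):

    package main

    import (
        "fmt"
        "log"
        "os"

        "gopkg.in/yaml.v2"
    )

    type clientConfig struct {
        APIHost  string `yaml:"APIHost"`
        Insecure bool   `yaml:"Insecure"`
    }

    // loadConfig reads a YAML config file; if the file is missing and
    // the deprecated ARVADOS_API_HOST variable is set, it falls back to
    // the environment instead of failing.
    func loadConfig(path string) (clientConfig, error) {
        var c clientConfig
        buf, err := os.ReadFile(path)
        if err == nil {
            return c, yaml.Unmarshal(buf, &c)
        }
        if h := os.Getenv("ARVADOS_API_HOST"); os.IsNotExist(err) && h != "" {
            log.Print("DEPRECATED: no config file; using ARVADOS_API_* environment variables")
            c.APIHost = h
            c.Insecure = os.Getenv("ARVADOS_API_HOST_INSECURE") != ""
            return c, nil
        }
        return c, err
    }

    func main() {
        c, err := loadConfig("/etc/arvados/git-httpd/git-httpd.yml")
        fmt.Println(c, err)
    }
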
index 40e77a812a6ff4c04a524ca76b819555b9e6e69e..e2311d22e876861ecbe8749b8f552aff2c2c2871 100644 (file)
@@ -12,8 +12,8 @@ type server struct {
 
 func (srv *server) Start() error {
        mux := http.NewServeMux()
-       mux.Handle("/", &authHandler{newGitHandler()})
+       mux.Handle("/", &authHandler{handler: newGitHandler()})
        srv.Handler = mux
-       srv.Addr = theConfig.Addr
+       srv.Addr = theConfig.Listen
        return srv.Server.Start()
 }
diff --git a/services/arv-git-httpd/usage.go b/services/arv-git-httpd/usage.go
new file mode 100644 (file)
index 0000000..666edc0
--- /dev/null
@@ -0,0 +1,62 @@
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "os"
+)
+
+func usage() {
+       c := defaultConfig()
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := json.MarshalIndent(c, "    ", "  ")
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+arv-git-httpd provides authenticated access to Arvados-hosted git repositories.
+
+See http://doc.arvados.org/install/install-arv-git-httpd.html.
+
+Usage: arv-git-httpd [-config path/to/arv-git-httpd.yml]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+    %s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Unused. Normally empty, or omitted entirely.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+Listen:
+
+    Local port to listen on. Can be "address:port" or ":port", where
+    "address" is a host IP address or name and "port" is a port number
+    or name.
+
+GitCommand:
+
+    Path to git or gitolite-shell executable. Each authenticated
+    request will execute this program with the single argument
+    "http-backend".
+
+RepoRoot:
+
+    Path to git repositories. Defaults to current working directory.
+
+`, exampleConfigFile)
+}
index 936a9088ed0c3d3affe6c3e0f9555d9e230d0c99..0ca765185119c152dd11870641c15f905042311e 100644 (file)
@@ -168,7 +168,7 @@ func run(dispatcher *dispatch.Dispatcher,
        }
 
        // drain any subsequent status changes
-       for _ = range status {
+       for range status {
        }
 
        log.Printf("Finalized container %v", uuid)
index 9628bf2f0aac3beb8ccc58768d1498fc3371a9a2..bcb406eb8e47667143ae5b4c246b00fbf3a53260 100644 (file)
@@ -88,7 +88,7 @@ func (s *TestSuite) TestIntegration(c *C) {
 
        // There should be no queued containers now
        params := arvadosclient.Dict{
-               "filters": [][]string{[]string{"state", "=", "Queued"}},
+               "filters": [][]string{{"state", "=", "Queued"}},
        }
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
@@ -116,13 +116,15 @@ func (s *MockArvadosServerSuite) Test_APIErrorUpdatingContainerState(c *C) {
        apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx1"] =
                arvadostest.StubResponse{500, string(`{}`)}
 
-       testWithServerStub(c, apiStubResponses, "echo", "Error updating container zzzzz-dz642-xxxxxxxxxxxxxx1 to state \"Locked\"")
+       testWithServerStub(c, apiStubResponses, "echo", "Error locking container zzzzz-dz642-xxxxxxxxxxxxxx1")
 }
 
 func (s *MockArvadosServerSuite) Test_ContainerStillInRunningAfterRun(c *C) {
        apiStubResponses := make(map[string]arvadostest.StubResponse)
        apiStubResponses["/arvados/v1/containers"] =
                arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2","State":"Queued","Priority":1}]}`)}
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2/lock"] =
+               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Locked", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
        apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2"] =
                arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Running", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
 
@@ -135,8 +137,8 @@ func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
        apiStubResponses["/arvados/v1/containers"] =
                arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3","State":"Queued","Priority":1}]}`)}
 
-       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3"] =
-               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Running", "priority":1}`)}
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3/lock"] =
+               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Locked", "priority":1}`)}
 
        testWithServerStub(c, apiStubResponses, "nosuchcommand", "Error starting nosuchcommand for zzzzz-dz642-xxxxxxxxxxxxxx3")
 }
@@ -150,7 +152,7 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        api := httptest.NewServer(&apiStub)
        defer api.Close()
 
-       arv := arvadosclient.ArvadosClient{
+       arv := &arvadosclient.ArvadosClient{
                Scheme:    "http",
                ApiServer: api.URL[7:],
                ApiToken:  "abc123",
index 4bfff6a5f0ccfe15a5a5e452f4536c01693df976..5a1ebc54e017069507e0778f5a20c990a6d001c8 100644 (file)
@@ -7,7 +7,10 @@ import (
        "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/config"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/coreos/go-systemd/daemon"
+       "io"
        "io/ioutil"
        "log"
        "math"
@@ -17,34 +20,75 @@ import (
        "time"
 )
 
+// Config used by crunch-dispatch-slurm
+type Config struct {
+       Client arvados.Client
+
+       SbatchArguments []string
+       PollPeriod      arvados.Duration
+
+       // crunch-run command to invoke. The container UUID will be
+       // appended. If nil, []string{"crunch-run"} will be used.
+       //
+       // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+       CrunchRunCommand []string
+}
+
 func main() {
        err := doMain()
        if err != nil {
-               log.Fatalf("%q", err)
+               log.Fatal(err)
        }
 }
 
 var (
-       crunchRunCommand *string
-       squeueUpdater    Squeue
+       theConfig     Config
+       squeueUpdater Squeue
 )
 
+const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+
 func doMain() error {
        flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+       flags.Usage = func() { usage(flags) }
 
-       pollInterval := flags.Int(
-               "poll-interval",
-               10,
-               "Interval in seconds to poll for queued containers")
-
-       crunchRunCommand = flags.String(
-               "crunch-run-command",
-               "/usr/bin/crunch-run",
-               "Crunch command to run container")
+       configPath := flags.String(
+               "config",
+               defaultConfigPath,
+               "`path` to JSON or YAML configuration file")
 
        // Parse args; omit the first arg which is the command name
        flags.Parse(os.Args[1:])
 
+       err := readConfig(&theConfig, *configPath)
+       if err != nil {
+               return err
+       }
+
+       if theConfig.CrunchRunCommand == nil {
+               theConfig.CrunchRunCommand = []string{"crunch-run"}
+       }
+
+       if theConfig.PollPeriod == 0 {
+               theConfig.PollPeriod = arvados.Duration(10 * time.Second)
+       }
+
+       if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
+               // Copy real configs into env vars so [a]
+               // MakeArvadosClient() uses them, and [b] they get
+               // propagated to crunch-run via SLURM.
+               os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
+               os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
+               os.Setenv("ARVADOS_API_INSECURE", "")
+               if theConfig.Client.Insecure {
+                       os.Setenv("ARVADOS_API_INSECURE", "1")
+               }
+               os.Setenv("ARVADOS_KEEP_SERVICES", "")
+               os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+       } else {
+               log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+       }
+
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                log.Printf("Error making Arvados client: %v", err)
@@ -52,15 +96,19 @@ func doMain() error {
        }
        arv.Retries = 25
 
-       squeueUpdater.StartMonitor(time.Duration(*pollInterval) * time.Second)
+       squeueUpdater.StartMonitor(time.Duration(theConfig.PollPeriod))
        defer squeueUpdater.Done()
 
        dispatcher := dispatch.Dispatcher{
                Arv:            arv,
                RunContainer:   run,
-               PollInterval:   time.Duration(*pollInterval) * time.Second,
+               PollInterval:   time.Duration(theConfig.PollPeriod),
                DoneProcessing: make(chan struct{})}
 
+       if _, err := daemon.SdNotify("READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+
        err = dispatcher.RunDispatcher()
        if err != nil {
                return err
@@ -72,11 +120,15 @@ func doMain() error {
 // sbatchCmd
 func sbatchFunc(container arvados.Container) *exec.Cmd {
        memPerCPU := math.Ceil(float64(container.RuntimeConstraints.RAM) / (float64(container.RuntimeConstraints.VCPUs) * 1048576))
-       return exec.Command("sbatch", "--share", "--parsable",
-               fmt.Sprintf("--job-name=%s", container.UUID),
-               fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)),
-               fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
-               fmt.Sprintf("--priority=%d", container.Priority))
+
+       var sbatchArgs []string
+       sbatchArgs = append(sbatchArgs, "--share")
+       sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
+       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
+       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)))
+       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
+
+       return exec.Command("sbatch", sbatchArgs...)
 }
 
 // scancelCmd
@@ -90,9 +142,7 @@ var scancelCmd = scancelFunc
 
 // Submit job to slurm using sbatch.
 func submit(dispatcher *dispatch.Dispatcher,
-       container arvados.Container, crunchRunCommand string) (jobid string, submitErr error) {
-       submitErr = nil
-
+       container arvados.Container, crunchRunCommand []string) (submitErr error) {
        defer func() {
                // If we didn't get as far as submitting a slurm job,
                // unlock the container and return it to the queue.
@@ -100,10 +150,7 @@ func submit(dispatcher *dispatch.Dispatcher,
                        // OK, no cleanup needed
                        return
                }
-               err := dispatcher.Arv.Update("containers", container.UUID,
-                       arvadosclient.Dict{
-                               "container": arvadosclient.Dict{"state": "Queued"}},
-                       nil)
+               err := dispatcher.Unlock(container.UUID)
                if err != nil {
                        log.Printf("Error unlocking container %s: %v", container.UUID, err)
                }
@@ -155,7 +202,7 @@ func submit(dispatcher *dispatch.Dispatcher,
 
        // Send a tiny script on stdin to execute the crunch-run command
        // slurm actually enforces that this must be a #! script
-       fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
+       io.WriteString(stdinWriter, execScript(append(crunchRunCommand, container.UUID)))
        stdinWriter.Close()
 
        err = cmd.Wait()
@@ -167,13 +214,11 @@ func submit(dispatcher *dispatch.Dispatcher,
        close(stderrChan)
 
        if err != nil {
-               submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
+               submitErr = fmt.Errorf("Container submission failed: %v: %v (stderr: %q)", cmd.Args, err, stderrmsg)
                return
        }
 
-       // If everything worked out, got the jobid on stdout
-       jobid = strings.TrimSpace(string(stdoutMsg))
-
+       log.Printf("sbatch succeeded: %s", strings.TrimSpace(string(stdoutMsg)))
        return
 }
 
@@ -194,11 +239,11 @@ func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Co
 
                        log.Printf("About to submit queued container %v", container.UUID)
 
-                       if _, err := submit(dispatcher, container, *crunchRunCommand); err != nil {
+                       if err := submit(dispatcher, container, theConfig.CrunchRunCommand); err != nil {
                                log.Printf("Error submitting container %s to slurm: %v",
                                        container.UUID, err)
                                // maybe sbatch is broken, put it back to queued
-                               dispatcher.UpdateState(container.UUID, dispatch.Queued)
+                               dispatcher.Unlock(container.UUID)
                        }
                        submitted = true
                } else {
@@ -214,20 +259,20 @@ func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Co
                                log.Printf("Error getting final container state: %v", err)
                        }
 
-                       var st arvados.ContainerState
                        switch con.State {
                        case dispatch.Locked:
-                               st = dispatch.Queued
+                               log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+                                       container.UUID, con.State, dispatch.Queued)
+                               dispatcher.Unlock(container.UUID)
                        case dispatch.Running:
-                               st = dispatch.Cancelled
+                               st := dispatch.Cancelled
+                               log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+                                       container.UUID, con.State, st)
+                               dispatcher.UpdateState(container.UUID, st)
                        default:
                                // Container state is Queued, Complete or Cancelled so stop monitoring it.
                                return
                        }
-
-                       log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
-                               container.UUID, con.State, st)
-                       dispatcher.UpdateState(container.UUID, st)
                }
        }
 }
@@ -272,3 +317,12 @@ func run(dispatcher *dispatch.Dispatcher,
        }
        monitorDone = true
 }
+
+func readConfig(dst interface{}, path string) error {
+       err := config.LoadFile(dst, path)
+       if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
+               log.Printf("Config not specified. Continue with default configuration.")
+               err = nil
+       }
+       return err
+}
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.service b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.service
new file mode 100644 (file)
index 0000000..34ba80b
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Arvados Crunch Dispatcher for SLURM
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/crunch-dispatch-slurm
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
index b72ad9fa9dea802bd30a9aa70d84c817493cec0f..e7ccf25fc9f1d6f9012ce8c5ca063374431d7a91 100644 (file)
@@ -8,6 +8,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
        "io"
+       "io/ioutil"
        "log"
        "net/http"
        "net/http/httptest"
@@ -95,11 +96,10 @@ func (s *TestSuite) TestIntegrationCancel(c *C) {
 }
 
 func (s *TestSuite) TestIntegrationMissingFromSqueue(c *C) {
-       container := s.integrationTest(c, func() *exec.Cmd { return exec.Command("echo") }, []string{"sbatch", "--share", "--parsable",
+       container := s.integrationTest(c, func() *exec.Cmd { return exec.Command("echo") }, []string{"sbatch", "--share",
                fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
                fmt.Sprintf("--mem-per-cpu=%d", 2862),
-               fmt.Sprintf("--cpus-per-task=%d", 4),
-               fmt.Sprintf("--priority=%d", 1)},
+               fmt.Sprintf("--cpus-per-task=%d", 4)},
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
                        dispatcher.UpdateState(container.UUID, dispatch.Running)
                        time.Sleep(3 * time.Second)
@@ -136,15 +136,14 @@ func (s *TestSuite) integrationTest(c *C,
 
        // There should be no queued containers now
        params := arvadosclient.Dict{
-               "filters": [][]string{[]string{"state", "=", "Queued"}},
+               "filters": [][]string{{"state", "=", "Queued"}},
        }
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
        c.Check(err, IsNil)
        c.Check(len(containers.Items), Equals, 1)
 
-       echo := "echo"
-       crunchRunCommand = &echo
+       theConfig.CrunchRunCommand = []string{"echo"}
 
        doneProcessing := make(chan struct{})
        dispatcher := dispatch.Dispatcher{
@@ -180,7 +179,7 @@ func (s *TestSuite) integrationTest(c *C,
        return container
 }
 
-func (s *MockArvadosServerSuite) Test_APIErrorGettingContainers(c *C) {
+func (s *MockArvadosServerSuite) TestAPIErrorGettingContainers(c *C) {
        apiStubResponses := make(map[string]arvadostest.StubResponse)
        apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
        apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
@@ -194,7 +193,7 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        api := httptest.NewServer(&apiStub)
        defer api.Close()
 
-       arv := arvadosclient.ArvadosClient{
+       arv := &arvadosclient.ArvadosClient{
                Scheme:    "http",
                ApiServer: api.URL[7:],
                ApiToken:  "abc123",
@@ -206,7 +205,7 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        log.SetOutput(io.MultiWriter(buf, os.Stderr))
        defer log.SetOutput(os.Stderr)
 
-       crunchRunCommand = &crunchCmd
+       theConfig.CrunchRunCommand = []string{crunchCmd}
 
        doneProcessing := make(chan struct{})
        dispatcher := dispatch.Dispatcher{
@@ -237,3 +236,82 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
 
        c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
 }
+
+func (s *MockArvadosServerSuite) TestNoSuchConfigFile(c *C) {
+       var config Config
+       err := readConfig(&config, "/nosuchdir89j7879/8hjwr7ojgyy7")
+       c.Assert(err, NotNil)
+}
+
+func (s *MockArvadosServerSuite) TestBadSbatchArgsConfig(c *C) {
+       var config Config
+
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       _, err = tmpfile.Write([]byte(`{"SbatchArguments": "oops this is not a string array"}`))
+       c.Check(err, IsNil)
+
+       err = readConfig(&config, tmpfile.Name())
+       c.Assert(err, NotNil)
+}
+
+func (s *MockArvadosServerSuite) TestNoSuchArgInConfigIgnored(c *C) {
+       var config Config
+
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       _, err = tmpfile.Write([]byte(`{"NoSuchArg": "Nobody loves me, not one tiny hunk."}`))
+       c.Check(err, IsNil)
+
+       err = readConfig(&config, tmpfile.Name())
+       c.Assert(err, IsNil)
+       c.Check(len(config.SbatchArguments), Equals, 0)
+}
+
+func (s *MockArvadosServerSuite) TestReadConfig(c *C) {
+       var config Config
+
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       args := []string{"--arg1=v1", "--arg2", "--arg3=v3"}
+       argsS := `{"SbatchArguments": ["--arg1=v1",  "--arg2", "--arg3=v3"]}`
+       _, err = tmpfile.Write([]byte(argsS))
+       c.Check(err, IsNil)
+
+       err = readConfig(&config, tmpfile.Name())
+       c.Assert(err, IsNil)
+       c.Check(len(config.SbatchArguments), Equals, 3)
+       c.Check(config.SbatchArguments, DeepEquals, args)
+}
+
+func (s *MockArvadosServerSuite) TestSbatchFuncWithNoConfigArgs(c *C) {
+       testSbatchFuncWithArgs(c, nil)
+}
+
+func (s *MockArvadosServerSuite) TestSbatchFuncWithEmptyConfigArgs(c *C) {
+       testSbatchFuncWithArgs(c, []string{})
+}
+
+func (s *MockArvadosServerSuite) TestSbatchFuncWithConfigArgs(c *C) {
+       testSbatchFuncWithArgs(c, []string{"--arg1=v1", "--arg2"})
+}
+
+func testSbatchFuncWithArgs(c *C, args []string) {
+       theConfig.SbatchArguments = append(theConfig.SbatchArguments, args...)
+
+       container := arvados.Container{UUID: "123", RuntimeConstraints: arvados.RuntimeConstraints{RAM: 1000000, VCPUs: 2}}
+       sbatchCmd := sbatchFunc(container)
+
+       var expected []string
+       expected = append(expected, "sbatch", "--share")
+       expected = append(expected, theConfig.SbatchArguments...)
+       expected = append(expected, "--job-name=123", "--mem-per-cpu=1", "--cpus-per-task=2")
+
+       c.Check(sbatchCmd.Args, DeepEquals, expected)
+}
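
The expected "--mem-per-cpu=1" in these tests comes from sbatchFunc's arithmetic: RAM in bytes divided across VCPUs, converted to MiB, rounded up. Worked out in a small sketch:

    package main

    import (
        "fmt"
        "math"
    )

    // memPerCPU reproduces sbatchFunc's calculation. With the fixture
    // above (RAM=1000000, VCPUs=2): 1000000 / (2 * 1048576) = 0.4768...,
    // rounded up to 1 MiB per CPU.
    func memPerCPU(ramBytes int64, vcpus int) int {
        return int(math.Ceil(float64(ramBytes) / (float64(vcpus) * 1048576)))
    }

    func main() {
        fmt.Printf("--mem-per-cpu=%d\n", memPerCPU(1000000, 2)) // --mem-per-cpu=1
    }
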
diff --git a/services/crunch-dispatch-slurm/script.go b/services/crunch-dispatch-slurm/script.go
new file mode 100644 (file)
index 0000000..93ae6b5
--- /dev/null
@@ -0,0 +1,15 @@
+package main
+
+import (
+       "strings"
+)
+
+func execScript(args []string) string {
+       s := "#!/bin/sh\nexec"
+       for _, w := range args {
+               s += ` '`
+               s += strings.Replace(w, `'`, `'\''`, -1)
+               s += `'`
+       }
+       return s + "\n"
+}
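
For reference, this is the script the dispatcher pipes to sbatch's stdin for a typical container, produced by a small driver around the same quoting rule (the container UUID shown is a test fixture name, used illustratively):

    package main

    import (
        "fmt"
        "strings"
    )

    // Same rule as execScript above: wrap each word in single quotes and
    // escape embedded single quotes as '\''.
    func execScript(args []string) string {
        s := "#!/bin/sh\nexec"
        for _, w := range args {
            s += ` '` + strings.Replace(w, `'`, `'\''`, -1) + `'`
        }
        return s + "\n"
    }

    func main() {
        fmt.Print(execScript([]string{"crunch-run", "--cgroup-parent-subsystem=memory", "zzzzz-dz642-queuedcontainer"}))
        // Output:
        // #!/bin/sh
        // exec 'crunch-run' '--cgroup-parent-subsystem=memory' 'zzzzz-dz642-queuedcontainer'
    }
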
diff --git a/services/crunch-dispatch-slurm/script_test.go b/services/crunch-dispatch-slurm/script_test.go
new file mode 100644 (file)
index 0000000..3cb407d
--- /dev/null
@@ -0,0 +1,24 @@
+package main
+
+import (
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&ScriptSuite{})
+
+type ScriptSuite struct{}
+
+func (s *ScriptSuite) TestExecScript(c *C) {
+       for _, test := range []struct {
+               args   []string
+               script string
+       }{
+               {nil, `exec`},
+               {[]string{`foo`}, `exec 'foo'`},
+               {[]string{`foo`, `bar baz`}, `exec 'foo' 'bar baz'`},
+               {[]string{`foo"`, "'waz 'qux\n"}, `exec 'foo"' ''\''waz '\''qux` + "\n" + `'`},
+       } {
+               c.Logf("%+v -> %+v", test.args, test.script)
+               c.Check(execScript(test.args), Equals, "#!/bin/sh\n"+test.script+"\n")
+       }
+}
diff --git a/services/crunch-dispatch-slurm/usage.go b/services/crunch-dispatch-slurm/usage.go
new file mode 100644 (file)
index 0000000..e8a1f18
--- /dev/null
@@ -0,0 +1,33 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+)
+
+var exampleConfigFile = []byte(`
+    {
+       "Client": {
+           "APIHost": "zzzzz.arvadosapi.com",
+           "AuthToken": "xyzzy",
+           "Insecure": false
+       },
+       "CrunchRunCommand": ["crunch-run"],
+       "PollPeriod": "10s",
+       "SbatchArguments": ["--partition=foo", "--exclude=node13"]
+    }`)
+
+func usage(fs *flag.FlagSet) {
+       fmt.Fprintf(os.Stderr, `
+crunch-dispatch-slurm runs queued Arvados containers by submitting
+SLURM batch jobs.
+
+Options:
+`)
+       fs.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+%s
+`, exampleConfigFile)
+}
diff --git a/services/crunch-run/cgroup.go b/services/crunch-run/cgroup.go
new file mode 100644 (file)
index 0000000..78123e4
--- /dev/null
@@ -0,0 +1,29 @@
+package main
+
+import (
+       "bytes"
+       "io/ioutil"
+       "log"
+)
+
+// Return the current process's cgroup for the given subsystem.
+func findCgroup(subsystem string) string {
+       subsys := []byte(subsystem)
+       cgroups, err := ioutil.ReadFile("/proc/self/cgroup")
+       if err != nil {
+               log.Fatal(err)
+       }
+       for _, line := range bytes.Split(cgroups, []byte("\n")) {
+               toks := bytes.SplitN(line, []byte(":"), 4)
+               if len(toks) < 3 {
+                       continue
+               }
+               for _, s := range bytes.Split(toks[1], []byte(",")) {
+                       if bytes.Equal(s, subsys) {
+                               return string(toks[2])
+                       }
+               }
+       }
+       log.Fatalf("subsystem %q not found in /proc/self/cgroup", subsystem)
+       return ""
+}
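
findCgroup above scans /proc/self/cgroup, whose lines have the form "id:controllers:path". A self-contained demo of the same parsing against a hypothetical excerpt of that file:

    package main

    import (
        "bytes"
        "fmt"
    )

    // A hypothetical /proc/self/cgroup excerpt: the memory controller's
    // path is what a container runner would hand to crunchstat.
    var sample = []byte("3:cpu,cpuacct:/slurm/uid_1000/job_42\n2:memory:/docker/abc123\n")

    func main() {
        for _, line := range bytes.Split(sample, []byte("\n")) {
            toks := bytes.SplitN(line, []byte(":"), 4)
            if len(toks) < 3 {
                continue
            }
            // toks[1] is a comma-separated controller list, toks[2] the path.
            for _, s := range bytes.Split(toks[1], []byte(",")) {
                if bytes.Equal(s, []byte("memory")) {
                    fmt.Println(string(toks[2])) // /docker/abc123
                }
            }
        }
    }
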
diff --git a/services/crunch-run/cgroup_test.go b/services/crunch-run/cgroup_test.go
new file mode 100644 (file)
index 0000000..bb18836
--- /dev/null
@@ -0,0 +1,17 @@
+package main
+
+import (
+       . "gopkg.in/check.v1"
+)
+
+type CgroupSuite struct{}
+
+var _ = Suite(&CgroupSuite{})
+
+func (s *CgroupSuite) TestFindCgroup(c *C) {
+       for _, s := range []string{"devices", "cpu", "cpuset"} {
+               g := findCgroup(s)
+               c.Check(g, Not(Equals), "")
+               c.Logf("cgroup(%q) == %q", s, g)
+       }
+}
index 7da1beb20a4d5e4986eec2f8643d1ae99edeea2f..d804c01cad16700bc2b9ca52e3ff561499328095 100644 (file)
@@ -5,6 +5,7 @@ import (
        "errors"
        "flag"
        "fmt"
+       "git.curoverse.com/arvados.git/lib/crunchstat"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
@@ -17,6 +18,7 @@ import (
        "os/exec"
        "os/signal"
        "path"
+       "path/filepath"
        "strings"
        "sync"
        "syscall"
@@ -27,8 +29,9 @@ import (
 type IArvadosClient interface {
        Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
        Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
-       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error)
-       Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) (err error)
+       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
+       Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
+       Discovery(key string) (interface{}, error)
 }
 
 // ErrCancelled is the error returned when the container is cancelled.
@@ -91,6 +94,25 @@ type ContainerRunner struct {
        SigChan        chan os.Signal
        ArvMountExit   chan error
        finalState     string
+       trashLifetime  time.Duration
+
+       statLogger   io.WriteCloser
+       statReporter *crunchstat.Reporter
+       statInterval time.Duration
+       cgroupRoot   string
+       // What we expect the container's cgroup parent to be.
+       expectCgroupParent string
+       // What we tell docker to use as the container's cgroup
+       // parent. Note: Ideally we would use the same field for both
+       // expectCgroupParent and setCgroupParent, and just make it
+       // default to "docker". However, when using docker < 1.10 with
+       // systemd, specifying a non-empty cgroup parent (even the
+       // default value "docker") hits a docker bug
+       // (https://github.com/docker/docker/issues/17126). Using two
+       // separate fields makes it possible to use the "expect cgroup
+       // parent to be X" feature even on sites where the "specify
+       // cgroup parent" feature breaks.
+       setCgroupParent string
 }
 
 // SetupSignals sets up signal handling to gracefully terminate the underlying
@@ -102,7 +124,7 @@ func (runner *ContainerRunner) SetupSignals() {
        signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
        go func(sig <-chan os.Signal) {
-               for _ = range sig {
+               for range sig {
                        if !runner.Cancelled {
                                runner.CancelLock.Lock()
                                runner.Cancelled = true
@@ -248,7 +270,8 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        }
                }
 
-               if mnt.Kind == "collection" {
+               switch {
+               case mnt.Kind == "collection":
                        var src string
                        if mnt.UUID != "" && mnt.PortableDataHash != "" {
                                return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
@@ -279,25 +302,47 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
                        }
                        collectionPaths = append(collectionPaths, src)
-               } else if mnt.Kind == "tmp" {
-                       if bind == runner.Container.OutputPath {
-                               runner.HostOutputDir, err = runner.MkTempDir("", "")
-                               if err != nil {
-                                       return fmt.Errorf("While creating mount temp dir: %v", err)
-                               }
-                               st, staterr := os.Stat(runner.HostOutputDir)
-                               if staterr != nil {
-                                       return fmt.Errorf("While Stat on temp dir: %v", staterr)
-                               }
-                               err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
-                               if staterr != nil {
-                                       return fmt.Errorf("While Chmod temp dir: %v", err)
-                               }
-                               runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
-                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
-                       } else {
-                               runner.Binds = append(runner.Binds, bind)
+
+               case mnt.Kind == "tmp" && bind == runner.Container.OutputPath:
+                       runner.HostOutputDir, err = runner.MkTempDir("", "")
+                       if err != nil {
+                               return fmt.Errorf("While creating mount temp dir: %v", err)
+                       }
+                       st, staterr := os.Stat(runner.HostOutputDir)
+                       if staterr != nil {
+                               return fmt.Errorf("While Stat on temp dir: %v", staterr)
+                       }
+                       err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
+                       if err != nil {
+                               return fmt.Errorf("While Chmod temp dir: %v", err)
                        }
+                       runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
+                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
+
+               case mnt.Kind == "tmp":
+                       runner.Binds = append(runner.Binds, bind)
+
+               case mnt.Kind == "json":
+                       jsondata, err := json.Marshal(mnt.Content)
+                       if err != nil {
+                               return fmt.Errorf("encoding json data: %v", err)
+                       }
+                       // Create a tempdir with a single file
+                       // (instead of just a tempfile): this way we
+                       // can ensure the file is world-readable
+                       // inside the container, without having to
+                       // make it world-readable on the docker host.
+                       tmpdir, err := runner.MkTempDir("", "")
+                       if err != nil {
+                               return fmt.Errorf("creating temp dir: %v", err)
+                       }
+                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
+                       tmpfn := filepath.Join(tmpdir, "mountdata.json")
+                       err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+                       if err != nil {
+                               return fmt.Errorf("writing temp file: %v", err)
+                       }
+                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
                }
        }
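
Editor's note: the new "json" mount case above is self-contained enough to sketch on its own. A minimal standalone version, assuming only the standard library (stageJSONMount is an illustrative name, not part of crunch-run):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"io/ioutil"
    	"log"
    	"path/filepath"
    )

    // stageJSONMount mirrors the "json" case: marshal the content, write
    // it as the only file in a fresh temp dir, and return the read-only
    // bind argument. A 0644 file in a private temp dir is readable
    // inside the container without being world-readable in a shared
    // location on the docker host.
    func stageJSONMount(content interface{}, bind string) (string, error) {
    	data, err := json.Marshal(content)
    	if err != nil {
    		return "", fmt.Errorf("encoding json data: %v", err)
    	}
    	tmpdir, err := ioutil.TempDir("", "")
    	if err != nil {
    		return "", fmt.Errorf("creating temp dir: %v", err)
    	}
    	tmpfn := filepath.Join(tmpdir, "mountdata.json")
    	if err := ioutil.WriteFile(tmpfn, data, 0644); err != nil {
    		return "", fmt.Errorf("writing temp file: %v", err)
    	}
    	return fmt.Sprintf("%s:%s:ro", tmpfn, bind), nil
    }

    func main() {
    	bind, err := stageJSONMount(map[string]int{"foo": 123}, "/mnt/test.json")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(bind) // .../mountdata.json:/mnt/test.json:ro
    }
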
 
@@ -366,6 +411,14 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
                                runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
                        }
 
+                       if runner.statReporter != nil {
+                               runner.statReporter.Stop()
+                               closeerr = runner.statLogger.Close()
+                               if closeerr != nil {
+                                       runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
+                               }
+                       }
+
                        runner.loggingDone <- true
                        close(runner.loggingDone)
                        return
@@ -373,6 +426,18 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
        }
 }
 
+func (runner *ContainerRunner) StartCrunchstat() {
+       runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
+       runner.statReporter = &crunchstat.Reporter{
+               CID:          runner.ContainerID,
+               Logger:       log.New(runner.statLogger, "", 0),
+               CgroupParent: runner.expectCgroupParent,
+               CgroupRoot:   runner.cgroupRoot,
+               PollPeriod:   runner.statInterval,
+       }
+       runner.statReporter.Start()
+}
+
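
Editor's note: a minimal sketch of the Reporter lifecycle StartCrunchstat drives, assuming the crunchstat package added in this change and an ordinary stderr logger in place of the Keep-backed throttled logger:

    package main

    import (
    	"log"
    	"os"
    	"time"

    	"git.curoverse.com/arvados.git/lib/crunchstat"
    )

    func main() {
    	reporter := crunchstat.Reporter{
    		CID:          "abcde", // docker container ID to watch
    		CgroupRoot:   "/sys/fs/cgroup",
    		CgroupParent: "docker",
    		PollPeriod:   10 * time.Second,
    		Logger:       log.New(os.Stderr, "crunchstat: ", 0),
    	}
    	reporter.Start() // begins sampling cgroup stats in the background
    	// ... run and wait for the container here ...
    	reporter.Stop() // stops polling; logs "stats files never appeared" if none were found
    }
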
 // AttachStreams connects the docker container stdout and stderr streams to the
 // Arvados logger which logs to Keep and the API server logs table.
 func (runner *ContainerRunner) AttachStreams() (err error) {
@@ -453,8 +518,13 @@ func (runner *ContainerRunner) CreateContainer() error {
                return fmt.Errorf("While creating container: %v", err)
        }
 
-       runner.HostConfig = dockerclient.HostConfig{Binds: runner.Binds,
-               LogConfig: dockerclient.LogConfig{Type: "none"}}
+       runner.HostConfig = dockerclient.HostConfig{
+               Binds:        runner.Binds,
+               CgroupParent: runner.setCgroupParent,
+               LogConfig: dockerclient.LogConfig{
+                       Type: "none",
+               },
+       }
 
        return runner.AttachStreams()
 }
@@ -533,18 +603,25 @@ func (runner *ContainerRunner) CaptureOutput() error {
        err = runner.ArvClient.Create("collections",
                arvadosclient.Dict{
                        "collection": arvadosclient.Dict{
+                               "expires_at":    time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
+                               "name":          "output for " + runner.Container.UUID,
                                "manifest_text": manifestText}},
                &response)
        if err != nil {
                return fmt.Errorf("While creating output collection: %v", err)
        }
-
-       runner.OutputPDH = new(string)
-       *runner.OutputPDH = response.PortableDataHash
-
+       runner.OutputPDH = &response.PortableDataHash
        return nil
 }
 
+func (runner *ContainerRunner) loadDiscoveryVars() {
+       tl, err := runner.ArvClient.Discovery("defaultTrashLifetime")
+       if err != nil {
+               log.Fatalf("getting defaultTrashLifetime from discovery document: %s", err)
+       }
+       runner.trashLifetime = time.Duration(tl.(float64)) * time.Second
+}
+
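
Editor's note: the discovery value arrives as a float64 number of seconds; a quick sketch of how it becomes the expires_at sent with the output and log collections above (1209600 is the value the tests below stub in, i.e. 14 days):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	tl := float64(1209600) // defaultTrashLifetime from the discovery document
    	trashLifetime := time.Duration(tl) * time.Second
    	fmt.Println(time.Now().Add(trashLifetime).Format(time.RFC3339))
    }
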
 func (runner *ContainerRunner) CleanupDirs() {
        if runner.ArvMount != nil {
                umount := exec.Command("fusermount", "-z", "-u", runner.ArvMountPoint)
@@ -597,15 +674,14 @@ func (runner *ContainerRunner) CommitLogs() error {
        err = runner.ArvClient.Create("collections",
                arvadosclient.Dict{
                        "collection": arvadosclient.Dict{
+                               "expires_at":    time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
                                "name":          "logs for " + runner.Container.UUID,
                                "manifest_text": mt}},
                &response)
        if err != nil {
                return fmt.Errorf("While creating log collection: %v", err)
        }
-
        runner.LogsPDH = &response.PortableDataHash
-
        return nil
 }
 
@@ -752,6 +828,8 @@ func (runner *ContainerRunner) Run() (err error) {
                return
        }
 
+       runner.StartCrunchstat()
+
        if runner.IsCancelled() {
                return
        }
@@ -788,10 +866,15 @@ func NewContainerRunner(api IArvadosClient,
        cr.Container.UUID = containerUUID
        cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
        cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
+       cr.loadDiscoveryVars()
        return cr
 }
 
 func main() {
+       statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
+       cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
+       cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
+       cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
        flag.Parse()
 
        containerId := flag.Arg(0)
@@ -803,7 +886,7 @@ func main() {
        api.Retries = 8
 
        var kc *keepclient.KeepClient
-       kc, err = keepclient.MakeKeepClient(&api)
+       kc, err = keepclient.MakeKeepClient(api)
        if err != nil {
                log.Fatalf("%s: %v", containerId, err)
        }
@@ -816,6 +899,14 @@ func main() {
        }
 
        cr := NewContainerRunner(api, kc, docker, containerId)
+       cr.statInterval = *statInterval
+       cr.cgroupRoot = *cgroupRoot
+       cr.expectCgroupParent = *cgroupParent
+       if *cgroupParentSubsystem != "" {
+               p := findCgroup(*cgroupParentSubsystem)
+               cr.setCgroupParent = p
+               cr.expectCgroupParent = p
+       }
 
        err = cr.Run()
        if err != nil {
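
Editor's note: putting the new flags together, a hypothetical crunch-run invocation (the UUID is the placeholder style used in the tests below; the flag values shown are the defaults defined above):

    crunch-run -crunchstat-interval=10s -cgroup-root=/sys/fs/cgroup \
        -cgroup-parent=docker zzzzz-zzzzz-zzzzzzzzzzzzzzz
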
index 9880230ce8785a3d61d4c836b52f1091b65274dc..0ce658721eaf63675ff0e132eb6d59ffb3f00587 100644 (file)
@@ -14,9 +14,9 @@ import (
        . "gopkg.in/check.v1"
        "io"
        "io/ioutil"
-       "log"
        "os"
        "os/exec"
+       "path/filepath"
        "sort"
        "strings"
        "sync"
@@ -139,7 +139,7 @@ func (client *ArvTestClient) Create(resourceType string,
        client.Mutex.Lock()
        defer client.Mutex.Unlock()
 
-       client.Calls += 1
+       client.Calls++
        client.Content = append(client.Content, parameters)
 
        if resourceType == "logs" {
@@ -192,7 +192,7 @@ func (client *ArvTestClient) Get(resourceType string, uuid string, parameters ar
 func (client *ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
        client.Mutex.Lock()
        defer client.Mutex.Unlock()
-       client.Calls += 1
+       client.Calls++
        client.Content = append(client.Content, parameters)
        if resourceType == "containers" {
                if parameters["container"].(arvadosclient.Dict)["state"] == "Running" {
@@ -202,11 +202,17 @@ func (client *ArvTestClient) Update(resourceType string, uuid string, parameters
        return nil
 }
 
+var discoveryMap = map[string]interface{}{"defaultTrashLifetime": float64(1209600)}
+
+func (client *ArvTestClient) Discovery(key string) (interface{}, error) {
+       return discoveryMap[key], nil
+}
+
 // CalledWith returns the parameters from the first API call whose
 // parameters match jpath/value. E.g., CalledWith("foo.bar",
 // "baz") returns parameters with parameters["foo"]["bar"]=="baz". If
 // no call matches, it returns nil.
-func (client *ArvTestClient) CalledWith(jpath, expect string) arvadosclient.Dict {
+func (client *ArvTestClient) CalledWith(jpath string, expect interface{}) arvadosclient.Dict {
 call:
        for _, content := range client.Content {
                var v interface{} = content
@@ -217,7 +223,7 @@ call:
                                v = dict[k]
                        }
                }
-               if v, ok := v.(string); ok && v == expect {
+               if v == expect {
                        return content
                }
        }
@@ -307,6 +313,10 @@ func (ArvErrorTestClient) Update(resourceType string, uuid string, parameters ar
        return nil
 }
 
+func (ArvErrorTestClient) Discovery(key string) (interface{}, error) {
+       return discoveryMap[key], nil
+}
+
 type KeepErrorTestClient struct{}
 
 func (KeepErrorTestClient) PutHB(hash string, buf []byte) (string, int, error) {
@@ -518,6 +528,7 @@ func FullRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvT
 
        api = &ArvTestClient{Container: rec}
        cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr.statInterval = 100 * time.Millisecond
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
 
@@ -553,14 +564,45 @@ func (s *TestSuite) TestFullRunHello(c *C) {
                t.finish <- dockerclient.WaitResult{}
        })
 
-       c.Check(api.Calls, Equals, 7)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
-
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello world\n"), Equals, true)
 
 }
 
+func (s *TestSuite) TestCrunchstat(c *C) {
+       api, _ := FullRunHelper(c, `{
+               "command": ["sleep", "1"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {}
+       }`, func(t *TestDockerClient) {
+               time.Sleep(time.Second)
+               t.logWriter.Close()
+               t.finish <- dockerclient.WaitResult{}
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+
+       // We didn't actually start a container, so crunchstat didn't
+       // find accounting files and therefore didn't log any stats.
+       // It should have logged a "stats files have not appeared"
+       // message after one poll interval, though, so we can confirm
+       // it's alive:
+       c.Assert(api.Logs["crunchstat"], NotNil)
+       c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files have not appeared after 100ms.*`)
+
+       // The "files never appeared" log assures us that we called
+       // (*crunchstat.Reporter)Stop(), and that we set it up with
+       // the correct container ID "abcde":
+       c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files never appeared for abcde\n`)
+}
+
 func (s *TestSuite) TestFullRunStderr(c *C) {
        api, _ := FullRunHelper(c, `{
     "command": ["/bin/sh", "-c", "echo hello ; echo world 1>&2 ; exit 1"],
@@ -578,10 +620,10 @@ func (s *TestSuite) TestFullRunStderr(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 1}
        })
 
-       c.Assert(api.Calls, Equals, 8)
-       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["log"], NotNil)
-       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
-       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       final := api.CalledWith("container.state", "Complete")
+       c.Assert(final, NotNil)
+       c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
+       c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil)
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello\n"), Equals, true)
        c.Check(strings.HasSuffix(api.Logs["stderr"].String(), "world\n"), Equals, true)
@@ -603,12 +645,9 @@ func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Calls, Equals, 7)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
-
-       log.Print(api.Logs["stdout"].String())
-
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Log(api.Logs["stdout"])
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/\n"), Equals, true)
 }
 
@@ -628,10 +667,8 @@ func (s *TestSuite) TestFullRunSetCwd(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Calls, Equals, 7)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
-
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/bin\n"), Equals, true)
 }
 
@@ -682,9 +719,8 @@ func (s *TestSuite) TestCancel(c *C) {
                }
        }
 
-       c.Assert(api.Calls, Equals, 6)
-       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["log"], IsNil)
-       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
+       c.Check(api.CalledWith("container.log", nil), NotNil)
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "foo\n"), Equals, true)
 
 }
@@ -705,10 +741,8 @@ func (s *TestSuite) TestFullRunSetEnv(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Calls, Equals, 7)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
-
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "bilbo\n"), Equals, true)
 }
 
@@ -730,73 +764,117 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
 
+       realTemp, err := ioutil.TempDir("", "crunchrun_test-")
+       c.Assert(err, IsNil)
+       defer os.RemoveAll(realTemp)
+
        i := 0
-       cr.MkTempDir = func(string, string) (string, error) {
-               i += 1
-               d := fmt.Sprintf("/tmp/mktmpdir%d", i)
-               os.Mkdir(d, os.ModePerm)
-               return d, nil
+       cr.MkTempDir = func(_ string, prefix string) (string, error) {
+               i++
+               d := fmt.Sprintf("%s/%s%d", realTemp, prefix, i)
+               err := os.Mkdir(d, os.ModePerm)
+               if err != nil && strings.Contains(err.Error(), ": file exists") {
+                       // Test case must have pre-populated the tempdir
+                       err = nil
+               }
+               return d, err
+       }
+
+       checkEmpty := func() {
+               filepath.Walk(realTemp, func(path string, _ os.FileInfo, err error) error {
+                       c.Check(path, Equals, realTemp)
+                       c.Check(err, IsNil)
+                       return nil
+               })
        }
 
        {
+               i = 0
                cr.Container.Mounts = make(map[string]arvados.Mount)
                cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
                cr.OutputPath = "/tmp"
 
                err := cr.SetupMounts()
                c.Check(err, IsNil)
-               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-by-pdh", "by_id", "/tmp/mktmpdir1"})
-               c.Check(cr.Binds, DeepEquals, []string{"/tmp/mktmpdir2:/tmp"})
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2:/tmp"})
                cr.CleanupDirs()
+               checkEmpty()
        }
 
        {
                i = 0
-               cr.Container.Mounts = make(map[string]arvados.Mount)
-               cr.Container.Mounts["/keeptmp"] = arvados.Mount{Kind: "collection", Writable: true}
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/keeptmp": {Kind: "collection", Writable: true},
+               }
                cr.OutputPath = "/keeptmp"
 
-               os.MkdirAll("/tmp/mktmpdir1/tmp0", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
 
                err := cr.SetupMounts()
                c.Check(err, IsNil)
-               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "/tmp/mktmpdir1"})
-               c.Check(cr.Binds, DeepEquals, []string{"/tmp/mktmpdir1/tmp0:/keeptmp"})
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/tmp0:/keeptmp"})
                cr.CleanupDirs()
+               checkEmpty()
        }
 
        {
                i = 0
-               cr.Container.Mounts = make(map[string]arvados.Mount)
-               cr.Container.Mounts["/keepinp"] = arvados.Mount{Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"}
-               cr.Container.Mounts["/keepout"] = arvados.Mount{Kind: "collection", Writable: true}
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/keepinp": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"},
+                       "/keepout": {Kind: "collection", Writable: true},
+               }
                cr.OutputPath = "/keepout"
 
-               os.MkdirAll("/tmp/mktmpdir1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
-               os.MkdirAll("/tmp/mktmpdir1/tmp0", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
 
                err := cr.SetupMounts()
                c.Check(err, IsNil)
-               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "/tmp/mktmpdir1"})
-               var ss sort.StringSlice = cr.Binds
-               ss.Sort()
-               c.Check(cr.Binds, DeepEquals, []string{"/tmp/mktmpdir1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro",
-                       "/tmp/mktmpdir1/tmp0:/keepout"})
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other", "--read-write", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               sort.StringSlice(cr.Binds).Sort()
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro",
+                       realTemp + "/keep1/tmp0:/keepout"})
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       for _, test := range []struct {
+               in  interface{}
+               out string
+       }{
+               {in: "foo", out: `"foo"`},
+               {in: nil, out: `null`},
+               {in: map[string]int{"foo": 123}, out: `{"foo":123}`},
+       } {
+               i = 0
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/mnt/test.json": {Kind: "json", Content: test.in},
+               }
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               sort.StringSlice(cr.Binds).Sort()
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2/mountdata.json:/mnt/test.json:ro"})
+               content, err := ioutil.ReadFile(realTemp + "/2/mountdata.json")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte(test.out))
                cr.CleanupDirs()
+               checkEmpty()
        }
 }
 
 func (s *TestSuite) TestStdout(c *C) {
-       helperRecord := `{`
-       helperRecord += `"command": ["/bin/sh", "-c", "echo $FROBIZ"],`
-       helperRecord += `"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",`
-       helperRecord += `"cwd": "/bin",`
-       helperRecord += `"environment": {"FROBIZ": "bilbo"},`
-       helperRecord += `"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} },`
-       helperRecord += `"output_path": "/tmp",`
-       helperRecord += `"priority": 1,`
-       helperRecord += `"runtime_constraints": {}`
-       helperRecord += `}`
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {}
+       }`
 
        api, _ := FullRunHelper(c, helperRecord, func(t *TestDockerClient) {
                t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
@@ -804,10 +882,9 @@ func (s *TestSuite) TestStdout(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Assert(api.Calls, Equals, 6)
-       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
-       c.Check(api.CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), Not(IsNil))
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(api.CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), NotNil)
 }
 
 // Used by the TestStdoutWithWrongPath*()
index 20928dbef769b0d4dd419ec0f8693541c93ba369..4f8f95c9a6df5725dfc3bb3f4bb6a5eb9962250d 100644 (file)
@@ -18,7 +18,7 @@ type Timestamper func(t time.Time) string
 // Logging plumbing:
 //
 // ThrottledLogger.Logger -> ThrottledLogger.Write ->
-// ThrottledLogger.buf -> ThrottledLogger.flusher -> goWriter ->
+// ThrottledLogger.buf -> ThrottledLogger.flusher ->
 // ArvLogWriter.Write -> CollectionFileWriter.Write | Api.Create
 //
 // For stdout/stderr ReadWriteLines additionally runs as a goroutine to pull
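
Editor's note: a minimal usage sketch for this plumbing, assuming it sits in this package next to NewThrottledLogger (nopCloser exists only to give os.Stderr the io.WriteCloser shape the constructor expects):

    // Sketch only: nopCloser wraps os.Stderr with a no-op Close.
    type nopCloser struct{ *os.File }

    func (nopCloser) Close() error { return nil }

    func demoThrottledLogger() {
    	tl := NewThrottledLogger(nopCloser{os.Stderr})
    	tl.Printf("hello") // timestamped by Write, buffered
    	tl.Printf("world") // batched into the same once-per-second flush
    	tl.Close()         // flush anything still buffered, then close the writer
    }
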
@@ -38,73 +38,68 @@ type ThrottledLogger struct {
        Immediate *log.Logger
 }
 
-// RFC3339Fixed is a fixed-width version of RFC3339 with microsecond precision,
-// because the RFC3339Nano format isn't fixed width.
-const RFC3339Fixed = "2006-01-02T15:04:05.000000Z07:00"
+// RFC3339NanoFixed is a fixed-width version of time.RFC3339Nano.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
 
-// RFC3339Timestamp return a RFC3339 formatted timestamp using RFC3339Fixed
-func RFC3339Timestamp(now time.Time) string {
-       return now.Format(RFC3339Fixed)
+// RFC3339Timestamp formats t as RFC3339NanoFixed.
+func RFC3339Timestamp(t time.Time) string {
+       return t.Format(RFC3339NanoFixed)
 }
 
-// Write to the internal buffer.  Prepend a timestamp to each line of the input
-// data.
+// Write prepends a timestamp to each line of the input data and
+// appends to the internal buffer. Each line is also logged to
+// tl.Immediate, if tl.Immediate is not nil.
 func (tl *ThrottledLogger) Write(p []byte) (n int, err error) {
        tl.Mutex.Lock()
+       defer tl.Mutex.Unlock()
+
        if tl.buf == nil {
                tl.buf = &bytes.Buffer{}
        }
-       defer tl.Mutex.Unlock()
 
        now := tl.Timestamper(time.Now().UTC())
        sc := bufio.NewScanner(bytes.NewBuffer(p))
-       for sc.Scan() {
-               _, err = fmt.Fprintf(tl.buf, "%s %s\n", now, sc.Text())
+       for err == nil && sc.Scan() {
+               out := fmt.Sprintf("%s %s\n", now, sc.Bytes())
                if tl.Immediate != nil {
-                       tl.Immediate.Printf("%s %s\n", now, sc.Text())
+                       tl.Immediate.Print(out[:len(out)-1])
+               }
+               _, err = io.WriteString(tl.buf, out)
+       }
+       if err == nil {
+               err = sc.Err()
+               if err == nil {
+                       n = len(p)
                }
        }
-       return len(p), err
+       return
 }
 
 // Periodically check the current buffer; if not empty, write it
 // directly to the underlying writer.
 func (tl *ThrottledLogger) flusher() {
-       bufchan := make(chan *bytes.Buffer)
-       bufterm := make(chan bool)
+       ticker := time.NewTicker(time.Second)
+       defer ticker.Stop()
+       for range ticker.C {
+               // We use a separate "stopping" var here to ensure we flush
+               // tl.buf after tl.stop becomes true.
+               stopping := tl.stop
+
+               var ready *bytes.Buffer
 
-       // Use a separate goroutine for the actual write so that the writes are
-       // actually initiated closer every 1s instead of every
-       // 1s + (time to it takes to write).
-       go goWriter(tl.writer, bufchan, bufterm)
-       for {
-               if !tl.stop {
-                       time.Sleep(1 * time.Second)
-               }
                tl.Mutex.Lock()
-               if tl.buf != nil && tl.buf.Len() > 0 {
-                       oldbuf := tl.buf
-                       tl.buf = nil
-                       tl.Mutex.Unlock()
-                       bufchan <- oldbuf
-               } else if tl.stop {
-                       tl.Mutex.Unlock()
-                       break
-               } else {
-                       tl.Mutex.Unlock()
+               ready, tl.buf = tl.buf, nil
+               tl.Mutex.Unlock()
+
+               if ready != nil && ready.Len() > 0 {
+                       tl.writer.Write(ready.Bytes())
                }
-       }
-       close(bufchan)
-       <-bufterm
-       tl.flusherDone <- true
-}
 
-// Receive buffers from a channel and send to the underlying Writer
-func goWriter(writer io.Writer, c <-chan *bytes.Buffer, t chan<- bool) {
-       for b := range c {
-               writer.Write(b.Bytes())
+               if stopping {
+                       break
+               }
        }
-       t <- true
+       close(tl.flusherDone)
 }
 
 // Close the flusher goroutine and wait for it to complete, then close the
@@ -158,17 +153,18 @@ func ReadWriteLines(in io.Reader, writer io.Writer, done chan<- bool) {
 // (b) batches log messages and only calls the underlying Writer at most once
 // per second.
 func NewThrottledLogger(writer io.WriteCloser) *ThrottledLogger {
-       alw := &ThrottledLogger{}
-       alw.flusherDone = make(chan bool)
-       alw.writer = writer
-       alw.Logger = log.New(alw, "", 0)
-       alw.Timestamper = RFC3339Timestamp
-       go alw.flusher()
-       return alw
+       tl := &ThrottledLogger{}
+       tl.flusherDone = make(chan bool)
+       tl.writer = writer
+       tl.Logger = log.New(tl, "", 0)
+       tl.Timestamper = RFC3339Timestamp
+       go tl.flusher()
+       return tl
 }
 
-// ArvLogWriter implements a writer that writes to each of a WriteCloser
-// (typically CollectionFileWriter) and creates an API server log entry.
+// ArvLogWriter is an io.WriteCloser that processes each write by
+// writing it through to another io.WriteCloser (typically a
+// CollectionFileWriter) and creating an Arvados log entry.
 type ArvLogWriter struct {
        ArvClient     IArvadosClient
        UUID          string
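
Editor's note: the same write-through-plus-side-effect shape, reduced to a standalone sketch; teeCloser is illustrative, with byte counting standing in for the Arvados log entry creation:

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // teeCloser forwards each Write to an inner io.WriteCloser and also
    // performs a side effect (here, counting bytes; in ArvLogWriter,
    // creating an Arvados log entry).
    type teeCloser struct {
    	inner io.WriteCloser
    	n     int
    }

    func (t *teeCloser) Write(p []byte) (int, error) {
    	t.n += len(p)
    	return t.inner.Write(p)
    }

    func (t *teeCloser) Close() error {
    	fmt.Fprintf(os.Stderr, "side effect: saw %d bytes\n", t.n)
    	return t.inner.Close()
    }

    func main() {
    	f, err := os.Create(os.DevNull)
    	if err != nil {
    		panic(err)
    	}
    	t := &teeCloser{inner: f}
    	io.WriteString(t, "hello\n")
    	t.Close()
    }
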
index bb3123a1025a810f0165219967161b6977d8f889..ceb8ca87b00ba25a0a9dc1ac2f2f6ca591cdd0b8 100644 (file)
@@ -16,7 +16,11 @@ type TestTimestamper struct {
 
 func (this *TestTimestamper) Timestamp(t time.Time) string {
        this.count += 1
-       return fmt.Sprintf("2015-12-29T15:51:45.%09dZ", this.count)
+       t, err := time.ParseInLocation(time.RFC3339Nano, fmt.Sprintf("2015-12-29T15:51:45.%09dZ", this.count), t.Location())
+       if err != nil {
+               panic(err)
+       }
+       return RFC3339Timestamp(t)
 }
 
 // Gocheck boilerplate
index 6bce3258d9857808f24e677ce5374f0f2de61a23..cae95fdd9d6cfd30110764e4ea7c87188c0ed6aa 100644 (file)
@@ -2,485 +2,122 @@ package main
 
 import (
        "bufio"
-       "bytes"
-       "errors"
        "flag"
-       "fmt"
        "io"
-       "io/ioutil"
        "log"
        "os"
        "os/exec"
        "os/signal"
-       "strconv"
-       "strings"
        "syscall"
        "time"
-)
 
-/*
-#include <unistd.h>
-#include <sys/types.h>
-#include <pwd.h>
-#include <stdlib.h>
-*/
-import "C"
+       "git.curoverse.com/arvados.git/lib/crunchstat"
+)
 
-// The above block of magic allows us to look up user_hz via _SC_CLK_TCK.
+const MaxLogLine = 1 << 14 // Child stderr lines >16KiB will be split
 
-type Cgroup struct {
-       root   string
-       parent string
-       cid    string
-}
+func main() {
+       reporter := crunchstat.Reporter{
+               Logger: log.New(os.Stderr, "crunchstat: ", 0),
+       }
 
-var childLog = log.New(os.Stderr, "", 0)
-var statLog = log.New(os.Stderr, "crunchstat: ", 0)
+       flag.StringVar(&reporter.CgroupRoot, "cgroup-root", "", "Root of cgroup tree")
+       flag.StringVar(&reporter.CgroupParent, "cgroup-parent", "", "Name of container parent under cgroup")
+       flag.StringVar(&reporter.CIDFile, "cgroup-cid", "", "Path to container id file")
+       pollMsec := flag.Int64("poll", 1000, "Reporting interval, in milliseconds")
 
-const (
-       MaxLogLine = 1 << 14 // Child stderr lines >16KiB will be split
-)
+       flag.Parse()
 
-func CopyPipeToChildLog(in io.ReadCloser, done chan<- bool) {
-       reader := bufio.NewReaderSize(in, MaxLogLine)
-       var prefix string
-       for {
-               line, isPrefix, err := reader.ReadLine()
-               if err == io.EOF {
-                       break
-               } else if err != nil {
-                       statLog.Fatal("error reading child stderr:", err)
-               }
-               var suffix string
-               if isPrefix {
-                       suffix = "[...]"
-               }
-               childLog.Print(prefix, string(line), suffix)
-               // Set up prefix for following line
-               if isPrefix {
-                       prefix = "[...]"
-               } else {
-                       prefix = ""
-               }
+       if reporter.CgroupRoot == "" {
+               reporter.Logger.Fatal("error: must provide -cgroup-root")
        }
-       done <- true
-       in.Close()
-}
+       reporter.PollPeriod = time.Duration(*pollMsec) * time.Millisecond
 
-func ReadAllOrWarn(in *os.File) ([]byte, error) {
-       content, err := ioutil.ReadAll(in)
-       if err != nil {
-               statLog.Printf("error reading %s: %s\n", in.Name(), err)
-       }
-       return content, err
-}
+       reporter.Start()
+       err := runCommand(flag.Args(), reporter.Logger)
+       reporter.Stop()
 
-var reportedStatFile = map[string]string{}
+       if err, ok := err.(*exec.ExitError); ok {
+               // The program has exited with an exit code != 0
 
-// Open the cgroup stats file in /sys/fs corresponding to the target
-// cgroup, and return an *os.File. If no stats file is available,
-// return nil.
-//
-// TODO: Instead of trying all options, choose a process in the
-// container, and read /proc/PID/cgroup to determine the appropriate
-// cgroup root for the given statgroup. (This will avoid falling back
-// to host-level stats during container setup and teardown.)
-func OpenStatFile(cgroup Cgroup, statgroup string, stat string) (*os.File, error) {
-       var paths []string
-       if cgroup.cid != "" {
-               // Collect container's stats
-               paths = []string{
-                       fmt.Sprintf("%s/%s/%s/%s/%s", cgroup.root, statgroup, cgroup.parent, cgroup.cid, stat),
-                       fmt.Sprintf("%s/%s/%s/%s", cgroup.root, cgroup.parent, cgroup.cid, stat),
-               }
-       } else {
-               // Collect this host's stats
-               paths = []string{
-                       fmt.Sprintf("%s/%s/%s", cgroup.root, statgroup, stat),
-                       fmt.Sprintf("%s/%s", cgroup.root, stat),
-               }
-       }
-       var path string
-       var file *os.File
-       var err error
-       for _, path = range paths {
-               file, err = os.Open(path)
-               if err == nil {
-                       break
+               // This works on both Unix and Windows. Although
+               // package syscall is generally platform dependent,
+               // WaitStatus is defined for both Unix and Windows and
+               // in both cases has an ExitStatus() method with the
+               // same signature.
+               if status, ok := err.Sys().(syscall.WaitStatus); ok {
+                       os.Exit(status.ExitStatus())
                } else {
-                       path = ""
+                       reporter.Logger.Fatalln("ExitError without WaitStatus:", err)
                }
+       } else if err != nil {
+               reporter.Logger.Fatalln("error in cmd.Wait:", err)
        }
-       if pathWas, ok := reportedStatFile[stat]; !ok || pathWas != path {
-               // Log whenever we start using a new/different cgroup
-               // stat file for a given statistic. This typically
-               // happens 1 to 3 times per statistic, depending on
-               // whether we happen to collect stats [a] before any
-               // processes have been created in the container and
-               // [b] after all contained processes have exited.
-               if path == "" {
-                       statLog.Printf("notice: stats not available: stat %s, statgroup %s, cid %s, parent %s, root %s\n", stat, statgroup, cgroup.cid, cgroup.parent, cgroup.root)
-               } else if ok {
-                       statLog.Printf("notice: stats moved from %s to %s\n", reportedStatFile[stat], path)
-               } else {
-                       statLog.Printf("notice: reading stats from %s\n", path)
-               }
-               reportedStatFile[stat] = path
-       }
-       return file, err
 }
 
-func GetContainerNetStats(cgroup Cgroup) (io.Reader, error) {
-       procsFile, err := OpenStatFile(cgroup, "cpuacct", "cgroup.procs")
-       if err != nil {
-               return nil, err
-       }
-       defer procsFile.Close()
-       reader := bufio.NewScanner(procsFile)
-       for reader.Scan() {
-               taskPid := reader.Text()
-               statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
-               stats, err := ioutil.ReadFile(statsFilename)
-               if err != nil {
-                       statLog.Printf("error reading %s: %s\n", statsFilename, err)
-                       continue
-               }
-               return strings.NewReader(string(stats)), nil
-       }
-       return nil, errors.New("Could not read stats for any proc in container")
-}
+func runCommand(argv []string, logger *log.Logger) error {
+       cmd := exec.Command(argv[0], argv[1:]...)
 
-type IoSample struct {
-       sampleTime time.Time
-       txBytes    int64
-       rxBytes    int64
-}
+       logger.Println("Running", argv)
 
-func DoBlkIoStats(cgroup Cgroup, lastSample map[string]IoSample) {
-       c, err := OpenStatFile(cgroup, "blkio", "blkio.io_service_bytes")
-       if err != nil {
-               return
-       }
-       defer c.Close()
-       b := bufio.NewScanner(c)
-       var sampleTime = time.Now()
-       newSamples := make(map[string]IoSample)
-       for b.Scan() {
-               var device, op string
-               var val int64
-               if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
-                       continue
-               }
-               var thisSample IoSample
-               var ok bool
-               if thisSample, ok = newSamples[device]; !ok {
-                       thisSample = IoSample{sampleTime, -1, -1}
-               }
-               switch op {
-               case "Read":
-                       thisSample.rxBytes = val
-               case "Write":
-                       thisSample.txBytes = val
-               }
-               newSamples[device] = thisSample
-       }
-       for dev, sample := range newSamples {
-               if sample.txBytes < 0 || sample.rxBytes < 0 {
-                       continue
-               }
-               delta := ""
-               if prev, ok := lastSample[dev]; ok {
-                       delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
-                               sample.sampleTime.Sub(prev.sampleTime).Seconds(),
-                               sample.txBytes-prev.txBytes,
-                               sample.rxBytes-prev.rxBytes)
-               }
-               statLog.Printf("blkio:%s %d write %d read%s\n", dev, sample.txBytes, sample.rxBytes, delta)
-               lastSample[dev] = sample
-       }
-}
-
-type MemSample struct {
-       sampleTime time.Time
-       memStat    map[string]int64
-}
+       // Child process will use our stdin and stdout pipes
+       // (we close our copies below)
+       cmd.Stdin = os.Stdin
+       cmd.Stdout = os.Stdout
 
-func DoMemoryStats(cgroup Cgroup) {
-       c, err := OpenStatFile(cgroup, "memory", "memory.stat")
-       if err != nil {
-               return
-       }
-       defer c.Close()
-       b := bufio.NewScanner(c)
-       thisSample := MemSample{time.Now(), make(map[string]int64)}
-       wantStats := [...]string{"cache", "swap", "pgmajfault", "rss"}
-       for b.Scan() {
-               var stat string
-               var val int64
-               if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err != nil {
-                       continue
-               }
-               thisSample.memStat[stat] = val
-       }
-       var outstat bytes.Buffer
-       for _, key := range wantStats {
-               if val, ok := thisSample.memStat[key]; ok {
-                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+       // Forward SIGINT and SIGTERM to child process
+       sigChan := make(chan os.Signal, 1)
+       go func(sig <-chan os.Signal) {
+               catch := <-sig
+               if cmd.Process != nil {
+                       cmd.Process.Signal(catch)
                }
-       }
-       statLog.Printf("mem%s\n", outstat.String())
-}
+               logger.Println("notice: caught signal:", catch)
+       }(sigChan)
+       signal.Notify(sigChan, syscall.SIGTERM)
+       signal.Notify(sigChan, syscall.SIGINT)
 
-func DoNetworkStats(cgroup Cgroup, lastSample map[string]IoSample) {
-       sampleTime := time.Now()
-       stats, err := GetContainerNetStats(cgroup)
+       // Funnel stderr through our logger
+       stderrPipe, err := cmd.StderrPipe()
        if err != nil {
-               return
+               logger.Fatalln("error in StderrPipe:", err)
        }
 
-       scanner := bufio.NewScanner(stats)
-       for scanner.Scan() {
-               var ifName string
-               var rx, tx int64
-               words := strings.Fields(scanner.Text())
-               if len(words) != 17 {
-                       // Skip lines with wrong format
-                       continue
-               }
-               ifName = strings.TrimRight(words[0], ":")
-               if ifName == "lo" || ifName == "" {
-                       // Skip loopback interface and lines with wrong format
-                       continue
-               }
-               if tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {
-                       continue
-               }
-               if rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {
-                       continue
-               }
-               nextSample := IoSample{}
-               nextSample.sampleTime = sampleTime
-               nextSample.txBytes = tx
-               nextSample.rxBytes = rx
-               var delta string
-               if prev, ok := lastSample[ifName]; ok {
-                       interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
-                       delta = fmt.Sprintf(" -- interval %.4f seconds %d tx %d rx",
-                               interval,
-                               tx-prev.txBytes,
-                               rx-prev.rxBytes)
-               }
-               statLog.Printf("net:%s %d tx %d rx%s\n", ifName, tx, rx, delta)
-               lastSample[ifName] = nextSample
+       // Run subprocess
+       if err := cmd.Start(); err != nil {
+               logger.Fatalln("error in cmd.Start:", err)
        }
-}
 
-type CpuSample struct {
-       hasData    bool // to distinguish the zero value from real data
-       sampleTime time.Time
-       user       float64
-       sys        float64
-       cpus       int64
-}
+       // Close stdin/stdout in this (parent) process
+       os.Stdin.Close()
+       os.Stdout.Close()
 
-// Return the number of CPUs available in the container. Return 0 if
-// we can't figure out the real number of CPUs.
-func GetCpuCount(cgroup Cgroup) int64 {
-       cpusetFile, err := OpenStatFile(cgroup, "cpuset", "cpuset.cpus")
-       if err != nil {
-               return 0
-       }
-       defer cpusetFile.Close()
-       b, err := ReadAllOrWarn(cpusetFile)
-       sp := strings.Split(string(b), ",")
-       cpus := int64(0)
-       for _, v := range sp {
-               var min, max int64
-               n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
-               if n == 2 {
-                       cpus += (max - min) + 1
-               } else {
-                       cpus += 1
-               }
-       }
-       return cpus
-}
+       copyPipeToChildLog(stderrPipe, log.New(os.Stderr, "", 0))
 
-func DoCpuStats(cgroup Cgroup, lastSample *CpuSample) {
-       statFile, err := OpenStatFile(cgroup, "cpuacct", "cpuacct.stat")
-       if err != nil {
-               return
-       }
-       defer statFile.Close()
-       b, err := ReadAllOrWarn(statFile)
-       if err != nil {
-               return
-       }
-
-       nextSample := CpuSample{true, time.Now(), 0, 0, GetCpuCount(cgroup)}
-       var userTicks, sysTicks int64
-       fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
-       user_hz := float64(C.sysconf(C._SC_CLK_TCK))
-       nextSample.user = float64(userTicks) / user_hz
-       nextSample.sys = float64(sysTicks) / user_hz
-
-       delta := ""
-       if lastSample.hasData {
-               delta = fmt.Sprintf(" -- interval %.4f seconds %.4f user %.4f sys",
-                       nextSample.sampleTime.Sub(lastSample.sampleTime).Seconds(),
-                       nextSample.user-lastSample.user,
-                       nextSample.sys-lastSample.sys)
-       }
-       statLog.Printf("cpu %.4f user %.4f sys %d cpus%s\n",
-               nextSample.user, nextSample.sys, nextSample.cpus, delta)
-       *lastSample = nextSample
+       return cmd.Wait()
 }
 
-func PollCgroupStats(cgroup Cgroup, poll int64, stop_poll_chan <-chan bool) {
-       var lastNetSample = map[string]IoSample{}
-       var lastDiskSample = map[string]IoSample{}
-       var lastCpuSample = CpuSample{}
-
-       poll_chan := make(chan bool, 1)
-       go func() {
-               // Send periodic poll events.
-               poll_chan <- true
-               for {
-                       time.Sleep(time.Duration(poll) * time.Millisecond)
-                       poll_chan <- true
-               }
-       }()
+func copyPipeToChildLog(in io.ReadCloser, logger *log.Logger) {
+       reader := bufio.NewReaderSize(in, MaxLogLine)
+       var prefix string
        for {
-               select {
-               case <-stop_poll_chan:
-                       return
-               case <-poll_chan:
-                       // Emit stats, then select again.
-               }
-               DoMemoryStats(cgroup)
-               DoCpuStats(cgroup, &lastCpuSample)
-               DoBlkIoStats(cgroup, lastDiskSample)
-               DoNetworkStats(cgroup, lastNetSample)
-       }
-}
-
-func run(logger *log.Logger) error {
-
-       var (
-               cgroup_root    string
-               cgroup_parent  string
-               cgroup_cidfile string
-               wait           int64
-               poll           int64
-       )
-
-       flag.StringVar(&cgroup_root, "cgroup-root", "", "Root of cgroup tree")
-       flag.StringVar(&cgroup_parent, "cgroup-parent", "", "Name of container parent under cgroup")
-       flag.StringVar(&cgroup_cidfile, "cgroup-cid", "", "Path to container id file")
-       flag.Int64Var(&wait, "wait", 5, "Maximum time (in seconds) to wait for cid file to show up")
-       flag.Int64Var(&poll, "poll", 1000, "Polling frequency, in milliseconds")
-
-       flag.Parse()
-
-       if cgroup_root == "" {
-               statLog.Fatal("error: must provide -cgroup-root")
-       }
-
-       finish_chan := make(chan bool)
-       defer close(finish_chan)
-
-       var cmd *exec.Cmd
-
-       if len(flag.Args()) > 0 {
-               // Set up subprocess
-               cmd = exec.Command(flag.Args()[0], flag.Args()[1:]...)
-
-               childLog.Println("Running", flag.Args())
-
-               // Child process will use our stdin and stdout pipes
-               // (we close our copies below)
-               cmd.Stdin = os.Stdin
-               cmd.Stdout = os.Stdout
-
-               // Forward SIGINT and SIGTERM to inner process
-               sigChan := make(chan os.Signal, 1)
-               go func(sig <-chan os.Signal) {
-                       catch := <-sig
-                       if cmd.Process != nil {
-                               cmd.Process.Signal(catch)
-                       }
-                       statLog.Println("notice: caught signal:", catch)
-               }(sigChan)
-               signal.Notify(sigChan, syscall.SIGTERM)
-               signal.Notify(sigChan, syscall.SIGINT)
-
-               // Funnel stderr through our channel
-               stderr_pipe, err := cmd.StderrPipe()
-               if err != nil {
-                       statLog.Fatalln("error in StderrPipe:", err)
-               }
-               go CopyPipeToChildLog(stderr_pipe, finish_chan)
-
-               // Run subprocess
-               if err := cmd.Start(); err != nil {
-                       statLog.Fatalln("error in cmd.Start:", err)
-               }
-
-               // Close stdin/stdout in this (parent) process
-               os.Stdin.Close()
-               os.Stdout.Close()
-       }
-
-       // Read the cid file
-       var container_id string
-       if cgroup_cidfile != "" {
-               // wait up to 'wait' seconds for the cid file to appear
-               ok := false
-               var i time.Duration
-               for i = 0; i < time.Duration(wait)*time.Second; i += (100 * time.Millisecond) {
-                       cid, err := ioutil.ReadFile(cgroup_cidfile)
-                       if err == nil && len(cid) > 0 {
-                               ok = true
-                               container_id = string(cid)
-                               break
-                       }
-                       time.Sleep(100 * time.Millisecond)
+               line, isPrefix, err := reader.ReadLine()
+               if err == io.EOF {
+                       break
+               } else if err != nil {
+                       logger.Fatal("error reading child stderr:", err)
                }
-               if !ok {
-                       statLog.Println("error reading cid file:", cgroup_cidfile)
+               var suffix string
+               if isPrefix {
+                       suffix = "[...]"
                }
-       }
-
-       stop_poll_chan := make(chan bool, 1)
-       cgroup := Cgroup{cgroup_root, cgroup_parent, container_id}
-       go PollCgroupStats(cgroup, poll, stop_poll_chan)
-
-       // When the child exits, tell the polling goroutine to stop.
-       defer func() { stop_poll_chan <- true }()
-
-       // Wait for CopyPipeToChan to consume child's stderr pipe
-       <-finish_chan
-
-       return cmd.Wait()
-}
-
-func main() {
-       logger := log.New(os.Stderr, "crunchstat: ", 0)
-       if err := run(logger); err != nil {
-               if exiterr, ok := err.(*exec.ExitError); ok {
-                       // The program has exited with an exit code != 0
-
-                       // This works on both Unix and
-                       // Windows. Although package syscall is
-                       // generally platform dependent, WaitStatus is
-                       // defined for both Unix and Windows and in
-                       // both cases has an ExitStatus() method with
-                       // the same signature.
-                       if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
-                               os.Exit(status.ExitStatus())
-                       }
+               logger.Print(prefix, string(line), suffix)
+               // Set up prefix for following line
+               if isPrefix {
+                       prefix = "[...]"
                } else {
-                       statLog.Fatalln("error in cmd.Wait:", err)
+                       prefix = ""
                }
        }
+       in.Close()
 }
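
Editor's note: the "[...]" prefix/suffix convention above is easiest to see on synthetic input; a self-contained sketch using the same MaxLogLine and read loop, reading from a string instead of the child's stderr:

    package main

    import (
    	"bufio"
    	"io"
    	"log"
    	"os"
    	"strings"
    )

    const MaxLogLine = 1 << 14 // same limit as above

    func main() {
    	// One line slightly longer than the buffer: expect two log
    	// records, the first ending in "[...]" and the second
    	// starting with "[...]".
    	long := strings.Repeat("x", MaxLogLine+10) + "\n"
    	reader := bufio.NewReaderSize(strings.NewReader(long), MaxLogLine)
    	logger := log.New(os.Stdout, "", 0)
    	prefix := ""
    	for {
    		line, isPrefix, err := reader.ReadLine()
    		if err == io.EOF {
    			break
    		} else if err != nil {
    			log.Fatal(err)
    		}
    		suffix := ""
    		if isPrefix {
    			suffix = "[...]"
    		}
    		logger.Print(prefix, string(line), suffix)
    		if isPrefix {
    			prefix = "[...]"
    		} else {
    			prefix = ""
    		}
    	}
    }
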
index 69f31afbc9589ce6cd6c9de2a731d5093e2c80cd..fe3b56d25876fd832d3596abe3db8e40852ebbf7 100644 (file)
@@ -6,56 +6,21 @@ import (
        "io"
        "log"
        "math/rand"
-       "os"
-       "regexp"
        "testing"
        "time"
 )
 
-func TestReadAllOrWarnFail(t *testing.T) {
-       rcv := captureLogs()
-       defer uncaptureLogs()
-       go func() {
-               // The special file /proc/self/mem can be opened for
-               // reading, but reading from byte 0 returns an error.
-               f, err := os.Open("/proc/self/mem")
-               if err != nil {
-                       t.Fatalf("Opening /proc/self/mem: %s", err)
-               }
-               if x, err := ReadAllOrWarn(f); err == nil {
-                       t.Fatalf("Expected error, got %v", x)
-               }
-       }()
-       if msg, err := rcv.ReadBytes('\n'); err != nil {
-               t.Fatal(err)
-       } else if matched, err := regexp.MatchString("^crunchstat: .*error.*", string(msg)); err != nil || !matched {
-               t.Fatalf("Expected error message about unreadable file, got \"%s\"", msg)
-       }
-}
-
-func TestReadAllOrWarnSuccess(t *testing.T) {
-       f, err := os.Open("./crunchstat_test.go")
-       if err != nil {
-               t.Fatalf("Opening ./crunchstat_test.go: %s", err)
-       }
-       data, err := ReadAllOrWarn(f)
-       if err != nil {
-               t.Fatalf("got error %s", err)
-       }
-       if matched, err := regexp.MatchString("^package main\n", string(data)); err != nil || !matched {
-               t.Fatalf("data failed regexp: %s", err)
-       }
-}
-
 // Test that CopyPipeToChildLog works even on lines longer than
 // bufio.MaxScanTokenSize.
 func TestCopyPipeToChildLogLongLines(t *testing.T) {
-       rcv := captureLogs()
-       defer uncaptureLogs()
+       logger, logBuf := bufLogger()
 
-       control := make(chan bool)
        pipeIn, pipeOut := io.Pipe()
-       go CopyPipeToChildLog(pipeIn, control)
+       copied := make(chan bool)
+       go func() {
+               copyPipeToChildLog(pipeIn, logger)
+               close(copied)
+       }()
 
        sentBytes := make([]byte, bufio.MaxScanTokenSize+MaxLogLine+(1<<22))
        go func() {
@@ -72,14 +37,14 @@ func TestCopyPipeToChildLogLongLines(t *testing.T) {
                pipeOut.Close()
        }()
 
-       if before, err := rcv.ReadBytes('\n'); err != nil || string(before) != "before\n" {
+       if before, err := logBuf.ReadBytes('\n'); err != nil || string(before) != "before\n" {
                t.Fatalf("\"before\n\" not received (got \"%s\", %s)", before, err)
        }
 
        var receivedBytes []byte
        done := false
        for !done {
-               line, err := rcv.ReadBytes('\n')
+               line, err := logBuf.ReadBytes('\n')
                if err != nil {
                        t.Fatal(err)
                }
@@ -89,7 +54,7 @@ func TestCopyPipeToChildLogLongLines(t *testing.T) {
                        }
                        line = line[5:]
                }
-               if len(line) >= 6 && string(line[len(line)-6:len(line)]) == "[...]\n" {
+               if len(line) >= 6 && string(line[len(line)-6:]) == "[...]\n" {
                        line = line[:len(line)-6]
                } else {
                        done = true
@@ -100,27 +65,20 @@ func TestCopyPipeToChildLogLongLines(t *testing.T) {
                t.Fatalf("sent %d bytes, got %d different bytes", len(sentBytes), len(receivedBytes))
        }
 
-       if after, err := rcv.ReadBytes('\n'); err != nil || string(after) != "after\n" {
+       if after, err := logBuf.ReadBytes('\n'); err != nil || string(after) != "after\n" {
                t.Fatalf("\"after\n\" not received (got \"%s\", %s)", after, err)
        }
 
        select {
        case <-time.After(time.Second):
                t.Fatal("Timeout")
-       case <-control:
+       case <-copied:
                // Done.
        }
 }
 
-func captureLogs() *bufio.Reader {
-       // Send childLog to our bufio reader instead of stderr
-       stderrIn, stderrOut := io.Pipe()
-       childLog = log.New(stderrOut, "", 0)
-       statLog = log.New(stderrOut, "crunchstat: ", 0)
-       return bufio.NewReader(stderrIn)
-}
-
-func uncaptureLogs() {
-       childLog = log.New(os.Stderr, "", 0)
-       statLog = log.New(os.Stderr, "crunchstat: ", 0)
+func bufLogger() (*log.Logger, *bufio.Reader) {
+       r, w := io.Pipe()
+       logger := log.New(w, "", 0)
+       return logger, bufio.NewReader(r)
 }
index 55b3f61c4e5ee32bcff3fab7082fda7334f08be4..05e7a5f2313d6b5a9138a90d7b1f9f7bfd2ad88c 100644 (file)
@@ -42,18 +42,18 @@ type ReadCollections struct {
 
 // GetCollectionsParams params
 type GetCollectionsParams struct {
-       Client    arvadosclient.ArvadosClient
+       Client    *arvadosclient.ArvadosClient
        Logger    *logger.Logger
        BatchSize int
 }
 
 // SdkCollectionInfo holds collection info from api
 type SdkCollectionInfo struct {
-       UUID                 string    `json:"uuid"`
-       OwnerUUID            string    `json:"owner_uuid"`
-       ReplicationDesired   int       `json:"replication_desired"`
-       ModifiedAt           time.Time `json:"modified_at"`
-       ManifestText         string    `json:"manifest_text"`
+       UUID               string    `json:"uuid"`
+       OwnerUUID          string    `json:"owner_uuid"`
+       ReplicationDesired int       `json:"replication_desired"`
+       ModifiedAt         time.Time `json:"modified_at"`
+       ManifestText       string    `json:"manifest_text"`
 }
 
 // SdkCollectionList lists collections from api
@@ -131,7 +131,7 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections, err e
        sdkParams := arvadosclient.Dict{
                "select":  fieldsWanted,
                "order":   []string{"modified_at ASC", "uuid ASC"},
-               "filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
+               "filters": [][]string{{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
                "offset":  0}
 
        if params.BatchSize > 0 {
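
Most of the remaining Go hunks in this commit look like mechanical cleanups in the style of `gofmt -s`: inside a composite literal, the element type can be elided, so `[]string{...}` values in a `[][]string` literal, `TestCollectionSpec{...}` entries in a slice literal, and `struct{}{}` values in a set-like map all shrink to bare `{...}`. A tiny sketch showing the two forms are equivalent:

    package main

    import "fmt"

    func main() {
        // Explicit element type (old style) ...
        verbose := [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}
        // ... and with the element type elided (as rewritten above).
        concise := [][]string{{"modified_at", ">=", "1900-01-01T00:00:00Z"}}
        fmt.Println(verbose[0][0] == concise[0][0]) // true: same value either way
    }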
index 47ab5fa4a8a6793f9712ebb4ba1ff5a9aea503aa..1bf6a89f5ab91694b2299b02c8a1e52beb2bebce 100644 (file)
@@ -64,7 +64,7 @@ func CompareSummarizedReadCollections(c *C,
 }
 
 func (s *MySuite) TestSummarizeSimple(checker *C) {
-       rc := MakeTestReadCollections([]TestCollectionSpec{TestCollectionSpec{
+       rc := MakeTestReadCollections([]TestCollectionSpec{{
                ReplicationLevel: 5,
                Blocks:           []int{1, 2},
        }})
@@ -79,7 +79,7 @@ func (s *MySuite) TestSummarizeSimple(checker *C) {
        expected := ExpectedSummary{
                OwnerToCollectionSize:     map[string]int{c.OwnerUUID: c.TotalSize},
                BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{blockDigest1: 5, blockDigest2: 5},
-               BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: []string{c.UUID}, blockDigest2: []string{c.UUID}},
+               BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: {c.UUID}, blockDigest2: {c.UUID}},
        }
 
        CompareSummarizedReadCollections(checker, rc, expected)
@@ -87,11 +87,11 @@ func (s *MySuite) TestSummarizeSimple(checker *C) {
 
 func (s *MySuite) TestSummarizeOverlapping(checker *C) {
        rc := MakeTestReadCollections([]TestCollectionSpec{
-               TestCollectionSpec{
+               {
                        ReplicationLevel: 5,
                        Blocks:           []int{1, 2},
                },
-               TestCollectionSpec{
+               {
                        ReplicationLevel: 8,
                        Blocks:           []int{2, 3},
                },
@@ -117,9 +117,9 @@ func (s *MySuite) TestSummarizeOverlapping(checker *C) {
                        blockDigest3: 8,
                },
                BlockToCollectionUuids: map[blockdigest.DigestWithSize][]string{
-                       blockDigest1: []string{c0.UUID},
-                       blockDigest2: []string{c0.UUID, c1.UUID},
-                       blockDigest3: []string{c1.UUID},
+                       blockDigest1: {c0.UUID},
+                       blockDigest2: {c0.UUID, c1.UUID},
+                       blockDigest3: {c1.UUID},
                },
        }
 
@@ -184,7 +184,7 @@ func testGetCollectionsAndSummarize(c *C, testData APITestData) {
        api := httptest.NewServer(&apiStub)
        defer api.Close()
 
-       arv := arvadosclient.ArvadosClient{
+       arv := &arvadosclient.ArvadosClient{
                Scheme:    "http",
                ApiServer: api.URL[7:],
                ApiToken:  "abc123",
index 8e128358422a560a42cbdaa03e20feb8067fa6ba..5250d175ffa9995779c47d2fbaefedc992f09096 100644 (file)
@@ -81,7 +81,7 @@ func main() {
 
 var arvLogger *logger.Logger
 
-func singlerun(arv arvadosclient.ArvadosClient) error {
+func singlerun(arv *arvadosclient.ArvadosClient) error {
        var err error
        if isAdmin, err := util.UserIsAdmin(arv); err != nil {
                return errors.New("Error verifying admin token: " + err.Error())
@@ -142,7 +142,7 @@ func singlerun(arv arvadosclient.ArvadosClient) error {
                        rlbss.Count)
        }
 
-       kc, err := keepclient.MakeKeepClient(&arv)
+       kc, err := keepclient.MakeKeepClient(arv)
        if err != nil {
                return fmt.Errorf("Error setting up keep client %v", err.Error())
        }
@@ -185,7 +185,7 @@ func singlerun(arv arvadosclient.ArvadosClient) error {
 }
 
 // BuildDataFetcher returns a data fetcher that fetches data from remote servers.
-func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
+func BuildDataFetcher(arv *arvadosclient.ArvadosClient) summary.DataFetcher {
        return func(
                arvLogger *logger.Logger,
                readCollections *collection.ReadCollections,
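
Passing *arvadosclient.ArvadosClient instead of a value means every layer shares one client object (and its underlying HTTP connection state), and call sites such as keepclient.MakeKeepClient(arv) no longer need to take the address of a local copy. An illustrative sketch of the value/pointer difference, with a simplified stand-in type (not the real client):

    package main

    import "fmt"

    type client struct{ token string }

    func byValue(c client)    { c.token = "changed" } // mutates a private copy
    func byPointer(c *client) { c.token = "changed" } // mutates the shared client

    func main() {
        c := &client{token: "abc123"}
        byValue(*c)
        fmt.Println(c.token) // abc123: the copy's change is lost
        byPointer(c)
        fmt.Println(c.token) // changed: every holder of c sees it
    }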
index a99ec6b05252714c1ed3b21ce6dbf055a6dfd846..7a8fff5c32a30d3a79926305df5db1ef81e48f6a 100644 (file)
@@ -19,7 +19,7 @@ import (
        "time"
 )
 
-var arv arvadosclient.ArvadosClient
+var arv *arvadosclient.ArvadosClient
 var keepClient *keepclient.KeepClient
 var keepServers []string
 
@@ -40,7 +40,7 @@ func SetupDataManagerTest(t *testing.T) {
 
        // keep client
        keepClient = &keepclient.KeepClient{
-               Arvados:       &arv,
+               Arvados:       arv,
                Want_replicas: 2,
                Client:        &http.Client{},
        }
@@ -538,32 +538,31 @@ func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
        testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
 }
 
-func createBadPath(t *testing.T) (badpath string) {
-       tempdir, err := ioutil.TempDir("", "bad")
-       if err != nil {
-               t.Fatalf("Could not create temporary directory for bad path: %v", err)
-       }
-       badpath = path.Join(tempdir, "bad")
-       return
-}
-
-func destroyBadPath(t *testing.T, badpath string) {
-       tempdir := path.Join(badpath, "..")
-       err := os.Remove(tempdir)
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
+       badpath, err := arvadostest.CreateBadPath()
        if err != nil {
-               t.Fatalf("Could not remove bad path temporary directory %v: %v", tempdir, err)
+               t.Fatal(err)
        }
-}
-
-func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
-       badpath := createBadPath(t)
-       defer destroyBadPath(t, badpath)
+       defer func() {
+               err = arvadostest.DestroyBadPath(badpath)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }()
        testOldBlocksNotDeletedOnDataManagerError(t, path.Join(badpath, "writetofile"), "", true, true)
 }
 
 func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
-       badpath := createBadPath(t)
-       defer destroyBadPath(t, badpath)
+       badpath, err := arvadostest.CreateBadPath()
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer func() {
+               err = arvadostest.DestroyBadPath(badpath)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }()
        testOldBlocksNotDeletedOnDataManagerError(t, "", path.Join(badpath, "heapprofilefile"), true, true)
 }
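
The two local helpers above move into a shared arvadostest package and now return errors instead of taking *testing.T, so each caller decides how to fail. A sketch of what the shared helpers might look like, reconstructed from the deleted local versions (the real arvadostest code may differ):

    package arvadostest

    import (
        "io/ioutil"
        "os"
        "path"
    )

    // CreateBadPath returns a path whose parent is a real temporary
    // directory but whose final "bad" component does not exist, so
    // writes through it fail.
    func CreateBadPath() (string, error) {
        tempdir, err := ioutil.TempDir("", "bad")
        if err != nil {
            return "", err
        }
        return path.Join(tempdir, "bad"), nil
    }

    // DestroyBadPath removes the temporary directory created by CreateBadPath.
    func DestroyBadPath(badpath string) error {
        return os.Remove(path.Join(badpath, ".."))
    }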
 
index 206a9c43fd4878babf0d9a5340a68b787b15b71a..39d2d5bd5d7d7b5af23675baa295e5a3eb38f2e7 100644 (file)
@@ -66,7 +66,7 @@ type ReadServers struct {
 
 // GetKeepServersParams struct
 type GetKeepServersParams struct {
-       Client arvadosclient.ArvadosClient
+       Client *arvadosclient.ArvadosClient
        Logger *logger.Logger
        Limit  int
 }
@@ -118,7 +118,7 @@ func GetKeepServersAndSummarize(params GetKeepServersParams) (results ReadServer
 // GetKeepServers from api server
 func GetKeepServers(params GetKeepServersParams) (results ReadServers, err error) {
        sdkParams := arvadosclient.Dict{
-               "filters": [][]string{[]string{"service_type", "!=", "proxy"}},
+               "filters": [][]string{{"service_type", "!=", "proxy"}},
        }
        if params.Limit > 0 {
                sdkParams["limit"] = params.Limit
@@ -215,7 +215,7 @@ func GetKeepServers(params GetKeepServersParams) (results ReadServers, err error
 // GetServerContents of the keep server
 func GetServerContents(arvLogger *logger.Logger,
        keepServer ServerAddress,
-       arv arvadosclient.ArvadosClient) (response ServerResponse) {
+       arv *arvadosclient.ArvadosClient) (response ServerResponse) {
 
        err := GetServerStatus(arvLogger, keepServer, arv)
        if err != nil {
@@ -247,7 +247,7 @@ func GetServerContents(arvLogger *logger.Logger,
 // GetServerStatus get keep server status by invoking /status.json
 func GetServerStatus(arvLogger *logger.Logger,
        keepServer ServerAddress,
-       arv arvadosclient.ArvadosClient) error {
+       arv *arvadosclient.ArvadosClient) error {
        url := fmt.Sprintf("http://%s:%d/status.json",
                keepServer.Host,
                keepServer.Port)
@@ -298,7 +298,7 @@ func GetServerStatus(arvLogger *logger.Logger,
 // CreateIndexRequest to the keep server
 func CreateIndexRequest(arvLogger *logger.Logger,
        keepServer ServerAddress,
-       arv arvadosclient.ArvadosClient) (req *http.Request, err error) {
+       arv *arvadosclient.ArvadosClient) (req *http.Request, err error) {
        url := fmt.Sprintf("http://%s:%d/index", keepServer.Host, keepServer.Port)
        log.Println("About to fetch keep server contents from " + url)
 
@@ -430,13 +430,23 @@ func parseBlockInfoFromIndexLine(indexLine string) (blockInfo BlockInfo, err err
                return
        }
 
-       blockInfo.Mtime, err = strconv.ParseInt(tokens[1], 10, 64)
+       var ns int64
+       ns, err = strconv.ParseInt(tokens[1], 10, 64)
        if err != nil {
                return
        }
-       blockInfo.Digest =
-               blockdigest.DigestWithSize{Digest: locator.Digest,
-                       Size: uint32(locator.Size)}
+       if ns < 1e12 {
+               // An old version of keepstore is giving us timestamps
+               // in seconds instead of nanoseconds. (This threshold
+               // correctly handles all times between 1970-01-02 and
+               // 33658-09-27.)
+               ns = ns * 1e9
+       }
+       blockInfo.Mtime = ns
+       blockInfo.Digest = blockdigest.DigestWithSize{
+               Digest: locator.Digest,
+               Size:   uint32(locator.Size),
+       }
        return
 }
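
The normalization above lets datamanager accept index lines from both old keepstores (seconds) and new ones (nanoseconds): any timestamp below 10^12 is treated as seconds and scaled up, since 10^12 nanoseconds is still inside January 1970 while 10^12 seconds is millennia in the future. A standalone sketch of the heuristic (illustrative names):

    package main

    import (
        "fmt"
        "time"
    )

    func normalizeMtime(ns int64) int64 {
        if ns < 1e12 {
            ns = ns * 1e9 // an old keepstore reported seconds
        }
        return ns
    }

    func main() {
        secs := int64(1443897600)          // a 2015 mtime in seconds
        nanos := secs * int64(time.Second) // the same instant in nanoseconds
        fmt.Println(normalizeMtime(secs) == normalizeMtime(nanos)) // true
    }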
 
index 79ff3f8f0763b1c2452e63afe9b8d553fbab84b1..ca8797ea6e403a4463cd0de27edaa139ceb5ce2c 100644 (file)
@@ -43,10 +43,10 @@ func (s *KeepSuite) TestSendTrashLists(c *C) {
        defer server.Close()
 
        tl := map[string]TrashList{
-               server.URL: TrashList{TrashRequest{"000000000000000000000000deadbeef", 99}}}
+               server.URL: {TrashRequest{"000000000000000000000000deadbeef", 99}}}
 
-       arv := arvadosclient.ArvadosClient{ApiToken: "abc123"}
-       kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
+       arv := &arvadosclient.ArvadosClient{ApiToken: "abc123"}
+       kc := keepclient.KeepClient{Arvados: arv, Client: &http.Client{}}
        kc.SetServiceRoots(map[string]string{"xxxx": server.URL},
                map[string]string{"xxxx": server.URL},
                map[string]string{})
@@ -70,10 +70,10 @@ func (tse *TestHandlerError) ServeHTTP(writer http.ResponseWriter, req *http.Req
 
 func sendTrashListError(c *C, server *httptest.Server) {
        tl := map[string]TrashList{
-               server.URL: TrashList{TrashRequest{"000000000000000000000000deadbeef", 99}}}
+               server.URL: {TrashRequest{"000000000000000000000000deadbeef", 99}}}
 
-       arv := arvadosclient.ArvadosClient{ApiToken: "abc123"}
-       kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
+       arv := &arvadosclient.ArvadosClient{ApiToken: "abc123"}
+       kc := keepclient.KeepClient{Arvados: arv, Client: &http.Client{}}
        kc.SetServiceRoots(map[string]string{"xxxx": server.URL},
                map[string]string{"xxxx": server.URL},
                map[string]string{})
@@ -132,14 +132,14 @@ func testGetKeepServersFromAPI(c *C, testData APITestData, expectedError string)
        api := httptest.NewServer(&apiStub)
        defer api.Close()
 
-       arv := arvadosclient.ArvadosClient{
+       arv := &arvadosclient.ArvadosClient{
                Scheme:    "http",
                ApiServer: api.URL[7:],
                ApiToken:  "abc123",
                Client:    &http.Client{Transport: &http.Transport{}},
        }
 
-       kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
+       kc := keepclient.KeepClient{Arvados: arv, Client: &http.Client{}}
        kc.SetServiceRoots(map[string]string{"xxxx": "http://example.com:23456"},
                map[string]string{"xxxx": "http://example.com:23456"},
                map[string]string{})
@@ -233,14 +233,14 @@ func testGetKeepServersAndSummarize(c *C, testData KeepServerTestData) {
        api := httptest.NewServer(&apiStub)
        defer api.Close()
 
-       arv := arvadosclient.ArvadosClient{
+       arv := &arvadosclient.ArvadosClient{
                Scheme:    "http",
                ApiServer: api.URL[7:],
                ApiToken:  "abc123",
                Client:    &http.Client{Transport: &http.Transport{}},
        }
 
-       kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
+       kc := keepclient.KeepClient{Arvados: arv, Client: &http.Client{}}
        kc.SetServiceRoots(map[string]string{"xxxx": ks.URL},
                map[string]string{"xxxx": ks.URL},
                map[string]string{})
index e2050c2b1ebefbc42bf950fc1ad30121d63b9c84..60b495c41a89799a34d40d7bce649d0af0c9a5fb 100644 (file)
@@ -164,69 +164,69 @@ func (s *PullSuite) TestBuildPullLists(c *C) {
        locator1 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xBadBeef)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{}, From: []string{}}}),
+                       locator1: {To: []string{}, From: []string{}}}),
                PullListMapEquals,
                map[string]PullList{})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{}, From: []string{"f1", "f2"}}}),
+                       locator1: {To: []string{}, From: []string{"f1", "f2"}}}),
                PullListMapEquals,
                map[string]PullList{})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}}}),
+                       locator1: {To: []string{"t1"}, From: []string{"f1", "f2"}}}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}}})
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}}})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{}}}),
+                       locator1: {To: []string{"t1"}, From: []string{}}}),
                PullListMapEquals,
-               map[string]PullList{"t1": PullList{
+               map[string]PullList{"t1": {
                        PullRequest{locator1, []string{}}}})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1", "t2"},
                                From: []string{"f1", "f2"},
                        }}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
-                       "t2": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t2": {PullRequest{locator1, []string{"f1", "f2"}}},
                })
 
        locator2 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xCabbed)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}},
-                       locator2: PullServers{To: []string{"t2"}, From: []string{"f3", "f4"}}}),
+                       locator1: {To: []string{"t1"}, From: []string{"f1", "f2"}},
+                       locator2: {To: []string{"t2"}, From: []string{"f3", "f4"}}}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
-                       "t2": PullList{PullRequest{locator2, []string{"f3", "f4"}}},
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t2": {PullRequest{locator2, []string{"f3", "f4"}}},
                })
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1"},
                                From: []string{"f1", "f2"}},
-                       locator2: PullServers{
+                       locator2: {
                                To:   []string{"t2", "t1"},
                                From: []string{"f3", "f4"}},
                }),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{
+                       "t1": {
                                PullRequest{locator1, []string{"f1", "f2"}},
                                PullRequest{locator2, []string{"f3", "f4"}},
                        },
-                       "t2": PullList{
+                       "t2": {
                                PullRequest{locator2, []string{"f3", "f4"}},
                        },
                })
@@ -235,37 +235,37 @@ func (s *PullSuite) TestBuildPullLists(c *C) {
        locator4 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xFedBeef)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1"},
                                From: []string{"f1", "f2"}},
-                       locator2: PullServers{
+                       locator2: {
                                To:   []string{"t2", "t1"},
                                From: []string{"f3", "f4"}},
-                       locator3: PullServers{
+                       locator3: {
                                To:   []string{"t3", "t2", "t1"},
                                From: []string{"f4", "f5"}},
-                       locator4: PullServers{
+                       locator4: {
                                To:   []string{"t4", "t3", "t2", "t1"},
                                From: []string{"f1", "f5"}},
                }),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{
+                       "t1": {
                                PullRequest{locator1, []string{"f1", "f2"}},
                                PullRequest{locator2, []string{"f3", "f4"}},
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t2": PullList{
+                       "t2": {
                                PullRequest{locator2, []string{"f3", "f4"}},
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t3": PullList{
+                       "t3": {
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t4": PullList{
+                       "t4": {
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
                })
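
Taken together, the checks above pin down BuildPullLists' contract: each destination server named in To gets one PullRequest per locator, carrying the complete From list, and locators with an empty To set contribute nothing. A naive sketch of that behavior with simplified types (ordering aside; this is not the datamanager implementation):

    package main

    import "fmt"

    type pullServers struct{ To, From []string }

    type pullRequest struct {
        Locator string
        From    []string
    }

    func buildPullLists(m map[string]pullServers) map[string][]pullRequest {
        out := map[string][]pullRequest{}
        for locator, ps := range m {
            for _, to := range ps.To {
                // Every destination receives the full source list.
                out[to] = append(out[to], pullRequest{locator, ps.From})
            }
        }
        return out
    }

    func main() {
        fmt.Println(buildPullLists(map[string]pullServers{
            "loc1": {To: []string{"t1", "t2"}, From: []string{"f1", "f2"}},
        }))
    }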
index cc4eb92560b26b385378ffa6d947abb2bc9f0168..82684041275ff602236823b68da5ef2fab6714cf 100644 (file)
@@ -85,21 +85,21 @@ func VerifyToCollectionIndexSet(
 }
 
 func TestToCollectionIndexSet(t *testing.T) {
-       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{6: []int{0}}, []int{0})
-       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1}}, []int{1})
-       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1, 9}}, []int{1, 9})
+       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{6: {0}}, []int{0})
+       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: {1}}, []int{1})
+       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: {1, 9}}, []int{1, 9})
        VerifyToCollectionIndexSet(t, []int{5, 6},
-               map[int][]int{5: []int{2, 3}, 6: []int{3, 4}},
+               map[int][]int{5: {2, 3}, 6: {3, 4}},
                []int{2, 3, 4})
        VerifyToCollectionIndexSet(t, []int{5, 6},
-               map[int][]int{5: []int{8}, 6: []int{4}},
+               map[int][]int{5: {8}, 6: {4}},
                []int{4, 8})
-       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{5: []int{0}}, []int{})
+       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{5: {0}}, []int{})
 }
 
 func TestSimpleSummary(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -128,7 +128,7 @@ func TestSimpleSummary(t *testing.T) {
 
 func TestMissingBlock(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -159,7 +159,7 @@ func TestMissingBlock(t *testing.T) {
 
 func TestUnderAndOverReplicatedBlocks(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{1, 2}},
+               {ReplicationLevel: 2, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -190,9 +190,9 @@ func TestUnderAndOverReplicatedBlocks(t *testing.T) {
 
 func TestMixedReplication(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{3, 4}},
-               collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{5, 6}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{3, 4}},
+               {ReplicationLevel: 2, Blocks: []int{5, 6}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
index b6ceacecde2b8e2ffe810deea9e3777aade06625..3e4d387b62e2c4ba3c7d039a7114bd5ad222d2da 100644 (file)
@@ -29,7 +29,7 @@ func BuildTrashLists(kc *keepclient.KeepClient,
        ttl := int64(_ttl.(float64))
 
        // expire unreferenced blocks more than "ttl" seconds old.
-       expiry := time.Now().UTC().Unix() - ttl
+       expiry := time.Now().UTC().UnixNano() - ttl*1e9
 
        return buildTrashListsInternal(writableServers, keepServerInfo, expiry, keepBlocksNotInCollections), nil
 }
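
This one-line change keeps trash expiry consistent with the keep index change above: block mtimes are now nanoseconds, so the ttl value (seconds) must be scaled by 1e9 before computing the cutoff. A sketch of the comparison in isolation (illustrative names):

    package main

    import (
        "fmt"
        "time"
    )

    // expired reports whether a block mtime (nanoseconds) is older
    // than ttl seconds, mirroring the cutoff computed above.
    func expired(mtimeNs, ttlSeconds int64) bool {
        expiry := time.Now().UTC().UnixNano() - ttlSeconds*1e9
        return mtimeNs < expiry
    }

    func main() {
        hourOld := time.Now().Add(-time.Hour).UnixNano()
        fmt.Println(expired(hourOld, 60))   // true: older than a 60s ttl
        fmt.Println(expired(hourOld, 7200)) // false: inside a 2h ttl
    }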
index 555211fe0275e9a42b49625557f8d505999b9c2d..3626904f3309743f08c6f23a5b1185e6ccd5b886 100644 (file)
@@ -26,12 +26,12 @@ func (s *TrashSuite) TestBuildTrashLists(c *C) {
        var keepServerInfo = keep.ReadServers{
                KeepServerIndexToAddress: []keep.ServerAddress{sv0, sv1},
                BlockToServers: map[blockdigest.DigestWithSize][]keep.BlockServerInfo{
-                       block0: []keep.BlockServerInfo{
-                               keep.BlockServerInfo{0, 99},
-                               keep.BlockServerInfo{1, 101}},
-                       block1: []keep.BlockServerInfo{
-                               keep.BlockServerInfo{0, 99},
-                               keep.BlockServerInfo{1, 101}}}}
+                       block0: {
+                               {0, 99},
+                               {1, 101}},
+                       block1: {
+                               {0, 99},
+                               {1, 101}}}}
 
        // only block0 is in delete set
        var bs = make(BlockSet)
@@ -40,37 +40,37 @@ func (s *TrashSuite) TestBuildTrashLists(c *C) {
        // Test trash list where only sv0 is on writable list.
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{}},
+                       sv0.URL(): {}},
                &keepServerInfo,
                110,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
 
        // Test trash list where both sv0 and sv1 are on writable list.
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{},
-                       sv1.URL(): struct{}{}},
+                       sv0.URL(): {},
+                       sv1.URL(): {}},
                &keepServerInfo,
                110,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}},
-                       "http://keep1.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 101}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}},
+                       "http://keep1.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 101}}})
 
        // Test trash list where only block on sv0 is expired
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{},
-                       sv1.URL(): struct{}{}},
+                       sv0.URL(): {},
+                       sv1.URL(): {}},
                &keepServerInfo,
                100,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
 
 }
index 4db8152d090ecd7b43f8be48fd2db363d03a2483..5430662dd20429aaa2f775c6dd05afe2a2563657 100644 (file)
@@ -1 +1,2 @@
 include agpl-3.0.txt
+include arvados-docker-cleaner.service
diff --git a/services/dockercleaner/arvados-docker-cleaner.service b/services/dockercleaner/arvados-docker-cleaner.service
new file mode 100644 (file)
index 0000000..28653ae
--- /dev/null
@@ -0,0 +1,14 @@
+[Unit]
+Description=Arvados Docker Image Cleaner
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/docker-cleaner/docker-cleaner.json
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/env arvados-docker-cleaner
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
index 88b8a4bc3c5f3330d42859ee9a40d0b3ffdd2e47..5ac81004ace90c6a2b374d36b2af30d7eb5a2139 100755 (executable)
@@ -15,11 +15,13 @@ import sys
 import time
 
 import docker
+import json
 
 SUFFIX_SIZES = {suffix: 1024 ** exp for exp, suffix in enumerate('kmgt', 1)}
 
 logger = logging.getLogger('arvados_docker.cleaner')
 
+
 def return_when_docker_not_found(result=None):
     # If the decorated function raises a 404 error from Docker, return
     # `result` instead.
@@ -35,7 +37,9 @@ def return_when_docker_not_found(result=None):
         return docker_not_found_wrapper
     return docker_not_found_decorator
 
+
 class DockerImage:
+
     def __init__(self, image_hash):
         self.docker_id = image_hash['Id']
         self.size = image_hash['VirtualSize']
@@ -46,6 +50,7 @@ class DockerImage:
 
 
 class DockerImages:
+
     def __init__(self, target_size):
         self.target_size = target_size
         self.images = {}
@@ -117,6 +122,7 @@ class DockerImages:
 class DockerEventHandlers:
     # This class maps Docker event types to the names of methods that should
     # receive those events.
+
     def __init__(self):
         self.handler_names = collections.defaultdict(list)
 
@@ -148,7 +154,9 @@ class DockerEventListener:
     def run(self):
         for event in self.events:
             event = json.loads(event.decode(self.ENCODING))
-            for method_name in self.event_handlers.for_event(event['status']):
+            if event.get('Type', 'container') != 'container':
+                continue
+            for method_name in self.event_handlers.for_event(event.get('status')):
                 getattr(self, method_name)(event)
 
 
@@ -218,7 +226,8 @@ class DockerImageCleaner(DockerImageUseRecorder):
             try:
                 self.docker_client.remove_image(image_id)
             except docker.errors.APIError as error:
-                logger.warning("Failed to remove image %s: %s", image_id, error)
+                logger.warning(
+                    "Failed to remove image %s: %s", image_id, error)
             else:
                 logger.info("Removed image %s", image_id)
                 self.images.del_image(image_id)
@@ -228,8 +237,9 @@ class DockerImageCleaner(DockerImageUseRecorder):
         unknown_ids = {image['Id'] for image in self.docker_client.images()
                        if not self.images.has_image(image['Id'])}
         for image_id in (unknown_ids - self.logged_unknown):
-            logger.info("Image %s is loaded but unused, so it won't be cleaned",
-                        image_id)
+            logger.info(
+                "Image %s is loaded but unused, so it won't be cleaned",
+                image_id)
         self.logged_unknown = unknown_ids
 
 
@@ -242,53 +252,102 @@ def human_size(size_str):
         size_str = size_str[:-1]
     return int(size_str) * multiplier
 
+
+def load_config(arguments):
+    args = parse_arguments(arguments)
+
+    config = default_config()
+    try:
+        with open(args.config, 'r') as f:
+            c = json.load(f)
+            config.update(c)
+    except (FileNotFoundError, IOError, ValueError) as error:
+        sys.exit('error reading config file {}: {}'.format(args.config, error))
+
+    configargs = vars(args).copy()
+    configargs.pop('config')
+    config.update({k: v for k, v in configargs.items() if v})
+
+    if isinstance(config['Quota'], str):
+        config['Quota'] = human_size(config['Quota'])
+
+    return config
+
+
+def default_config():
+    return {
+        'Quota': '1G',
+        'RemoveStoppedContainers': 'always',
+        'Verbose': 0,
+    }
+
+
 def parse_arguments(arguments):
+    class Formatter(argparse.ArgumentDefaultsHelpFormatter,
+                    argparse.RawDescriptionHelpFormatter):
+        pass
     parser = argparse.ArgumentParser(
         prog="arvados_docker.cleaner",
-        description="clean old Docker images from Arvados compute nodes")
+        description="clean old Docker images from Arvados compute nodes",
+        epilog="Example config file:\n\n{}".format(
+            json.dumps(default_config(), indent=4)),
+        formatter_class=Formatter,
+    )
+    parser.add_argument(
+        '--config', action='store', type=str, default='/etc/arvados/docker-cleaner/docker-cleaner.json',
+        help="configuration file")
+
+    deprecated = " (DEPRECATED -- use config file instead)"
     parser.add_argument(
-        '--quota', action='store', type=human_size, required=True,
-        help="space allowance for Docker images, suffixed with K/M/G/T")
+        '--quota', action='store', type=human_size, dest='Quota',
+        help="space allowance for Docker images, suffixed with K/M/G/T" + deprecated)
     parser.add_argument(
-        '--remove-stopped-containers', type=str, default='always',
+        '--remove-stopped-containers', type=str, default='always', dest='RemoveStoppedContainers',
         choices=['never', 'onexit', 'always'],
         help="""when to remove stopped containers (default: always, i.e., remove
         stopped containers found at startup, and remove containers as
-        soon as they exit)""")
+        soon as they exit)""" + deprecated)
     parser.add_argument(
-        '--verbose', '-v', action='count', default=0,
-        help="log more information")
+        '--verbose', '-v', action='count', default=0, dest='Verbose',
+        help="log more information" + deprecated)
+
     return parser.parse_args(arguments)
 
-def setup_logging(args):
+
+def setup_logging(config):
     log_handler = logging.StreamHandler()
     log_handler.setFormatter(logging.Formatter(
-            '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
-            '%Y-%m-%d %H:%M:%S'))
+        '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+        '%Y-%m-%d %H:%M:%S'))
     logger.addHandler(log_handler)
-    logger.setLevel(logging.ERROR - (10 * args.verbose))
+    logger.setLevel(logging.ERROR - (10 * config['Verbose']))
+
 
-def run(args, docker_client):
+def run(config, docker_client):
     start_time = int(time.time())
     logger.debug("Loading Docker activity through present")
-    images = DockerImages.from_daemon(args.quota, docker_client)
+    images = DockerImages.from_daemon(config['Quota'], docker_client)
     use_recorder = DockerImageUseRecorder(
         images, docker_client, docker_client.events(since=1, until=start_time))
     use_recorder.run()
     cleaner = DockerImageCleaner(
         images, docker_client, docker_client.events(since=start_time),
-        remove_containers_onexit=args.remove_stopped_containers != 'never')
+        remove_containers_onexit=config['RemoveStoppedContainers'] != 'never')
     cleaner.check_stopped_containers(
-        remove=args.remove_stopped_containers == 'always')
+        remove=config['RemoveStoppedContainers'] == 'always')
     logger.info("Checking image quota at startup")
     cleaner.clean_images()
     logger.info("Listening for docker events")
     cleaner.run()
 
-def main(arguments):
-    args = parse_arguments(arguments)
-    setup_logging(args)
-    run(args, docker.Client(version='1.14'))
+
+def main(arguments=sys.argv[1:]):
+    config = load_config(arguments)
+    setup_logging(config)
+    try:
+        run(config, docker.Client(version='1.14'))
+    except KeyboardInterrupt:
+        sys.exit(1)
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    main()
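
The rewrite above gives the cleaner a layered configuration: built-in defaults from default_config(), overridden by the JSON config file, overridden in turn by any deprecated command-line flags that were actually supplied (argparse leaves unset ones falsy, so `if v` filters them out). A language-neutral sketch of that precedence, rendered in Go to keep a single example language throughout these notes (names are illustrative):

    package main

    import "fmt"

    // mergeConfig layers maps left to right; later non-empty values win,
    // mirroring defaults -> config file -> CLI flags above.
    func mergeConfig(layers ...map[string]string) map[string]string {
        out := map[string]string{}
        for _, layer := range layers {
            for k, v := range layer {
                if v != "" { // flags that were not given stay empty
                    out[k] = v
                }
            }
        }
        return out
    }

    func main() {
        defaults := map[string]string{"Quota": "1G", "Verbose": "0"}
        file := map[string]string{"Quota": "1000T"}
        cli := map[string]string{"Quota": "2G"}
        fmt.Println(mergeConfig(defaults, file, cli)) // map[Quota:2G Verbose:0]
    }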
index 3ca9714066e23a1e3ad2c58d40691686da2b36e8..535650e3fa3f35bcbef48240727c7746e68eb7f8 100644 (file)
@@ -21,17 +21,20 @@ setup(name="arvados-docker-cleaner",
       download_url="https://github.com/curoverse/arvados.git",
       license="GNU Affero General Public License version 3.0",
       packages=find_packages(),
+      entry_points={
+          'console_scripts': ['arvados-docker-cleaner=arvados_docker.cleaner:main'],
+      },
       data_files=[
-          ('share/doc/arvados-docker-cleaner', ['agpl-3.0.txt']),
+          ('share/doc/arvados-docker-cleaner', ['agpl-3.0.txt', 'arvados-docker-cleaner.service']),
       ],
       install_requires=[
-        'docker-py==1.7.2',
-        ],
+          'docker-py==1.7.2',
+      ],
       tests_require=[
-        'pbr<1.7.0',
-        'mock',
-        ],
+          'pbr<1.7.0',
+          'mock',
+      ],
       test_suite='tests',
       zip_safe=False,
       cmdclass={'egg_info': tagger},
-      )
+)
index 3cb172e1e686d550206f8e21a49616ed71990fc4..9fbd3e3014ecd0038d789d64ce3660d872268f6c 100644 (file)
@@ -4,6 +4,7 @@ import collections
 import itertools
 import json
 import random
+import tempfile
 import time
 import unittest
 
@@ -14,13 +15,16 @@ from arvados_docker import cleaner
 
 MAX_DOCKER_ID = (16 ** 64) - 1
 
+
 def MockDockerId():
     return '{:064x}'.format(random.randint(0, MAX_DOCKER_ID))
 
+
 def MockContainer(image_hash):
     return {'Id': MockDockerId(),
             'Image': image_hash['Id']}
 
+
 def MockImage(*, size=0, vsize=None, tags=[]):
     if vsize is None:
         vsize = random.randint(100, 2000000)
@@ -30,6 +34,7 @@ def MockImage(*, size=0, vsize=None, tags=[]):
             'Size': size,
             'VirtualSize': vsize}
 
+
 class MockEvent(dict):
     ENCODING = 'utf-8'
     event_seq = itertools.count(1)
@@ -47,6 +52,7 @@ class MockEvent(dict):
 
 
 class MockException(docker.errors.APIError):
+
     def __init__(self, status_code):
         response = mock.Mock(name='response')
         response.status_code = status_code
@@ -54,6 +60,7 @@ class MockException(docker.errors.APIError):
 
 
 class DockerImageTestCase(unittest.TestCase):
+
     def test_used_at_sets_last_used(self):
         image = cleaner.DockerImage(MockImage())
         image.used_at(5)
@@ -73,6 +80,7 @@ class DockerImageTestCase(unittest.TestCase):
 
 
 class DockerImagesTestCase(unittest.TestCase):
+
     def setUp(self):
         self.mock_images = []
 
@@ -335,6 +343,7 @@ class DockerContainerCleanerTestCase(DockerImageUseRecorderTestCase):
 
 
 class HumanSizeTestCase(unittest.TestCase):
+
     def check(self, human_str, count, exp):
         self.assertEqual(count * (1024 ** exp),
                          cleaner.human_size(human_str))
@@ -361,15 +370,16 @@ class HumanSizeTestCase(unittest.TestCase):
 
 
 class RunTestCase(unittest.TestCase):
+
     def setUp(self):
-        self.args = mock.MagicMock(name='args')
-        self.args.quota = 1000000
+        self.config = cleaner.default_config()
+        self.config['Quota'] = 1000000
         self.docker_client = mock.MagicMock(name='docker_client')
 
     def test_run(self):
         test_start_time = int(time.time())
         self.docker_client.events.return_value = []
-        cleaner.run(self.args, self.docker_client)
+        cleaner.run(self.config, self.docker_client)
         self.assertEqual(2, self.docker_client.events.call_count)
         event_kwargs = [args[1] for args in
                         self.docker_client.events.call_args_list]
@@ -383,23 +393,56 @@ class RunTestCase(unittest.TestCase):
 @mock.patch('docker.Client', name='docker_client')
 @mock.patch('arvados_docker.cleaner.run', name='cleaner_run')
 class MainTestCase(unittest.TestCase):
+
     def test_client_api_version(self, run_mock, docker_client):
-        cleaner.main(['--quota', '1000T'])
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write('{"Quota":"1000T"}')
+            cf.flush()
+            cleaner.main(['--config', cf.name])
         self.assertEqual(1, docker_client.call_count)
         # 1.14 is the first version that's well defined, going back to
         # Docker 1.2, and still supported up to at least Docker 1.9.
-        # See <https://docs.docker.com/engine/reference/api/docker_remote_api/>.
+        # See
+        # <https://docs.docker.com/engine/reference/api/docker_remote_api/>.
         self.assertEqual('1.14',
                          docker_client.call_args[1].get('version'))
         self.assertEqual(1, run_mock.call_count)
         self.assertIs(run_mock.call_args[0][1], docker_client())
 
 
+class ConfigTestCase(unittest.TestCase):
+
+    def test_load_config(self):
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write(
+                '{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
+            cf.flush()
+            config = cleaner.load_config(['--config', cf.name])
+        self.assertEqual(1000 << 40, config['Quota'])
+        self.assertEqual("always", config['RemoveStoppedContainers'])
+        self.assertEqual(2, config['Verbose'])
+
+    def test_args_override_config(self):
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write(
+                '{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
+            cf.flush()
+            config = cleaner.load_config([
+                '--config', cf.name,
+                '--quota', '1G',
+                '--remove-stopped-containers', 'never',
+                '--verbose',
+            ])
+        self.assertEqual(1 << 30, config['Quota'])
+        self.assertEqual('never', config['RemoveStoppedContainers'])
+        self.assertEqual(1, config['Verbose'])
+
+
 class ContainerRemovalTestCase(unittest.TestCase):
     LIFECYCLE = ['create', 'attach', 'start', 'resize', 'die', 'destroy']
 
     def setUp(self):
-        self.args = mock.MagicMock(name='args')
+        self.config = cleaner.default_config()
         self.docker_client = mock.MagicMock(name='docker_client')
         self.existingCID = MockDockerId()
         self.docker_client.containers.return_value = [{
@@ -417,33 +460,37 @@ class ContainerRemovalTestCase(unittest.TestCase):
             for e in self.LIFECYCLE]
 
     def test_remove_onexit(self):
-        self.args.remove_stopped_containers = 'onexit'
-        cleaner.run(self.args, self.docker_client)
-        self.docker_client.remove_container.assert_called_once_with(self.newCID, v=True)
+        self.config['RemoveStoppedContainers'] = 'onexit'
+        cleaner.run(self.config, self.docker_client)
+        self.docker_client.remove_container.assert_called_once_with(
+            self.newCID, v=True)
 
     def test_remove_always(self):
-        self.args.remove_stopped_containers = 'always'
-        cleaner.run(self.args, self.docker_client)
-        self.docker_client.remove_container.assert_any_call(self.existingCID, v=True)
-        self.docker_client.remove_container.assert_any_call(self.newCID, v=True)
+        self.config['RemoveStoppedContainers'] = 'always'
+        cleaner.run(self.config, self.docker_client)
+        self.docker_client.remove_container.assert_any_call(
+            self.existingCID, v=True)
+        self.docker_client.remove_container.assert_any_call(
+            self.newCID, v=True)
         self.assertEqual(2, self.docker_client.remove_container.call_count)
 
     def test_remove_never(self):
-        self.args.remove_stopped_containers = 'never'
-        cleaner.run(self.args, self.docker_client)
+        self.config['RemoveStoppedContainers'] = 'never'
+        cleaner.run(self.config, self.docker_client)
         self.assertEqual(0, self.docker_client.remove_container.call_count)
 
     def test_container_exited_between_subscribe_events_and_check_existing(self):
-        self.args.remove_stopped_containers = 'always'
+        self.config['RemoveStoppedContainers'] = 'always'
         self.docker_client.events.return_value = [
             MockEvent(e, docker_id=self.existingCID).encoded()
             for e in ['die', 'destroy']]
-        cleaner.run(self.args, self.docker_client)
+        cleaner.run(self.config, self.docker_client)
         # Subscribed to events before getting the list of existing
         # exited containers?
         self.docker_client.assert_has_calls([
             mock.call.events(since=mock.ANY),
-            mock.call.containers(filters={'status':'exited'})])
+            mock.call.containers(filters={'status': 'exited'})])
         # Asked to delete the container twice?
-        self.docker_client.remove_container.assert_has_calls([mock.call(self.existingCID, v=True)] * 2)
+        self.docker_client.remove_container.assert_has_calls(
+            [mock.call(self.existingCID, v=True)] * 2)
         self.assertEqual(2, self.docker_client.remove_container.call_count)
index 6ac51f43873d94ca853167b8ad4b4fefbe6e4139..527e02728bdd711c2deb6ef19573d223c9101e0a 100644 (file)
@@ -134,7 +134,6 @@ class InodeCache(object):
     def __init__(self, cap, min_entries=4):
         self._entries = collections.OrderedDict()
         self._by_uuid = {}
-        self._counter = itertools.count(0)
         self.cap = cap
         self._total = 0
         self.min_entries = min_entries
@@ -143,32 +142,44 @@ class InodeCache(object):
         return self._total
 
     def _remove(self, obj, clear):
-        if clear and not obj.clear():
-            _logger.debug("InodeCache could not clear %i in_use %s", obj.inode, obj.in_use())
-            return False
+        if clear:
+            if obj.in_use():
+                _logger.debug("InodeCache cannot clear inode %i, in use", obj.inode)
+                return
+            if obj.has_ref(True):
+                obj.kernel_invalidate()
+                _logger.debug("InodeCache sent kernel invalidate inode %i", obj.inode)
+                return
+            obj.clear()
+
+        # The llfuse lock is released in del_entry(), which is called by
+        # Directory.clear().  While the llfuse lock is released, it can happen
+        # that a reentrant call removes this entry before this call gets to it.
+        # Ensure that the entry is still valid before trying to remove it.
+        if obj.inode not in self._entries:
+            return
+
         self._total -= obj.cache_size
-        del self._entries[obj.cache_priority]
+        del self._entries[obj.inode]
         if obj.cache_uuid:
             self._by_uuid[obj.cache_uuid].remove(obj)
             if not self._by_uuid[obj.cache_uuid]:
                 del self._by_uuid[obj.cache_uuid]
             obj.cache_uuid = None
         if clear:
-            _logger.debug("InodeCache cleared %i total now %i", obj.inode, self._total)
-        return True
+            _logger.debug("InodeCache cleared inode %i total now %i", obj.inode, self._total)
 
     def cap_cache(self):
         if self._total > self.cap:
-            for key in list(self._entries.keys()):
+            for ent in self._entries.values():
                 if self._total < self.cap or len(self._entries) < self.min_entries:
                     break
-                self._remove(self._entries[key], True)
+                self._remove(ent, True)
 
     def manage(self, obj):
         if obj.persisted():
-            obj.cache_priority = next(self._counter)
             obj.cache_size = obj.objsize()
-            self._entries[obj.cache_priority] = obj
+            self._entries[obj.inode] = obj
             obj.cache_uuid = obj.uuid()
             if obj.cache_uuid:
                 if obj.cache_uuid not in self._by_uuid:
@@ -177,19 +188,17 @@ class InodeCache(object):
                     if obj not in self._by_uuid[obj.cache_uuid]:
                         self._by_uuid[obj.cache_uuid].append(obj)
             self._total += obj.objsize()
-            _logger.debug("InodeCache touched %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
+            _logger.debug("InodeCache touched inode %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
             self.cap_cache()
-        else:
-            obj.cache_priority = None
 
     def touch(self, obj):
         if obj.persisted():
-            if obj.cache_priority in self._entries:
+            if obj.inode in self._entries:
                 self._remove(obj, False)
             self.manage(obj)
 
     def unmanage(self, obj):
-        if obj.persisted() and obj.cache_priority in self._entries:
+        if obj.persisted() and obj.inode in self._entries:
             self._remove(obj, True)
 
     def find_by_uuid(self, uuid):
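
The InodeCache changes above replace the monotonically increasing cache_priority key with the object's stable inode number: touch() becomes remove-and-reinsert under the same key, cap_cache() walks the OrderedDict oldest-first, and eviction now defers to the in_use()/has_ref() checks instead of clear() returning False. A rough Go rendering of the keying scheme only (illustrative; the real logic is the Python above):

    package main

    import "fmt"

    // inodeCache evicts in insertion order; touching an entry moves it
    // to the back by removing and re-inserting its stable inode key.
    type inodeCache struct {
        order   []int       // inodes, oldest first
        entries map[int]int // inode -> cached object size
        total   int
        limit   int
    }

    func (c *inodeCache) remove(inode int) {
        size, ok := c.entries[inode]
        if !ok {
            return
        }
        c.total -= size
        delete(c.entries, inode)
        for i, ino := range c.order {
            if ino == inode {
                c.order = append(c.order[:i], c.order[i+1:]...)
                break
            }
        }
    }

    func (c *inodeCache) touch(inode, size int) {
        c.remove(inode)
        c.order = append(c.order, inode)
        c.entries[inode] = size
        c.total += size
        for len(c.order) > 0 && c.total > c.limit {
            c.remove(c.order[0]) // evict the least recently touched
        }
    }

    func main() {
        c := &inodeCache{entries: map[int]int{}, limit: 10}
        c.touch(1, 6)
        c.touch(2, 6)          // total 12 > limit: inode 1 is evicted
        fmt.Println(c.entries) // map[2:6]
    }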
@@ -632,7 +641,7 @@ class Operations(llfuse.Operations):
 
     @catch_exceptions
     def create(self, inode_parent, name, mode, flags, ctx):
-        _logger.debug("arv-mount create: %i '%s' %o", inode_parent, name, mode)
+        _logger.debug("arv-mount create: parent_inode %i '%s' %o", inode_parent, name, mode)
 
         p = self._check_writable(inode_parent)
         p.create(name)
@@ -648,7 +657,7 @@ class Operations(llfuse.Operations):
 
     @catch_exceptions
     def mkdir(self, inode_parent, name, mode, ctx):
-        _logger.debug("arv-mount mkdir: %i '%s' %o", inode_parent, name, mode)
+        _logger.debug("arv-mount mkdir: parent_inode %i '%s' %o", inode_parent, name, mode)
 
         p = self._check_writable(inode_parent)
         p.mkdir(name)
@@ -661,19 +670,19 @@ class Operations(llfuse.Operations):
 
     @catch_exceptions
     def unlink(self, inode_parent, name):
-        _logger.debug("arv-mount unlink: %i '%s'", inode_parent, name)
+        _logger.debug("arv-mount unlink: parent_inode %i '%s'", inode_parent, name)
         p = self._check_writable(inode_parent)
         p.unlink(name)
 
     @catch_exceptions
     def rmdir(self, inode_parent, name):
-        _logger.debug("arv-mount rmdir: %i '%s'", inode_parent, name)
+        _logger.debug("arv-mount rmdir: parent_inode %i '%s'", inode_parent, name)
         p = self._check_writable(inode_parent)
         p.rmdir(name)
 
     @catch_exceptions
     def rename(self, inode_parent_old, name_old, inode_parent_new, name_new):
-        _logger.debug("arv-mount rename: %i '%s' %i '%s'", inode_parent_old, name_old, inode_parent_new, name_new)
+        _logger.debug("arv-mount rename: old_parent_inode %i '%s' new_parent_inode %i '%s'", inode_parent_old, name_old, inode_parent_new, name_new)
         src = self._check_writable(inode_parent_old)
         dest = self._check_writable(inode_parent_new)
         dest.rename(name_old, name_new, src)
index c4b0df3a4e51e5ba9236b1a04957d019cce5d88c..d15f01792a8e8dd4b433611ce20a752c1138c877 100644 (file)
@@ -117,6 +117,7 @@ class Mount(object):
         self.llfuse_thread.daemon = True
         self.llfuse_thread.start()
         self.operations.initlock.wait()
+        return self
 
     def __exit__(self, exc_type, exc_value, traceback):
         subprocess.call(["fusermount", "-u", "-z", self.args.mountpoint])
index 2075741dbd64b923b37a8ca6da84808bbbb83cc2..e7e91db4279a895cffa23641ef97becc8ca9ffc8 100644 (file)
@@ -64,16 +64,19 @@ class FreshBase(object):
         self.use_count = 0
         self.ref_count = 0
         self.dead = False
-        self.cache_priority = None
         self.cache_size = 0
         self.cache_uuid = None
         self.allow_attr_cache = True
         self.allow_dirent_cache = True
 
-    # Mark the value as stale
     def invalidate(self):
+        """Indicate that object contents should be refreshed from source."""
         self._stale = True
 
+    def kernel_invalidate(self):
+        """Indicate that an invalidation for this object should be sent to the kernel."""
+        pass
+
     # Test if the entries dict is stale.
     def stale(self):
         if self._stale:
@@ -92,7 +95,7 @@ class FreshBase(object):
     def persisted(self):
         return False
 
-    def clear(self, force=False):
+    def clear(self):
         pass
 
     def in_use(self):
@@ -112,6 +115,18 @@ class FreshBase(object):
         self.ref_count -= n
         return self.ref_count
 
+    def has_ref(self, only_children):
+        """Determine if there are any kernel references to this
+        object or its children.
+
+        If only_children is True, ignore refcount of self and only consider
+        children.
+        """
+        if only_children:
+            return False
+        else:
+            return self.ref_count > 0
+
     def objsize(self):
         return 0
 
index 3f2bcd5ec2464fb5351a42be205b17f1fe93e49c..76530844b5724867f1f7c0895c765de63e503460 100644 (file)
@@ -156,24 +156,38 @@ class Directory(FreshBase):
 
         self.fresh()
 
-    def clear(self, force=False):
-        """Delete all entries"""
+    def in_use(self):
+        if super(Directory, self).in_use():
+            return True
+        for v in self._entries.itervalues():
+            if v.in_use():
+                return True
+        return False
 
-        if not self.in_use() or force:
-            oldentries = self._entries
-            self._entries = {}
-            for n in oldentries:
-                if not oldentries[n].clear(force):
-                    self._entries = oldentries
-                    return False
-            for n in oldentries:
-                self.inodes.invalidate_entry(self.inode, n.encode(self.inodes.encoding))
-                self.inodes.del_entry(oldentries[n])
-            self.inodes.invalidate_inode(self.inode)
-            self.invalidate()
+    def has_ref(self, only_children):
+        if super(Directory, self).has_ref(only_children):
             return True
-        else:
-            return False
+        for v in self._entries.itervalues():
+            if v.has_ref(False):
+                return True
+        return False
+
+    def clear(self):
+        """Delete all entries"""
+        oldentries = self._entries
+        self._entries = {}
+        for n in oldentries:
+            oldentries[n].clear()
+            self.inodes.invalidate_entry(self.inode, n.encode(self.inodes.encoding))
+            self.inodes.del_entry(oldentries[n])
+        self.inodes.invalidate_inode(self.inode)
+        self.invalidate()
+
+    def kernel_invalidate(self):
+        for n, e in self._entries.iteritems():
+            self.inodes.invalidate_entry(self.inode, n.encode(self.inodes.encoding))
+            e.kernel_invalidate()
+        self.inodes.invalidate_inode(self.inode)
 
     def mtime(self):
         return self._mtime
@@ -320,10 +334,9 @@ class CollectionDirectoryBase(Directory):
         self.flush()
         src.flush()
 
-    def clear(self, force=False):
-        r = super(CollectionDirectoryBase, self).clear(force)
+    def clear(self):
+        super(CollectionDirectoryBase, self).clear()
         self.collection = None
-        return r
 
 
 class CollectionDirectory(CollectionDirectoryBase):
@@ -375,7 +388,7 @@ class CollectionDirectory(CollectionDirectoryBase):
 
     def new_collection(self, new_collection_record, coll_reader):
         if self.inode:
-            self.clear(force=True)
+            self.clear()
 
         self.collection_record = new_collection_record
 
@@ -407,7 +420,7 @@ class CollectionDirectory(CollectionDirectoryBase):
                     if not self.stale():
                         return
 
-                    _logger.debug("Updating %s", to_record_version)
+                    _logger.debug("Updating collection %s inode %s to record version %s", self.collection_locator, self.inode, to_record_version)
                     if self.collection is not None:
                         if self.collection.known_past_version(to_record_version):
                             _logger.debug("%s already processed %s", self.collection_locator, to_record_version)
@@ -493,6 +506,10 @@ class CollectionDirectory(CollectionDirectoryBase):
                 self.collection.save()
             self.collection.stop_threads()
 
+    def clear(self):
+        super(CollectionDirectory, self).clear()
+        self._manifest_size = 0
+
 
 class TmpCollectionDirectory(CollectionDirectoryBase):
     """A directory backed by an Arvados collection that never gets saved.
@@ -640,24 +657,14 @@ will appear if it exists.
         else:
             raise KeyError("No collection with id " + item)
 
-    def clear(self, force=False):
+    def clear(self):
         pass
 
     def want_event_subscribe(self):
         return not self.pdh_only
 
 
-class RecursiveInvalidateDirectory(Directory):
-    def invalidate(self):
-        try:
-            super(RecursiveInvalidateDirectory, self).invalidate()
-            for a in self._entries:
-                self._entries[a].invalidate()
-        except Exception:
-            _logger.exception()
-
-
-class TagsDirectory(RecursiveInvalidateDirectory):
+class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
     def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
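
The has_ref/clear split above changes the eviction contract: clear() no longer returns a success flag; instead the cache asks has_ref() whether the kernel still references an entry, and falls back to kernel_invalidate() when it does (see the InodeTests changes below). A minimal sketch of that decision, written in Go for consistency with the other examples in this changeset; Entry stands in for the Python Directory/File classes:

    package main

    import "fmt"

    // Entry stands in for the fuse Directory/File objects.
    type Entry struct {
        inUse  bool // open file handles
        hasRef bool // kernel still holds a lookup reference
    }

    // evict models the cache decision after this change: entries that
    // are in use or referenced are only asked to invalidate; clear()
    // is called solely on unreferenced entries.
    func evict(e *Entry) string {
        if e.inUse || e.hasRef {
            return "kernel_invalidate" // ask kernel to drop refs, retry later
        }
        return "clear"
    }

    func main() {
        fmt.Println(evict(&Entry{hasRef: true})) // kernel_invalidate
        fmt.Println(evict(&Entry{}))             // clear
    }

Invalidation prompts the kernel to release its references, so a later cache pass can clear the entry for real.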
index 3f0e4932fddb181d84a17def278e21bd3035b6db..81fcd405ad051f50e5d7e88e048f02895d0e7323 100644 (file)
@@ -29,8 +29,8 @@ class File(FreshBase):
     def mtime(self):
         return self._mtime
 
-    def clear(self, force=False):
-        return True
+    def clear(self):
+        pass
 
     def writable(self):
         return False
index 5a45bfc103f34df2ae928ed5bcd305d100c408fa..b485037c8cdfe5b897d2de64f42aeaba9ba3ea20 100644 (file)
@@ -1,14 +1,29 @@
 import arvados
 import arvados_fuse
 import arvados_fuse.command
+import atexit
 import functools
 import inspect
+import logging
 import multiprocessing
 import os
+import run_test_server
+import signal
 import sys
 import tempfile
 import unittest
-import run_test_server
+
+_pool = None
+
+
+@atexit.register
+def _pool_cleanup():
+    global _pool
+    if _pool is None:
+        return
+    _pool.close()
+    _pool.join()
+
 
 def wrap_static_test_method(modName, clsName, funcName, args, kwargs):
     class Test(unittest.TestCase):
@@ -24,17 +39,15 @@ class IntegrationTest(unittest.TestCase):
         If called by method 'foobar', the static method '_foobar' of
         the same class will be called in the other process.
         """
+        global _pool
+        if _pool is None:
+            _pool = multiprocessing.Pool(1, maxtasksperchild=1)
         modName = inspect.getmodule(self).__name__
         clsName = self.__class__.__name__
         funcName = inspect.currentframe().f_back.f_code.co_name
-        pool = multiprocessing.Pool(1)
-        try:
-            pool.apply(
-                wrap_static_test_method,
-                (modName, clsName, '_'+funcName, args, kwargs))
-        finally:
-            pool.terminate()
-            pool.join()
+        _pool.apply(
+            wrap_static_test_method,
+            (modName, clsName, '_'+funcName, args, kwargs))
 
     @classmethod
     def setUpClass(cls):
@@ -60,11 +73,19 @@ class IntegrationTest(unittest.TestCase):
         def decorator(func):
             @functools.wraps(func)
             def wrapper(self, *args, **kwargs):
-                with arvados_fuse.command.Mount(
-                        arvados_fuse.command.ArgumentParser().parse_args(
-                            argv + ['--foreground',
-                                    '--unmount-timeout=0.1',
-                                    self.mnt])):
-                    return func(self, *args, **kwargs)
+                self.mount = None
+                try:
+                    with arvados_fuse.command.Mount(
+                            arvados_fuse.command.ArgumentParser().parse_args(
+                                argv + ['--foreground',
+                                        '--unmount-timeout=2',
+                                        self.mnt])) as self.mount:
+                        return func(self, *args, **kwargs)
+                finally:
+                    if self.mount and self.mount.llfuse_thread.is_alive():
+                        logging.warning("IntegrationTest.mount:"
+                                        " llfuse thread still alive after umount"
+                                        " -- killing test suite to avoid deadlock")
+                        os.kill(os.getpid(), signal.SIGKILL)
             return wrapper
         return decorator
index 12395d7f951422c90a76bc1e474172524c598356..20192f9d84302e1d9967136bcfa19e03ef7012dd 100644 (file)
@@ -1,18 +1,19 @@
 import arvados
-import arvados.safeapi
 import arvados_fuse as fuse
+import arvados.safeapi
 import llfuse
+import logging
+import multiprocessing
 import os
+import run_test_server
 import shutil
+import signal
 import subprocess
 import sys
 import tempfile
 import threading
 import time
 import unittest
-import logging
-import multiprocessing
-import run_test_server
 
 logger = logging.getLogger('arvados.arv-mount')
 
@@ -64,23 +65,26 @@ class MountTestBase(unittest.TestCase):
         return self.operations.inodes[llfuse.ROOT_INODE]
 
     def tearDown(self):
-        self.pool.terminate()
-        self.pool.join()
-        del self.pool
-
         if self.llfuse_thread:
             subprocess.call(["fusermount", "-u", "-z", self.mounttmp])
-            self.llfuse_thread.join(timeout=1)
+            t0 = time.time()
+            self.llfuse_thread.join(timeout=10)
             if self.llfuse_thread.is_alive():
                 logger.warning("MountTestBase.tearDown():"
-                               " llfuse thread still alive 1s after umount"
-                               " -- abandoning and exiting anyway")
+                               " llfuse thread still alive 10s after umount"
+                               " -- exiting with SIGKILL")
+                os.kill(os.getpid(), signal.SIGKILL)
+            waited = time.time() - t0
+            if waited > 0.1:
+                logger.warning("MountTestBase.tearDown(): waited %f s for llfuse thread to end", waited)
 
         os.rmdir(self.mounttmp)
         if self.keeptmp:
             shutil.rmtree(self.keeptmp)
             os.environ.pop('KEEP_LOCAL_STORE')
         run_test_server.reset()
+        self.pool.close()
+        self.pool.join()
 
     def assertDirContents(self, subdir, expect_content):
         path = self.mounttmp
index 7aa00092bc774bb3bc08d963838ed001682d9696..745c6f7b31e744546c5268e9f429283cbcf31eac 100644 (file)
@@ -11,8 +11,8 @@ import unittest
 from .integration_test import IntegrationTest
 from .mount_test_base import MountTestBase
 
-class TmpCollectionTest(IntegrationTest):
-    mnt_args = ["--directory-cache=0"]
+class CacheTest(IntegrationTest):
+    mnt_args = ["--by-id", "--directory-cache=0"]
 
     @IntegrationTest.mount(argv=mnt_args)
     def test_cache_spill(self):
index 61170d59807d5bf38ab4b4f622698e5aa78628c8..f9d73b6ef73369d8b5ec956f4714a84d66a6c179 100644 (file)
@@ -2,6 +2,7 @@ import arvados_fuse
 import mock
 import unittest
 import llfuse
+import logging
 
 class InodeTests(unittest.TestCase):
     def test_inodes_basic(self):
@@ -11,8 +12,8 @@ class InodeTests(unittest.TestCase):
         # Check that ent1 gets added to inodes
         ent1 = mock.MagicMock()
         ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
         ent1.persisted.return_value = True
-        ent1.clear.return_value = True
         ent1.objsize.return_value = 500
         inodes.add_entry(ent1)
         self.assertIn(ent1.inode, inodes)
@@ -25,8 +26,8 @@ class InodeTests(unittest.TestCase):
 
         ent1 = mock.MagicMock()
         ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
         ent1.persisted.return_value = True
-        ent1.clear.return_value = True
         ent1.objsize.return_value = 500
         inodes.add_entry(ent1)
 
@@ -34,6 +35,7 @@ class InodeTests(unittest.TestCase):
         # affect the cache total
         ent2 = mock.MagicMock()
         ent2.in_use.return_value = False
+        ent2.has_ref.return_value = False
         ent2.persisted.return_value = False
         ent2.objsize.return_value = 600
         inodes.add_entry(ent2)
@@ -46,17 +48,17 @@ class InodeTests(unittest.TestCase):
         # Check that ent1 gets added to inodes
         ent1 = mock.MagicMock()
         ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
         ent1.persisted.return_value = True
-        ent1.clear.return_value = True
         ent1.objsize.return_value = 500
         inodes.add_entry(ent1)
 
         # ent3 is persisted, adding it should cause ent1 to get cleared
         ent3 = mock.MagicMock()
         ent3.in_use.return_value = False
+        ent3.has_ref.return_value = False
         ent3.persisted.return_value = True
         ent3.objsize.return_value = 600
-        ent3.clear.return_value = True
 
         self.assertFalse(ent1.clear.called)
         inodes.add_entry(ent3)
@@ -78,46 +80,44 @@ class InodeTests(unittest.TestCase):
         self.assertTrue(ent3.clear.called)
         self.assertEqual(500, cache.total())
 
-    def test_clear_false(self):
+    def test_clear_in_use(self):
         cache = arvados_fuse.InodeCache(1000, 4)
         inodes = arvados_fuse.Inodes(cache)
 
         ent1 = mock.MagicMock()
-        ent1.in_use.return_value = False
+        ent1.in_use.return_value = True
+        ent1.has_ref.return_value = False
         ent1.persisted.return_value = True
-        ent1.clear.return_value = True
         ent1.objsize.return_value = 500
         inodes.add_entry(ent1)
 
         ent3 = mock.MagicMock()
         ent3.in_use.return_value = False
+        ent3.has_ref.return_value = True
         ent3.persisted.return_value = True
         ent3.objsize.return_value = 600
-        ent3.clear.return_value = True
         inodes.add_entry(ent3)
 
         cache.min_entries = 1
 
-        # ent1, ent3 clear return false, can't be cleared
-        ent1.clear.return_value = False
-        ent3.clear.return_value = False
+        # ent1 is in use and ent3 has a kernel ref, so neither can be cleared
         ent1.clear.called = False
         ent3.clear.called = False
         self.assertFalse(ent1.clear.called)
         self.assertFalse(ent3.clear.called)
         cache.touch(ent3)
-        self.assertTrue(ent1.clear.called)
-        self.assertTrue(ent3.clear.called)
+        self.assertFalse(ent1.clear.called)
+        self.assertFalse(ent3.clear.called)
+        self.assertTrue(ent3.kernel_invalidate.called)
         self.assertEqual(1100, cache.total())
 
-        # ent1 clear return false, so ent3
-        # gets cleared
-        ent1.clear.return_value = False
-        ent3.clear.return_value = True
+        # ent1 is still in use; ent3 no longer has a ref,
+        # so ent3 gets cleared
+        ent3.has_ref.return_value = False
         ent1.clear.called = False
         ent3.clear.called = False
         cache.touch(ent3)
-        self.assertTrue(ent1.clear.called)
+        self.assertFalse(ent1.clear.called)
         self.assertTrue(ent3.clear.called)
         self.assertEqual(500, cache.total())
 
@@ -127,20 +127,19 @@ class InodeTests(unittest.TestCase):
 
         ent1 = mock.MagicMock()
         ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
         ent1.persisted.return_value = True
-        ent1.clear.return_value = True
         ent1.objsize.return_value = 500
         inodes.add_entry(ent1)
 
         ent3 = mock.MagicMock()
         ent3.in_use.return_value = False
+        ent3.has_ref.return_value = False
         ent3.persisted.return_value = True
         ent3.objsize.return_value = 600
-        ent3.clear.return_value = True
 
         # Delete ent1
         self.assertEqual(500, cache.total())
-        ent1.clear.return_value = True
         ent1.ref_count = 0
         with llfuse.lock:
             inodes.del_entry(ent1)
index e534e3273747372ce0f9ba19d7b08e9a21b3b7a8..8b6d01969a7819f52975c511f1d7a954aaaa324d 100644 (file)
@@ -23,6 +23,33 @@ from mount_test_base import MountTestBase
 logger = logging.getLogger('arvados.arv-mount')
 
 
+class AssertWithTimeout(object):
+    """Allow some time for an assertion to pass."""
+
+    def __init__(self, timeout=0):
+        self.timeout = timeout
+
+    def __iter__(self):
+        self.deadline = time.time() + self.timeout
+        self.done = False
+        return self
+
+    def next(self):
+        if self.done:
+            raise StopIteration
+        return self.attempt
+
+    def attempt(self, fn, *args, **kwargs):
+        try:
+            fn(*args, **kwargs)
+        except AssertionError:
+            if time.time() > self.deadline:
+                raise
+            time.sleep(0.1)
+        else:
+            self.done = True
+
+
 class FuseMountTest(MountTestBase):
     def setUp(self):
         super(FuseMountTest, self).setUp()
@@ -182,18 +209,18 @@ class FuseTagsUpdateTest(MountTestBase):
 
         bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']
         self.tag_collection(bar_uuid, 'fuse_test_tag')
-        time.sleep(1)
-        self.assertIn('fuse_test_tag', llfuse.listdir(self.mounttmp))
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertIn, 'fuse_test_tag', llfuse.listdir(self.mounttmp))
         self.assertDirContents('fuse_test_tag', [bar_uuid])
 
         baz_uuid = run_test_server.fixture('collections')['baz_file']['uuid']
         l = self.tag_collection(baz_uuid, 'fuse_test_tag')
-        time.sleep(1)
-        self.assertDirContents('fuse_test_tag', [bar_uuid, baz_uuid])
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid, baz_uuid])
 
         self.api.links().delete(uuid=l['uuid']).execute()
-        time.sleep(1)
-        self.assertDirContents('fuse_test_tag', [bar_uuid])
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid])
 
 
 class FuseSharedTest(MountTestBase):
@@ -713,12 +740,8 @@ class FuseUpdateFromEventTest(MountTestBase):
             with collection2.open("file1.txt", "w") as f:
                 f.write("foo")
 
-        time.sleep(1)
-
-        # should show up via event bus notify
-
-        d1 = llfuse.listdir(os.path.join(self.mounttmp))
-        self.assertEqual(["file1.txt"], sorted(d1))
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertEqual, ["file1.txt"], llfuse.listdir(os.path.join(self.mounttmp)))
 
 
 def fuseFileConflictTestHelper(mounttmp):
@@ -1155,7 +1178,7 @@ class TokenExpiryTest(MountTestBase):
             re.search(r'\+A[0-9a-f]+@([0-9a-f]+)', got_loc).group(1),
             16)
         self.assertGreaterEqual(
-            got_exp, want_exp-1,
+            got_exp, want_exp-2,
             msg='now+2w = {:x}, but fuse fetched locator {} (old_exp {:x})'.format(
                 want_exp, got_loc, old_exp))
         self.assertLessEqual(
index 2d1a59e8909bc250b2ca995775496f1839adf9f9..8fc06c3534b76054cecbfdb1116007579952bcb1 100644 (file)
@@ -6,6 +6,7 @@ import (
        "math"
        "os"
        "runtime"
+       "sort"
        "strings"
        "sync"
        "time"
@@ -50,11 +51,17 @@ type Balancer struct {
 }
 
 // Run performs a balance operation using the given config and
-// runOptions. It should only be called once on a given Balancer
-// object. Typical usage:
+// runOptions, and returns RunOptions suitable for passing to a
+// subsequent balance operation.
 //
-//   err = (&Balancer{}).Run(config, runOptions)
-func (bal *Balancer) Run(config Config, runOptions RunOptions) (err error) {
+// Run should only be called once on a given Balancer object.
+//
+// Typical usage:
+//
+//   runOptions, err = (&Balancer{}).Run(config, runOptions)
+func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
+       nextRunOptions = runOptions
+
        bal.Dumper = runOptions.Dumper
        bal.Logger = runOptions.Logger
        if bal.Logger == nil {
@@ -75,10 +82,20 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (err error) {
        if err = bal.CheckSanityEarly(&config.Client); err != nil {
                return
        }
-       if runOptions.CommitTrash {
+       rs := bal.rendezvousState()
+       if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
+               if runOptions.SafeRendezvousState != "" {
+                       bal.logf("notice: KeepServices list has changed since last run")
+               }
+               bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
                if err = bal.ClearTrashLists(&config.Client); err != nil {
                        return
                }
+               // The current rendezvous state becomes "safe" (i.e.,
+               // OK to compute changes for that state without
+               // clearing existing trash lists) only now, after we
+               // succeed in clearing existing trash lists.
+               nextRunOptions.SafeRendezvousState = rs
        }
        if err = bal.GetCurrentState(&config.Client, config.CollectionBatchSize, config.CollectionBuffers); err != nil {
                return
@@ -158,6 +175,17 @@ func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
        return nil
 }
 
+// rendezvousState returns a fingerprint (e.g., a sorted list of
+// UUID+host+port) of the current set of keep services.
+func (bal *Balancer) rendezvousState() string {
+       srvs := make([]string, 0, len(bal.KeepServices))
+       for _, srv := range bal.KeepServices {
+               srvs = append(srvs, srv.String())
+       }
+       sort.Strings(srvs)
+       return strings.Join(srvs, "; ")
+}
+
 // ClearTrashLists sends an empty trash list to each keep
 // service. Calling this before GetCurrentState avoids races.
 //
@@ -199,7 +227,7 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                return err
        }
        bal.DefaultReplication = dd.DefaultCollectionReplication
-       bal.MinMtime = time.Now().Unix() - dd.BlobSignatureTTL
+       bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
 
        errs := make(chan error, 2+len(bal.KeepServices))
        wg := sync.WaitGroup{}
@@ -216,6 +244,12 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                                errs <- fmt.Errorf("%s: %v", srv, err)
                                return
                        }
+                       if len(errs) > 0 {
+                               // Some other goroutine encountered an
+                               // error -- any further effort here
+                               // will be wasted.
+                               return
+                       }
                        bal.logf("%s: add %d replicas to map", srv, len(idx))
                        bal.BlockStateMap.AddReplicas(srv, idx)
                        bal.logf("%s: done", srv)
@@ -270,14 +304,11 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                }
        }()
 
-       go func() {
-               // Send a nil error when all goroutines finish. If
-               // this is the first error sent to errs, then
-               // everything worked.
-               wg.Wait()
-               errs <- nil
-       }()
-       return <-errs
+       wg.Wait()
+       if len(errs) > 0 {
+               return <-errs
+       }
+       return nil
 }
 
 func (bal *Balancer) addCollection(coll arvados.Collection) error {
@@ -619,7 +650,7 @@ func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *Ke
                }(srv)
        }
        var lastErr error
-       for _ = range bal.KeepServices {
+       for range bal.KeepServices {
                if err := <-errs; err != nil {
                        bal.logf("%v", err)
                        lastErr = err
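
rendezvousState above reduces the current keep service set to a canonical string: sort the per-service identifiers, then join them, so two runs over the same services always compare equal regardless of map iteration order. A standalone sketch of the idea (the service strings are made up for illustration):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // fingerprint returns a canonical string for a set of services:
    // identical sets yield identical strings, whatever the input order.
    func fingerprint(srvs []string) string {
        s := append([]string(nil), srvs...) // copy before sorting
        sort.Strings(s)
        return strings.Join(s, "; ")
    }

    func main() {
        a := fingerprint([]string{"ks1:25107", "ks0:25107"})
        b := fingerprint([]string{"ks0:25107", "ks1:25107"})
        fmt.Println(a == b) // true: same services, same fingerprint
    }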
index a138d911a3352a6edf261c6295110d1091b3d98a..23d74fe1198a4f9b91f840624b06af3ccbbe4ccb 100644 (file)
@@ -236,7 +236,7 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
-       err := (&Balancer{}).Run(s.config, opts)
+       _, err := (&Balancer{}).Run(s.config, opts)
        c.Check(err, check.ErrorMatches, "received zero collections")
        c.Check(trashReqs.Count(), check.Equals, 4)
        c.Check(pullReqs.Count(), check.Equals, 0)
@@ -254,7 +254,7 @@ func (s *runSuite) TestServiceTypes(c *check.C) {
        s.stub.serveFourDiskKeepServices()
        indexReqs := s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
-       err := (&Balancer{}).Run(s.config, opts)
+       _, err := (&Balancer{}).Run(s.config, opts)
        c.Check(err, check.IsNil)
        c.Check(indexReqs.Count(), check.Equals, 0)
        c.Check(trashReqs.Count(), check.Equals, 0)
@@ -271,7 +271,7 @@ func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
        s.stub.serveFourDiskKeepServices()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
-       err := (&Balancer{}).Run(s.config, opts)
+       _, err := (&Balancer{}).Run(s.config, opts)
        c.Check(err, check.ErrorMatches, "current user .* is not .* admin user")
        c.Check(trashReqs.Count(), check.Equals, 0)
        c.Check(pullReqs.Count(), check.Equals, 0)
@@ -289,7 +289,7 @@ func (s *runSuite) TestDetectSkippedCollections(c *check.C) {
        s.stub.serveKeepstoreIndexFoo4Bar1()
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
-       err := (&Balancer{}).Run(s.config, opts)
+       _, err := (&Balancer{}).Run(s.config, opts)
        c.Check(err, check.ErrorMatches, `Retrieved 2 collections with modtime <= .* but server now reports there are 3 collections.*`)
        c.Check(trashReqs.Count(), check.Equals, 4)
        c.Check(pullReqs.Count(), check.Equals, 0)
@@ -308,7 +308,7 @@ func (s *runSuite) TestDryRun(c *check.C) {
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
        var bal Balancer
-       err := bal.Run(s.config, opts)
+       _, err := bal.Run(s.config, opts)
        c.Check(err, check.IsNil)
        c.Check(trashReqs.Count(), check.Equals, 0)
        c.Check(pullReqs.Count(), check.Equals, 0)
@@ -332,7 +332,7 @@ func (s *runSuite) TestCommit(c *check.C) {
        trashReqs := s.stub.serveKeepstoreTrash()
        pullReqs := s.stub.serveKeepstorePull()
        var bal Balancer
-       err := bal.Run(s.config, opts)
+       _, err := bal.Run(s.config, opts)
        c.Check(err, check.IsNil)
        c.Check(trashReqs.Count(), check.Equals, 8)
        c.Check(pullReqs.Count(), check.Equals, 4)
@@ -362,13 +362,14 @@ func (s *runSuite) TestRunForever(c *check.C) {
        s.config.RunPeriod = arvados.Duration(time.Millisecond)
        go RunForever(s.config, opts, stop)
 
-       // Each run should send 4 clear trash lists + 4 pull lists + 4
-       // trash lists. We should complete four runs in much less than
+       // Each run should send 4 pull lists + 4 trash lists. The
+       // first run should also send 4 empty trash lists at
+       // startup. We should complete all four runs in much less than
        // a second.
        for t0 := time.Now(); pullReqs.Count() < 16 && time.Since(t0) < 10*time.Second; {
                time.Sleep(time.Millisecond)
        }
        stop <- true
        c.Check(pullReqs.Count() >= 16, check.Equals, true)
-       c.Check(trashReqs.Count(), check.Equals, 2*pullReqs.Count())
+       c.Check(trashReqs.Count(), check.Equals, pullReqs.Count()+4)
 }
index 682a5fb070cf0ab7e8a2b0fff0bb92a750622e06..b93939c0526d3c1f8bb7da93fe1f9915ad74c6cc 100644 (file)
@@ -76,7 +76,7 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                bal.KeepServices[srv.UUID] = srv
        }
 
-       bal.MinMtime = time.Now().Unix() - bal.signatureTTL
+       bal.MinMtime = time.Now().UnixNano() - bal.signatureTTL*1e9
 }
 
 func (bal *balancerSuite) TestPerfect(c *check.C) {
@@ -240,7 +240,7 @@ func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepSe
 // replList is like srvList but returns an "existing replicas" slice,
 // suitable for a BlockState test fixture.
 func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
-       mtime := time.Now().Unix() - bal.signatureTTL - 86400
+       mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
        for _, srv := range bal.srvList(knownBlockID, order) {
                repls = append(repls, Replica{srv, mtime})
                mtime++
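
The MinMtime changes in these tests mirror the balancer change above: block mtimes are now compared in nanoseconds, so the TTL, still expressed in seconds, is scaled by 1e9. A sketch of the arithmetic (the TTL value is illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var signatureTTL int64 = 1209600 // e.g., two weeks, in seconds
        // Blocks with mtime >= minMtime are too new to trash safely.
        minMtime := time.Now().UnixNano() - signatureTTL*1e9
        fmt.Println(minMtime)
    }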
index b090614607ceed2a6e2bb7e66354644843186a34..148b783788e1df83afd75fdb2ed2615d60e76b84 100644 (file)
@@ -36,7 +36,7 @@ func (s *integrationSuite) SetUpSuite(c *check.C) {
        arv.ApiToken = arvadostest.DataManagerToken
        c.Assert(err, check.IsNil)
        s.keepClient = &keepclient.KeepClient{
-               Arvados: &arv,
+               Arvados: arv,
                Client:  &http.Client{},
        }
        c.Assert(s.keepClient.DiscoverKeepServers(), check.IsNil)
@@ -78,8 +78,10 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
                        CommitTrash: true,
                        Logger:      log.New(logBuf, "", log.LstdFlags),
                }
-               err := (&Balancer{}).Run(s.config, opts)
+               nextOpts, err := (&Balancer{}).Run(s.config, opts)
                c.Check(err, check.IsNil)
+               c.Check(nextOpts.SafeRendezvousState, check.Not(check.Equals), "")
+               c.Check(nextOpts.CommitPulls, check.Equals, true)
                if iter == 0 {
                        c.Check(logBuf.String(), check.Matches, `(?ms).*ChangeSet{Pulls:1.*`)
                        c.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*ChangeSet{.*Trashes:[^0]}*`)
diff --git a/services/keep-balance/keep-balance.service b/services/keep-balance/keep-balance.service
new file mode 100644 (file)
index 0000000..157e42c
--- /dev/null
@@ -0,0 +1,14 @@
+[Unit]
+Description=Arvados Keep Balance
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keep-balance/keep-balance.yml
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/keep-balance -config /etc/arvados/keep-balance/keep-balance.yml -commit-pulls -commit-trash
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
index 364bb3ffd3f6b437ec1d46edf52223539fb1161b..60349e91d8b9064c9715db9c701cd9b11b9ef8d3 100644 (file)
@@ -3,7 +3,6 @@ package main
 import (
        "encoding/json"
        "flag"
-       "io/ioutil"
        "log"
        "os"
        "os/signal"
@@ -11,6 +10,7 @@ import (
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
 )
 
 // Config specifies site configuration, like API credentials and the
@@ -51,6 +51,12 @@ type RunOptions struct {
        CommitTrash bool
        Logger      *log.Logger
        Dumper      *log.Logger
+
+       // SafeRendezvousState from the most recent balance operation,
+       // or "" if unknown. If this changes from one run to the next,
+       // we need to watch out for races. See
+       // (*Balancer)ClearTrashLists.
+       // (*Balancer).ClearTrashLists.
 }
 
 var debugf = func(string, ...interface{}) {}
@@ -60,9 +66,9 @@ func main() {
        var runOptions RunOptions
 
        configPath := flag.String("config", "",
-               "`path` of json configuration file")
+               "`path` of JSON or YAML configuration file")
        serviceListPath := flag.String("config.KeepServiceList", "",
-               "`path` of json file with list of keep services to balance, as given by \"arv keep_service list\" "+
+               "`path` of JSON or YAML file with list of keep services to balance, as given by \"arv keep_service list\" "+
                        "(default: config[\"KeepServiceList\"], or if none given, get all available services and filter by config[\"KeepServiceTypes\"])")
        flag.BoolVar(&runOptions.Once, "once", false,
                "balance once and then exit")
@@ -78,9 +84,9 @@ func main() {
        if *configPath == "" {
                log.Fatal("You must specify a config file (see `keep-balance -help`)")
        }
-       mustReadJSON(&config, *configPath)
+       mustReadConfig(&config, *configPath)
        if *serviceListPath != "" {
-               mustReadJSON(&config.KeepServiceList, *serviceListPath)
+               mustReadConfig(&config.KeepServiceList, *serviceListPath)
        }
 
        if *debugFlag {
@@ -98,7 +104,7 @@ func main() {
        if err != nil {
                // (don't run)
        } else if runOptions.Once {
-               err = (&Balancer{}).Run(config, runOptions)
+               _, err = (&Balancer{}).Run(config, runOptions)
        } else {
                err = RunForever(config, runOptions, nil)
        }
@@ -107,11 +113,9 @@ func main() {
        }
 }
 
-func mustReadJSON(dst interface{}, path string) {
-       if buf, err := ioutil.ReadFile(path); err != nil {
-               log.Fatalf("Reading %q: %v", path, err)
-       } else if err = json.Unmarshal(buf, dst); err != nil {
-               log.Fatalf("Decoding %q: %v", path, err)
+func mustReadConfig(dst interface{}, path string) {
+       if err := config.LoadFile(dst, path); err != nil {
+               log.Fatal(err)
        }
 }
 
@@ -138,7 +142,9 @@ func RunForever(config Config, runOptions RunOptions, stop <-chan interface{}) e
                        logger.Print("=======  Consider using -commit-pulls and -commit-trash flags.")
                }
 
-               err := (&Balancer{}).Run(config, runOptions)
+               bal := &Balancer{}
+               var err error
+               runOptions, err = bal.Run(config, runOptions)
                if err != nil {
                        logger.Print("run failed: ", err)
                } else {
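
Since Run now returns the options for the next run, RunForever can carry SafeRendezvousState forward and skip clearing trash lists when the service set is unchanged. A stripped-down sketch of that handoff; runOnce and runOptions are stand-ins for the real Balancer types:

    package main

    import "fmt"

    type runOptions struct{ safeState string }

    // runOnce stands in for (*Balancer).Run: it returns the options
    // the next iteration should start from.
    func runOnce(opts runOptions, state string) (runOptions, error) {
        next := opts
        if opts.safeState != state {
            fmt.Println("service set changed; clearing trash lists first")
            next.safeState = state // recorded only after clearing succeeds
        }
        return next, nil
    }

    func main() {
        opts := runOptions{}
        for i := 0; i < 3; i++ {
            var err error
            if opts, err = runOnce(opts, "ks0; ks1"); err != nil {
                fmt.Println("run failed:", err)
            }
        }
    }

Threading the returned options back in means the "safe" state survives only when clearing actually succeeded, which is the race the comment in balance.go describes.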
index 4a56098b29d1dbb7d8352d02c8ee0b492a6aea76..157ee4574b99b23195917308d73f343716371e2a 100644 (file)
@@ -1,9 +1,9 @@
 package main
 
 import (
-       "encoding/json"
        "time"
 
+       "github.com/ghodss/yaml"
        check "gopkg.in/check.v1"
 )
 
@@ -13,7 +13,7 @@ type mainSuite struct{}
 
 func (s *mainSuite) TestExampleJSON(c *check.C) {
        var config Config
-       c.Check(json.Unmarshal(exampleConfigFile, &config), check.IsNil)
+       c.Check(yaml.Unmarshal(exampleConfigFile, &config), check.IsNil)
        c.Check(config.KeepServiceTypes, check.DeepEquals, []string{"disk"})
        c.Check(config.Client.AuthToken, check.Equals, "xyzzy")
        c.Check(time.Duration(config.RunPeriod), check.Equals, 600*time.Second)
@@ -21,8 +21,7 @@ func (s *mainSuite) TestExampleJSON(c *check.C) {
 
 func (s *mainSuite) TestConfigJSONWithKeepServiceList(c *check.C) {
        var config Config
-       c.Check(json.Unmarshal([]byte(`
-               {
+       c.Check(yaml.Unmarshal([]byte(`{
                    "Client": {
                        "APIHost": "zzzzz.arvadosapi.com:443",
                        "AuthToken": "xyzzy",
index b521c652cc6766f901c05513ade1197f527fca6d..d11201047bff92752c89cfd3a3183b6f08dd3371 100644 (file)
@@ -7,19 +7,15 @@ import (
 )
 
 var exampleConfigFile = []byte(`
-    {
-       "Client": {
-           "APIHost": "zzzzz.arvadosapi.com:443",
-           "AuthToken": "xyzzy",
-           "Insecure": false
-       },
-       "KeepServiceTypes": [
-           "disk"
-       ],
-       "RunPeriod": "600s",
-       "CollectionBatchSize": 100000,
-       "CollectionBuffers": 1000
-    }`)
+Client:
+    APIHost: zzzzz.arvadosapi.com:443
+    AuthToken: xyzzy
+    Insecure: false
+KeepServiceTypes:
+    - disk
+RunPeriod: 600s
+CollectionBatchSize: 100000
+CollectionBuffers: 1000`)
 
 func usage() {
        fmt.Fprintf(os.Stderr, `
@@ -30,7 +26,7 @@ overreplicated and unreferenced blocks, and moves blocks to better
 positions (according to the rendezvous hash algorithm) so clients find
 them faster.
 
-Usage: keep-balance -config path/to/config.json [options]
+Usage: keep-balance -config path/to/keep-balance.yml [options]
 
 Options:
 `)
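
The example config is now YAML, and because github.com/ghodss/yaml (used by the tests above) also accepts JSON, existing JSON config files keep working. A small sketch of that round trip; the struct is a hypothetical subset of the real Config:

    package main

    import (
        "fmt"

        "github.com/ghodss/yaml"
    )

    type conf struct {
        RunPeriod        string
        KeepServiceTypes []string
    }

    func main() {
        for _, src := range []string{
            "RunPeriod: 600s\nKeepServiceTypes:\n    - disk\n",    // YAML
            `{"RunPeriod": "600s", "KeepServiceTypes": ["disk"]}`, // JSON
        } {
            var c conf
            if err := yaml.Unmarshal([]byte(src), &c); err != nil {
                panic(err)
            }
            fmt.Println(c.RunPeriod, c.KeepServiceTypes) // same result for both
        }
    }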
diff --git a/services/keep-web/anonymous.go b/services/keep-web/anonymous.go
deleted file mode 100644 (file)
index 15a98c2..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-package main
-
-import (
-       "flag"
-       "fmt"
-       "os"
-       "strconv"
-)
-
-var anonymousTokens tokenSet
-
-type tokenSet []string
-
-func (ts *tokenSet) Set(s string) error {
-       v, err := strconv.ParseBool(s)
-       if v && len(*ts) == 0 {
-               *ts = append(*ts, os.Getenv("ARVADOS_API_TOKEN"))
-       } else if !v {
-               *ts = (*ts)[:0]
-       }
-       return err
-}
-
-func (ts *tokenSet) String() string {
-       return fmt.Sprintf("%v", len(*ts) > 0)
-}
-
-func (ts *tokenSet) IsBoolFlag() bool {
-       return true
-}
-
-func init() {
-       flag.Var(&anonymousTokens, "allow-anonymous",
-               "Serve public data to anonymous clients. Try the token supplied in the ARVADOS_API_TOKEN environment variable when none of the tokens provided in an HTTP request succeed in reading the desired collection.")
-}
index 9ca732f01af40e4fd4206677c0fcc7838db1ff10..3326dd19d853cd95833341fb2adeeddcf3acaec6 100644 (file)
@@ -6,17 +6,33 @@
 //
 // See http://doc.arvados.org/install/install-keep-web.html.
 //
-// Run "keep-web -help" to show all supported options.
+// Configuration
+//
+// The default configuration file location is
+// /etc/arvados/keep-web/keep-web.yml.
+//
+// Example configuration file
+//
+//     Client:
+//       APIHost: "zzzzz.arvadosapi.com:443"
+//       AuthToken: ""
+//       Insecure: false
+//     Listen: :1234
+//     AnonymousTokens:
+//       - xxxxxxxxxxxxxxxxxxxx
+//     AttachmentOnlyHost: ""
+//     TrustAllContent: false
 //
 // Starting the server
 //
-// Serve HTTP requests at port 1234 on all interfaces:
+// Start a server using the default config file
+// /etc/arvados/keep-web/keep-web.yml:
 //
-//   keep-web -listen=:1234
+//   keep-web
 //
-// Serve HTTP requests at port 1234 on the interface with IP address 1.2.3.4:
+// Start a server using the config file /path/to/keep-web.yml:
 //
-//   keep-web -listen=1.2.3.4:1234
+//   keep-web -config /path/to/keep-web.yml
 //
 // Proxy configuration
 //
 //
 // Anonymous downloads
 //
-// Use the -allow-anonymous flag with an ARVADOS_API_TOKEN environment
-// variable to specify a token to use when clients try to retrieve
-// files without providing their own Arvados API token.
+// The "AnonymousTokens" configuration entry is an array of tokens to
+// use when processing anonymous requests, i.e., whenever a web client
+// does not supply its own Arvados API token via path, query string,
+// cookie, or request header.
 //
-//   export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
-//   keep-web [...] -allow-anonymous
+//   "AnonymousTokens":["xxxxxxxxxxxxxxxxxxxxxxx"]
 //
 // See http://doc.arvados.org/install/install-keep-web.html for examples.
 //
 // only when the designated origin matches exactly the Host header
 // provided by the client or downstream proxy.
 //
-//   keep-web -listen :9999 -attachment-only-host domain.example:9999
+//   "AttachmentOnlyHost":"domain.example:9999"
 //
 // Trust All Content mode
 //
-// In "trust all content" mode, Keep-web will accept credentials (API
+// In TrustAllContent mode, Keep-web will accept credentials (API
 // tokens) and serve any collection X at
-// "https://collections.example.com/c=X/path/file.ext".
-// This is UNSAFE except in the special case where everyone who is
-// able write ANY data to Keep, and every JavaScript and HTML file
-// written to Keep, is also trusted to read ALL of the data in Keep.
+// "https://collections.example.com/c=X/path/file.ext".  This is
+// UNSAFE except in the special case where everyone who is able to
+// write ANY data to Keep, and every JavaScript and HTML file written
+// to Keep, is also trusted to read ALL of the data in Keep.
 //
 // In such cases you can enable trust-all-content mode.
 //
-//   keep-web -listen :9999 -trust-all-content
+//   "TrustAllContent":true
 //
-// When using trust-all-content mode, the only effect of the
-// -attachment-only-host option is to add a "Content-Disposition:
+// When TrustAllContent is enabled, the only effect of the
+// AttachmentOnlyHost setting is to add a "Content-Disposition:
 // attachment" header.
 //
-//   keep-web -listen :9999 -attachment-only-host domain.example:9999 -trust-all-content
+//   "AttachmentOnlyHost":"domain.example:9999",
+//   "TrustAllContent":true
 //
 // Depending on your site configuration, you might also want to enable
-// "trust all content" setting on Workbench. Normally, Workbench
+// the "trust all content" setting in Workbench. Normally, Workbench
 // avoids redirecting requests to keep-web if they depend on
-// -trust-all-content being set.
+// TrustAllContent being enabled.
 //
 package main
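
Given a file like the example above, loading it is a single call to the sdk config package that keep-balance now uses as well. A minimal sketch, assuming a Config struct along the lines of the one defined in keep-web's main.go:

    package main

    import (
        "log"

        "git.curoverse.com/arvados.git/sdk/go/config"
    )

    type Config struct {
        Listen             string
        AnonymousTokens    []string
        AttachmentOnlyHost string
        TrustAllContent    bool
    }

    func main() {
        var cfg Config
        // LoadFile accepts either YAML or JSON on disk.
        if err := config.LoadFile(&cfg, "/etc/arvados/keep-web/keep-web.yml"); err != nil {
            log.Fatal(err)
        }
        log.Printf("listening on %s", cfg.Listen)
    }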
index 6f5f66ae0ef1bf57979f04189fe4d110818b1bd6..11d0d96b298de5e4369474418f9f78583634510e 100644 (file)
@@ -1,7 +1,6 @@
 package main
 
 import (
-       "flag"
        "fmt"
        "html"
        "io"
@@ -12,6 +11,7 @@ import (
        "regexp"
        "strconv"
        "strings"
+       "sync"
 
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/auth"
@@ -19,23 +19,14 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
 )
 
-type handler struct{}
-
-var (
-       clientPool         = arvadosclient.MakeClientPool()
-       trustAllContent    = false
-       attachmentOnlyHost = ""
-)
-
-func init() {
-       flag.StringVar(&attachmentOnlyHost, "attachment-only-host", "",
-               "Accept credentials, and add \"Content-Disposition: attachment\" response headers, for requests at this hostname:port. Prohibiting inline display makes it possible to serve untrusted and non-public content from a single origin, i.e., without wildcard DNS or SSL.")
-       flag.BoolVar(&trustAllContent, "trust-all-content", false,
-               "Serve non-public content from a single origin. Dangerous: read docs before using!")
+type handler struct {
+       Config     *Config
+       clientPool *arvadosclient.ClientPool
+       setupOnce  sync.Once
 }
 
-// return a UUID or PDH if s begins with a UUID or URL-encoded PDH;
-// otherwise return "".
+// parseCollectionIDFromDNSName returns a UUID or PDH if s begins with
+// a UUID or URL-encoded PDH; otherwise "".
 func parseCollectionIDFromDNSName(s string) string {
        // Strip domain.
        if i := strings.IndexRune(s, '.'); i >= 0 {
@@ -58,8 +49,9 @@ func parseCollectionIDFromDNSName(s string) string {
 
 var urlPDHDecoder = strings.NewReplacer(" ", "+", "-", "+")
 
-// return a UUID or PDH if s is a UUID or a PDH (even if it is a PDH
-// with "+" replaced by " " or "-"); otherwise return "".
+// parseCollectionIDFromURL returns a UUID or PDH if s is a UUID or a
+// PDH (even if it is a PDH with "+" replaced by " " or "-");
+// otherwise "".
 func parseCollectionIDFromURL(s string) string {
        if arvadosclient.UUIDMatch(s) {
                return s
@@ -70,7 +62,14 @@ func parseCollectionIDFromURL(s string) string {
        return ""
 }
 
+func (h *handler) setup() {
+       h.clientPool = arvadosclient.MakeClientPool()
+}
+
+// ServeHTTP implements http.Handler.
 func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
+       h.setupOnce.Do(h.setup)
+
        var statusCode = 0
        var statusText string
 
@@ -109,12 +108,12 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                w.Header().Set("Access-Control-Allow-Origin", "*")
        }
 
-       arv := clientPool.Get()
+       arv := h.clientPool.Get()
        if arv == nil {
-               statusCode, statusText = http.StatusInternalServerError, "Pool failed: "+clientPool.Err().Error()
+               statusCode, statusText = http.StatusInternalServerError, "Pool failed: "+h.clientPool.Err().Error()
                return
        }
-       defer clientPool.Put(arv)
+       defer h.clientPool.Put(arv)
 
        pathParts := strings.Split(r.URL.Path[1:], "/")
 
@@ -124,9 +123,9 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        var reqTokens []string
        var pathToken bool
        var attachment bool
-       credentialsOK := trustAllContent
+       credentialsOK := h.Config.TrustAllContent
 
-       if r.Host != "" && r.Host == attachmentOnlyHost {
+       if r.Host != "" && r.Host == h.Config.AttachmentOnlyHost {
                credentialsOK = true
                attachment = true
        } else if r.FormValue("disposition") == "attachment" {
@@ -151,7 +150,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                } else {
                        // /collections/ID/PATH...
                        targetID = pathParts[1]
-                       tokens = anonymousTokens
+                       tokens = h.Config.AnonymousTokens
                        targetPath = pathParts[2:]
                }
        } else {
@@ -186,7 +185,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                        // It is not safe to copy the provided token
                        // into a cookie unless the current vhost
                        // (origin) serves only a single collection or
-                       // we are in trustAllContent mode.
+                       // we are in TrustAllContent mode.
                        statusCode = http.StatusBadRequest
                        return
                }
@@ -246,7 +245,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                if credentialsOK {
                        reqTokens = auth.NewCredentialsFromHTTPRequest(r).Tokens
                }
-               tokens = append(reqTokens, anonymousTokens...)
+               tokens = append(reqTokens, h.Config.AnonymousTokens...)
        }
 
        if len(targetPath) > 0 && targetPath[0] == "_" {
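
Moving the client pool and former flag globals into the handler struct, with setup guarded by sync.Once, keeps initialization lazy while letting each test build an independent handler with its own Config. A pared-down sketch of the pattern; the types here are placeholder stand-ins, not the real ones:

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "sync"
    )

    // Config and handler are minimal stand-ins for the real types.
    type Config struct{ AnonymousTokens []string }

    type handler struct {
        Config    *Config
        setupOnce sync.Once
    }

    func (h *handler) setup() {
        // once-per-handler initialization (e.g., building a client pool)
    }

    func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        h.setupOnce.Do(h.setup) // safe under concurrent requests
        // Tokens supplied with the request are tried before the
        // configured anonymous tokens, mirroring the append in the
        // real handler.
        reqTokens := []string{}
        tokens := append(reqTokens, h.Config.AnonymousTokens...)
        fmt.Fprintln(w, len(tokens), "tokens to try")
    }

    func main() {
        h := &handler{Config: &Config{AnonymousTokens: []string{"anon-token"}}}
        log.Fatal(http.ListenAndServe(":1234", h))
    }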
index d04c5c2d100feff32d9bced628f54bdfd8e51e0a..b3e17e8b61db0a4f3dd88b48ae82e3c6f6579673 100644 (file)
@@ -38,7 +38,7 @@ func (s *IntegrationSuite) TestVhost404(c *check.C) {
                        URL:        u,
                        RequestURI: u.RequestURI(),
                }
-               (&handler{}).ServeHTTP(resp, req)
+               s.testServer.Handler.ServeHTTP(resp, req)
                c.Check(resp.Code, check.Equals, http.StatusNotFound)
                c.Check(resp.Body.String(), check.Equals, "")
        }
@@ -51,7 +51,7 @@ func (s *IntegrationSuite) TestVhost404(c *check.C) {
 type authorizer func(*http.Request, string) int
 
 func (s *IntegrationSuite) TestVhostViaAuthzHeader(c *check.C) {
-       doVhostRequests(c, authzViaAuthzHeader)
+       s.doVhostRequests(c, authzViaAuthzHeader)
 }
 func authzViaAuthzHeader(r *http.Request, tok string) int {
        r.Header.Add("Authorization", "OAuth2 "+tok)
@@ -59,7 +59,7 @@ func authzViaAuthzHeader(r *http.Request, tok string) int {
 }
 
 func (s *IntegrationSuite) TestVhostViaCookieValue(c *check.C) {
-       doVhostRequests(c, authzViaCookieValue)
+       s.doVhostRequests(c, authzViaCookieValue)
 }
 func authzViaCookieValue(r *http.Request, tok string) int {
        r.AddCookie(&http.Cookie{
@@ -70,7 +70,7 @@ func authzViaCookieValue(r *http.Request, tok string) int {
 }
 
 func (s *IntegrationSuite) TestVhostViaPath(c *check.C) {
-       doVhostRequests(c, authzViaPath)
+       s.doVhostRequests(c, authzViaPath)
 }
 func authzViaPath(r *http.Request, tok string) int {
        r.URL.Path = "/t=" + tok + r.URL.Path
@@ -78,7 +78,7 @@ func authzViaPath(r *http.Request, tok string) int {
 }
 
 func (s *IntegrationSuite) TestVhostViaQueryString(c *check.C) {
-       doVhostRequests(c, authzViaQueryString)
+       s.doVhostRequests(c, authzViaQueryString)
 }
 func authzViaQueryString(r *http.Request, tok string) int {
        r.URL.RawQuery = "api_token=" + tok
@@ -86,7 +86,7 @@ func authzViaQueryString(r *http.Request, tok string) int {
 }
 
 func (s *IntegrationSuite) TestVhostViaPOST(c *check.C) {
-       doVhostRequests(c, authzViaPOST)
+       s.doVhostRequests(c, authzViaPOST)
 }
 func authzViaPOST(r *http.Request, tok string) int {
        r.Method = "POST"
@@ -97,7 +97,7 @@ func authzViaPOST(r *http.Request, tok string) int {
 }
 
 func (s *IntegrationSuite) TestVhostViaXHRPOST(c *check.C) {
-       doVhostRequests(c, authzViaPOST)
+       s.doVhostRequests(c, authzViaPOST)
 }
 func authzViaXHRPOST(r *http.Request, tok string) int {
        r.Method = "POST"
@@ -113,7 +113,7 @@ func authzViaXHRPOST(r *http.Request, tok string) int {
 
 // Try some combinations of {url, token} using the given authorization
 // mechanism, and verify the result is correct.
-func doVhostRequests(c *check.C, authz authorizer) {
+func (s *IntegrationSuite) doVhostRequests(c *check.C, authz authorizer) {
        for _, hostPath := range []string{
                arvadostest.FooCollection + ".example.com/foo",
                arvadostest.FooCollection + "--collections.example.com/foo",
@@ -123,11 +123,11 @@ func doVhostRequests(c *check.C, authz authorizer) {
                arvadostest.FooBarDirCollection + ".example.com/dir1/foo",
        } {
                c.Log("doRequests: ", hostPath)
-               doVhostRequestsWithHostPath(c, authz, hostPath)
+               s.doVhostRequestsWithHostPath(c, authz, hostPath)
        }
 }
 
-func doVhostRequestsWithHostPath(c *check.C, authz authorizer, hostPath string) {
+func (s *IntegrationSuite) doVhostRequestsWithHostPath(c *check.C, authz authorizer, hostPath string) {
        for _, tok := range []string{
                arvadostest.ActiveToken,
                arvadostest.ActiveToken[:15],
@@ -144,7 +144,7 @@ func doVhostRequestsWithHostPath(c *check.C, authz authorizer, hostPath string)
                        Header:     http.Header{},
                }
                failCode := authz(req, tok)
-               req, resp := doReq(req)
+               req, resp := s.doReq(req)
                code, body := resp.Code, resp.Body.String()
 
                // If the initial request had a (non-empty) token
@@ -173,9 +173,9 @@ func doVhostRequestsWithHostPath(c *check.C, authz authorizer, hostPath string)
        }
 }
 
-func doReq(req *http.Request) (*http.Request, *httptest.ResponseRecorder) {
+func (s *IntegrationSuite) doReq(req *http.Request) (*http.Request, *httptest.ResponseRecorder) {
        resp := httptest.NewRecorder()
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        if resp.Code != http.StatusSeeOther {
                return req, resp
        }
@@ -191,7 +191,7 @@ func doReq(req *http.Request) (*http.Request, *httptest.ResponseRecorder) {
        for _, c := range cookies {
                req.AddCookie(c)
        }
-       return doReq(req)
+       return s.doReq(req)
 }
 
 func (s *IntegrationSuite) TestVhostRedirectQueryTokenToCookie(c *check.C) {
@@ -270,10 +270,7 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenRequestAttachment(c *check
 }
 
 func (s *IntegrationSuite) TestVhostRedirectQueryTokenTrustAllContent(c *check.C) {
-       defer func(orig bool) {
-               trustAllContent = orig
-       }(trustAllContent)
-       trustAllContent = true
+       s.testServer.Config.TrustAllContent = true
        s.testVhostRedirectTokenToCookie(c, "GET",
                "example.com/c="+arvadostest.FooCollection+"/foo",
                "?api_token="+arvadostest.ActiveToken,
@@ -285,10 +282,7 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenTrustAllContent(c *check.C
 }
 
 func (s *IntegrationSuite) TestVhostRedirectQueryTokenAttachmentOnlyHost(c *check.C) {
-       defer func(orig string) {
-               attachmentOnlyHost = orig
-       }(attachmentOnlyHost)
-       attachmentOnlyHost = "example.com:1234"
+       s.testServer.Config.AttachmentOnlyHost = "example.com:1234"
 
        s.testVhostRedirectTokenToCookie(c, "GET",
                "example.com/c="+arvadostest.FooCollection+"/foo",
@@ -333,7 +327,7 @@ func (s *IntegrationSuite) TestVhostRedirectPOSTFormTokenToCookie404(c *check.C)
 }
 
 func (s *IntegrationSuite) TestAnonymousTokenOK(c *check.C) {
-       anonymousTokens = []string{arvadostest.AnonymousToken}
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
        s.testVhostRedirectTokenToCookie(c, "GET",
                "example.com/c="+arvadostest.HelloWorldCollection+"/Hello%20world.txt",
                "",
@@ -345,7 +339,7 @@ func (s *IntegrationSuite) TestAnonymousTokenOK(c *check.C) {
 }
 
 func (s *IntegrationSuite) TestAnonymousTokenError(c *check.C) {
-       anonymousTokens = []string{"anonymousTokenConfiguredButInvalid"}
+       s.testServer.Config.AnonymousTokens = []string{"anonymousTokenConfiguredButInvalid"}
        s.testVhostRedirectTokenToCookie(c, "GET",
                "example.com/c="+arvadostest.HelloWorldCollection+"/Hello%20world.txt",
                "",
@@ -357,6 +351,7 @@ func (s *IntegrationSuite) TestAnonymousTokenError(c *check.C) {
 }
 
 func (s *IntegrationSuite) TestRange(c *check.C) {
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
        u, _ := url.Parse("http://example.com/c=" + arvadostest.HelloWorldCollection + "/Hello%20world.txt")
        req := &http.Request{
                Method:     "GET",
@@ -366,7 +361,7 @@ func (s *IntegrationSuite) TestRange(c *check.C) {
                Header:     http.Header{"Range": {"bytes=0-4"}},
        }
        resp := httptest.NewRecorder()
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        c.Check(resp.Code, check.Equals, http.StatusPartialContent)
        c.Check(resp.Body.String(), check.Equals, "Hello")
        c.Check(resp.Header().Get("Content-Length"), check.Equals, "5")
@@ -374,7 +369,7 @@ func (s *IntegrationSuite) TestRange(c *check.C) {
 
        req.Header.Set("Range", "bytes=0-")
        resp = httptest.NewRecorder()
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        // 200 and 206 are both correct:
        c.Check(resp.Code, check.Equals, http.StatusOK)
        c.Check(resp.Body.String(), check.Equals, "Hello world\n")
@@ -389,7 +384,7 @@ func (s *IntegrationSuite) TestRange(c *check.C) {
        } {
                req.Header.Set("Range", hdr)
                resp = httptest.NewRecorder()
-               (&handler{}).ServeHTTP(resp, req)
+               s.testServer.Handler.ServeHTTP(resp, req)
                c.Check(resp.Code, check.Equals, http.StatusOK)
                c.Check(resp.Body.String(), check.Equals, "Hello world\n")
                c.Check(resp.Header().Get("Content-Length"), check.Equals, "12")
@@ -420,7 +415,7 @@ func (s *IntegrationSuite) TestXHRNoRedirect(c *check.C) {
                }.Encode())),
        }
        resp := httptest.NewRecorder()
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        c.Check(resp.Code, check.Equals, http.StatusOK)
        c.Check(resp.Body.String(), check.Equals, "foo")
        c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
@@ -443,7 +438,7 @@ func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, ho
                c.Check(resp.Body.String(), check.Equals, expectRespBody)
        }()
 
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        if resp.Code != http.StatusSeeOther {
                return resp
        }
@@ -463,7 +458,7 @@ func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, ho
        }
 
        resp = httptest.NewRecorder()
-       (&handler{}).ServeHTTP(resp, req)
+       s.testServer.Handler.ServeHTTP(resp, req)
        c.Check(resp.Header().Get("Location"), check.Equals, "")
        return resp
 }
diff --git a/services/keep-web/keep-web.service b/services/keep-web/keep-web.service
new file mode 100644 (file)
index 0000000..24be771
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Arvados Keep web gateway
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keep-web/keep-web.yml
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/keep-web
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
index 135f01b394720efba18dc8082744637bfaf3a7c1..13d8c1b329ab6583fec2a3d4f281d61a55036ca2 100644 (file)
@@ -4,8 +4,38 @@ import (
        "flag"
        "log"
        "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/coreos/go-systemd/daemon"
+)
+
+var (
+       defaultConfigPath = "/etc/arvados/keep-web/keep-web.yml"
 )
 
+// Config specifies server configuration.
+type Config struct {
+       Client arvados.Client
+
+       Listen string
+
+       AnonymousTokens    []string
+       AttachmentOnlyHost string
+       TrustAllContent    bool
+
+       // Hack to support old command line flag, which is a bool
+       // meaning "get actual token from environment".
+       deprecatedAllowAnonymous bool
+}
+
+// DefaultConfig returns the default configuration.
+func DefaultConfig() *Config {
+       return &Config{
+               Listen: ":80",
+       }
+}
+
 func init() {
        // MakeArvadosClient returns an error if this env var isn't
        // available as a default token (even if we explicitly set a
@@ -18,14 +48,44 @@ func init() {
 }
 
 func main() {
+       cfg := DefaultConfig()
+
+       var configPath string
+       deprecated := " (DEPRECATED -- use config file instead)"
+       flag.StringVar(&configPath, "config", defaultConfigPath,
+               "`path` to JSON or YAML configuration file")
+       flag.StringVar(&cfg.Listen, "listen", "",
+               "address:port or :port to listen on"+deprecated)
+       flag.BoolVar(&cfg.deprecatedAllowAnonymous, "allow-anonymous", false,
+               "Load an anonymous token from the ARVADOS_API_TOKEN environment variable"+deprecated)
+       flag.StringVar(&cfg.AttachmentOnlyHost, "attachment-only-host", "",
+               "Only serve attachments at the given `host:port`"+deprecated)
+       flag.BoolVar(&cfg.TrustAllContent, "trust-all-content", false,
+               "Serve non-public content from a single origin. Dangerous: read docs before using!"+deprecated)
+       flag.Usage = usage
        flag.Parse()
-       if os.Getenv("ARVADOS_API_HOST") == "" {
-               log.Fatal("ARVADOS_API_HOST environment variable must be set.")
+
+       if err := config.LoadFile(cfg, configPath); err != nil {
+               if h := os.Getenv("ARVADOS_API_HOST"); h != "" && configPath == defaultConfigPath {
+                       log.Printf("DEPRECATED: Using ARVADOS_API_HOST environment variable. Use config file instead.")
+                       cfg.Client.APIHost = h
+               } else {
+                       log.Fatal(err)
+               }
+       }
+       if cfg.deprecatedAllowAnonymous {
+               log.Printf("DEPRECATED: Using -allow-anonymous command line flag with ARVADOS_API_TOKEN environment variable. Use config file instead.")
+               cfg.AnonymousTokens = []string{os.Getenv("ARVADOS_API_TOKEN")}
        }
-       srv := &server{}
+
+       os.Setenv("ARVADOS_API_HOST", cfg.Client.APIHost)
+       srv := &server{Config: cfg}
        if err := srv.Start(); err != nil {
                log.Fatal(err)
        }
+       if _, err := daemon.SdNotify("READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
        log.Println("Listening at", srv.Addr)
        if err := srv.Wait(); err != nil {
                log.Fatal(err)
index 100900830f5d6808563a928fdd3e501ca660d501..babc68b6b59d37a14f17ac26f8cc49dc1485b4ca 100644 (file)
@@ -1,27 +1,16 @@
 package main
 
 import (
-       "flag"
-       "net/http"
-
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
 )
 
-var address string
-
-func init() {
-       flag.StringVar(&address, "listen", ":80",
-               "Address to listen on: \"host:port\", or \":port\" to listen on all interfaces.")
-}
-
 type server struct {
        httpserver.Server
+       Config *Config
 }
 
 func (srv *server) Start() error {
-       mux := http.NewServeMux()
-       mux.Handle("/", &handler{})
-       srv.Handler = mux
-       srv.Addr = address
+       srv.Handler = &handler{Config: srv.Config}
+       srv.Addr = srv.Config.Listen
        return srv.Server.Start()
 }
index 324588a29a11db72f8c30cfafff7095480db2822..6441364e99fcc93d4da26f4c1f6fe150c740be7b 100644 (file)
@@ -6,16 +6,20 @@ import (
        "io"
        "io/ioutil"
        "net"
+       "os"
        "os/exec"
        "strings"
        "testing"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        check "gopkg.in/check.v1"
 )
 
+var testAPIHost = os.Getenv("ARVADOS_API_HOST")
+
 var _ = check.Suite(&IntegrationSuite{})
 
 // IntegrationSuite tests need an API server and a keep-web server
@@ -102,7 +106,7 @@ func (s *IntegrationSuite) test100BlockFile(c *check.C, blocksize int) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, check.Equals, nil)
        arv.ApiToken = arvadostest.ActiveToken
-       kc, err := keepclient.MakeKeepClient(&arv)
+       kc, err := keepclient.MakeKeepClient(arv)
        c.Assert(err, check.Equals, nil)
        loc, _, err := kc.PutB(testdata[:])
        c.Assert(err, check.Equals, nil)
@@ -137,7 +141,7 @@ type curlCase struct {
 }
 
 func (s *IntegrationSuite) Test200(c *check.C) {
-       anonymousTokens = []string{arvadostest.AnonymousToken}
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
        for _, spec := range []curlCase{
                // My collection
                {
@@ -293,7 +297,7 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, check.Equals, nil)
        arv.ApiToken = arvadostest.ActiveToken
-       kc, err := keepclient.MakeKeepClient(&arv)
+       kc, err := keepclient.MakeKeepClient(arv)
        c.Assert(err, check.Equals, nil)
        kc.PutB([]byte("Hello world\n"))
        kc.PutB([]byte("foo"))
@@ -307,10 +311,14 @@ func (s *IntegrationSuite) TearDownSuite(c *check.C) {
 
 func (s *IntegrationSuite) SetUpTest(c *check.C) {
        arvadostest.ResetEnv()
-       s.testServer = &server{}
-       var err error
-       address = "127.0.0.1:0"
-       err = s.testServer.Start()
+       s.testServer = &server{Config: &Config{
+               Client: arvados.Client{
+                       APIHost:  testAPIHost,
+                       Insecure: true,
+               },
+               Listen: "127.0.0.1:0",
+       }}
+       err := s.testServer.Start()
        c.Assert(err, check.Equals, nil)
 }
 
diff --git a/services/keep-web/usage.go b/services/keep-web/usage.go
new file mode 100644 (file)
index 0000000..a36bf58
--- /dev/null
@@ -0,0 +1,71 @@
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "os"
+)
+
+func usage() {
+       c := DefaultConfig()
+       c.AnonymousTokens = []string{"xxxxxxxxxxxxxxxxxxxxxxx"}
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := json.MarshalIndent(c, "    ", "  ")
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+Keep-web provides read-only HTTP access to files stored in Keep; see
+https://godoc.org/github.com/curoverse/arvados/services/keep-web and
+http://doc.arvados.org/install/install-keep-web.html
+
+Usage: keep-web [-config path/to/keep-web.yml]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+    %s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Unused. Normally empty, or omitted entirely.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+Listen:
+
+    Local port to listen on. Can be "address", "address:port", or
+    ":port", where "address" is a host IP address or name and "port"
+    is a port number or name.
+
+AnonymousTokens:
+
+    Array of tokens to try when a client does not provide a token.
+
+AttachmentOnlyHost:
+
+    Accept credentials, and add "Content-Disposition: attachment"
+    response headers, for requests at this hostname:port.
+
+    This prohibits inline display, which makes it possible to serve
+    untrusted and non-public content from a single origin, i.e.,
+    without wildcard DNS or SSL.
+
+TrustAllContent:
+
+    Serve non-public content from a single origin. Dangerous: read
+    docs before using!
+
+`, exampleConfigFile)
+}
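
Note: since -config accepts YAML as well as JSON, the example above can equivalently be written as YAML. A sketch with illustrative values; the field names are assumed to match the Go struct fields referenced by the flag definitions above:

    Client:
      APIHost: zzzzz.arvadosapi.com:443
      AuthToken: ""
      Insecure: false
    Listen: ":80"
    AnonymousTokens:
      - xxxxxxxxxxxxxxxxxxxxxxx
    AttachmentOnlyHost: ""
    TrustAllContent: false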
index 4cd931037ef830dfd8a6b25022126c84c13d7036..816de29da8abc4bad7ddcf22afcb572a7b16da69 100644 (file)
@@ -1,12 +1,10 @@
 package main
 
 import (
+       "encoding/json"
        "errors"
        "flag"
        "fmt"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       "github.com/gorilla/mux"
        "io"
        "io/ioutil"
        "log"
@@ -18,98 +16,125 @@ import (
        "sync"
        "syscall"
        "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/coreos/go-systemd/daemon"
+       "github.com/gorilla/mux"
 )
 
-// Default TCP address on which to listen for requests.
-// Override with -listen.
-const DefaultAddr = ":25107"
+type Config struct {
+       Client          arvados.Client
+       Listen          string
+       DisableGet      bool
+       DisablePut      bool
+       DefaultReplicas int
+       Timeout         arvados.Duration
+       PIDFile         string
+       Debug           bool
+}
+
+func DefaultConfig() *Config {
+       return &Config{
+               Listen:  ":25107",
+               Timeout: arvados.Duration(15 * time.Second),
+       }
+}
 
 var listener net.Listener
 
 func main() {
-       var (
-               listen           string
-               no_get           bool
-               no_put           bool
-               default_replicas int
-               timeout          int64
-               pidfile          string
-       )
+       cfg := DefaultConfig()
 
        flagset := flag.NewFlagSet("keepproxy", flag.ExitOnError)
-
-       flagset.StringVar(
-               &listen,
-               "listen",
-               DefaultAddr,
-               "Interface on which to listen for requests, in the format "+
-                       "ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
-                       "to listen on all network interfaces.")
-
-       flagset.BoolVar(
-               &no_get,
-               "no-get",
-               false,
-               "If set, disable GET operations")
-
-       flagset.BoolVar(
-               &no_put,
-               "no-put",
-               false,
-               "If set, disable PUT operations")
-
-       flagset.IntVar(
-               &default_replicas,
-               "default-replicas",
-               2,
-               "Default number of replicas to write if not specified by the client.")
-
-       flagset.Int64Var(
-               &timeout,
-               "timeout",
-               15,
-               "Timeout on requests to internal Keep services (default 15 seconds)")
-
-       flagset.StringVar(
-               &pidfile,
-               "pid",
-               "",
-               "Path to write pid file")
-
+       flagset.Usage = usage
+
+       const deprecated = " (DEPRECATED -- use config file instead)"
+       flagset.StringVar(&cfg.Listen, "listen", cfg.Listen, "Local port to listen on."+deprecated)
+       flagset.BoolVar(&cfg.DisableGet, "no-get", cfg.DisableGet, "Disable GET operations."+deprecated)
+       flagset.BoolVar(&cfg.DisablePut, "no-put", cfg.DisablePut, "Disable PUT operations."+deprecated)
+       flagset.IntVar(&cfg.DefaultReplicas, "default-replicas", cfg.DefaultReplicas, "Default number of replicas to write if not specified by the client. If 0, use site default."+deprecated)
+       flagset.StringVar(&cfg.PIDFile, "pid", cfg.PIDFile, "Path to write pid file."+deprecated)
+       timeoutSeconds := flagset.Int("timeout", int(time.Duration(cfg.Timeout)/time.Second), "Timeout (in seconds) on requests to internal Keep services."+deprecated)
+
+       var cfgPath string
+       const defaultCfgPath = "/etc/arvados/keepproxy/keepproxy.yml"
+       flagset.StringVar(&cfgPath, "config", defaultCfgPath, "Configuration file `path`")
        flagset.Parse(os.Args[1:])
 
-       arv, err := arvadosclient.MakeArvadosClient()
+       err := config.LoadFile(cfg, cfgPath)
+       if err != nil {
+               h := os.Getenv("ARVADOS_API_HOST")
+               t := os.Getenv("ARVADOS_API_TOKEN")
+               if h == "" || t == "" || !os.IsNotExist(err) || cfgPath != defaultCfgPath {
+                       log.Fatal(err)
+               }
+               log.Print("DEPRECATED: No config file found, but ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are set. Please use a config file instead.")
+               cfg.Client.APIHost = h
+               cfg.Client.AuthToken = t
+               if regexp.MustCompile("^(?i:1|yes|true)$").MatchString(os.Getenv("ARVADOS_API_HOST_INSECURE")) {
+                       cfg.Client.Insecure = true
+               }
+               if j, err := json.MarshalIndent(cfg, "", "    "); err == nil {
+                       log.Print("Current configuration:\n", string(j))
+               }
+               cfg.Timeout = arvados.Duration(time.Duration(*timeoutSeconds) * time.Second)
+       }
+
+       arv, err := arvadosclient.New(&cfg.Client)
        if err != nil {
                log.Fatalf("Error setting up arvados client %s", err.Error())
        }
 
-       if os.Getenv("ARVADOS_DEBUG") != "" {
+       if cfg.Debug {
                keepclient.DebugPrintf = log.Printf
        }
-       kc, err := keepclient.MakeKeepClient(&arv)
+       kc, err := keepclient.MakeKeepClient(arv)
        if err != nil {
                log.Fatalf("Error setting up keep client %s", err.Error())
        }
 
-       if pidfile != "" {
-               f, err := os.Create(pidfile)
+       if cfg.PIDFile != "" {
+               f, err := os.Create(cfg.PIDFile)
+               if err != nil {
+                       log.Fatal(err)
+               }
+               defer f.Close()
+               err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+               if err != nil {
+                       log.Fatalf("flock(%s): %s", cfg.PIDFile, err)
+               }
+               defer os.Remove(cfg.PIDFile)
+               err = f.Truncate(0)
+               if err != nil {
+                       log.Fatalf("truncate(%s): %s", cfg.PIDFile, err)
+               }
+               _, err = fmt.Fprint(f, os.Getpid())
+               if err != nil {
+                       log.Fatalf("write(%s): %s", cfg.PIDFile, err)
+               }
+               err = f.Sync()
                if err != nil {
-                       log.Fatalf("Error writing pid file (%s): %s", pidfile, err.Error())
+                       log.Fatalf("sync(%s): %s", cfg.PIDFile, err)
                }
-               fmt.Fprint(f, os.Getpid())
-               f.Close()
-               defer os.Remove(pidfile)
        }
 
-       kc.Want_replicas = default_replicas
-       kc.Client.Timeout = time.Duration(timeout) * time.Second
+       if cfg.DefaultReplicas > 0 {
+               kc.Want_replicas = cfg.DefaultReplicas
+       }
+       kc.Client.Timeout = time.Duration(cfg.Timeout)
        go kc.RefreshServices(5*time.Minute, 3*time.Second)
 
-       listener, err = net.Listen("tcp", listen)
+       listener, err = net.Listen("tcp", cfg.Listen)
        if err != nil {
-               log.Fatalf("Could not listen on %v", listen)
+               log.Fatalf("listen(%s): %s", cfg.Listen, err)
+       }
+       if _, err := daemon.SdNotify("READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
        }
-       log.Printf("Arvados Keep proxy started listening on %v", listener.Addr())
+       log.Println("Listening at", listener.Addr())
 
        // Shut down the server gracefully (by closing the listener)
        // if SIGTERM is received.
@@ -123,7 +148,7 @@ func main() {
        signal.Notify(term, syscall.SIGINT)
 
        // Start serving requests.
-       http.Serve(listener, MakeRESTRouter(!no_get, !no_put, kc))
+       http.Serve(listener, MakeRESTRouter(!cfg.DisableGet, !cfg.DisablePut, kc))
 
        log.Println("shutting down")
 }
@@ -367,7 +392,7 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
 
        kc := *this.KeepClient
        var err error
-       var expectLength int64 = -1
+       var expectLength int64
        var status = http.StatusInternalServerError
        var wroteReplicas int
        var locatorOut string = "-"
@@ -381,15 +406,8 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
 
        locatorIn := mux.Vars(req)["locator"]
 
-       if req.Header.Get("Content-Length") != "" {
-               _, err := fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &expectLength)
-               if err != nil {
-                       resp.Header().Set("Content-Length", fmt.Sprintf("%d", expectLength))
-               }
-
-       }
-
-       if expectLength < 0 {
+       _, err = fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &expectLength)
+       if err != nil || expectLength < 0 {
                err = LengthRequiredError
                status = http.StatusLengthRequired
                return
@@ -424,7 +442,7 @@ func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
        if req.Header.Get("X-Keep-Desired-Replicas") != "" {
                var r int
                _, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
-               if err != nil {
+               if err == nil {
                        kc.Want_replicas = r
                }
        }
diff --git a/services/keepproxy/keepproxy.service b/services/keepproxy/keepproxy.service
new file mode 100644 (file)
index 0000000..c340fab
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Arvados Keep Proxy
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keepproxy/keepproxy.yml
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/keepproxy
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
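
Note: Type=notify above pairs with the daemon.SdNotify("READY=1") call added in main(): systemd treats the unit as started only once the readiness message arrives on the socket named by $NOTIFY_SOCKET, so dependency ordering and Restart= handling track the real startup point. A minimal sketch of the same call, mirroring the signature used in this tree (later go-systemd releases add a leading bool parameter):

    import (
            "log"

            "github.com/coreos/go-systemd/daemon"
    )

    func notifyReady() {
            // When $NOTIFY_SOCKET is unset (not running under systemd),
            // this is expected to be a harmless no-op at worst.
            if _, err := daemon.SdNotify("READY=1"); err != nil {
                    log.Printf("Error notifying init daemon: %v", err)
            }
    }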
index 4a1d58f86d822b2508f681c122c51261e2df37fc..6a349dae216c54e81b309e7ecc52471549380146 100644 (file)
@@ -1,6 +1,7 @@
 package main
 
 import (
+       "bytes"
        "crypto/md5"
        "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
@@ -9,6 +10,7 @@ import (
        "io/ioutil"
        "log"
        "net/http"
+       "net/http/httptest"
        "os"
        "strings"
        "testing"
@@ -99,7 +101,7 @@ func runProxy(c *C, args []string, bogusClientToken bool) *keepclient.KeepClient
        if bogusClientToken {
                arv.ApiToken = "bogus-token"
        }
-       kc := keepclient.New(&arv)
+       kc := keepclient.New(arv)
        sr := map[string]string{
                TestProxyUUID: "http://" + listener.Addr().String(),
        }
@@ -109,6 +111,62 @@ func runProxy(c *C, args []string, bogusClientToken bool) *keepclient.KeepClient
        return kc
 }
 
+func (s *ServerRequiredSuite) TestDesiredReplicas(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       content := []byte("TestDesiredReplicas")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       for _, kc.Want_replicas = range []int{0, 1, 2} {
+               locator, rep, err := kc.PutB(content)
+               c.Check(err, Equals, nil)
+               c.Check(rep, Equals, kc.Want_replicas)
+               if rep > 0 {
+                       c.Check(locator, Matches, fmt.Sprintf(`^%s\+%d(\+.+)?$`, hash, len(content)))
+               }
+       }
+}
+
+func (s *ServerRequiredSuite) TestPutWrongContentLength(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       content := []byte("TestPutWrongContentLength")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       // If we use http.Client to send these requests to the network
+       // server we just started, the Go http library automatically
+       // fixes the invalid Content-Length header. In order to test
+       // our server behavior, we have to call the handler directly
+       // using an httptest.ResponseRecorder.
+       rtr := MakeRESTRouter(true, true, kc)
+
+       type testcase struct {
+               sendLength   string
+               expectStatus int
+       }
+
+       for _, t := range []testcase{
+               {"1", http.StatusBadRequest},
+               {"", http.StatusLengthRequired},
+               {"-1", http.StatusLengthRequired},
+               {"abcdef", http.StatusLengthRequired},
+       } {
+               req, err := http.NewRequest("PUT",
+                       fmt.Sprintf("http://%s/%s+%d", listener.Addr().String(), hash, len(content)),
+                       bytes.NewReader(content))
+               c.Assert(err, IsNil)
+               req.Header.Set("Content-Length", t.sendLength)
+               req.Header.Set("Authorization", "OAuth2 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+               req.Header.Set("Content-Type", "application/octet-stream")
+
+               resp := httptest.NewRecorder()
+               rtr.ServeHTTP(resp, req)
+               c.Check(resp.Code, Equals, t.expectStatus)
+       }
+}
+
 func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
        kc := runProxy(c, nil, false)
        defer closeListener()
@@ -446,7 +504,7 @@ func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
        c.Assert(err, Equals, nil)
 
        // keepclient with no such keep server
-       kc := keepclient.New(&arv)
+       kc := keepclient.New(arv)
        locals := map[string]string{
                TestProxyUUID: "http://localhost:12345",
        }
diff --git a/services/keepproxy/usage.go b/services/keepproxy/usage.go
new file mode 100644 (file)
index 0000000..403c6bf
--- /dev/null
@@ -0,0 +1,82 @@
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "os"
+)
+
+func usage() {
+       c := DefaultConfig()
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := json.MarshalIndent(c, "    ", "  ")
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+Keepproxy forwards GET and PUT requests to keepstore servers.  See
+http://doc.arvados.org/install/install-keepproxy.html
+
+Usage: keepproxy [-config path/to/keepproxy.yml]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+    %s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Anonymous API token.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+Listen:
+
+    Local port to listen on. Can be "address:port" or ":port", where
+    "address" is a host IP address or name and "port" is a port number
+    or name.
+
+DisableGet:
+
+    Respond 404 to GET and HEAD requests.
+
+DisablePut:
+
+    Respond 404 to PUT, POST, and OPTIONS requests.
+
+DefaultReplicas:
+
+    Default number of replicas to write if not specified by the
+    client. If this is zero or omitted, the site-wide
+    defaultCollectionReplication configuration will be used.
+
+Timeout:
+
+    Timeout for requests to keep services, with units (e.g., "120s",
+    "2m").
+
+PIDFile:
+
+    Path to PID file. During startup this file will be created if
+    needed, and locked using flock() until keepproxy exits. If it is
+    already locked, or any error is encountered while writing to it,
+    keepproxy will exit immediately. If omitted or empty, no PID file
+    will be used.
+
+Debug:
+
+    Enable debug logging.
+
+`, exampleConfigFile)
+}
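
Note: the same config in YAML form, as the default /etc/arvados/keepproxy/keepproxy.yml path suggests. A sketch with illustrative values; arvados.Duration is assumed to marshal as a string with units, matching the Timeout description above:

    Client:
      APIHost: zzzzz.arvadosapi.com:443
      AuthToken: anonymous-api-token-here
      Insecure: false
    Listen: ":25107"
    DisableGet: false
    DisablePut: false
    DefaultReplicas: 0
    Timeout: 15s
    PIDFile: /var/run/keepproxy.pid
    Debug: false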
index 99da2a3a3de35de90be820b1a5285e5b592004d7..48cb02647cfd098cdc67796ba992ac5cba327bde 100644 (file)
@@ -350,7 +350,7 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
                                // Trashed blob; exclude it from response
                                continue
                        }
-                       fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.Unix())
+                       fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.UnixNano())
                }
                if resp.NextMarker == "" {
                        return nil
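
Note the unit change here: index lines now report block mtimes in nanoseconds (t.UnixNano()) rather than seconds, and the S3 IndexTo below makes the matching change. A consumer parsing the third field of a "locator+size timestamp" index line would decode it along these lines (a sketch; the helper name is hypothetical):

    import (
            "strconv"
            "time"
    )

    // parseIndexMtime decodes the timestamp field of an index line,
    // which is now nanoseconds since the epoch.
    func parseIndexMtime(ts string) (time.Time, error) {
            ns, err := strconv.ParseInt(ts, 10, 64)
            if err != nil {
                    return time.Time{}, err
            }
            return time.Unix(0, ns), nil
    }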
index f698982415aae5bd7d8a341428acb2d8bdb57317..a6798a9f72bb6355ba8b5f6d9cb7d58f3ffe69e9 100644 (file)
@@ -73,7 +73,7 @@ func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
 func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
        if enforcePermissions {
                locator := req.URL.Path[1:] // strip leading slash
-               if err := VerifySignature(locator, GetApiToken(req)); err != nil {
+               if err := VerifySignature(locator, GetAPIToken(req)); err != nil {
                        http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
                        return
                }
@@ -184,7 +184,7 @@ func PutBlockHandler(resp http.ResponseWriter, req *http.Request) {
        // Success; add a size hint, sign the locator if possible, and
        // return it to the client.
        returnHash := fmt.Sprintf("%s+%d", hash, req.ContentLength)
-       apiToken := GetApiToken(req)
+       apiToken := GetAPIToken(req)
        if PermissionSecret != nil && apiToken != "" {
                expiry := time.Now().Add(blobSignatureTTL)
                returnHash = SignLocator(returnHash, apiToken, expiry)
@@ -196,7 +196,7 @@ func PutBlockHandler(resp http.ResponseWriter, req *http.Request) {
 // IndexHandler is a HandleFunc to address /index and /index/{prefix} requests.
 func IndexHandler(resp http.ResponseWriter, req *http.Request) {
        // Reject unauthorized requests.
-       if !IsDataManagerToken(GetApiToken(req)) {
+       if !IsDataManagerToken(GetAPIToken(req)) {
                http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
                return
        }
@@ -328,7 +328,7 @@ func DeleteHandler(resp http.ResponseWriter, req *http.Request) {
        hash := mux.Vars(req)["hash"]
 
        // Confirm that this user is an admin and has a token with unlimited scope.
-       var tok = GetApiToken(req)
+       var tok = GetAPIToken(req)
        if tok == "" || !CanDelete(tok) {
                http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
                return
@@ -419,7 +419,7 @@ type PullRequest struct {
 // PullHandler processes "PUT /pull" requests for the data manager.
 func PullHandler(resp http.ResponseWriter, req *http.Request) {
        // Reject unauthorized requests.
-       if !IsDataManagerToken(GetApiToken(req)) {
+       if !IsDataManagerToken(GetAPIToken(req)) {
                http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
                return
        }
@@ -455,7 +455,7 @@ type TrashRequest struct {
 // TrashHandler processes /trash requests.
 func TrashHandler(resp http.ResponseWriter, req *http.Request) {
        // Reject unauthorized requests.
-       if !IsDataManagerToken(GetApiToken(req)) {
+       if !IsDataManagerToken(GetAPIToken(req)) {
                http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
                return
        }
@@ -485,7 +485,7 @@ func TrashHandler(resp http.ResponseWriter, req *http.Request) {
 // UntrashHandler processes "PUT /untrash/{hash:[0-9a-f]{32}}" requests for the data manager.
 func UntrashHandler(resp http.ResponseWriter, req *http.Request) {
        // Reject unauthorized requests.
-       if !IsDataManagerToken(GetApiToken(req)) {
+       if !IsDataManagerToken(GetAPIToken(req)) {
                http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
                return
        }
@@ -714,10 +714,10 @@ func IsValidLocator(loc string) bool {
 
 var authRe = regexp.MustCompile(`^OAuth2\s+(.*)`)
 
-// GetApiToken returns the OAuth2 token from the Authorization
+// GetAPIToken returns the OAuth2 token from the Authorization
 // header of a HTTP request, or an empty string if no matching
 // token is found.
-func GetApiToken(req *http.Request) string {
+func GetAPIToken(req *http.Request) string {
        if auth, ok := req.Header["Authorization"]; ok {
                if match := authRe.FindStringSubmatch(auth[0]); match != nil {
                        return match[1]
index 819d52fe0adecd71670ab89d57f1967b64368b4a..48b83de4b8aa2a40e62be953e236162d396ddae0 100644 (file)
@@ -197,8 +197,8 @@ func main() {
        flag.IntVar(
                &permissionTTLSec,
                "blob-signature-ttl",
-               int(time.Duration(2*7*24*time.Hour).Seconds()),
-               "Lifetime of blob permission signatures. Modifying the ttl will invalidate all existing signatures. "+
+               2*7*24*3600,
+               "Lifetime of blob permission signatures in seconds. Modifying the ttl will invalidate all existing signatures. "+
                        "See services/api/config/application.default.yml.")
        flag.BoolVar(
                &flagSerializeIO,
@@ -223,7 +223,7 @@ func main() {
        flag.DurationVar(
                &trashLifetime,
                "trash-lifetime",
-               0*time.Second,
+               0,
                "Time duration after a block is trashed during which it can be recovered using an /untrash request")
        flag.DurationVar(
                &trashCheckInterval,
index 2626d4bf68e1594f394ad4539f0f32a90fe00339..d53d1060e743e07d9d2bfba6b90c67376a1006ab 100644 (file)
@@ -2,7 +2,6 @@ package main
 
 import (
        "crypto/rand"
-       "errors"
        "fmt"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "io"
@@ -57,7 +56,7 @@ func PullItemAndProcess(pullRequest PullRequest, token string, keepClient *keepc
                return
        }
        if reader == nil {
-               return errors.New(fmt.Sprintf("No reader found for : %s", signedLocator))
+               return fmt.Errorf("No reader found for: %s", signedLocator)
        }
        defer reader.Close()
 
@@ -67,7 +66,7 @@ func PullItemAndProcess(pullRequest PullRequest, token string, keepClient *keepc
        }
 
        if (readContent == nil) || (int64(len(readContent)) != contentLen) {
-               return errors.New(fmt.Sprintf("Content not found for: %s", signedLocator))
+               return fmt.Errorf("Content not found for: %s", signedLocator)
        }
 
        err = PutContent(readContent, pullRequest.Locator)
index e0613a2cd12685195d841c2ef2ae31f7b90f60bb..77b4c75f4a702d4afd701d3601f9a54062b2ba2f 100644 (file)
@@ -37,7 +37,7 @@ func SetupPullWorkerIntegrationTest(t *testing.T, testData PullWorkIntegrationTe
 
        // keep client
        keepClient = &keepclient.KeepClient{
-               Arvados:       &arv,
+               Arvados:       arv,
                Want_replicas: 1,
                Client:        &http.Client{},
        }
index 5076b85e20703e8305b74ab4ce5a048cd17a81b0..4d85d5fd20cf6abe408035d294e3d85c5d011251 100644 (file)
@@ -39,7 +39,7 @@ func (s *PullWorkerTestSuite) SetUpTest(c *C) {
 func RunTestPullWorker(c *C) {
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, Equals, nil)
-       keepClient, err := keepclient.MakeKeepClient(&arv)
+       keepClient, err := keepclient.MakeKeepClient(arv)
        c.Assert(err, Equals, nil)
 
        pullq = NewWorkQueue()
index 80a7c89f2ed4f6669566711c40c4d0a59940e439..1a2a47b0df3b27d9c38ae7f4c8986a0f48f933a2 100644 (file)
@@ -18,7 +18,9 @@ import (
 )
 
 var (
-       ErrS3DeleteNotAvailable = fmt.Errorf("delete without -s3-unsafe-delete is not implemented")
+       // ErrS3TrashDisabled is returned by Trash if that operation
+       // is impossible with the current config.
+       ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")
 
        s3AccessKeyFile string
        s3SecretKeyFile string
@@ -26,6 +28,7 @@ var (
        s3Endpoint      string
        s3Replication   int
        s3UnsafeDelete  bool
+       s3RaceWindow    time.Duration
 
        s3ACL = s3.Private
 )
@@ -40,9 +43,6 @@ type s3VolumeAdder struct {
 }
 
 func (s *s3VolumeAdder) Set(bucketName string) error {
-       if trashLifetime != 0 {
-               return ErrNotImplemented
-       }
        if bucketName == "" {
                return fmt.Errorf("no container name given")
        }
@@ -77,7 +77,7 @@ func (s *s3VolumeAdder) Set(bucketName string) error {
        if flagSerializeIO {
                log.Print("Notice: -serialize is not supported by s3-bucket volumes.")
        }
-       v := NewS3Volume(auth, region, bucketName, flagReadonly, s3Replication)
+       v := NewS3Volume(auth, region, bucketName, s3RaceWindow, flagReadonly, s3Replication)
        if err := v.Check(); err != nil {
                return err
        }
@@ -86,7 +86,7 @@ func (s *s3VolumeAdder) Set(bucketName string) error {
 }
 
 func s3regions() (okList []string) {
-       for r, _ := range aws.Regions {
+       for r := range aws.Regions {
                okList = append(okList, r)
        }
        return
@@ -116,6 +116,11 @@ func init() {
                "s3-secret-key-file",
                "",
                "File containing the secret key used for subsequent -s3-bucket-volume arguments.")
+       flag.DurationVar(
+               &s3RaceWindow,
+               "s3-race-window",
+               24*time.Hour,
+               "Maximum eventual consistency latency for subsequent -s3-bucket-volume arguments.")
        flag.IntVar(
                &s3Replication,
                "s3-replication",
@@ -128,8 +133,10 @@ func init() {
                "EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.")
 }
 
+// S3Volume implements Volume using an S3 bucket.
 type S3Volume struct {
        *s3.Bucket
+       raceWindow    time.Duration
        readonly      bool
        replication   int
        indexPageSize int
@@ -138,26 +145,61 @@ type S3Volume struct {
 // NewS3Volume returns a new S3Volume using the given auth, region,
 // and bucket name. The replication argument specifies the replication
 // level to report when writing data.
-func NewS3Volume(auth aws.Auth, region aws.Region, bucket string, readonly bool, replication int) *S3Volume {
+func NewS3Volume(auth aws.Auth, region aws.Region, bucket string, raceWindow time.Duration, readonly bool, replication int) *S3Volume {
        return &S3Volume{
                Bucket: &s3.Bucket{
                        S3:   s3.New(auth, region),
                        Name: bucket,
                },
+               raceWindow:    raceWindow,
                readonly:      readonly,
                replication:   replication,
                indexPageSize: 1000,
        }
 }
 
+// Check returns an error if the volume is inaccessible (e.g., config
+// error).
 func (v *S3Volume) Check() error {
        return nil
 }
 
+// getReader wraps (Bucket)GetReader.
+//
+// In situations where (Bucket)GetReader would fail because the block
+// disappeared in a Trash race, getReader calls fixRace to recover the
+// data, and tries again.
+func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
+       rdr, err = v.Bucket.GetReader(loc)
+       err = v.translateError(err)
+       if err == nil || !os.IsNotExist(err) {
+               return
+       }
+       _, err = v.Bucket.Head("recent/"+loc, nil)
+       err = v.translateError(err)
+       if err != nil {
+               // If we can't read recent/X, there's no point in
+               // trying fixRace. Give up.
+               return
+       }
+       if !v.fixRace(loc) {
+               err = os.ErrNotExist
+               return
+       }
+       rdr, err = v.Bucket.GetReader(loc)
+       if err != nil {
+               log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
+               err = v.translateError(err)
+       }
+       return
+}
+
+// Get a block: copy the block data into buf, and return the number of
+// bytes copied.
 func (v *S3Volume) Get(loc string, buf []byte) (int, error) {
-       rdr, err := v.Bucket.GetReader(loc)
+       rdr, err := v.getReader(loc)
        if err != nil {
-               return 0, v.translateError(err)
+               return 0, err
        }
        defer rdr.Close()
        n, err := io.ReadFull(rdr, buf)
@@ -169,15 +211,17 @@ func (v *S3Volume) Get(loc string, buf []byte) (int, error) {
        }
 }
 
+// Compare the given data with the stored data.
 func (v *S3Volume) Compare(loc string, expect []byte) error {
-       rdr, err := v.Bucket.GetReader(loc)
+       rdr, err := v.getReader(loc)
        if err != nil {
-               return v.translateError(err)
+               return err
        }
        defer rdr.Close()
        return v.translateError(compareReaderWithBuf(rdr, expect, loc[:32]))
 }
 
+// Put writes a block.
 func (v *S3Volume) Put(loc string, block []byte) error {
        if v.readonly {
                return MethodDisabledError
@@ -190,71 +234,112 @@ func (v *S3Volume) Put(loc string, block []byte) error {
                }
                opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
        }
-       return v.translateError(
-               v.Bucket.Put(
-                       loc, block, "application/octet-stream", s3ACL, opts))
+       err := v.Bucket.Put(loc, block, "application/octet-stream", s3ACL, opts)
+       if err != nil {
+               return v.translateError(err)
+       }
+       err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+       return v.translateError(err)
 }
 
+// Touch sets the timestamp for the given locator to the current time.
 func (v *S3Volume) Touch(loc string) error {
        if v.readonly {
                return MethodDisabledError
        }
-       result, err := v.Bucket.PutCopy(loc, s3ACL, s3.CopyOptions{
-               ContentType:       "application/octet-stream",
-               MetadataDirective: "REPLACE",
-       }, v.Bucket.Name+"/"+loc)
-       if err != nil {
-               return v.translateError(err)
-       }
-       t, err := time.Parse(time.RFC3339, result.LastModified)
-       if err != nil {
+       _, err := v.Bucket.Head(loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) && v.fixRace(loc) {
+               // The data object got trashed in a race, but fixRace
+               // rescued it.
+       } else if err != nil {
                return err
        }
-       if time.Since(t) > maxClockSkew {
-               return fmt.Errorf("PutCopy returned old LastModified %s => %s (%s ago)", result.LastModified, t, time.Since(t))
-       }
-       return nil
+       err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+       return v.translateError(err)
 }
 
+// Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
-       resp, err := v.Bucket.Head(loc, nil)
+       _, err := v.Bucket.Head(loc, nil)
        if err != nil {
                return zeroTime, v.translateError(err)
        }
-       hdr := resp.Header.Get("Last-Modified")
-       t, err := time.Parse(time.RFC1123, hdr)
-       if err != nil && hdr != "" {
-               // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
-               // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
-               // as required by HTTP spec. If it's not a valid HTTP
-               // header value, it's probably AWS (or s3test) giving
-               // us a nearly-RFC1123 timestamp.
-               t, err = time.Parse(nearlyRFC1123, hdr)
+       resp, err := v.Bucket.Head("recent/"+loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // The data object X exists, but recent/X is missing.
+               err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+               if err != nil {
+                       log.Printf("error: creating %q: %s", "recent/"+loc, err)
+                       return zeroTime, v.translateError(err)
+               }
+               log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+               resp, err = v.Bucket.Head("recent/"+loc, nil)
+               if err != nil {
+                       log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
+                       return zeroTime, v.translateError(err)
+               }
+       } else if err != nil {
+               // HEAD recent/X failed for some other reason.
+               return zeroTime, err
        }
-       return t, err
+       return v.lastModified(resp)
 }
 
+// IndexTo writes a complete list of locators with the given prefix
+// for which Get() can retrieve data.
 func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
-       nextMarker := ""
-       for {
-               listResp, err := v.Bucket.List(prefix, "", nextMarker, v.indexPageSize)
-               if err != nil {
-                       return err
+       // Use a merge sort to find matching sets of X and recent/X.
+       dataL := s3Lister{
+               Bucket:   v.Bucket,
+               Prefix:   prefix,
+               PageSize: v.indexPageSize,
+       }
+       recentL := s3Lister{
+               Bucket:   v.Bucket,
+               Prefix:   "recent/" + prefix,
+               PageSize: v.indexPageSize,
+       }
+       for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
+               if data.Key >= "g" {
+                       // Conveniently, "recent/*" and "trash/*" are
+                       // lexically greater than all hex-encoded data
+                       // hashes, so stopping here avoids iterating
+                       // over all of them needlessly with dataL.
+                       break
                }
-               for _, key := range listResp.Contents {
-                       t, err := time.Parse(time.RFC3339, key.LastModified)
-                       if err != nil {
-                               return err
-                       }
-                       if !v.isKeepBlock(key.Key) {
+               if !v.isKeepBlock(data.Key) {
+                       continue
+               }
+
+               // stamp is the list entry we should use to report the
+               // last-modified time for this data block: it will be
+               // the recent/X entry if one exists, otherwise the
+               // entry for the data block itself.
+               stamp := data
+
+               // Advance to the corresponding recent/X marker, if any
+               for recent != nil {
+                       if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
+                               recent = recentL.Next()
                                continue
+                       } else if cmp == 0 {
+                               stamp = recent
+                               recent = recentL.Next()
+                               break
+                       } else {
+                               // recent/X marker is missing: we'll
+                               // use the timestamp on the data
+                               // object.
+                               break
                        }
-                       fmt.Fprintf(writer, "%s+%d %d\n", key.Key, key.Size, t.Unix())
                }
-               if !listResp.IsTruncated {
-                       break
+               t, err := time.Parse(time.RFC3339, stamp.LastModified)
+               if err != nil {
+                       return err
                }
-               nextMarker = listResp.NextMarker
+               fmt.Fprintf(writer, "%s+%d %d\n", data.Key, data.Size, t.UnixNano())
        }
        return nil
 }
@@ -264,25 +349,110 @@ func (v *S3Volume) Trash(loc string) error {
        if v.readonly {
                return MethodDisabledError
        }
-       if trashLifetime != 0 {
-               return ErrNotImplemented
-       }
        if t, err := v.Mtime(loc); err != nil {
                return err
        } else if time.Since(t) < blobSignatureTTL {
                return nil
        }
-       if !s3UnsafeDelete {
-               return ErrS3DeleteNotAvailable
+       if trashLifetime == 0 {
+               if !s3UnsafeDelete {
+                       return ErrS3TrashDisabled
+               }
+               return v.Bucket.Del(loc)
+       }
+       err := v.checkRaceWindow(loc)
+       if err != nil {
+               return err
+       }
+       err = v.safeCopy("trash/"+loc, loc)
+       if err != nil {
+               return err
+       }
+       return v.translateError(v.Bucket.Del(loc))
+}
+
+// checkRaceWindow returns a non-nil error if trash/loc is, or might
+// be, in the race window (i.e., it's not safe to trash loc).
+func (v *S3Volume) checkRaceWindow(loc string) error {
+       resp, err := v.Bucket.Head("trash/"+loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // OK, trash/X doesn't exist so we're not in the race
+               // window
+               return nil
+       } else if err != nil {
+               // Error looking up trash/X. We don't know whether
+               // we're in the race window
+               return err
+       }
+       t, err := v.lastModified(resp)
+       if err != nil {
+               // Can't parse timestamp
+               return err
+       }
+       safeWindow := t.Add(trashLifetime).Sub(time.Now().Add(v.raceWindow))
+       if safeWindow <= 0 {
+               // We can't count on "touch trash/X" to prolong
+               // trash/X's lifetime. The new timestamp might not
+               // become visible until now+raceWindow, and EmptyTrash
+               // is allowed to delete trash/X before then.
+               return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
+       }
+       // trash/X exists, but it won't be eligible for deletion until
+       // after now+raceWindow, so it's safe to overwrite it.
+       return nil
+}
+
+// safeCopy calls PutCopy, and checks the response to make sure the
+// copy succeeded and updated the timestamp on the destination object
+// (PutCopy returns 200 OK if the request was received, even if the
+// copy failed).
+func (v *S3Volume) safeCopy(dst, src string) error {
+       resp, err := v.Bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
+               ContentType:       "application/octet-stream",
+               MetadataDirective: "REPLACE",
+       }, v.Bucket.Name+"/"+src)
+       err = v.translateError(err)
+       if err != nil {
+               return err
+       }
+       if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
+               return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
+       } else if time.Now().Sub(t) > maxClockSkew {
+               return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
+       }
+       return nil
+}
+
+// lastModified returns the Last-Modified header from resp, parsed as
+// RFC1123 or, if that fails, as Amazon's nearly-RFC1123 variant.
+func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
+       s := resp.Header.Get("Last-Modified")
+       t, err = time.Parse(time.RFC1123, s)
+       if err != nil && s != "" {
+               // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
+               // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
+               // as required by HTTP spec. If it's not a valid HTTP
+               // header value, it's probably AWS (or s3test) giving
+               // us a nearly-RFC1123 timestamp.
+               t, err = time.Parse(nearlyRFC1123, s)
        }
-       return v.Bucket.Del(loc)
+       return
 }
 
-// TBD
+// Untrash moves a block from trash back into the data store.
 func (v *S3Volume) Untrash(loc string) error {
-       return ErrNotImplemented
+       err := v.safeCopy(loc, "trash/"+loc)
+       if err != nil {
+               return err
+       }
+       err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+       return v.translateError(err)
 }
 
+// Status returns a *VolumeStatus representing the current in-use
+// storage capacity and a fake available capacity that doesn't make
+// the volume seem full or nearly-full.
 func (v *S3Volume) Status() *VolumeStatus {
        return &VolumeStatus{
                DeviceNum: 1,
@@ -291,13 +461,19 @@ func (v *S3Volume) Status() *VolumeStatus {
        }
 }
 
+// String implements fmt.Stringer.
 func (v *S3Volume) String() string {
        return fmt.Sprintf("s3-bucket:%+q", v.Bucket.Name)
 }
 
+// Writable returns false if all future Put, Mtime, and Delete calls
+// are expected to fail.
 func (v *S3Volume) Writable() bool {
        return !v.readonly
 }
+
+// Replication returns the storage redundancy of the underlying
+// device. Configured via command line flag.
 func (v *S3Volume) Replication() int {
        return v.replication
 }
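
Note: to make checkRaceWindow's arithmetic concrete, a sketch with assumed settings trashLifetime = 1h and raceWindow = 5m ("time" is assumed imported):

    func exampleSafeWindow() time.Duration {
            // trash/X was last modified 50 minutes ago.
            t := time.Now().Add(-50 * time.Minute)
            safeWindow := t.Add(time.Hour).Sub(time.Now().Add(5 * time.Minute))
            // safeWindow is about +5m here, so overwriting trash/X is
            // safe. Had trash/X been 56 minutes old, safeWindow would be
            // about -1m, and Trash would return the "safe window ended"
            // error instead.
            return safeWindow
    }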
@@ -308,6 +484,52 @@ func (v *S3Volume) isKeepBlock(s string) bool {
        return s3KeepBlockRegexp.MatchString(s)
 }
 
+// fixRace(X) is called when "recent/X" exists but "X" doesn't
+// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
+// there was a race between Put and Trash, fixRace recovers from the
+// race by Untrashing the block.
+func (v *S3Volume) fixRace(loc string) bool {
+       trash, err := v.Bucket.Head("trash/"+loc, nil)
+       if err != nil {
+               if !os.IsNotExist(v.translateError(err)) {
+                       log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
+               }
+               return false
+       }
+       trashTime, err := v.lastModified(trash)
+       if err != nil {
+               log.Printf("error: fixRace: parse %q: %s", trash.Header.Get("Last-Modified"), err)
+               return false
+       }
+
+       recent, err := v.Bucket.Head("recent/"+loc, nil)
+       if err != nil {
+               log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
+               return false
+       }
+       recentTime, err := v.lastModified(recent)
+       if err != nil {
+               log.Printf("error: fixRace: parse %q: %s", recent.Header.Get("Last-Modified"), err)
+               return false
+       }
+
+       ageWhenTrashed := trashTime.Sub(recentTime)
+       if ageWhenTrashed >= blobSignatureTTL {
+               // No evidence of a race: block hasn't been written
+               // since it became eligible for Trash. No fix needed.
+               return false
+       }
+
+       log.Printf("notice: fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, blobSignatureTTL)
+       log.Printf("notice: fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
+       err = v.safeCopy(loc, "trash/"+loc)
+       if err != nil {
+               log.Printf("error: fixRace: %s", err)
+               return false
+       }
+       return true
+}
+
 func (v *S3Volume) translateError(err error) error {
        switch err := err.(type) {
        case *s3.Error:
@@ -325,6 +547,152 @@ func (v *S3Volume) translateError(err error) error {
 
 // EmptyTrash looks for trashed blocks that exceeded trashLifetime
 // and deletes them from the volume.
-// TBD
 func (v *S3Volume) EmptyTrash() {
+       var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
+
+       // Use a merge sort to find matching sets of trash/X and recent/X.
+       trashL := s3Lister{
+               Bucket:   v.Bucket,
+               Prefix:   "trash/",
+               PageSize: v.indexPageSize,
+       }
+       // Define "ready to delete" as "...when EmptyTrash started".
+       startT := time.Now()
+       for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+               loc := trash.Key[6:]
+               if !v.isKeepBlock(loc) {
+                       continue
+               }
+               bytesInTrash += trash.Size
+               blocksInTrash++
+
+               trashT, err := time.Parse(time.RFC3339, trash.LastModified)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
+                       continue
+               }
+               recent, err := v.Bucket.Head("recent/"+loc, nil)
+               if err != nil && os.IsNotExist(v.translateError(err)) {
+                       log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
+                       err = v.Untrash(loc)
+                       if err != nil {
+                               log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
+                       }
+                       continue
+               } else if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
+                       continue
+               }
+               recentT, err := v.lastModified(recent)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
+                       continue
+               }
+               if trashT.Sub(recentT) < blobSignatureTTL {
+                       if age := startT.Sub(recentT); age >= blobSignatureTTL-v.raceWindow {
+                               // recent/loc is too old to protect
+                               // loc from being Trashed again during
+                               // the raceWindow that starts if we
+                               // delete trash/X now.
+                               //
+                               // Note this means (trashCheckInterval
+                               // < blobSignatureTTL - raceWindow) is
+                               // necessary to avoid starvation.
+                               log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
+                               v.fixRace(loc)
+                               v.Touch(loc)
+                               continue
+                       } else if _, err := v.Bucket.Head(loc, nil); os.IsNotExist(err) {
+                               log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
+                               v.fixRace(loc)
+                               continue
+                       } else if err != nil {
+                               log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+                               continue
+                       }
+               }
+               if startT.Sub(trashT) < trashLifetime {
+                       continue
+               }
+               err = v.Bucket.Del(trash.Key)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
+                       continue
+               }
+               bytesDeleted += trash.Size
+               blocksDeleted++
+
+               _, err = v.Bucket.Head(loc, nil)
+               if os.IsNotExist(err) {
+                       err = v.Bucket.Del("recent/" + loc)
+                       if err != nil {
+                               log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
+                       }
+               } else if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
+               }
+       }
+       if err := trashL.Error(); err != nil {
+               log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
+       }
+       log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+}
+
+type s3Lister struct {
+       Bucket     *s3.Bucket
+       Prefix     string
+       PageSize   int
+       nextMarker string
+       buf        []s3.Key
+       err        error
+}
+
+// First fetches the first page and returns the first item. It returns
+// nil if the response is the empty set or an error occurs.
+func (lister *s3Lister) First() *s3.Key {
+       lister.getPage()
+       return lister.pop()
+}
+
+// Next returns the next item, fetching the next page if necessary. It
+// returns nil if the last available item has already been fetched, or
+// an error occurs.
+func (lister *s3Lister) Next() *s3.Key {
+       if len(lister.buf) == 0 && lister.nextMarker != "" {
+               lister.getPage()
+       }
+       return lister.pop()
+}
+
+// Error returns the most recent error encountered by First or Next.
+func (lister *s3Lister) Error() error {
+       return lister.err
+}
+
+func (lister *s3Lister) getPage() {
+       resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
+       lister.nextMarker = ""
+       if err != nil {
+               lister.err = err
+               return
+       }
+       if resp.IsTruncated {
+               lister.nextMarker = resp.NextMarker
+       }
+       lister.buf = make([]s3.Key, 0, len(resp.Contents))
+       for _, key := range resp.Contents {
+               if !strings.HasPrefix(key.Key, lister.Prefix) {
+                       log.Printf("warning: s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
+                       continue
+               }
+               lister.buf = append(lister.buf, key)
+       }
+}
+
+func (lister *s3Lister) pop() (k *s3.Key) {
+       if len(lister.buf) > 0 {
+               k = &lister.buf[0]
+               lister.buf = lister.buf[1:]
+       }
+       return
 }
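
Note: IndexTo and EmptyTrash above consume s3Lister via the First/Next/Error idiom, deferring error checks so the loop body stays simple. A minimal usage sketch (bucket is assumed to be an initialized *s3.Bucket; the surrounding function is hypothetical):

    import (
            "fmt"
            "log"

            "github.com/AdRoll/goamz/s3"
    )

    func dumpTrash(bucket *s3.Bucket) {
            lister := s3Lister{
                    Bucket:   bucket,
                    Prefix:   "trash/",
                    PageSize: 1000,
            }
            for key := lister.First(); key != nil; key = lister.Next() {
                    fmt.Printf("%s %d %s\n", key.Key, key.Size, key.LastModified)
            }
            if err := lister.Error(); err != nil {
                    log.Printf("listing trash/ failed: %s", err)
            }
    }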
index 0c2cd4942ecee8961ee9b8b07e36781c516fbba6..6ba390426f51dbe4f8ef1d6ac2ce2a27b4d3f3b2 100644 (file)
@@ -2,9 +2,10 @@ package main
 
 import (
        "bytes"
+       "crypto/md5"
        "fmt"
        "log"
-       "strings"
+       "os"
        "time"
 
        "github.com/AdRoll/goamz/aws"
@@ -41,7 +42,7 @@ func init() {
        s3UnsafeDelete = true
 }
 
-func NewTestableS3Volume(c *check.C, readonly bool, replication int) *TestableS3Volume {
+func NewTestableS3Volume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
        clock := &fakeClock{}
        srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
        c.Assert(err, check.IsNil)
@@ -59,7 +60,7 @@ func NewTestableS3Volume(c *check.C, readonly bool, replication int) *TestableS3
        c.Assert(err, check.IsNil)
 
        return &TestableS3Volume{
-               S3Volume:    NewS3Volume(auth, region, TestBucketName, readonly, replication),
+               S3Volume:    NewS3Volume(auth, region, TestBucketName, raceWindow, readonly, replication),
                server:      srv,
                serverClock: clock,
        }
@@ -73,18 +74,20 @@ type StubbedS3Suite struct {
 
 func (s *StubbedS3Suite) TestGeneric(c *check.C) {
        DoGenericVolumeTests(c, func(t TB) TestableVolume {
-               return NewTestableS3Volume(c, false, 2)
+               // Use a negative raceWindow so s3test's 1-second
+               // timestamp precision doesn't confuse fixRace.
+               return NewTestableS3Volume(c, -2*time.Second, false, 2)
        })
 }
 
 func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
        DoGenericVolumeTests(c, func(t TB) TestableVolume {
-               return NewTestableS3Volume(c, true, 2)
+               return NewTestableS3Volume(c, -2*time.Second, true, 2)
        })
 }
 
 func (s *StubbedS3Suite) TestIndex(c *check.C) {
-       v := NewTestableS3Volume(c, false, 2)
+       v := NewTestableS3Volume(c, 0, false, 2)
        v.indexPageSize = 3
        for i := 0; i < 256; i++ {
                v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
@@ -108,6 +111,202 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
        }
 }
 
+func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
+       defer func(tl, bs time.Duration) {
+               trashLifetime = tl
+               blobSignatureTTL = bs
+       }(trashLifetime, blobSignatureTTL)
+       trashLifetime = time.Hour
+       blobSignatureTTL = time.Hour
+
+       v := NewTestableS3Volume(c, 5*time.Minute, false, 2)
+       var none time.Time
+
+       putS3Obj := func(t time.Time, key string, data []byte) {
+               if t == none {
+                       return
+               }
+               v.serverClock.now = &t
+               v.Bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
+       }
+
+       t0 := time.Now()
+       nextKey := 0
+       for _, scenario := range []struct {
+               label               string
+               dataT               time.Time
+               recentT             time.Time
+               trashT              time.Time
+               canGet              bool
+               canTrash            bool
+               canGetAfterTrash    bool
+               canUntrash          bool
+               haveTrashAfterEmpty bool
+               freshAfterEmpty     bool
+       }{
+               {
+                       "No related objects",
+                       none, none, none,
+                       false, false, false, false, false, false,
+               },
+               {
+                       // Stored by older version, or there was a
+                       // race between EmptyTrash and Put: Trash is a
+                       // no-op even though the data object is very
+                       // old
+                       "No recent/X",
+                       t0.Add(-48 * time.Hour), none, none,
+                       true, true, true, false, false, false,
+               },
+               {
+                       "Not trash, but old enough to be eligible for trash",
+                       t0.Add(-24 * time.Hour), t0.Add(-2 * time.Hour), none,
+                       true, true, false, false, false, false,
+               },
+               {
+                       "Not trash, and not old enough to be eligible for trash",
+                       t0.Add(-24 * time.Hour), t0.Add(-30 * time.Minute), none,
+                       true, true, true, false, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, due to recent race between Trash and Put",
+                       t0.Add(-24 * time.Hour), t0.Add(-3 * time.Minute), t0.Add(-2 * time.Minute),
+                       true, true, true, true, true, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, trash nearly eligible for deletion: prone to Trash race",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, trash is eligible for deletion: prone to Trash race",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-61 * time.Minute),
+                       true, false, true, true, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, due to old race between Put and unfinished Trash: emptying trash is unsafe",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-12 * time.Hour),
+                       true, false, true, true, true, true,
+               },
+               {
+                       "Trashed + untrashed copies exist, used to be unsafe to empty, but since made safe by fixRace+Touch",
+                       t0.Add(-time.Second), t0.Add(-time.Second), t0.Add(-12 * time.Hour),
+                       true, true, true, true, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist because Trash operation was interrupted (no race)",
+                       t0.Add(-24 * time.Hour), t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour),
+                       true, false, true, true, false, false,
+               },
+               {
+                       "Trash, not yet eligible for deletion",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-time.Minute),
+                       false, false, false, true, true, false,
+               },
+               {
+                       "Trash, not yet eligible for deletion, prone to races",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
+                       false, false, false, true, true, false,
+               },
+               {
+                       "Trash, eligible for deletion",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-2 * time.Hour),
+                       false, false, false, true, false, false,
+               },
+               {
+                       "Erroneously trashed during a race, detected before trashLifetime",
+                       none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Erroneously trashed during a race, rescue during EmptyTrash despite reaching trashLifetime",
+                       none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Trashed copy exists with no recent/* marker (cause unknown); repair by untrashing",
+                       none, none, t0.Add(-time.Minute),
+                       false, false, false, true, true, true,
+               },
+       } {
+               c.Log("Scenario: ", scenario.label)
+
+               // We have a few tests to run for each scenario, and
+               // the tests are expected to change state. By calling
+               // this setup func between tests, we (re)create the
+               // scenario as specified, using a new unique block
+               // locator to prevent interference from previous
+               // tests.
+
+               setupScenario := func() (string, []byte) {
+                       nextKey++
+                       blk := []byte(fmt.Sprintf("%d", nextKey))
+                       loc := fmt.Sprintf("%x", md5.Sum(blk))
+                       c.Log("\t", loc)
+                       putS3Obj(scenario.dataT, loc, blk)
+                       putS3Obj(scenario.recentT, "recent/"+loc, nil)
+                       putS3Obj(scenario.trashT, "trash/"+loc, blk)
+                       v.serverClock.now = &t0
+                       return loc, blk
+               }
+
+               // Check canGet
+               loc, blk := setupScenario()
+               buf := make([]byte, len(blk))
+               _, err := v.Get(loc, buf)
+               c.Check(err == nil, check.Equals, scenario.canGet)
+               if err != nil {
+                       c.Check(os.IsNotExist(err), check.Equals, true)
+               }
+
+               // Call Trash, then check canTrash and canGetAfterTrash
+               loc, blk = setupScenario()
+               err = v.Trash(loc)
+               c.Check(err == nil, check.Equals, scenario.canTrash)
+               _, err = v.Get(loc, buf)
+               c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
+               if err != nil {
+                       c.Check(os.IsNotExist(err), check.Equals, true)
+               }
+
+               // Call Untrash, then check canUntrash
+               loc, blk = setupScenario()
+               err = v.Untrash(loc)
+               c.Check(err == nil, check.Equals, scenario.canUntrash)
+               if scenario.dataT != none || scenario.trashT != none {
+                       // In all scenarios where the data exists, we
+                       // should be able to Get after Untrash --
+                       // regardless of timestamps, errors, race
+                       // conditions, etc.
+                       _, err = v.Get(loc, buf)
+                       c.Check(err, check.IsNil)
+               }
+
+               // Call EmptyTrash, then check haveTrashAfterEmpty and
+               // freshAfterEmpty
+               loc, blk = setupScenario()
+               v.EmptyTrash()
+               _, err = v.Bucket.Head("trash/"+loc, nil)
+               c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
+               if scenario.freshAfterEmpty {
+                       t, err := v.Mtime(loc)
+                       c.Check(err, check.IsNil)
+                       // new mtime must be current (with an
+                       // allowance for 1s timestamp precision)
+                       c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
+               }
+
+               // Check for current Mtime after Put (applies to all
+               // scenarios)
+               loc, blk = setupScenario()
+               err = v.Put(loc, blk)
+               c.Check(err, check.IsNil)
+               t, err := v.Mtime(loc)
+               c.Check(err, check.IsNil)
+               c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
+       }
+}
+
 // PutRaw skips the ContentMD5 test
 func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
        err := v.Bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
@@ -121,9 +320,9 @@ func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
 // while we do this.
 func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
        v.serverClock.now = &lastPut
-       err := v.Touch(locator)
-       if err != nil && !strings.Contains(err.Error(), "PutCopy returned old LastModified") {
-               log.Printf("Touch: %+v", err)
+       err := v.Bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
+       if err != nil {
+               panic(err)
        }
        v.serverClock.now = nil
 }
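
The scenarios in TestBackendStates above revolve around the volume's three per-block keys: the data object at loc, an empty recent/loc marker recording the last Put/Touch time (which is also what TouchWithDate now writes directly), and a trash/loc copy while the block is trashed. A hedged sketch of how the presence of those keys maps to states (illustrative only; the real keepstore logic also weighs the timestamps against trashLifetime and blobSignatureTTL):

    package main

    import "fmt"

    // blockState classifies a block by which of its three keys exist.
    // Purely illustrative; keepstore's actual decisions also depend on
    // the keys' timestamps.
    func blockState(haveData, haveRecent, haveTrash bool) string {
        switch {
        case haveData && haveTrash:
            return "trashed + untrashed copies (possible Trash/Put race)"
        case haveData && !haveRecent:
            return "data without recent/ marker (older version, or EmptyTrash/Put race)"
        case haveData:
            return "live block"
        case haveTrash:
            return "trashed (recoverable with Untrash until EmptyTrash deletes it)"
        default:
            return "no such block"
        }
    }

    func main() {
        fmt.Println(blockState(true, true, false)) // live block
        fmt.Println(blockState(false, true, true)) // trashed
    }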
index 62f63d57c8edb655b5078ebf637ce6d0ed0475bb..d11bc05192246a75e8ba4c95bd544b0712279ff6 100644 (file)
@@ -22,7 +22,7 @@ func RunTrashWorker(trashq *WorkQueue) {
 
 // TrashItem deletes the indicated block from every writable volume.
 func TrashItem(trashRequest TrashRequest) {
-       reqMtime := time.Unix(trashRequest.BlockMtime, 0)
+       reqMtime := time.Unix(0, trashRequest.BlockMtime)
        if time.Since(reqMtime) < blobSignatureTTL {
                log.Printf("WARNING: data manager asked to delete a %v old block %v (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
                        time.Since(reqMtime),
@@ -39,8 +39,8 @@ func TrashItem(trashRequest TrashRequest) {
                        log.Printf("%v Delete(%v): %v", volume, trashRequest.Locator, err)
                        continue
                }
-               if trashRequest.BlockMtime != mtime.Unix() {
-                       log.Printf("%v Delete(%v): mtime on volume is %v does not match trash list value %v", volume, trashRequest.Locator, mtime.Unix(), trashRequest.BlockMtime)
+               if trashRequest.BlockMtime != mtime.UnixNano() {
+                       log.Printf("%v Delete(%v): stored mtime %v does not match trash list value %v", volume, trashRequest.Locator, mtime.UnixNano(), trashRequest.BlockMtime)
                        continue
                }
 
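
The unit change above is easy to get backwards: time.Unix(sec, nsec) takes seconds as its first argument and nanoseconds as its second, so a nanosecond-resolution BlockMtime must be decoded as time.Unix(0, mtime). A standalone sketch of the round trip (not keepstore code):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        mtime := time.Date(2016, 10, 3, 19, 44, 31, 500000000, time.UTC)
        nanos := mtime.UnixNano() // what the trash list now carries

        // Correct decoding: nanoseconds go in the second argument.
        fmt.Println(time.Unix(0, nanos).Equal(mtime)) // true

        // time.Unix(nanos, 0) would read the same count as seconds and
        // land billions of years in the future -- the mismatch avoided
        // by using UnixNano on both ends of the trash list.
    }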
index d111caeac8e5b571202502e0aea63f07816365ba..94798d95acfd85216ad60982b71282d84530ef7d 100644 (file)
@@ -236,7 +236,7 @@ func performTrashWorkerTest(testData TrashWorkerTestData, t *testing.T) {
        // Create TrashRequest for the test
        trashRequest := TrashRequest{
                Locator:    testData.DeleteLocator,
-               BlockMtime: oldBlockTime.Unix(),
+               BlockMtime: oldBlockTime.UnixNano(),
        }
 
        // Run trash worker and put the trashRequest on trashq
index f8fe0d0ebce719c6c823fe9caa9fcce12324eb49..bc3e537a89a815037102af7fb920e8b9d2b84f61 100644 (file)
@@ -7,6 +7,7 @@ import (
        "os"
        "regexp"
        "sort"
+       "strconv"
        "strings"
        "time"
 
@@ -355,10 +356,22 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
        v := factory(t)
        defer v.Teardown()
 
+       // minMtime and maxMtime are the minimum and maximum
+       // acceptable values the index can report for our test
+       // blocks. 1-second precision is acceptable.
+       minMtime := time.Now().UTC().UnixNano()
+       minMtime -= minMtime % 1e9
+
        v.PutRaw(TestHash, TestBlock)
        v.PutRaw(TestHash2, TestBlock2)
        v.PutRaw(TestHash3, TestBlock3)
 
+       maxMtime := time.Now().UTC().UnixNano()
+       if maxMtime%1e9 > 0 {
+               maxMtime -= maxMtime % 1e9
+               maxMtime += 1e9
+       }
+
        // Blocks whose names aren't Keep hashes should be omitted from
        // index
        v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
@@ -371,15 +384,21 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
        indexRows := strings.Split(string(buf.Bytes()), "\n")
        sort.Strings(indexRows)
        sortedIndex := strings.Join(indexRows, "\n")
-       m, err := regexp.MatchString(
-               `^\n`+TestHash+`\+\d+ \d+\n`+
-                       TestHash3+`\+\d+ \d+\n`+
-                       TestHash2+`\+\d+ \d+$`,
-               sortedIndex)
-       if err != nil {
-               t.Error(err)
-       } else if !m {
+       m := regexp.MustCompile(
+               `^\n` + TestHash + `\+\d+ (\d+)\n` +
+                       TestHash3 + `\+\d+ \d+\n` +
+                       TestHash2 + `\+\d+ \d+$`,
+       ).FindStringSubmatch(sortedIndex)
+       if m == nil {
                t.Errorf("Got index %q for empty prefix", sortedIndex)
+       } else {
+               mtime, err := strconv.ParseInt(m[1], 10, 64)
+               if err != nil {
+                       t.Error(err)
+               } else if mtime < minMtime || mtime > maxMtime {
+                       t.Errorf("got %d for TestHash timestamp, expected %d <= t <= %d",
+                               mtime, minMtime, maxMtime)
+               }
        }
 
        for _, prefix := range []string{"f", "f15", "f15ac"} {
@@ -396,7 +415,7 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
 
        for _, prefix := range []string{"zero", "zip", "zilch"} {
                buf = new(bytes.Buffer)
-               v.IndexTo(prefix, buf)
+               err := v.IndexTo(prefix, buf)
                if err != nil {
                        t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
                } else if buf.Len() != 0 {
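
The minMtime/maxMtime bracket above tolerates volumes whose index reports timestamps with only 1-second precision: the lower bound is "now" rounded down to a whole second before the Puts, the upper bound "now" rounded up afterwards. The rounding arithmetic in isolation (equivalent to the test's, shown here only for clarity):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        now := time.Now().UTC().UnixNano()

        lo := now - now%1e9 // round down to a whole second
        hi := now
        if hi%1e9 > 0 {
            hi += 1e9 - hi%1e9 // round up to the next whole second
        }

        fmt.Println(lo <= now && now <= hi)     // true
        fmt.Println(lo%1e9 == 0 && hi%1e9 == 0) // both whole seconds
    }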
@@ -722,7 +741,7 @@ func testTrashUntrash(t TB, factory TestableVolumeFactory) {
        v := factory(t)
        defer v.Teardown()
        defer func() {
-               trashLifetime = 0 * time.Second
+               trashLifetime = 0
        }()
 
        trashLifetime = 3600 * time.Second
@@ -811,7 +830,7 @@ func testTrashEmptyTrashUntrash(t TB, factory TestableVolumeFactory) {
 
        // First set: EmptyTrash before reaching the trash deadline.
 
-       trashLifetime = 1 * time.Hour
+       trashLifetime = time.Hour
 
        v.PutRaw(TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*blobSignatureTTL))
@@ -861,23 +880,27 @@ func testTrashEmptyTrashUntrash(t TB, factory TestableVolumeFactory) {
        // Because we Touch'ed, need to backdate again for next set of tests
        v.TouchWithDate(TestHash, time.Now().Add(-2*blobSignatureTTL))
 
-       // Untrash should fail if the only block in the trash has
-       // already been untrashed.
+       // If the only block in the trash has already been untrashed,
+       // most volumes will fail a subsequent Untrash with a 404, but
+       // it's also acceptable for Untrash to succeed.
        err = v.Untrash(TestHash)
-       if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       if err != nil && !os.IsNotExist(err) {
+               t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
        }
 
-       // The failed Untrash should not interfere with our
+       // The additional Untrash should not interfere with our
        // already-untrashed copy.
        err = checkGet()
        if err != nil {
                t.Fatal(err)
        }
 
+       // Untrash might have updated the timestamp, so backdate again
+       v.TouchWithDate(TestHash, time.Now().Add(-2*blobSignatureTTL))
+
        // Second set: EmptyTrash after the trash deadline has passed.
 
-       trashLifetime = 1 * time.Nanosecond
+       trashLifetime = time.Nanosecond
 
        err = v.Trash(TestHash)
        if err != nil {
@@ -908,7 +931,6 @@ func testTrashEmptyTrashUntrash(t TB, factory TestableVolumeFactory) {
        if err == nil || !os.IsNotExist(err) {
                t.Fatalf("os.IsNotExist(%v) should have been true", err)
        }
-       // EmptryTrash
        v.EmptyTrash()
 
        // Untrash won't find it
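
Context for the backdating calls sprinkled through these tests: Trash deliberately refuses to trash blocks younger than blobSignatureTTL, since a client may still hold a valid permission signature for them; hence v.TouchWithDate(TestHash, time.Now().Add(-2*blobSignatureTTL)) before each Trash. A sketch of that guard (illustrative, mirroring the check visible in UnixVolume.Trash below):

    package main

    import (
        "fmt"
        "time"
    )

    // canTrash mirrors the guard in Trash: blocks younger than
    // blobSignatureTTL must not be trashed, because clients may still
    // hold valid signatures for them.
    func canTrash(mtime time.Time, blobSignatureTTL time.Duration) bool {
        return time.Since(mtime) >= blobSignatureTTL
    }

    func main() {
        ttl := time.Hour
        fmt.Println(canTrash(time.Now(), ttl))             // false: too fresh
        fmt.Println(canTrash(time.Now().Add(-2*ttl), ttl)) // true: backdated
    }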
index 7aff85e59a4357acb1e27ce5386756feb96fa0e1..5982fb0484eae0a37ef09e24b9526492c5b0459f 100644 (file)
@@ -138,9 +138,8 @@ func (v *UnixVolume) Touch(loc string) error {
                return e
        }
        defer unlockfile(f)
-       now := time.Now().Unix()
-       utime := syscall.Utimbuf{now, now}
-       return syscall.Utime(p, &utime)
+       ts := syscall.NsecToTimespec(time.Now().UnixNano())
+       return syscall.UtimesNano(p, []syscall.Timespec{ts, ts})
 }
 
 // Mtime returns the stored timestamp for the given locator.
@@ -307,7 +306,7 @@ var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
 //     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
 //
 func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
-       var lastErr error = nil
+       var lastErr error
        rootdir, err := os.Open(v.root)
        if err != nil {
                return err
@@ -353,7 +352,7 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
                        _, err = fmt.Fprint(w,
                                name,
                                "+", fileInfo[0].Size(),
-                               " ", fileInfo[0].ModTime().Unix(),
+                               " ", fileInfo[0].ModTime().UnixNano(),
                                "\n")
                }
                blockdir.Close()
@@ -398,10 +397,8 @@ func (v *UnixVolume) Trash(loc string) error {
        // anyway (because the permission signatures have expired).
        if fi, err := os.Stat(p); err != nil {
                return err
-       } else {
-               if time.Since(fi.ModTime()) < blobSignatureTTL {
-                       return nil
-               }
+       } else if time.Since(fi.ModTime()) < blobSignatureTTL {
+               return nil
        }
 
        if trashLifetime == 0 {
@@ -507,11 +504,14 @@ func (v *UnixVolume) String() string {
        return fmt.Sprintf("[UnixVolume %s]", v.root)
 }
 
-// Writable returns false if all future Put, Mtime, and Delete calls are expected to fail.
+// Writable returns false if all future Put, Mtime, and Delete calls
+// are expected to fail.
 func (v *UnixVolume) Writable() bool {
        return !v.readonly
 }
 
+// Replication returns the number of replicas promised by the
+// underlying device (currently assumed to be 1).
 func (v *UnixVolume) Replication() int {
        return 1
 }
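
The Touch change above trades syscall.Utime (whole seconds) for syscall.UtimesNano, matching the switch to UnixNano in IndexTo and the trash lists. A standalone sketch of a nanosecond-precision touch on Linux (the portable alternative would be os.Chtimes; the stored precision still depends on the filesystem):

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "syscall"
        "time"
    )

    func main() {
        f, err := ioutil.TempFile("", "touch-demo")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        f.Close()

        // Set atime and mtime with nanosecond resolution, as
        // UnixVolume.Touch now does.
        ts := syscall.NsecToTimespec(time.Now().UnixNano())
        if err := syscall.UtimesNano(f.Name(), []syscall.Timespec{ts, ts}); err != nil {
            panic(err)
        }

        fi, err := os.Stat(f.Name())
        if err != nil {
            panic(err)
        }
        fmt.Println(fi.ModTime().UnixNano())
    }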
index 74c67f2dd0a6c1ee69748d24c162d95b5c98b16a..6b31795293ebd38eaa3837316fe001519c91b072 100644 (file)
@@ -98,7 +98,7 @@ func TestWorkQueueDoneness(t *testing.T) {
        gate := make(chan struct{})
        go func() {
                <-gate
-               for _ = range b.NextItem {
+               for range b.NextItem {
                        <-gate
                        time.Sleep(time.Millisecond)
                        b.DoneItem <- struct{}{}
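
The for range change above is purely idiomatic: when the received value is unused, Go (since 1.4) allows omitting the loop variable entirely, and gofmt -s rewrites for _ = range to for range. Behavior is identical; the loop still receives until the channel is closed:

    package main

    import "fmt"

    func main() {
        ch := make(chan struct{}, 3)
        for i := 0; i < 3; i++ {
            ch <- struct{}{}
        }
        close(ch)

        n := 0
        for range ch { // value unused: no loop variable needed
            n++
        }
        fmt.Println(n) // 3
    }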
index db799bc16b806beb09313fb56bbcee8e88f4a00a..c78f1c6b8d63160c40e7e57d9b29f57d07e59dcf 100644 (file)
@@ -211,8 +211,10 @@ class BaseComputeNodeDriver(RetryMixin):
         # libcloud compute drivers typically raise bare Exceptions to
         # represent API errors.  Return True for any exception that is
         # exactly an Exception, or a better-known higher-level exception.
-        if (exception is BaseHTTPError and
-            self.message and self.message.startswith("InvalidInstanceID.NotFound")):
+        if (type(exception) is BaseHTTPError and
+            exception.message and
+            (exception.message.startswith("InvalidInstanceID.NotFound") or
+             exception.message.startswith("InstanceLimitExceeded"))):
             return True
         return (isinstance(exception, cls.CLOUD_ERRORS) or
                 type(exception) is Exception)
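
The fix above corrects two bugs at once: "exception is BaseHTTPError" compared an instance to a class (effectively always false for exception instances), and "self.message" consulted the driver rather than the exception. With those fixed, only specific, known-transient API messages are classified as retryable. The same classify-by-message-prefix idea, sketched in Go for contrast (everything here is hypothetical, not part of nodemanager):

    package main

    import (
        "fmt"
        "strings"
    )

    // retryablePrefixes lists API error messages treated as transient;
    // the prefixes mirror the Python change above.
    var retryablePrefixes = []string{
        "InvalidInstanceID.NotFound",
        "InstanceLimitExceeded",
    }

    func isRetryable(msg string) bool {
        for _, prefix := range retryablePrefixes {
            if strings.HasPrefix(msg, prefix) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isRetryable("InstanceLimitExceeded: too many")) // true
        fmt.Println(isRetryable("Unknown"))                         // false
    }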
index 227b5e5f3471ba4cf2e484461cd2c651f26a96e1..c3774c1b7afd8fa3c53f36ace1444daab0a22d81 100644 (file)
@@ -11,7 +11,10 @@ import mock
 import pykka
 import threading
 
+from libcloud.common.exceptions import BaseHTTPError
+
 import arvnodeman.computenode.dispatch as dispatch
+from arvnodeman.computenode.driver import BaseComputeNodeDriver
 from . import testutil
 
 class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
@@ -25,6 +28,7 @@ class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
         self.api_client.nodes().update().execute.side_effect = arvados_effect
         self.cloud_client = mock.MagicMock(name='cloud_client')
         self.cloud_client.create_node.return_value = testutil.cloud_node_mock(1)
+        self.cloud_client.is_cloud_exception = BaseComputeNodeDriver.is_cloud_exception
 
     def make_actor(self, arv_node=None):
         if not hasattr(self, 'timer'):
@@ -86,6 +90,28 @@ class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
         self.make_actor()
         self.wait_for_assignment(self.setup_actor, 'cloud_node')
 
+    def test_unknown_basehttperror_not_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            BaseHTTPError(400, "Unknown"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        finished = threading.Event()
+        self.setup_actor.subscribe(lambda _: finished.set())
+        assert(finished.wait(self.TIMEOUT))
+        self.assertEqual(0, self.cloud_client.post_create_node.call_count)
+
+    def test_known_basehttperror_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            BaseHTTPError(400, "InstanceLimitExceeded"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.assertEqual(1, self.cloud_client.post_create_node.call_count)
+
     def test_failed_post_create_retried(self):
         self.make_mocks()
         self.cloud_client.post_create_node.side_effect = [
index 842f612fac089aed33ed9ffcd68d650955dad5f1..2ebe13c895eb16a775ca247b028a60cdc6eba59c 100755 (executable)
@@ -101,6 +101,9 @@ wait_for_arvbox() {
 }
 
 run() {
+    CONFIG=$1
+    TAG=$2
+
     if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
         echo "Container $ARVBOX_CONTAINER is already running"
         exit 0
@@ -110,8 +113,13 @@ run() {
         echo "Container $ARVBOX_CONTAINER already exists but is not running; use restart or rebuild"
         exit 1
     fi
+
+    if test ! -z "$TAG"
+    then
+        TAG=":$TAG"
+    fi
 
-    if echo "$1" | grep '^public' ; then
+    if echo "$CONFIG" | grep '^public' ; then
         if test -n "$ARVBOX_PUBLISH_IP" ; then
             localip=$ARVBOX_PUBLISH_IP
         else
@@ -136,7 +144,7 @@ run() {
         PUBLIC=""
     fi
 
-    if echo "$1" | grep 'demo$' ; then
+    if echo "$CONFIG" | grep 'demo$' ; then
         if test -d "$ARVBOX_DATA" ; then
             echo "It looks like you already have a development container named $ARVBOX_CONTAINER."
             echo "Set ARVBOX_CONTAINER to set a different name for your demo container"
@@ -153,7 +161,7 @@ run() {
                --privileged \
                --volumes-from $ARVBOX_CONTAINER-data \
                $PUBLIC \
-               arvados/arvbox-demo
+               arvados/arvbox-demo$TAG
         updateconf
         wait_for_arvbox
     else
@@ -167,8 +175,7 @@ run() {
             git clone https://github.com/curoverse/sso-devise-omniauth-provider.git "$SSO_ROOT"
         fi
 
-        if test "$1" = test ; then
-            shift
+        if test "$CONFIG" = test ; then
 
             mkdir -p $VAR_DATA/test
 
@@ -184,7 +191,7 @@ run() {
                    "--volume=$GEMS:/var/lib/gems:rw" \
                    "--volume=$PIPCACHE:/var/lib/pip:rw" \
                    "--volume=$GOSTUFF:/var/lib/gopath:rw" \
-                   arvados/arvbox-dev \
+                   arvados/arvbox-dev$TAG \
                    /usr/local/bin/runsvinit -svdir=/etc/test-service
 
             docker exec -ti \
@@ -210,7 +217,7 @@ run() {
                    WORKSPACE=/usr/src/arvados \
                    GEM_HOME=/var/lib/gems \
                    "$@"
-        elif echo "$1" | grep 'dev$' ; then
+        elif echo "$CONFIG" | grep 'dev$' ; then
             docker run \
                    --detach \
                    --name=$ARVBOX_CONTAINER \
@@ -224,12 +231,12 @@ run() {
                    "--volume=$PIPCACHE:/var/lib/pip:rw" \
                    "--volume=$GOSTUFF:/var/lib/gopath:rw" \
                    $PUBLIC \
-                   arvados/arvbox-dev
+                   arvados/arvbox-dev$TAG
             updateconf
             wait_for_arvbox
             echo "The Arvados source code is checked out at: $ARVADOS_ROOT"
         else
-            echo "Unknown configuration '$1'"
+            echo "Unknown configuration '$CONFIG'"
         fi
     fi
 }
@@ -426,7 +433,7 @@ case "$subcmd" in
         echo
         echo "build   <config>      build arvbox Docker image"
         echo "rebuild <config>      build arvbox Docker image, no layer cache"
-        echo "start|run <config>  start $ARVBOX_CONTAINER container"
+        echo "start|run <config> [tag]  start $ARVBOX_CONTAINER container"
         echo "open       open arvbox workbench in a web browser"
         echo "shell      enter arvbox shell"
         echo "ip         print arvbox docker container ip address"
index a04c06da44292f7643edbeaa6973371b5d05def8..57105ea88bf4a58bf397e0b2fd74c1d520afee06 100644 (file)
@@ -1,8 +1,12 @@
 FROM arvados/arvbox-base
+ARG arvados_version=master
+ARG sso_version=master
 
 RUN cd /usr/src && \
-    git clone https://github.com/curoverse/arvados.git && \
-    git clone https://github.com/curoverse/sso-devise-omniauth-provider.git sso
+    git clone --no-checkout https://github.com/curoverse/arvados.git && \
+    git -C arvados checkout ${arvados_version} && \
+    git clone --no-checkout https://github.com/curoverse/sso-devise-omniauth-provider.git sso && \
+    git -C sso checkout ${sso_version}
 
 ADD service/ /var/lib/arvbox/service
 RUN rmdir /etc/service && ln -sf /var/lib/arvbox/service /etc
index 76062d6c0eb441434c10d4be2faee67711e2f1bf..e68642fecd5a2f0ff343eb869d569a43b945cd6a 100644 (file)
@@ -6,7 +6,7 @@ RUN apt-get update && \
 
 RUN set -e && \
  PJS=phantomjs-1.9.7-linux-x86_64 && \
- curl -L -o/tmp/$PJS.tar.bz2 https://bitbucket.org/ariya/phantomjs/downloads/$PJS.tar.bz2 && \
+ curl -L -o/tmp/$PJS.tar.bz2 http://cache.arvados.org/$PJS.tar.bz2 && \
  tar -C /usr/local -xjf /tmp/$PJS.tar.bz2 && \
  ln -s ../$PJS/bin/phantomjs /usr/local/bin/