Merge branch 'master' into 13822-nm-delayed-daemon
author Peter Amstutz <pamstutz@veritasgenetics.com>
Tue, 17 Jul 2018 14:26:29 +0000 (10:26 -0400)
committer Peter Amstutz <pamstutz@veritasgenetics.com>
Tue, 17 Jul 2018 14:27:05 +0000 (10:27 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>

582 files changed:
.licenseignore
AUTHORS
apps/workbench/.gitignore
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/models/session_db.js
apps/workbench/app/controllers/users_controller.rb
apps/workbench/app/controllers/work_units_controller.rb
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/helpers/version_helper.rb
apps/workbench/app/models/arvados_base.rb
apps/workbench/app/models/user.rb
apps/workbench/app/views/application/_report_issue_popup.html.erb
apps/workbench/app/views/layouts/body.html.erb
apps/workbench/app/views/tests/mithril.html
apps/workbench/app/views/users/inactive.html.erb
apps/workbench/app/views/users/link_account.html.erb [new file with mode: 0644]
apps/workbench/config/application.default.yml
apps/workbench/config/environments/production.rb.example
apps/workbench/config/environments/test.rb.example
apps/workbench/config/routes.rb
apps/workbench/lib/app_version.rb
apps/workbench/test/controllers/application_controller_test.rb
apps/workbench/test/controllers/collections_controller_test.rb
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/controllers/repositories_controller_test.rb
apps/workbench/test/controllers/users_controller_test.rb
apps/workbench/test/integration/collections_test.rb
apps/workbench/test/integration/link_account_test.rb [new file with mode: 0644]
apps/workbench/test/integration_helper.rb
apps/workbench/test/test_helper.rb
build/build-dev-docker-jobs-image.sh
build/build.list
build/check-copyright-notices
build/libcloud-pin.sh
build/package-build-dockerfiles/Makefile
build/package-build-dockerfiles/centos7/Dockerfile
build/package-build-dockerfiles/debian8/Dockerfile
build/package-build-dockerfiles/debian9/Dockerfile
build/package-build-dockerfiles/ubuntu1404/Dockerfile
build/package-build-dockerfiles/ubuntu1604/Dockerfile
build/package-test-dockerfiles/ubuntu1404/Dockerfile
build/package-testing/test-packages-ubuntu1204.sh [deleted symlink]
build/run-build-docker-jobs-image.sh
build/run-build-packages-one-target.sh
build/run-build-packages.sh
build/run-library.sh
build/run-tests.sh
cmd/arvados-client/cmd.go
cmd/arvados-server/arvados-controller.service [new file with mode: 0644]
cmd/arvados-server/cmd.go [new file with mode: 0644]
doc/Rakefile
doc/_config.yml
doc/_includes/_container_scheduling_parameters.liquid
doc/_includes/_events_py.liquid [deleted file]
doc/_includes/_example_sdk_go_imports.liquid [deleted file]
doc/_includes/_install_compute_docker.liquid
doc/_includes/_navbar_top.liquid
doc/_layouts/default.html.liquid
doc/admin/activation.html.textile.liquid [new file with mode: 0644]
doc/admin/change-account-owner.html.textile.liquid [deleted file]
doc/admin/index.html.textile.liquid [new file with mode: 0644]
doc/admin/merge-remote-account.html.textile.liquid
doc/admin/migrating-providers.html.textile.liquid [new file with mode: 0644]
doc/admin/spot-instances.html.textile.liquid [new file with mode: 0644]
doc/admin/storage-classes.html.textile.liquid [new file with mode: 0644]
doc/admin/upgrading.html.textile.liquid [new file with mode: 0644]
doc/api/execution.html.textile.liquid
doc/api/methods.html.textile.liquid
doc/api/methods/collections.html.textile.liquid
doc/api/methods/container_requests.html.textile.liquid
doc/api/methods/groups.html.textile.liquid
doc/api/methods/links.html.textile.liquid
doc/api/methods/nodes.html.textile.liquid
doc/api/permission-model.html.textile.liquid
doc/api/storage.html.textile.liquid
doc/api/tokens.html.textile.liquid
doc/architecture/Arvados_arch.odg [new file with mode: 0644]
doc/architecture/index.html.textile.liquid [new file with mode: 0644]
doc/css/images.css [new file with mode: 0644]
doc/images/Arvados_arch.svg [new file with mode: 0644]
doc/index.html.liquid
doc/install/arvados-on-kubernetes-GKE.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes-minikube.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes.html.textile.liquid [new file with mode: 0644]
doc/install/cheat_sheet.html.textile.liquid
doc/install/configure-azure-blob-storage.html.textile.liquid
doc/install/configure-fs-storage.html.textile.liquid [new file with mode: 0644]
doc/install/configure-s3-object-storage.html.textile.liquid [new file with mode: 0644]
doc/install/create-standard-objects.html.textile.liquid [deleted file]
doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
doc/install/crunch2-slurm/install-slurm.html.textile.liquid
doc/install/index.html.textile.liquid
doc/install/install-arv-git-httpd.html.textile.liquid
doc/install/install-components.html.textile.liquid [new file with mode: 0644]
doc/install/install-composer.html.textile.liquid [new file with mode: 0644]
doc/install/install-keep-balance.html.textile.liquid
doc/install/install-keepproxy.html.textile.liquid
doc/install/install-keepstore.html.textile.liquid
doc/install/install-manual-prerequisites.html.textile.liquid
doc/install/install-nodemanager.html.textile.liquid
doc/install/install-postgresql.html.textile.liquid
doc/install/migrate-docker19.html.textile.liquid
doc/sdk/R/index.html.textile.liquid [deleted file]
doc/sdk/go/index.html.textile.liquid
doc/sdk/python/crunch-utility-libraries.html.textile.liquid
doc/sdk/python/events.html.textile.liquid
doc/user/cwl/cwl-extensions.html.textile.liquid
doc/user/index.html.textile.liquid
doc/user/topics/arvados-sync-groups.html.textile.liquid
doc/user/topics/link-accounts.html.textile.liquid [new file with mode: 0644]
doc/user/topics/storage-classes.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-trait-search.html.textile.liquid
lib/cmd/cmd.go
lib/cmd/cmd_test.go
lib/controller/cmd.go [new file with mode: 0644]
lib/controller/handler.go [new file with mode: 0644]
lib/controller/handler_test.go [new file with mode: 0644]
lib/dispatchcloud/node_size.go
lib/dispatchcloud/node_size_test.go
lib/service/cmd.go [new file with mode: 0644]
sdk/R/NAMESPACE
sdk/R/R/Arvados.R
sdk/R/R/ArvadosFile.R
sdk/R/R/Collection.R
sdk/R/R/CollectionTree.R
sdk/R/R/HttpParser.R
sdk/R/R/HttpRequest.R
sdk/R/R/RESTService.R
sdk/R/R/Subcollection.R
sdk/R/R/autoGenAPI.R [new file with mode: 0644]
sdk/R/R/util.R
sdk/R/R/zzz.R [new file with mode: 0644]
sdk/R/README.Rmd
sdk/R/createDoc.R [new file with mode: 0644]
sdk/R/install_deps.R
sdk/R/man/Arvados.Rd
sdk/R/man/api_client_authorizations.create.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.create_system_auth.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.current.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.delete.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.get.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.list.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.update.Rd [new file with mode: 0644]
sdk/R/man/api_clients.create.Rd [new file with mode: 0644]
sdk/R/man/api_clients.delete.Rd [new file with mode: 0644]
sdk/R/man/api_clients.get.Rd [new file with mode: 0644]
sdk/R/man/api_clients.list.Rd [new file with mode: 0644]
sdk/R/man/api_clients.update.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.create.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.delete.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.get.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.list.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.update.Rd [new file with mode: 0644]
sdk/R/man/collections.create.Rd [new file with mode: 0644]
sdk/R/man/collections.delete.Rd [new file with mode: 0644]
sdk/R/man/collections.get.Rd [new file with mode: 0644]
sdk/R/man/collections.list.Rd [new file with mode: 0644]
sdk/R/man/collections.provenance.Rd [new file with mode: 0644]
sdk/R/man/collections.trash.Rd [new file with mode: 0644]
sdk/R/man/collections.untrash.Rd [new file with mode: 0644]
sdk/R/man/collections.update.Rd [new file with mode: 0644]
sdk/R/man/collections.used_by.Rd [new file with mode: 0644]
sdk/R/man/container_requests.create.Rd [new file with mode: 0644]
sdk/R/man/container_requests.delete.Rd [new file with mode: 0644]
sdk/R/man/container_requests.get.Rd [new file with mode: 0644]
sdk/R/man/container_requests.list.Rd [new file with mode: 0644]
sdk/R/man/container_requests.update.Rd [new file with mode: 0644]
sdk/R/man/containers.auth.Rd [new file with mode: 0644]
sdk/R/man/containers.create.Rd [new file with mode: 0644]
sdk/R/man/containers.current.Rd [new file with mode: 0644]
sdk/R/man/containers.delete.Rd [new file with mode: 0644]
sdk/R/man/containers.get.Rd [new file with mode: 0644]
sdk/R/man/containers.list.Rd [new file with mode: 0644]
sdk/R/man/containers.lock.Rd [new file with mode: 0644]
sdk/R/man/containers.secret_mounts.Rd [new file with mode: 0644]
sdk/R/man/containers.unlock.Rd [new file with mode: 0644]
sdk/R/man/containers.update.Rd [new file with mode: 0644]
sdk/R/man/groups.contents.Rd [new file with mode: 0644]
sdk/R/man/groups.create.Rd [new file with mode: 0644]
sdk/R/man/groups.delete.Rd [new file with mode: 0644]
sdk/R/man/groups.get.Rd [new file with mode: 0644]
sdk/R/man/groups.list.Rd [new file with mode: 0644]
sdk/R/man/groups.trash.Rd [new file with mode: 0644]
sdk/R/man/groups.untrash.Rd [new file with mode: 0644]
sdk/R/man/groups.update.Rd [new file with mode: 0644]
sdk/R/man/humans.create.Rd [new file with mode: 0644]
sdk/R/man/humans.delete.Rd [new file with mode: 0644]
sdk/R/man/humans.get.Rd [new file with mode: 0644]
sdk/R/man/humans.list.Rd [new file with mode: 0644]
sdk/R/man/humans.update.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.create.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.delete.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.get.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.list.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.update.Rd [new file with mode: 0644]
sdk/R/man/jobs.cancel.Rd [new file with mode: 0644]
sdk/R/man/jobs.create.Rd [new file with mode: 0644]
sdk/R/man/jobs.delete.Rd [new file with mode: 0644]
sdk/R/man/jobs.get.Rd [new file with mode: 0644]
sdk/R/man/jobs.list.Rd [new file with mode: 0644]
sdk/R/man/jobs.lock.Rd [new file with mode: 0644]
sdk/R/man/jobs.queue.Rd [new file with mode: 0644]
sdk/R/man/jobs.queue_size.Rd [new file with mode: 0644]
sdk/R/man/jobs.update.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.create.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.delete.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.get.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.list.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.ping.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.update.Rd [new file with mode: 0644]
sdk/R/man/keep_services.accessible.Rd [new file with mode: 0644]
sdk/R/man/keep_services.create.Rd [new file with mode: 0644]
sdk/R/man/keep_services.delete.Rd [new file with mode: 0644]
sdk/R/man/keep_services.get.Rd [new file with mode: 0644]
sdk/R/man/keep_services.list.Rd [new file with mode: 0644]
sdk/R/man/keep_services.update.Rd [new file with mode: 0644]
sdk/R/man/links.create.Rd [new file with mode: 0644]
sdk/R/man/links.delete.Rd [new file with mode: 0644]
sdk/R/man/links.get.Rd [new file with mode: 0644]
sdk/R/man/links.get_permissions.Rd [new file with mode: 0644]
sdk/R/man/links.list.Rd [new file with mode: 0644]
sdk/R/man/links.update.Rd [new file with mode: 0644]
sdk/R/man/listAll.Rd [new file with mode: 0644]
sdk/R/man/logs.create.Rd [new file with mode: 0644]
sdk/R/man/logs.delete.Rd [new file with mode: 0644]
sdk/R/man/logs.get.Rd [new file with mode: 0644]
sdk/R/man/logs.list.Rd [new file with mode: 0644]
sdk/R/man/logs.update.Rd [new file with mode: 0644]
sdk/R/man/nodes.create.Rd [new file with mode: 0644]
sdk/R/man/nodes.delete.Rd [new file with mode: 0644]
sdk/R/man/nodes.get.Rd [new file with mode: 0644]
sdk/R/man/nodes.list.Rd [new file with mode: 0644]
sdk/R/man/nodes.ping.Rd [new file with mode: 0644]
sdk/R/man/nodes.update.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.cancel.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.create.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.delete.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.get.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.list.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.update.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.create.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.delete.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.get.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.list.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.update.Rd [new file with mode: 0644]
sdk/R/man/print.Arvados.Rd [deleted file]
sdk/R/man/projects.create.Rd [new file with mode: 0644]
sdk/R/man/projects.delete.Rd [new file with mode: 0644]
sdk/R/man/projects.get.Rd [new file with mode: 0644]
sdk/R/man/projects.list.Rd [new file with mode: 0644]
sdk/R/man/projects.update.Rd [new file with mode: 0644]
sdk/R/man/repositories.create.Rd [new file with mode: 0644]
sdk/R/man/repositories.delete.Rd [new file with mode: 0644]
sdk/R/man/repositories.get.Rd [new file with mode: 0644]
sdk/R/man/repositories.get_all_permissions.Rd [new file with mode: 0644]
sdk/R/man/repositories.list.Rd [new file with mode: 0644]
sdk/R/man/repositories.update.Rd [new file with mode: 0644]
sdk/R/man/specimens.create.Rd [new file with mode: 0644]
sdk/R/man/specimens.delete.Rd [new file with mode: 0644]
sdk/R/man/specimens.get.Rd [new file with mode: 0644]
sdk/R/man/specimens.list.Rd [new file with mode: 0644]
sdk/R/man/specimens.update.Rd [new file with mode: 0644]
sdk/R/man/traits.create.Rd [new file with mode: 0644]
sdk/R/man/traits.delete.Rd [new file with mode: 0644]
sdk/R/man/traits.get.Rd [new file with mode: 0644]
sdk/R/man/traits.list.Rd [new file with mode: 0644]
sdk/R/man/traits.update.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.create.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.delete.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.get.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.list.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.new.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.sign.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.signatures.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.update.Rd [new file with mode: 0644]
sdk/R/man/users.activate.Rd [new file with mode: 0644]
sdk/R/man/users.create.Rd [new file with mode: 0644]
sdk/R/man/users.current.Rd [new file with mode: 0644]
sdk/R/man/users.delete.Rd [new file with mode: 0644]
sdk/R/man/users.get.Rd [new file with mode: 0644]
sdk/R/man/users.list.Rd [new file with mode: 0644]
sdk/R/man/users.setup.Rd [new file with mode: 0644]
sdk/R/man/users.system.Rd [new file with mode: 0644]
sdk/R/man/users.unsetup.Rd [new file with mode: 0644]
sdk/R/man/users.update.Rd [new file with mode: 0644]
sdk/R/man/users.update_uuid.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.create.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.delete.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.get.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.get_all_logins.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.list.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.logins.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.update.Rd [new file with mode: 0644]
sdk/R/man/workflows.create.Rd [new file with mode: 0644]
sdk/R/man/workflows.delete.Rd [new file with mode: 0644]
sdk/R/man/workflows.get.Rd [new file with mode: 0644]
sdk/R/man/workflows.list.Rd [new file with mode: 0644]
sdk/R/man/workflows.update.Rd [new file with mode: 0644]
sdk/R/run_test.R
sdk/R/tests/testthat.R
sdk/R/tests/testthat/fakes/FakeArvados.R
sdk/R/tests/testthat/fakes/FakeHttpParser.R
sdk/R/tests/testthat/fakes/FakeHttpRequest.R
sdk/R/tests/testthat/fakes/FakeRESTService.R
sdk/R/tests/testthat/test-Arvados.R [deleted file]
sdk/R/tests/testthat/test-ArvadosFile.R
sdk/R/tests/testthat/test-Collection.R
sdk/R/tests/testthat/test-CollectionTree.R
sdk/R/tests/testthat/test-HttpParser.R
sdk/R/tests/testthat/test-HttpRequest.R
sdk/R/tests/testthat/test-RESTService.R
sdk/R/tests/testthat/test-Subcollection.R
sdk/R/tests/testthat/test-util.R
sdk/cli/arvados-cli.gemspec
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arv-cwl-schema.yml
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/arvdocker.py
sdk/cwl/arvados_cwl/arvjob.py
sdk/cwl/arvados_cwl/arvtool.py
sdk/cwl/arvados_cwl/arvworkflow.py
sdk/cwl/arvados_cwl/context.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/crunch_script.py
sdk/cwl/arvados_cwl/done.py
sdk/cwl/arvados_cwl/fsaccess.py
sdk/cwl/arvados_cwl/http.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/pathmapper.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/arvados_cwl/task_queue.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/util.py [new file with mode: 0644]
sdk/cwl/arvados_version.py
sdk/cwl/setup.py
sdk/cwl/tests/12213-keepref-expr.cwl
sdk/cwl/tests/12213-keepref-job.yml
sdk/cwl/tests/12213-keepref-tool.cwl
sdk/cwl/tests/12213-keepref-wf.cwl
sdk/cwl/tests/12418-glob-empty-collection.cwl
sdk/cwl/tests/arvados-tests.sh
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/collection_per_tool/collection_per_tool_packed.cwl
sdk/cwl/tests/makes_intermediates/echo.cwl [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/hello1.txt [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/run_in_single.cwl [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/subwf.cwl [new file with mode: 0644]
sdk/cwl/tests/secondary/ls.cwl
sdk/cwl/tests/secondary/sub.cwl
sdk/cwl/tests/secondary/wf-job.yml
sdk/cwl/tests/secondary/wf.cwl
sdk/cwl/tests/secondaryFiles/example1.cwl [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/example3.cwl [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/hello.txt [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/hello.txt.idx [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/inp3.yml [new file with mode: 0644]
sdk/cwl/tests/secret_test_job.yml
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_http.py [new file with mode: 0644]
sdk/cwl/tests/test_job.py
sdk/cwl/tests/test_make_output.py
sdk/cwl/tests/test_pathmapper.py
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/test_tq.py [new file with mode: 0644]
sdk/cwl/tests/test_util.py [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir1.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir3.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir4.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir5.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir6.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir6a.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir7.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir7a.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/inp1/hello.txt [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf1.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf3.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf4.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf5.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf6.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf7.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/check_mem.py
sdk/cwl/tests/wf/echo-subwf.cwl
sdk/cwl/tests/wf/echo-wf.cwl
sdk/cwl/tests/wf/echo_a.cwl
sdk/cwl/tests/wf/echo_b.cwl
sdk/cwl/tests/wf/expect_packed.cwl
sdk/cwl/tests/wf/runin-reqs-wf.cwl
sdk/cwl/tests/wf/runin-reqs-wf2.cwl
sdk/cwl/tests/wf/runin-reqs-wf3.cwl
sdk/cwl/tests/wf/runin-reqs-wf4.cwl
sdk/cwl/tests/wf/secret_job.cwl
sdk/cwl/tests/wf/secret_wf.cwl
sdk/cwl/tests/wf/submit_wf_runner_resources.cwl [new file with mode: 0644]
sdk/go/arvados/byte_size.go [new file with mode: 0644]
sdk/go/arvados/byte_size_test.go [new file with mode: 0644]
sdk/go/arvados/client.go
sdk/go/arvados/client_test.go
sdk/go/arvados/collection.go
sdk/go/arvados/config.go
sdk/go/arvados/config_test.go [new file with mode: 0644]
sdk/go/arvados/container.go
sdk/go/arvados/fs_backend.go [new file with mode: 0644]
sdk/go/arvados/fs_base.go [new file with mode: 0644]
sdk/go/arvados/fs_collection.go [moved from sdk/go/arvados/collection_fs.go with 60% similarity]
sdk/go/arvados/fs_collection_test.go [moved from sdk/go/arvados/collection_fs_test.go with 99% similarity]
sdk/go/arvados/fs_deferred.go [new file with mode: 0644]
sdk/go/arvados/fs_filehandle.go [new file with mode: 0644]
sdk/go/arvados/fs_getternode.go [new file with mode: 0644]
sdk/go/arvados/fs_lookup.go [new file with mode: 0644]
sdk/go/arvados/fs_project.go [new file with mode: 0644]
sdk/go/arvados/fs_project_test.go [new file with mode: 0644]
sdk/go/arvados/fs_site.go [new file with mode: 0644]
sdk/go/arvados/fs_site_test.go [new file with mode: 0644]
sdk/go/arvados/fs_users.go [new file with mode: 0644]
sdk/go/arvados/group.go
sdk/go/arvados/keep_service.go
sdk/go/arvados/keep_service_test.go [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadostest/fixtures.go
sdk/go/arvadostest/run_servers.go
sdk/go/health/aggregator.go
sdk/go/health/aggregator_test.go
sdk/go/httpserver/error.go [new file with mode: 0644]
sdk/go/httpserver/id_generator.go
sdk/go/httpserver/logger.go
sdk/go/httpserver/logger_test.go
sdk/go/httpserver/responsewriter.go
sdk/go/keepclient/discover_test.go
sdk/go/keepclient/keepclient.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
sdk/python/arvados/__init__.py
sdk/python/arvados/api.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/_util.py
sdk/python/arvados/commands/keepdocker.py
sdk/python/arvados/commands/put.py
sdk/python/arvados/commands/run.py
sdk/python/arvados/keep.py
sdk/python/arvados/safeapi.py
sdk/python/setup.py
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
sdk/python/tests/test_arv_put.py
sdk/python/tests/test_collections.py
sdk/python/tests/test_keep_client.py
sdk/ruby/arvados.gemspec
services/api/Gemfile.lock
services/api/app/controllers/arvados/v1/collections_controller.rb
services/api/app/controllers/arvados/v1/containers_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/controllers/user_sessions_controller.rb
services/api/app/models/api_client_authorization.rb
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/app/models/group.rb
services/api/app/models/user.rb
services/api/config/application.default.yml
services/api/config/initializers/lograge.rb
services/api/config/routes.rb
services/api/db/migrate/20170704160233_yaml_to_json.rb
services/api/db/migrate/20170706141334_json_collection_properties.rb
services/api/db/migrate/20171027183824_add_index_to_containers.rb
services/api/db/migrate/20171208203841_fix_trash_flag_follow.rb
services/api/db/migrate/20171212153352_add_gin_index_to_collection_properties.rb
services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb
services/api/db/migrate/20180313180114_change_container_priority_bigint.rb
services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb [new file with mode: 0644]
services/api/db/migrate/20180607175050_properties_to_jsonb.rb [new file with mode: 0644]
services/api/db/migrate/20180608123145_add_properties_to_groups.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/app_version.rb
services/api/lib/crunch_dispatch.rb
services/api/lib/update_priority.rb [new file with mode: 0644]
services/api/lib/whitelist_update.rb
services/api/test/fixtures/api_client_authorizations.yml
services/api/test/fixtures/collections.yml
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/users.yml
services/api/test/functional/arvados/v1/schema_controller_test.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/integration/remote_user_test.rb
services/api/test/integration/user_sessions_test.rb
services/api/test/integration/users_test.rb
services/api/test/unit/arvados_model_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/update_priority_test.rb [new file with mode: 0644]
services/api/test/unit/user_test.rb
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.service
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-dispatch-slurm/squeue.go
services/crunch-run/copier.go [new file with mode: 0644]
services/crunch-run/copier_test.go [new file with mode: 0644]
services/crunch-run/crunchrun.go
services/crunch-run/crunchrun_test.go
services/crunch-run/git_mount.go
services/crunch-run/logging_test.go
services/crunch-run/upload.go [deleted file]
services/crunch-run/upload_test.go [deleted file]
services/fuse/arvados_fuse/fusedir.py
services/fuse/setup.py
services/fuse/tests/test_mount.py
services/health/main.go
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keep-balance/block_state.go
services/keep-balance/change_set.go
services/keep-balance/change_set_test.go
services/keep-balance/main.go
services/keep-balance/usage.go
services/keep-web/cache.go
services/keep-web/cadaver_test.go
services/keep-web/doc.go
services/keep-web/handler.go
services/keep-web/handler_test.go
services/keep-web/main.go
services/keep-web/server.go
services/keep-web/server_test.go
services/keep-web/webdav.go
services/keep-web/webdav_test.go
services/keepproxy/keepproxy.go
services/keepproxy/keepproxy_test.go
services/keepproxy/proxy_client.go
services/keepstore/azure_blob_volume.go
services/keepstore/azure_blob_volume_test.go
services/keepstore/config.go
services/keepstore/handlers.go
services/keepstore/keepstore.go
services/keepstore/s3_volume.go
services/keepstore/server.go [new file with mode: 0644]
services/keepstore/server_test.go [new file with mode: 0644]
services/keepstore/usage.go
services/keepstore/volume_unix.go
services/login-sync/arvados-login-sync.gemspec
services/login-sync/bin/arvados-login-sync
services/login-sync/test/test_add_user.rb
services/nodemanager/arvnodeman/computenode/__init__.py
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
services/nodemanager/arvnodeman/computenode/driver/__init__.py
services/nodemanager/arvnodeman/computenode/driver/azure.py
services/nodemanager/arvnodeman/computenode/driver/ec2.py
services/nodemanager/arvnodeman/computenode/driver/gce.py
services/nodemanager/arvnodeman/config.py
services/nodemanager/arvnodeman/daemon.py
services/nodemanager/arvnodeman/jobqueue.py
services/nodemanager/arvnodeman/launcher.py
services/nodemanager/arvnodeman/nodelist.py
services/nodemanager/arvnodeman/test/fake_driver.py
services/nodemanager/doc/ec2.example.cfg
services/nodemanager/setup.py
services/nodemanager/tests/fake_azure.cfg.template
services/nodemanager/tests/fake_ec2.cfg.template
services/nodemanager/tests/fake_gce.cfg.template
services/nodemanager/tests/integration_test.py
services/nodemanager/tests/test_computenode.py
services/nodemanager/tests/test_computenode_dispatch.py
services/nodemanager/tests/test_computenode_dispatch_slurm.py
services/nodemanager/tests/test_computenode_driver.py
services/nodemanager/tests/test_computenode_driver_azure.py
services/nodemanager/tests/test_computenode_driver_ec2.py
services/nodemanager/tests/test_computenode_driver_gce.py
services/nodemanager/tests/test_config.py
services/nodemanager/tests/test_daemon.py
services/nodemanager/tests/test_failure.py
services/nodemanager/tests/test_jobqueue.py
services/nodemanager/tests/test_nodelist.py
services/nodemanager/tests/testutil.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/runit/1
tools/arvbox/lib/arvbox/docker/runit/2
tools/arvbox/lib/arvbox/docker/runit/3
tools/arvbox/lib/arvbox/docker/runit/ctrlaltdel
tools/arvbox/lib/arvbox/docker/service/sdk/run-service
tools/crunchstat-summary/crunchstat_summary/reader.py
tools/crunchstat-summary/crunchstat_summary/synchronizer.js
vendor/vendor.json

diff --git a/.licenseignore b/.licenseignore
index 51980b16c2ffcbdd2ab93729c4677048b5160c39..51a1e7cbd2f0aabca972527475630923f1f1ef75 100644 (file)
--- a/.licenseignore
+++ b/.licenseignore
@@ -26,6 +26,8 @@ docker/jobs/apt.arvados.org.list
 *.gz.report
 *.ico
 *.jpg
+*.svg
+*.odg
 *.json
 *LICENSE*.html
 .licenseignore
@@ -59,3 +61,11 @@ services/arv-web/sample-cgi-app/public/index.cgi
 services/keepproxy/pkg-extras/etc/default/keepproxy
 *.tar
 tools/crunchstat-summary/tests/crunchstat_error_messages.txt
+tools/crunchstat-summary/crunchstat_summary/synchronizer.js
+build/package-build-dockerfiles/debian9/D39DC0E3.asc
+build/package-test-dockerfiles/debian9/D39DC0E3.asc
+sdk/R/DESCRIPTION
+sdk/R/NAMESPACE
+sdk/R/.Rbuildignore
+sdk/R/ArvadosR.Rproj
+*.Rd
diff --git a/AUTHORS b/AUTHORS
index ea9fa4c7092e8c2069a2105d8eafb25e6107d3ab..9a861a6315099a8faec86b854d8737078adc22b7 100644 (file)
--- a/AUTHORS
+++ b/AUTHORS
@@ -17,3 +17,4 @@ Joshua Randall <joshua.randall@sanger.ac.uk>
 President and Fellows of Harvard College <*@harvard.edu>
 Thomas Mooney <tmooney@genome.wustl.edu>
 Chen Chen <aflyhorse@gmail.com>
+Veritas Genetics, Inc. <*@veritasgenetics.com>
diff --git a/apps/workbench/.gitignore b/apps/workbench/.gitignore
index 5fb3718f38dd8aa5a480b7561bdd1e9824be7c47..156fc86a5eadee7b9cef56c004db808c8a3d8d03 100644 (file)
--- a/apps/workbench/.gitignore
+++ b/apps/workbench/.gitignore
@@ -5,6 +5,7 @@
 /log/*.log
 /log/*.log.gz
 /tmp
+.byebug_history
 
 /config/.secret_token
 /config/initializers/secret_token.rb
diff --git a/apps/workbench/Gemfile b/apps/workbench/Gemfile
index 1e9a5cc70d8ece81b21d6c15233499dd31a35002..b62df6c0219b401110134603591a75aeee9bda0f 100644 (file)
--- a/apps/workbench/Gemfile
+++ b/apps/workbench/Gemfile
@@ -4,9 +4,7 @@
 
 source 'https://rubygems.org'
 
-# Having a dependency '~> 4.1' make rails 4.2.10 to be installed when it's
-# supposed to avoid that.
-gem 'rails', '< 4.2'
+gem 'rails', '~> 4.2.0'
 gem 'arvados', '>= 0.1.20150511150219'
 
 gem 'activerecord-nulldb-adapter'
@@ -14,6 +12,7 @@ gem 'multi_json'
 gem 'oj'
 gem 'sass'
 gem 'mime-types'
+gem 'responders', '~> 2.0'
 
 # Note: keeping this out of the "group :assets" section "may" allow us
 # to use Coffescript for UJS responses. It also prevents a
@@ -37,6 +36,7 @@ group :development do
   gem 'ruby-debug-passenger'
   gem 'rack-mini-profiler', require: false
   gem 'flamegraph', require: false
+  #gem 'web-console', '~> 2.0'
 end
 
 group :test, :diagnostics, :performance do
diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
index 93a92ce11cb85f57d43237c49d4fec7f07f9842f..9c4cd678b0cf9aa38a110a32e1223586dceb0afb 100644 (file)
--- a/apps/workbench/Gemfile.lock
+++ b/apps/workbench/Gemfile.lock
@@ -9,39 +9,47 @@ GEM
   remote: https://rubygems.org/
   specs:
     RedCloth (4.3.2)
-    actionmailer (4.1.16)
-      actionpack (= 4.1.16)
-      actionview (= 4.1.16)
+    actionmailer (4.2.10)
+      actionpack (= 4.2.10)
+      actionview (= 4.2.10)
+      activejob (= 4.2.10)
       mail (~> 2.5, >= 2.5.4)
-    actionpack (4.1.16)
-      actionview (= 4.1.16)
-      activesupport (= 4.1.16)
-      rack (~> 1.5.2)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+    actionpack (4.2.10)
+      actionview (= 4.2.10)
+      activesupport (= 4.2.10)
+      rack (~> 1.6)
       rack-test (~> 0.6.2)
-    actionview (4.1.16)
-      activesupport (= 4.1.16)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.2)
+    actionview (4.2.10)
+      activesupport (= 4.2.10)
       builder (~> 3.1)
       erubis (~> 2.7.0)
-    activemodel (4.1.16)
-      activesupport (= 4.1.16)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.3)
+    activejob (4.2.10)
+      activesupport (= 4.2.10)
+      globalid (>= 0.3.0)
+    activemodel (4.2.10)
+      activesupport (= 4.2.10)
       builder (~> 3.1)
-    activerecord (4.1.16)
-      activemodel (= 4.1.16)
-      activesupport (= 4.1.16)
-      arel (~> 5.0.0)
+    activerecord (4.2.10)
+      activemodel (= 4.2.10)
+      activesupport (= 4.2.10)
+      arel (~> 6.0)
     activerecord-nulldb-adapter (0.3.8)
       activerecord (>= 2.0.0)
-    activesupport (4.1.16)
-      i18n (~> 0.6, >= 0.6.9)
-      json (~> 1.7, >= 1.7.7)
+    activesupport (4.2.10)
+      i18n (~> 0.7)
       minitest (~> 5.1)
-      thread_safe (~> 0.1)
+      thread_safe (~> 0.3, >= 0.3.4)
       tzinfo (~> 1.1)
     addressable (2.5.2)
       public_suffix (>= 2.0.2, < 4.0)
     andand (1.3.3)
     angularjs-rails (1.3.15)
-    arel (5.0.1.20140414130214)
+    arel (6.0.4)
     arvados (0.1.20180302192246)
       activesupport (>= 3)
       andand (~> 1.3, >= 1.3.3)
@@ -85,6 +93,7 @@ GEM
     coffee-script-source (1.12.2)
     commonjs (0.2.7)
     concurrent-ruby (1.0.5)
+    crass (1.0.4)
     deep_merge (1.2.1)
     docile (1.1.5)
     erubis (2.7.0)
@@ -94,6 +103,8 @@ GEM
       multipart-post (>= 1.2, < 3)
     ffi (1.9.23)
     flamegraph (0.9.5)
+    globalid (0.4.1)
+      activesupport (>= 4.2.0)
     google-api-client (0.8.7)
       activesupport (>= 3.2, < 5.0)
       addressable (~> 2.3)
@@ -122,7 +133,7 @@ GEM
     jquery-rails (3.1.4)
       railties (>= 3.0, < 5.0)
       thor (>= 0.14, < 2.0)
-    json (1.8.6)
+    json (2.1.0)
     jwt (1.5.6)
     launchy (2.4.3)
       addressable (~> 2.3)
@@ -145,6 +156,9 @@ GEM
       railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
+    loofah (2.2.2)
+      crass (~> 1.0.2)
+      nokogiri (>= 1.5.9)
     mail (2.7.0)
       mini_mime (>= 0.1.1)
     memoist (0.16.0)
@@ -187,28 +201,37 @@ GEM
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
     public_suffix (3.0.2)
-    rack (1.5.5)
+    rack (1.6.10)
     rack-mini-profiler (0.10.7)
       rack (>= 1.2.0)
     rack-test (0.6.3)
       rack (>= 1.0)
-    rails (4.1.16)
-      actionmailer (= 4.1.16)
-      actionpack (= 4.1.16)
-      actionview (= 4.1.16)
-      activemodel (= 4.1.16)
-      activerecord (= 4.1.16)
-      activesupport (= 4.1.16)
+    rails (4.2.10)
+      actionmailer (= 4.2.10)
+      actionpack (= 4.2.10)
+      actionview (= 4.2.10)
+      activejob (= 4.2.10)
+      activemodel (= 4.2.10)
+      activerecord (= 4.2.10)
+      activesupport (= 4.2.10)
       bundler (>= 1.3.0, < 2.0)
-      railties (= 4.1.16)
-      sprockets-rails (~> 2.0)
+      railties (= 4.2.10)
+      sprockets-rails
+    rails-deprecated_sanitizer (1.0.3)
+      activesupport (>= 4.2.0.alpha)
+    rails-dom-testing (1.0.9)
+      activesupport (>= 4.2.0, < 5.0)
+      nokogiri (~> 1.6)
+      rails-deprecated_sanitizer (>= 1.0.1)
+    rails-html-sanitizer (1.0.4)
+      loofah (~> 2.2, >= 2.2.2)
     rails-perftest (0.0.7)
-    railties (4.1.16)
-      actionpack (= 4.1.16)
-      activesupport (= 4.1.16)
+    railties (4.2.10)
+      actionpack (= 4.2.10)
+      activesupport (= 4.2.10)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.3.0)
+    rake (12.3.1)
     raphael-rails (2.1.2)
     rb-fsevent (0.10.3)
     rb-inotify (0.9.10)
@@ -216,6 +239,9 @@ GEM
     ref (2.0.0)
     request_store (1.4.0)
       rack (>= 1.4)
+    responders (2.4.0)
+      actionpack (>= 4.2.0, < 5.3)
+      railties (>= 4.2.0, < 5.3)
     retriable (1.4.1)
     ruby-debug-passenger (0.2.0)
     ruby-prof (0.17.0)
@@ -250,13 +276,13 @@ GEM
     simplecov-html (0.10.2)
     simplecov-rcov (0.2.3)
       simplecov (>= 0.4.1)
-    sprockets (3.7.1)
+    sprockets (3.7.2)
       concurrent-ruby (~> 1.0)
       rack (> 1, < 3)
-    sprockets-rails (2.3.3)
-      actionpack (>= 3.0)
-      activesupport (>= 3.0)
-      sprockets (>= 2.8, < 4.0)
+    sprockets-rails (3.2.1)
+      actionpack (>= 4.0)
+      activesupport (>= 4.0)
+      sprockets (>= 3.0.0)
     sshkey (1.9.0)
     therubyracer (0.12.3)
       libv8 (~> 3.16.14.15)
@@ -312,9 +338,10 @@ DEPENDENCIES
   piwik_analytics
   poltergeist (~> 1.5.1)
   rack-mini-profiler
-  rails (< 4.2)
+  rails (~> 4.2.0)
   rails-perftest
   raphael-rails
+  responders (~> 2.0)
   ruby-debug-passenger
   ruby-prof
   rvm-capistrano
diff --git a/apps/workbench/app/assets/javascripts/models/session_db.js b/apps/workbench/app/assets/javascripts/models/session_db.js
index fab8fe3925c00da42f5223a763a659ff18e70441..7d1b3b15926816229acbc8d83b0ffa52443055b6 100644 (file)
--- a/apps/workbench/app/assets/javascripts/models/session_db.js
+++ b/apps/workbench/app/assets/javascripts/models/session_db.js
@@ -68,7 +68,7 @@ window.SessionDB = function() {
                 url = 'https://' + url;
             }
             url = new URL(url);
-            return m.request(url.origin + '/discovery/v1/apis/arvados/v1/rest').then(function() {
+            return db.discoveryDoc({baseURL: url.origin}).map(function() {
                 return url.origin + '/';
             }).catch(function(err) {
                 // If url is a Workbench site (and isn't too old),
@@ -94,9 +94,9 @@ window.SessionDB = function() {
             }
             var session = db.loadLocal();
             var apiHostname = new URL(session.baseURL).hostname;
-            m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest').then(function(localDD) {
+            db.discoveryDoc(session).map(function(localDD) {
                 var uuidPrefix = localDD.uuidPrefix;
-                m.request(baseURL+'discovery/v1/apis/arvados/v1/rest').then(function(dd) {
+                db.discoveryDoc({baseURL: baseURL}).map(function(dd) {
                     if (uuidPrefix in dd.remoteHosts ||
                         (dd.remoteHostsViaDNS && apiHostname.endsWith('.arvadosapi.com'))) {
                         // Federated identity login via salted token
@@ -233,7 +233,16 @@ window.SessionDB = function() {
             var cache = db.discoveryCache[session.baseURL];
             if (!cache) {
                 db.discoveryCache[session.baseURL] = cache = m.stream();
-                m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest').then(cache);
+                m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest')
+                    .then(function (dd) {
+                        // Just in case we're talking with an old API server.
+                        dd.remoteHosts = dd.remoteHosts || {};
+                        if (dd.remoteHostsViaDNS === undefined) {
+                            dd.remoteHostsViaDNS = false;
+                        }
+                        return dd;
+                    })
+                    .then(cache);
             }
             return cache;
         },
@@ -308,8 +317,7 @@ window.SessionDB = function() {
             if (userUUIDPrefix === session.user.owner_uuid.slice(0, 5)) {
                 return;
             }
-            var doc = db.discoveryDoc(session);
-            doc.map(function(d) {
+            db.discoveryDoc(session).map(function (d) {
                 // Guess the remote host from the local discovery doc settings
                 var rHost = null;
                 if (d.remoteHosts[userUUIDPrefix]) {
@@ -323,8 +331,7 @@ window.SessionDB = function() {
                 }
                 // Get the remote cluster workbench url & redirect there.
                 db.findAPI(rHost).then(function (apiUrl) {
-                    var doc = db.discoveryDoc({baseURL: apiUrl});
-                    doc.map(function (d) {
+                    db.discoveryDoc({baseURL: apiUrl}).map(function (d) {
                         document.location = d.workbenchUrl + path;
                     });
                 });
diff --git a/apps/workbench/app/controllers/users_controller.rb b/apps/workbench/app/controllers/users_controller.rb
index 2e3ced69a534485ca5d18df22b19ac53abeea793..8cfc2c10f1c29eee67a11074acfabe70da19aaba 100644 (file)
--- a/apps/workbench/app/controllers/users_controller.rb
+++ b/apps/workbench/app/controllers/users_controller.rb
@@ -4,8 +4,8 @@
 
 class UsersController < ApplicationController
   skip_around_filter :require_thread_api_token, only: :welcome
-  skip_before_filter :check_user_agreements, only: [:welcome, :inactive]
-  skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile]
+  skip_before_filter :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
+  skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
   skip_before_filter :find_object_by_uuid, only: [:welcome, :activity, :storage]
   before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
 
@@ -317,6 +317,11 @@ class UsersController < ApplicationController
     RequestShellAccessReporter.send_request(current_user, params).deliver
   end
 
+  def merge
+    User.merge params[:new_user_token], params[:direction]
+    redirect_to "/"
+  end
+
   protected
 
   def find_current_links user
diff --git a/apps/workbench/app/controllers/work_units_controller.rb b/apps/workbench/app/controllers/work_units_controller.rb
index d2896821b28a16d90f62028a6644aded29e56496..8527b4d48cb717b941ab376b68255e917c5797a3 100644 (file)
--- a/apps/workbench/app/controllers/work_units_controller.rb
+++ b/apps/workbench/app/controllers/work_units_controller.rb
@@ -85,19 +85,43 @@ class WorkUnitsController < ApplicationController
       attrs['state'] = "Uncommitted"
 
       # required
-      attrs['command'] = ["arvados-cwl-runner", "--local", "--api=containers", "/var/lib/cwl/workflow.json#main", "/var/lib/cwl/cwl.input.json"]
+      attrs['command'] = ["arvados-cwl-runner",
+                          "--local",
+                          "--api=containers",
+                          "--project-uuid=#{params['work_unit']['owner_uuid']}",
+                          "/var/lib/cwl/workflow.json#main",
+                          "/var/lib/cwl/cwl.input.json"]
       attrs['container_image'] = "arvados/jobs"
       attrs['cwd'] = "/var/spool/cwl"
       attrs['output_path'] = "/var/spool/cwl"
 
+      # runtime constraints
+      runtime_constraints = {
+        "vcpus" => 1,
+        "ram" => 1024 * 1024 * 1024,
+        "API" => true
+      }
+
       input_defaults = {}
       if wf_json
-        inputs = get_cwl_inputs(wf_json)
-        inputs.each do |input|
+        main = get_cwl_main(wf_json)
+        main[:inputs].each do |input|
           if input[:default]
             input_defaults[cwl_shortname(input[:id])] = input[:default]
           end
         end
+        if main[:hints]
+          main[:hints].each do |hint|
+            if hint[:class] == "http://arvados.org/cwl#WorkflowRunnerResources"
+              if hint[:coresMin]
+                runtime_constraints["vcpus"] = hint[:coresMin]
+              end
+              if hint[:ramMin]
+                runtime_constraints["ram"] = hint[:ramMin] * 1024 * 1024
+              end
+            end
+          end
+        end
       end
 
       # mounts
@@ -123,12 +147,6 @@ class WorkUnitsController < ApplicationController
       end
       attrs['mounts'] = mounts
 
-      # runtime constraints
-      runtime_constraints = {
-        "vcpus" => 1,
-        "ram" => 256000000,
-        "API" => true
-      }
       attrs['runtime_constraints'] = runtime_constraints
     else
       raise ArgumentError, "Unsupported template uuid: #{template_uuid}"
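
The hunk above moves the runtime_constraints defaults ahead of the hint scan so that a workflow's http://arvados.org/cwl#WorkflowRunnerResources hint can override them. A minimal sketch of the resulting behavior, with a hypothetical hint hash standing in for what the controller reads out of the packed workflow's #main section:

    # Defaults used when the workflow carries no WorkflowRunnerResources hint.
    runtime_constraints = {"vcpus" => 1, "ram" => 1024 * 1024 * 1024, "API" => true}

    # Hypothetical hint as it would appear in the parsed workflow JSON.
    hint = {:class => "http://arvados.org/cwl#WorkflowRunnerResources",
            :coresMin => 2, :ramMin => 2048}

    if hint[:class] == "http://arvados.org/cwl#WorkflowRunnerResources"
      runtime_constraints["vcpus"] = hint[:coresMin] if hint[:coresMin]
      # ramMin is expressed in MiB; runtime_constraints wants bytes.
      runtime_constraints["ram"] = hint[:ramMin] * 1024 * 1024 if hint[:ramMin]
    end
    runtime_constraints  # => {"vcpus"=>2, "ram"=>2147483648, "API"=>true}
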
diff --git a/apps/workbench/app/helpers/application_helper.rb b/apps/workbench/app/helpers/application_helper.rb
index 57b8d8780c6859e9063cabb7c43cfcb30a14d6bf..106716a0f72f178e826afc6eaaf2908ecb8afe0a 100644 (file)
--- a/apps/workbench/app/helpers/application_helper.rb
+++ b/apps/workbench/app/helpers/application_helper.rb
@@ -426,18 +426,23 @@ module ApplicationHelper
     lt
   end
 
-  def get_cwl_inputs(workflow)
-    if workflow[:inputs]
-      return workflow[:inputs]
+  def get_cwl_main(workflow)
+    if workflow[:"$graph"].nil?
+      return workflow
     else
       workflow[:"$graph"].each do |tool|
         if tool[:id] == "#main"
-          return tool[:inputs]
+          return tool
         end
       end
     end
   end
 
+  def get_cwl_inputs(workflow)
+    get_cwl_main(workflow)[:inputs]
+  end
+
+
   def cwl_shortname(id)
     if id[0] == "#"
       id = id[1..-1]
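
get_cwl_main above generalizes the old inputs lookup: a packed workflow keeps its tools under $graph and the entry point is the tool whose id is "#main", while an unpacked document is its own main. A small sketch of both cases (the input ids are made up):

    packed = {:"$graph" => [{:id => "#main",  :inputs => [{:id => "#main/x"}]},
                            {:id => "#step1", :inputs => []}]}
    single = {:inputs => [{:id => "#y"}]}

    get_cwl_main(packed)[:id]   # => "#main"
    get_cwl_inputs(packed)      # => [{:id => "#main/x"}]
    get_cwl_inputs(single)      # => [{:id => "#y"}]   (no $graph: the document is main)
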
diff --git a/apps/workbench/app/helpers/version_helper.rb b/apps/workbench/app/helpers/version_helper.rb
index 915c3a9d381984abec802e78680f84a23ec54b6e..e673c812102143d451fa48887b4cdf9d28e060a6 100644 (file)
--- a/apps/workbench/app/helpers/version_helper.rb
+++ b/apps/workbench/app/helpers/version_helper.rb
@@ -9,6 +9,12 @@ module VersionHelper
     arvados_api_client.discovery[:source_version]
   end
 
+  # Get the packageVersion given in the API server's discovery
+  # document.
+  def api_package_version
+    arvados_api_client.discovery[:packageVersion]
+  end
+
   # URL for browsing source code for the given version.
   def version_link_target version
     "https://arvados.org/projects/arvados/repository/changes?rev=#{version.sub(/-.*/, "")}"
diff --git a/apps/workbench/app/models/arvados_base.rb b/apps/workbench/app/models/arvados_base.rb
index 8e1cfae8a2858fba0a0b6f8c836b6dc65168a44f..d7a65bdcee182d61aa47fe56bd4f648811de012d 100644 (file)
--- a/apps/workbench/app/models/arvados_base.rb
+++ b/apps/workbench/app/models/arvados_base.rb
@@ -61,11 +61,11 @@ class ArvadosBase < ActiveRecord::Base
   end
 
   def self.columns
-    return @columns if @columns.andand.any?
-    @columns = []
+    return @discovered_columns if @discovered_columns.andand.any?
+    @discovered_columns = []
     @attribute_info ||= {}
     schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
-    return @columns if schema.nil?
+    return @discovered_columns if schema.nil?
     schema[:properties].each do |k, coldef|
       case k
       when :etag, :kind
@@ -73,10 +73,10 @@ class ArvadosBase < ActiveRecord::Base
       else
         if coldef[:type] == coldef[:type].downcase
           # boolean, integer, etc.
-          @columns << column(k, coldef[:type].to_sym)
+          @discovered_columns << column(k, coldef[:type])
         else
           # Hash, Array
-          @columns << column(k, :text)
+          @discovered_columns << column(k, coldef[:type], coldef[:type].constantize.new)
           serialize k, coldef[:type].constantize
         end
         define_method k do
@@ -90,11 +90,16 @@ class ArvadosBase < ActiveRecord::Base
         @attribute_info[k] = coldef
       end
     end
-    @columns
+    @discovered_columns
   end
 
   def self.column(name, sql_type = nil, default = nil, null = true)
-    ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, sql_type.to_s, null)
+    if sql_type == 'datetime'
+      cast_type = "ActiveRecord::Type::DateTime".constantize.new
+    else
+      cast_type = ActiveRecord::Base.connection.lookup_cast_type(sql_type)
+    end
+    ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, cast_type, sql_type.to_s, null)
   end
 
   def self.attribute_info
@@ -202,7 +207,7 @@ class ArvadosBase < ActiveRecord::Base
       # old value in the update/create command) or has been added to
       # #changed by ActiveRecord's #attr= method.
       if changed.include? col.name or
-          (self.class.serialized_attributes.include? col.name and
+          ([Hash, Array].include?(attributes[col.name].class) and
            @loaded_attributes[col.name])
         obdata[col.name.to_sym] = self.send col.name
       end
diff --git a/apps/workbench/app/models/user.rb b/apps/workbench/app/models/user.rb
index 1f102dbf17acd3fb807110c34f4937686ebb9f2d..865ff6e9519cacf613b248df446fd4a1e0b24636 100644 (file)
--- a/apps/workbench/app/models/user.rb
+++ b/apps/workbench/app/models/user.rb
@@ -14,6 +14,47 @@ class User < ArvadosBase
     arvados_api_client.unpack_api_response(res)
   end
 
+  def self.merge new_user_token, direction
+    # Merge user accounts.
+    #
+    # If the direction is "in", the current user is merged into the
+    # user represented by new_user_token
+    #
+    # If the direction is "out", the user represented by new_user_token
+    # is merged into the current user.
+
+    if direction == "in"
+      user_a = new_user_token
+      user_b = Thread.current[:arvados_api_token]
+      new_group_name = "Migrated from #{Thread.current[:user].email} (#{Thread.current[:user].uuid})"
+    elsif direction == "out"
+      user_a = Thread.current[:arvados_api_token]
+      user_b = new_user_token
+      res = arvados_api_client.api self, '/current', nil, {:arvados_api_token => user_b}, false
+      user_b_info = arvados_api_client.unpack_api_response(res)
+      new_group_name = "Migrated from #{user_b_info.email} (#{user_b_info.uuid})"
+    else
+      raise "Invalid merge direction, expected 'in' or 'out'"
+    end
+
+    # Create a project owned by user_a to accept everything owned by user_b
+    res = arvados_api_client.api Group, nil, {:group => {
+                                                :name => new_group_name,
+                                                :group_class => "project"},
+                                              :ensure_unique_name => true},
+                                 {:arvados_api_token => user_a}, false
+    target = arvados_api_client.unpack_api_response(res)
+
+    # The merge API merges the "current" user (user_b) into the user
+    # represented by "new_user_token" (user_a).
+    # After merging, the user_b redirects to user_a.
+    res = arvados_api_client.api self, '/merge', {:new_user_token => user_a,
+                                                  :new_owner_uuid => target[:uuid],
+                                                  :redirect_to_new_user => true},
+                                 {:arvados_api_token => user_b}, false
+    arvados_api_client.unpack_api_response(res)
+  end
+
   def self.system
     @@arvados_system_user ||= begin
                                 res = arvados_api_client.api self, '/system'
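
Together with the new UsersController#merge action above, account linking is a two-call client flow: create a receiving project as the surviving user, then call the API server's /merge endpoint as the user being absorbed. A hedged sketch of driving it from Workbench code (the token is a placeholder):

    other_token = "v2/zzzzz-gj3su-0123456789abcde/..."  # placeholder token
    # "in": merge the currently logged-in account into the account that owns
    # other_token; "out" merges that account into the current one.
    User.merge(other_token, "in")
    # Afterwards either login reaches the surviving account, and the absorbed
    # user's objects live in the "Migrated from ..." project created above.
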
diff --git a/apps/workbench/app/views/application/_report_issue_popup.html.erb b/apps/workbench/app/views/application/_report_issue_popup.html.erb
index 86d550a33f34dc8bbccceb556164c76c5bf8a03b..8823fdd5f78f8ebfe7c4a336c3a144bc479de26f 100644 (file)
--- a/apps/workbench/app/views/application/_report_issue_popup.html.erb
+++ b/apps/workbench/app/views/application/_report_issue_popup.html.erb
@@ -14,8 +14,10 @@ SPDX-License-Identifier: AGPL-3.0 %>
   additional_info_str = additional_info.map {|k,v| "#{k}=#{v}"}.join("\n")
 
   additional_info['api_source_version'] = api_source_version
+  additional_info['api_package_version'] = api_package_version
   additional_info['generated_at'] = generated_at
   additional_info['workbench_version'] = AppVersion.hash
+  additional_info['workbench_package_version'] = AppVersion.package_version
   additional_info['arvados_base'] = arvados_base
   additional_info['support_email'] = support_email
   additional_info['error_message'] = params[:error_message] if params[:error_message]
@@ -73,7 +75,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
           <label for="wb_version" class="col-sm-4 control-label"> Workbench version </label>
           <div class="col-sm-8">
             <p class="form-control-static" name="wb_version">
-              <%= link_to AppVersion.hash, version_link_target(AppVersion.hash) %>
+              <%= AppVersion.package_version %> (<%= link_to AppVersion.hash, version_link_target(AppVersion.hash) %>)
             </p>
           </div>
         </div>
@@ -82,7 +84,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
           <label for="server_version" class="col-sm-4 control-label"> API version </label>
           <div class="col-sm-8">
             <p class="form-control-static" name="server_version">
-              <%= link_to api_source_version, version_link_target(api_source_version) %>
+              <%= api_package_version %> (<%= link_to api_source_version, version_link_target(api_source_version) %>)
             </p>
           </div>
         </div>
diff --git a/apps/workbench/app/views/layouts/body.html.erb b/apps/workbench/app/views/layouts/body.html.erb
index f4be7cad63ab282f2332235e89cd848d6bfb1771..124a78577f3e5cac875569c8912217d65b8fc1ce 100644 (file)
--- a/apps/workbench/app/views/layouts/body.html.erb
+++ b/apps/workbench/app/views/layouts/body.html.erb
@@ -93,7 +93,8 @@ SPDX-License-Identifier: AGPL-3.0 %>
                   <%= link_to ssh_keys_user_path(current_user), role: 'menu-item' do %>
                     <i class="fa fa-lg fa-key fa-fw"></i> SSH keys
                   <% end %>
-                </li>
+</li>
+                <li role="menuitem"><a href="/users/link_account" role="menuitem"><i class="fa fa-lg fa-link fa-fw"></i> Link account </a></li>
                 <% if Rails.configuration.user_profile_form_fields %>
                   <li role="menuitem"><a href="/users/<%=current_user.uuid%>/profile" role="menuitem"><i class="fa fa-lg fa-user fa-fw"></i> Manage profile</a></li>
                 <% end %>
diff --git a/apps/workbench/app/views/tests/mithril.html b/apps/workbench/app/views/tests/mithril.html
index a629eb75fda0a8140e00d411543528e05765d4ae..fac2d88c50586e844754a2016bfd9d04dfde72b5 100644 (file)
--- a/apps/workbench/app/views/tests/mithril.html
+++ b/apps/workbench/app/views/tests/mithril.html
@@ -1 +1,5 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
 <div data-mount-mithril="TestComponent"></div>
diff --git a/apps/workbench/app/views/users/inactive.html.erb b/apps/workbench/app/views/users/inactive.html.erb
index 389044f92fc2a3ab19337ae95a0f3815950c4314..f3cb3cf5cae7d25bde0ed590a0f81c45a37447f3 100644 (file)
--- a/apps/workbench/app/views/users/inactive.html.erb
+++ b/apps/workbench/app/views/users/inactive.html.erb
@@ -25,6 +25,11 @@ SPDX-License-Identifier: AGPL-3.0 %>
         <%= link_to 'Retry', (params[:return_to] || '/'), class: 'btn btn-primary' %>
 
       </p>
+
+      <p>
+       Already have an account with a different login?  <a href="/users/link_account">Link this login to your existing account.</a>
+      </p>
+
     </div>
   </div>
 </div>
diff --git a/apps/workbench/app/views/users/link_account.html.erb b/apps/workbench/app/views/users/link_account.html.erb
new file mode 100644 (file)
index 0000000..86a0446
--- /dev/null
+++ b/apps/workbench/app/views/users/link_account.html.erb
@@ -0,0 +1,112 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= javascript_tag do %>
+  function update_visibility() {
+    if (sessionStorage.getItem('link_account_api_token') &&
+      sessionStorage.getItem('link_account_uuid') != '<%= Thread.current[:user].uuid %>')
+    {
+      $("#ready-to-link").css({"display": "inherit"});
+      $("#need-login").css({"display": "none"});
+
+      <% if params[:direction] == "in" %>
+      var user_a = "<b>"+sessionStorage.getItem('link_account_email')+"</b> ("+sessionStorage.getItem('link_account_username')+", "+sessionStorage.getItem('link_account_uuid')+")";
+      var user_b = "<b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid%>)";
+      var user_a_is_active = (sessionStorage.getItem('link_account_is_active') == "true");
+      var user_a_is_admin = (sessionStorage.getItem('link_account_is_admin') == "true");
+      var user_b_is_admin = <%=if Thread.current[:user].is_admin then "true" else "false" end %>;
+      <% else %>
+      var user_a = "<b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid%>)";
+      var user_b = "<b>"+sessionStorage.getItem('link_account_email')+"</b> ("+sessionStorage.getItem('link_account_username')+", "+sessionStorage.getItem('link_account_uuid')+")";
+      var user_a_is_active = <%= Thread.current[:user].is_active %>;
+      var user_a_is_admin = <%=if Thread.current[:user].is_admin then "true" else "false" end %>;
+      var user_b_is_admin = (sessionStorage.getItem('link_account_is_admin') == "true");
+      <% end %>
+
+      $("#new-user-token-input").val(sessionStorage.getItem('link_account_api_token'));
+
+      if (!user_a_is_active) {
+        $("#will-link-to").html("<p>Cannot link "+user_b+" to inactive account "+user_a+".</p>");
+        $("#link-account-submit").prop("disabled", true);
+      } else if (user_b_is_admin && !user_a_is_admin) {
+        $("#will-link-to").html("<p>Cannot link admin account "+user_b+" to non-admin account "+user_a+".</p>");
+        $("#link-account-submit").prop("disabled", true);
+      } else {
+        $("#will-link-to").html("<p>Clicking 'Link accounts' will link "+user_b+" created on <%=Thread.current[:user].created_at%> to "+
+          user_a+" created at <b>"+sessionStorage.getItem('link_account_created_at')+"</b>.</p>"+
+          "<p>After linking, logging in as "+user_b+" will log you into the same account as "+user_a+
+          ".</p>  <p>Any objects owned by "+user_b+" will be transferred to "+user_a+".</p>");
+      }
+    } else {
+      $("#ready-to-link").css({"display": "none"});
+      $("#need-login").css({"display": "inherit"});
+    }
+
+    sessionStorage.removeItem('link_account_api_token');
+    sessionStorage.removeItem('link_account_uuid');
+    sessionStorage.removeItem('link_account_email');
+    sessionStorage.removeItem('link_account_username');
+    sessionStorage.removeItem('link_account_created_at');
+    sessionStorage.removeItem('link_account_is_active');
+    sessionStorage.removeItem('link_account_is_admin');
+  };
+
+  $(window).on("load", function() {
+    update_visibility();
+  });
+
+  function do_login(dir) {
+    sessionStorage.setItem('link_account_api_token', '<%= Thread.current[:arvados_api_token] %>');
+    sessionStorage.setItem('link_account_email', '<%= Thread.current[:user].email %>');
+    sessionStorage.setItem('link_account_username', '<%= Thread.current[:user].username %>');
+    sessionStorage.setItem('link_account_uuid', '<%= Thread.current[:user].uuid %>');
+    sessionStorage.setItem('link_account_created_at', '<%= Thread.current[:user].created_at %>');
+    sessionStorage.setItem('link_account_is_active', <%= if Thread.current[:user].is_active then "true" else "false" end %>);
+    sessionStorage.setItem('link_account_is_admin', <%= if Thread.current[:user].is_admin then "true" else "false" end %>);
+    window.location.replace('<%=arvados_api_client.arvados_logout_url(return_to: arvados_api_client.arvados_login_url(return_to: "#{strip_token_from_path(request.url)}?direction="))%>'+dir);
+  }
+
+  $(document).on("click", "#link-account-in", function(e) { do_login("in"); });
+  $(document).on("click", "#link-account-out", function(e) { do_login("out"); });
+
+  $(document).on("click", "#cancel-link-accounts", function() {
+    window.location.replace('/users/link_account?api_token='+$("#new-user-token-input").val());
+  });
+<% end %>
+
+<div id="need-login" style="display: none">
+
+  <p>You are currently logged in as <b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid %>) created at <b><%= Thread.current[:user].created_at%></b>.</p>
+
+  <p>You can link Arvados accounts.  After linking, either login will take you to the same account.</p>
+
+  <p>
+    <% if Thread.current[:user].is_active %>
+      <button class="btn btn-primary" id="link-account-in" style="margin-right: 1em">
+        <i class="fa fa-fw fa-sign-in"></i> Add another login to this account
+      </button>
+    <% end %>
+    <button class="btn btn-primary" id="link-account-out" style="margin-right: 1em">
+      <i class="fa fa-fw fa-sign-in"></i> Use this login to access another account
+    </button>
+  </p>
+</div>
+
+<div id="ready-to-link" style="display: none">
+
+  <div id="will-link-to"></div>
+
+  <%= button_tag "Cancel", class: "btn btn-cancel pull-left", id: "cancel-link-accounts", style: "margin-right: 1em" %>
+
+  <%= form_tag do |f| %>
+    <input type="hidden" id="new-user-token-input" name="new_user_token" value="" />
+    <input type="hidden" id="new-user-token-input" name="direction" value="<%=params[:direction]%>" />
+    <%= button_tag class: "btn btn-primary", id: "link-account-submit" do %>
+      <i class="fa fa-fw fa-link"></i> Link accounts
+  <% end %>
+<% end %>
+
+</div>
+</div>
index 137bba09e8365188a9d2cb8ac5ed49bb59f1b48c..e4ec4131286dac66d9a12947ad6d0ddd6bbad358 100644 (file)
@@ -66,12 +66,13 @@ production:
   eager_load: true
   consider_all_requests_local: false
   action_controller.perform_caching: true
-  serve_static_assets: false
+  serve_static_files: false
   assets.compile: false
   assets.digest: true
   i18n.fallbacks: true
   active_support.deprecation: :notify
   profiling_enabled: false
+  log_level: info
 
   arvados_insecure_https: false
 
@@ -88,7 +89,7 @@ production:
 test:
   cache_classes: true
   eager_load: false
-  serve_static_assets: true
+  serve_static_files: true
   static_cache_control: public, max-age=3600
   consider_all_requests_local: true
   action_controller.perform_caching: false
@@ -200,6 +201,11 @@ common:
   # "git log".
   source_version: false
 
+  # Override the automatic package string. With the default value of
+  # false, the package string is read from package-build.version in
+  # Rails.root (included in vendor packages).
+  package_version: false
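+  # For example: package_version: "1.2.0-1" (hypothetical value)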
+
   # report notification to and from addresses
   issue_reporter_email_from: arvados@example.com
   issue_reporter_email_to: arvados@example.com
index cc88886a173fe95d96e78a23f0bbdd2e8b4f7718..8b656c5a7746519eaffe4fdf13a8a50a9145efd2 100644 (file)
@@ -13,7 +13,7 @@ ArvadosWorkbench::Application.configure do
   config.action_controller.perform_caching = true
 
   # Disable Rails's static asset server (Apache or nginx will already do this)
-  config.serve_static_assets = false
+  config.serve_static_files = false
 
   # Compress JavaScripts and CSS
   config.assets.js_compressor = :uglifier
index 1c790208a522f658c5a017020e59dcb2b277b200..7ce5082701274c0564dd3b22a73375a656fa08a5 100644 (file)
@@ -12,7 +12,7 @@ ArvadosWorkbench::Application.configure do
   config.cache_classes = true
 
   # Configure static asset server for tests with Cache-Control for performance
-  config.serve_static_assets = true
+  config.serve_static_files = true
   config.static_cache_control = "public, max-age=3600"
 
   # Show full error reports and disable caching
@@ -36,4 +36,7 @@ ArvadosWorkbench::Application.configure do
   # Log timing data for API transactions
   config.profiling_enabled = false
 
+  # Can be :random or :sorted. Rails 5 will use :random by default
+  config.active_support.test_order = :sorted
+
 end
index d969abd78c2b69d8de936e2a00df0c0d1f1ef0f1..718adfd2ed0583a99f8eebb221b5eae0c7d012c3 100644 (file)
@@ -65,6 +65,8 @@ ArvadosWorkbench::Application.routes.draw do
     get 'virtual_machines', :on => :member
     get 'repositories', :on => :member
     get 'ssh_keys', :on => :member
+    get 'link_account', :on => :collection
+    post 'link_account', :on => :collection, :action => :merge
   end
   get '/current_token' => 'users#current_token'
   get "/add_ssh_key_popup" => 'users#add_ssh_key_popup', :as => :add_ssh_key_popup
@@ -109,7 +111,7 @@ ArvadosWorkbench::Application.routes.draw do
     get 'tab_counts', on: :member
     get 'public', on: :collection
   end
-  
+
   resources :search do
     get 'choose', :on => :collection
   end
@@ -131,9 +133,9 @@ ArvadosWorkbench::Application.routes.draw do
   match '/_health/ping', to: 'healthcheck#ping', via: [:get]
 
   get '/tests/mithril', to: 'tests#mithril'
-  
+
   get '/status', to: 'status#status'
-  
+
   # Send unroutable requests to an arbitrary controller
   # (ends up at ApplicationController#render_not_found)
   match '*a', to: 'links#render_not_found', via: [:get, :post]
index cc4b4dee1928f0a6b278d80c9887999cbc6c2f5d..9db76e25728da4e4127ed68cc8064c1d3a4f5d8c 100644 (file)
@@ -15,6 +15,7 @@ class AppVersion
 
   def self.forget
     @hash = nil
+    @package_version = nil
   end
 
   # Return abbrev commit hash for current code version: "abc1234", or
@@ -54,4 +55,18 @@ class AppVersion
 
     @hash || "unknown"
   end
+
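+  # Package version reported by the app: the package_version config
+  # override if set, otherwise the contents of package-build.version,
+  # or "unknown" if that file does not exist.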
+  def self.package_version
+    if (cached = Rails.configuration.package_version || @package_version)
+      return cached
+    end
+
+    begin
+      @package_version = IO.read(Rails.root.join("package-build.version")).strip
+    rescue Errno::ENOENT
+      @package_version = "unknown"
+    end
+
+    @package_version
+  end
 end
index 0bcf7a19fba5a7b22de2e5100098da1b1d2c189b..45952ceba3ef46fa14378c8e3351fc79d7031df0 100644 (file)
@@ -381,7 +381,7 @@ class ApplicationControllerTest < ActionController::TestCase
       get(:show, {id: test_uuid})
       login_link = css_select(css_selector).first
       assert_not_nil(login_link, "failed to select login link")
-      login_href = URI.unescape(login_link.attributes["href"])
+      login_href = URI.unescape(login_link.attributes["href"].value)
       # The parameter needs to include the full URL to work.
       assert_includes(login_href, "://")
       assert_match(/[\?&]return_to=[^&]*\/projects\/#{test_uuid}(&|$)/,
index abe7f6af453f1c72070251ba90a889407469097a..4f3e098d5c57875a29667b7d88fcda805505c725 100644 (file)
@@ -409,7 +409,7 @@ class CollectionsControllerTest < ActionController::TestCase
     assert_equal true, files.length>0, "Expected one or more files in collection"
 
     disabled = css_select('[disabled="disabled"]').collect do |el|
-      el.attributes['title'].split[-1]
+      el.attributes['title'].value.split[-1]
     end
 
     assert_equal files.sort, disabled.sort, "Expected to see all collection files in disabled list of files"
index 61c882d9f9908ae83e22aaa6a220bfc28e8fb1f0..ada0e33e70ab5f41221389f39cce1e9e2fdf32b3 100644 (file)
@@ -32,7 +32,7 @@ class ProjectsControllerTest < ActionController::TestCase
         id: readonly_project_uuid
       }, session_for(which_user)
       buttons = css_select('[data-method=post]').select do |el|
-        el.attributes['data-remote-href'].match /project.*owner_uuid.*#{readonly_project_uuid}/
+        el.attributes['data-remote-href'].value.match /project.*owner_uuid.*#{readonly_project_uuid}/
       end
       if should_show
         assert_not_empty(buttons, "did not offer to create a subproject")
index 99e7285b3ba150eaa05d0ba0adc5809a744c832c..b81e2384c98db7975349e0ea4a4c387f95223a2a 100644 (file)
@@ -63,7 +63,7 @@ class RepositoriesControllerTest < ActionController::TestCase
       assert_response :success
 
       panes = css_select('[data-toggle=tab]').each do |pane|
-        pane_name = pane.attributes['href']
+        pane_name = pane.attributes['href'].value
         assert_includes expected_panes, pane_name
       end
     end
@@ -96,7 +96,7 @@ class RepositoriesControllerTest < ActionController::TestCase
         commit: sha1,
       }, session_for(user)
       assert_response :success
-      assert_select 'pre', h(commit)
+      assert_select 'pre', commit
     end
 
     test "show blob to #{user}" do
@@ -108,7 +108,7 @@ class RepositoriesControllerTest < ActionController::TestCase
         path: 'COPYING',
       }, session_for(user)
       assert_response :success
-      assert_select 'pre', h(filedata)
+      assert_select 'pre', filedata
     end
   end
 
index ce9282ff77d1e69450c864d8a09f70ec2d8637e7..50b35021c093f23facb414667e74b84890f311b0 100644 (file)
@@ -69,14 +69,14 @@ class UsersControllerTest < ActionController::TestCase
       if username == 'admin'
         assert_match /<a href="\/projects\/#{admin_user['uuid']}">Home<\/a>/, @response.body
         assert_match /<a href="\/projects\/#{active_user['uuid']}">Home<\/a>/, @response.body
-        assert_match /href="\/users\/#{admin_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
-        assert_match /href="\/users\/#{active_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_match /href="\/users\/#{admin_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_match /href="\/users\/#{active_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
         assert_includes @response.body, admin_user['email']
         assert_includes @response.body, active_user['email']
       else
         refute_match  /Home<\/a>/, @response.body
-        refute_match /href="\/users\/#{admin_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
-        assert_match /href="\/users\/#{active_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        refute_match /href="\/users\/#{admin_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_match /href="\/users\/#{active_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
         assert_includes @response.body, active_user['email']
       end
     end
index 443130a4a92c60cd6a46a4f4ca749d9712a5a7f9..9aa868c2b8b90ee2dab6a1bbf94dae39d305df96 100644 (file)
@@ -88,7 +88,7 @@ class CollectionsTest < ActionDispatch::IntegrationTest
         link
       end
     end
-    assert_equal(['foo'], hrefs.compact.sort,
+    assert_equal(['./foo'], hrefs.compact.sort,
                  "download page did provide strictly file links")
     click_link "foo"
     assert_text "foo\nfile\n"
diff --git a/apps/workbench/test/integration/link_account_test.rb b/apps/workbench/test/integration/link_account_test.rb
new file mode 100644 (file)
index 0000000..9c22f5a
--- /dev/null
@@ -0,0 +1,172 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'webrick'
+
+class LinkAccountTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
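+  # Stands up a throwaway WEBrick stub for the SSO provider: /login
+  # redirects straight back to return_to with the given api_token
+  # appended, and /logout simply follows return_to.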
+  def start_sso_stub token
+    port = available_port('sso_stub')
+
+    s = WEBrick::HTTPServer.new(
+      :Port => port,
+      :BindAddress => 'localhost',
+      :Logger => WEBrick::Log.new('/dev/null', WEBrick::BasicLog::DEBUG),
+      :AccessLog => [nil,nil]
+    )
+
+    s.mount_proc("/login"){|req, res|
+      res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, req.query["return_to"] + "&api_token=#{token}")
+      s.shutdown
+    }
+
+    s.mount_proc("/logout"){|req, res|
+      res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, req.query["return_to"])
+    }
+
+    Thread.new do
+      s.start
+    end
+
+    "http://localhost:#{port}/"
+  end
+
+  test "Add another login to this account" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['project_viewer_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Add another login to this account").click
+
+    find("#notifications-menu").click
+    assert_text "project-viewer@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Use this login to access another account" do
+    visit page_with_token('project_viewer_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "project-viewer@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Link login of inactive user to this account" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Add another login to this account").click
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Cannot link to inactive user" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    assert_text "Cannot link active-user@arvados.local"
+
+    assert find("#link-account-submit")['disabled']
+
+    find("button", text: "Cancel").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Inactive user can link to active account" do
+    visit page_with_token('inactive_uninvited_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    assert_text "Already have an account with a different login?"
+
+    find("a", text: "Link this login to your existing account").click
+
+    assert_no_text "Add another login to this account"
+
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Admin cannot link to non-admin" do
+    visit page_with_token('admin_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "admin@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    assert_text "Cannot link admin account admin@arvados.local"
+
+    assert find("#link-account-submit")['disabled']
+
+    find("button", text: "Cancel").click
+
+    find("#notifications-menu").click
+    assert_text "admin@arvados.local"
+  end
+
+end
index ef2779cc3e78eedb556ce2dc7114a6e2466112b0..33e50087e77d127e9c30991860b92315aade3d33 100644 (file)
@@ -221,6 +221,8 @@ class ActionDispatch::IntegrationTest
     end
     if Capybara.current_driver == :selenium
       page.execute_script("window.localStorage.clear()")
+    else
+      page.driver.restart if defined?(page.driver.restart)
     end
     Capybara.reset_sessions!
   end
index 60dadec61d86fc74b3ea6769c48248e709643252..2fd926ff18d6d6f555927df43d6764e9dbea3099 100644 (file)
@@ -177,38 +177,14 @@ class ApiServerForTests
   end
 
   def run_test_server
-    env_script = nil
     Dir.chdir PYTHON_TESTS_DIR do
-      # These are no-ops if we're running within run-tests.sh (except
-      # that we do get a useful env_script back from "start", even
-      # though it doesn't need to start up a new server).
-      env_script = check_output %w(python ./run_test_server.py start --auth admin)
-      check_output %w(python ./run_test_server.py start_arv-git-httpd)
-      check_output %w(python ./run_test_server.py start_keep-web)
-      check_output %w(python ./run_test_server.py start_nginx)
-      # This one isn't a no-op, even under run-tests.sh.
       check_output %w(python ./run_test_server.py start_keep)
     end
-    test_env = {}
-    env_script.each_line do |line|
-      line = line.chomp
-      if 0 == line.index('export ')
-        toks = line.sub('export ', '').split '=', 2
-        $stderr.puts "run_test_server.py: #{toks[0]}=#{toks[1]}"
-        test_env[toks[0]] = toks[1]
-      end
-    end
-    test_env
   end
 
   def stop_test_server
     Dir.chdir PYTHON_TESTS_DIR do
       check_output %w(python ./run_test_server.py stop_keep)
-      # These are no-ops if we're running within run-tests.sh
-      check_output %w(python ./run_test_server.py stop_nginx)
-      check_output %w(python ./run_test_server.py stop_arv-git-httpd)
-      check_output %w(python ./run_test_server.py stop_keep-web)
-      check_output %w(python ./run_test_server.py stop)
     end
     @@server_is_running = false
   end
@@ -223,9 +199,9 @@ class ApiServerForTests
       stop_test_server
     end
 
-    test_env = run_test_server
-    $application_config['arvados_login_base'] = "https://#{test_env['ARVADOS_API_HOST']}/login"
-    $application_config['arvados_v1_base'] = "https://#{test_env['ARVADOS_API_HOST']}/arvados/v1"
+    run_test_server
+    $application_config['arvados_login_base'] = "https://#{ENV['ARVADOS_API_HOST']}/login"
+    $application_config['arvados_v1_base'] = "https://#{ENV['ARVADOS_API_HOST']}/arvados/v1"
     $application_config['arvados_insecure_host'] = true
     ActiveSupport::TestCase.reset_application_config
 
index e1e5063f738b1c9c092a166f7877bc7525964254..9393c1accec46a4e6e3bb6cb35df3cd4bef54bf6 100755 (executable)
@@ -22,10 +22,8 @@ EOF
 set -e
 
 if [[ -z "$WORKSPACE" ]] ; then
-    echo "$helpmessage"
-    echo
-    echo "Must set WORKSPACE"
-    exit 1
+    export WORKSPACE=$(readlink -f $(dirname $0)/..)
+    echo "Using WORKSPACE $WORKSPACE"
 fi
 
 if [[ -z "$ARVADOS_API_HOST" || -z "$ARVADOS_API_TOKEN" ]] ; then
index da7c0305c77bcdba3493dae43d0b3081fd8fd417..ef6407031c41f286d47ad08e573f3020a48bf9e9 100644 (file)
@@ -3,49 +3,50 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 #distribution(s)|name|version|iteration|type|architecture|extra fpm arguments
-debian8,debian9,ubuntu1204,centos7|python-gflags|2.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|apache-libcloud|2.3.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|rsa|3.4.2|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|uritemplate|3.0.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
-debian8,debian9,ubuntu1204,centos7|ws4py|0.3.5|2|python|all
-debian8,debian9,ubuntu1204,centos7|pykka|1.2.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|six|1.10.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.3|3|python|amd64
-debian8,debian9,ubuntu1204,centos7|pycrypto|2.6.1|3|python|amd64
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|llfuse|1.2|3|python|amd64
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|pycurl|7.19.5.3|3|python|amd64
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|pyyaml|3.12|2|python|amd64
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|rdflib|4.2.2|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|shellescape|3.4.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|mistune|0.7.3|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|typing|3.5.3.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|avro|1.8.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|ruamel.ordereddict|0.4.9|2|python|amd64
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|cachecontrol|0.11.7|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|pathlib2|2.1.0|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|docker-py|1.7.2|2|python3|all
-debian8,debian9,ubuntu1204,centos7|six|1.10.0|2|python3|all
-debian8,debian9,ubuntu1204,ubuntu1404,centos7|requests|2.12.4|2|python3|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|websocket-client|0.37.0|2|python3|all
-ubuntu1204,ubuntu1404|requests|2.4.3|2|python|all
-ubuntu1204,centos7|contextlib2|0.5.4|2|python|all
-ubuntu1204,centos7|isodate|0.5.4|2|python|all
+debian8,debian9,centos7|python-gflags|2.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
+debian8,debian9,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
+debian8,debian9,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
+debian8,debian9,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|rsa|3.4.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|uritemplate|3.0.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
+debian8,debian9,centos7|ws4py|0.3.5|2|python|all
+debian8,debian9,centos7|pykka|1.2.1|2|python|all
+debian8,debian9,ubuntu1404,centos7|six|1.10.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.6|3|python|amd64
+debian8,debian9,centos7|pycrypto|2.6.1|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|llfuse|1.2|3|python|amd64
+debian8,debian9,ubuntu1404,centos7|pycurl|7.19.5.3|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|pyyaml|3.12|2|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|rdflib|4.2.2|2|python|all
+debian8,debian9,ubuntu1404,centos7|shellescape|3.4.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|mistune|0.7.3|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|typing|3.5.3.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|avro|1.8.1|2|python|all
+debian8,debian9,ubuntu1404,centos7|ruamel.ordereddict|0.4.9|2|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|cachecontrol|0.11.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|pathlib2|2.3.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|scandir|1.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|docker-py|1.7.2|2|python3|all
+debian8,debian9,centos7|six|1.10.0|2|python3|all
+debian8,debian9,ubuntu1404,centos7|requests|2.12.4|2|python3|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|websocket-client|0.37.0|2|python3|all
+ubuntu1404|requests|2.4.3|2|python|all
+centos7|contextlib2|0.5.4|2|python|all
+centos7|isodate|0.5.4|2|python|all
 centos7|python-daemon|2.1.2|1|python|all
 centos7|pbr|0.11.1|2|python|all
 centos7|pyparsing|2.1.10|2|python|all
 centos7|keepalive|0.5|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
-debian8,ubuntu1404,centos7|subprocess32|3.2.7|2|python|all
-all|ruamel.yaml|0.13.7|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
-all|cwltest|1.0.20180209171722|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32'
-all|junit-xml|1.7|3|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
+debian8,debian9,ubuntu1404,ubuntu1604,centos7|subprocess32|3.5.1|2|python|all
+all|ruamel.yaml|0.14.12|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
+all|cwltest|1.0.20180518074130|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32 >= 3.5.0'
+all|junit-xml|1.8|3|python|all
 all|rdflib-jsonld|0.4.0|2|python|all
 all|futures|3.0.5|2|python|all
 all|future|0.16.0|2|python|all
 all|future|0.16.0|2|python3|all
+all|mypy-extensions|0.3.0|1|python|all
index f087188991c5c06a3b19a3b1e38325d9d29e5c52..2a40b50ec1f5b94c2523e293871d04005d962973 100755 (executable)
@@ -96,7 +96,7 @@ do
             | */nodemanager/doc/*.cfg \
             | */nodemanager/tests/fake*.cfg.template \
             | */nginx.conf \
-            | build/build.list)
+            | build/build.list | *.R)
             fixer=fixer
             cc="#"
             ;;
@@ -175,7 +175,7 @@ ${cc}${cc:+ }SPDX-License-Identifier: CC-BY-SA-3.0${ce}"
    wantBYSAmd="[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
 [comment]: # ()
 [comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)"
-    found=$(head -n20 "$fnm" | egrep -A${grepAfter} -B${grepBefore} 'Copyright.*Arvados' || true)
+    found=$(head -n20 "$fnm" | egrep -A${grepAfter} -B${grepBefore} 'Copyright.*All rights reserved.' || true)
     case ${fnm} in
         Makefile | build/* | lib/* | tools/* | apps/* | services/* | sdk/cli/bin/crunch-job)
             want=${wantGPL}
index 63f65ada8b19382e3940199bc9ce7841fc2a14b2..bb66c6b218c020c5d038c1e5e7b51f8681043db9 100644 (file)
@@ -2,4 +2,11 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-LIBCLOUD_PIN=2.3.0
+LIBCLOUD_PIN=2.3.1.dev1
+
+using_fork=true
+if [[ $using_fork = true ]]; then
+    LIBCLOUD_PIN_SRC="https://github.com/curoverse/libcloud/archive/apache-libcloud-$LIBCLOUD_PIN.zip"
+else
+    LIBCLOUD_PIN_SRC=""
+fi
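+
+# When LIBCLOUD_PIN_SRC is non-empty, run-tests.sh preinstalls libcloud
+# from this zipball instead of the PyPI release.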
index 396370dad7c44d6a7393ab93ac8801d559ba34af..ab1ade14deababdcc76abd11d02a99968ac0dac1 100644 (file)
@@ -28,7 +28,7 @@ ubuntu1604/generated: common-generated-all
        test -d ubuntu1604/generated || mkdir ubuntu1604/generated
        cp -rlt ubuntu1604/generated common-generated/*
 
-GOTARBALL=go1.8.3.linux-amd64.tar.gz
+GOTARBALL=go1.10.1.linux-amd64.tar.gz
 NODETARBALL=node-v6.11.2-linux-x64.tar.xz
 
 common-generated-all: common-generated/$(GOTARBALL) common-generated/$(NODETARBALL)
index c2fdfeee559a66fdd82ac5595c2281da31089c53..3a8b03f190b420a69b673780a46d434c7dad8da1 100644 (file)
@@ -17,7 +17,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.8.3.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install nodejs and npm
index 739244d467e9b420296401888d4d1ba05ac9c9fb..54267d708e2cc2ce34c603bf5048cf816c31de86 100644 (file)
@@ -19,7 +19,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.8.3.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install nodejs and npm
index a6e5e88d14514aae04870e0927e62dbc6427b817..9ade5fa27232f6613fd07199a6c8a3d9f54565ca 100644 (file)
@@ -21,7 +21,7 @@ RUN gpg --import /tmp/D39DC0E3.asc && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.8.3.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install nodejs and npm
index 55b9899e839210a92c1fa43ed7d1954ed8f0e94b..4ff47ff315bee92d127814348f621a54f64e789a 100644 (file)
@@ -8,7 +8,7 @@ MAINTAINER Ward Vandewege <ward@curoverse.com>
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip 
 
 # Install RVM
 RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
@@ -19,7 +19,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.8.3.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install nodejs and npm
index 92aee31b3604cbb235ccdce7c46156da3c1928d1..7e5701f871cb987dc581fe843b1b2f3c4a2d3b7c 100644 (file)
@@ -19,7 +19,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
 
 # Install golang binary
-ADD generated/go1.8.3.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
 RUN ln -s /usr/local/go/bin/go /usr/local/bin/
 
 # Install nodejs and npm
index 8416847433e046e3f0c2f36f5c3734f06cd4d60a..a1bc48443ea4eb48e4939d04deb8952c67220882 100644 (file)
@@ -7,9 +7,9 @@ MAINTAINER Ward Vandewege <ward@curoverse.com>
 
 ENV DEBIAN_FRONTEND noninteractive
 
-# Install RVM
+# Install dependencies and RVM
 RUN apt-get update && \
-    apt-get -y install --no-install-recommends curl ca-certificates && \
+    apt-get -y install --no-install-recommends curl ca-certificates python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip binutils build-essential ca-certificates  && \
     gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
     curl -L https://get.rvm.io | bash -s stable && \
     /usr/local/rvm/bin/rvm install 2.3 && \
diff --git a/build/package-testing/test-packages-ubuntu1204.sh b/build/package-testing/test-packages-ubuntu1204.sh
deleted file mode 120000 (symlink)
index 54ce94c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-deb-common-test-packages.sh
\ No newline at end of file
index d221844c8a0e1fd426afd7c3d6e7ea416ba0da9c..b1e99fc66b27c40d3ac13542e6f7de76ba37e202 100755 (executable)
@@ -133,7 +133,7 @@ echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_versi
 cd docker/jobs
 docker build $NOCACHE \
        --build-arg python_sdk_version=${python_sdk_version}-2 \
-       --build-arg cwl_runner_version=${cwl_runner_version}-3 \
+       --build-arg cwl_runner_version=${cwl_runner_version}-4 \
        -t arvados/jobs:$cwl_runner_version .
 
 ECODE=$?
index 31a546fd350d60cf65bdb0237f93f2b17335eede..900a5e25efc6ab024640be8950889a4ec2ac2152 100755 (executable)
@@ -219,7 +219,9 @@ if [[ -n "$test_packages" ]]; then
         fi
         echo
         echo "START: $p test on $IMAGE" >&2
-        if docker run --rm \
+        # ulimit option can be removed when debian8 and ubuntu1404 are retired
+        if docker run --ulimit nofile=4096:4096 \
+            --rm \
             "${docker_volume_args[@]}" \
             --env ARVADOS_DEBUG=$ARVADOS_DEBUG \
             --env "TARGET=$TARGET" \
@@ -245,8 +247,9 @@ else
     set +e
     mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null
     set -e
-    # Build packages
-    if docker run --rm \
+    # Build packages. ulimit option can be removed when debian8 and ubuntu1404 are retired
+    if docker run --ulimit nofile=4096:4096 \
+        --rm \
         "${docker_volume_args[@]}" \
         --env ARVADOS_BUILDING_VERSION="$ARVADOS_BUILDING_VERSION" \
         --env ARVADOS_BUILDING_ITERATION="$ARVADOS_BUILDING_ITERATION" \
index 497545dfacf7f3cb92fe11e393c581af50ebcf61..caebac013d4db721af25655a3551371c35275782 100755 (executable)
@@ -110,9 +110,6 @@ case "$TARGET" in
     debian9)
         FORMAT=deb
         ;;
-    ubuntu1204)
-        FORMAT=deb
-        ;;
     ubuntu1404)
         FORMAT=deb
         ;;
@@ -288,61 +285,16 @@ handle_python_package
     fi
 )
 
-# On older platforms we need to publish a backport of libfuse >=2.9.2,
-# and we need to build and install it here in order to even build an
-# llfuse package.
-cd $WORKSPACE/packages/$TARGET
-if [[ $TARGET =~ ubuntu1204 ]]; then
-    # port libfuse 2.9.2 to Ubuntu 12.04
-    LIBFUSE_DIR=$(mktemp -d)
-    (
-        cd $LIBFUSE_DIR
-        # download fuse 2.9.2 ubuntu 14.04 source package
-        file="fuse_2.9.2.orig.tar.xz" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
-        file="fuse_2.9.2-4ubuntu4.14.04.1.debian.tar.xz" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
-        file="fuse_2.9.2-4ubuntu4.14.04.1.dsc" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
-
-        # install dpkg-source and dpkg-buildpackage commands
-        apt-get install -y --no-install-recommends dpkg-dev
-
-        # extract source and apply patches
-        dpkg-source -x fuse_2.9.2-4ubuntu4.14.04.1.dsc
-        rm -f fuse_2.9.2.orig.tar.xz fuse_2.9.2-4ubuntu4.14.04.1.debian.tar.xz fuse_2.9.2-4ubuntu4.14.04.1.dsc
-
-        # add new version to changelog
-        cd fuse-2.9.2
-        (
-            echo "fuse (2.9.2-5) precise; urgency=low"
-            echo
-            echo "  * Backported from trusty-security to precise"
-            echo
-            echo " -- Joshua Randall <jcrandall@alum.mit.edu>  Thu, 4 Feb 2016 11:31:00 -0000"
-            echo
-            cat debian/changelog
-        ) > debian/changelog.new
-        mv debian/changelog.new debian/changelog
-
-        # install build-deps and build
-        apt-get install -y --no-install-recommends debhelper dh-autoreconf libselinux-dev
-        dpkg-buildpackage -rfakeroot -b
-    )
-    fpm_build "$LIBFUSE_DIR/fuse_2.9.2-5_amd64.deb" fuse "Ubuntu Developers" deb "2.9.2" --iteration 5
-    fpm_build "$LIBFUSE_DIR/libfuse2_2.9.2-5_amd64.deb" libfuse2 "Ubuntu Developers" deb "2.9.2" --iteration 5
-    fpm_build "$LIBFUSE_DIR/libfuse-dev_2.9.2-5_amd64.deb" libfuse-dev "Ubuntu Developers" deb "2.9.2" --iteration 5
-    dpkg -i \
-        "$WORKSPACE/packages/$TARGET/fuse_2.9.2-5_amd64.deb" \
-        "$WORKSPACE/packages/$TARGET/libfuse2_2.9.2-5_amd64.deb" \
-        "$WORKSPACE/packages/$TARGET/libfuse-dev_2.9.2-5_amd64.deb"
-    apt-get -y --no-install-recommends -f install
-    rm -rf $LIBFUSE_DIR
-fi
-
 # Go binaries
 cd $WORKSPACE/packages/$TARGET
 export GOPATH=$(mktemp -d)
 go get github.com/kardianos/govendor
 package_go_binary cmd/arvados-client arvados-client \
     "Arvados command line tool (beta)"
+package_go_binary cmd/arvados-server arvados-server \
+    "Arvados server daemons"
+package_go_binary cmd/arvados-server arvados-controller \
+    "Arvados cluster controller daemon"
 package_go_binary sdk/go/crunchrunner crunchrunner \
     "Crunchrunner executes a command inside a container and uploads the output"
 package_go_binary services/arv-git-httpd arvados-git-httpd \
@@ -397,14 +349,14 @@ rm -rf "$WORKSPACE/sdk/cwl/build"
 arvados_cwl_runner_version=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/cwl/arvados_cwl_runner.egg-info/PKG-INFO)}
 declare -a iterargs=()
 if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then
-    arvados_cwl_runner_iteration=3
+    arvados_cwl_runner_iteration=4
     iterargs+=(--iteration $arvados_cwl_runner_iteration)
 else
     arvados_cwl_runner_iteration=
 fi
 test_package_presence ${PYTHON2_PKG_PREFIX}-arvados-cwl-runner "$arvados_cwl_runner_version" python "$arvados_cwl_runner_iteration"
 if [[ "$?" == "0" ]]; then
-  fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$arvados_cwl_runner_version" "--url=https://arvados.org" "--description=The Arvados CWL runner" --depends "${PYTHON2_PKG_PREFIX}-setuptools" "${iterargs[@]}"
+  fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$arvados_cwl_runner_version" "--url=https://arvados.org" "--description=The Arvados CWL runner" --depends "${PYTHON2_PKG_PREFIX}-setuptools" --depends "${PYTHON2_PKG_PREFIX}-subprocess32 >= 3.5.0" --depends "${PYTHON2_PKG_PREFIX}-pathlib2" --depends "${PYTHON2_PKG_PREFIX}-scandir" "${iterargs[@]}"
 fi
 
 # schema_salad. This is a python dependency of arvados-cwl-runner,
@@ -486,8 +438,30 @@ if [[ "$?" == "0" ]]; then
   fpm_build $WORKSPACE/tools/crunchstat-summary ${PYTHON2_PKG_PREFIX}-crunchstat-summary 'Curoverse, Inc.' 'python' "$crunchstat_summary_version" "--url=https://arvados.org" "--description=Crunchstat-summary reads Arvados Crunch log files and summarize resource usage" --iteration "$iteration"
 fi
 
-## if libcloud becomes our own fork see
-## https://dev.arvados.org/issues/12268#note-27
+# Forked libcloud
+if test_package_presence "$PYTHON2_PKG_PREFIX"-apache-libcloud "$LIBCLOUD_PIN" python 2
+then
+  LIBCLOUD_DIR=$(mktemp -d)
+  (
+      cd $LIBCLOUD_DIR
+      git clone $DASHQ_UNLESS_DEBUG https://github.com/curoverse/libcloud.git .
+      git checkout $DASHQ_UNLESS_DEBUG apache-libcloud-$LIBCLOUD_PIN
+      # libcloud is absurdly noisy without -q, so force -q here
+      OLD_DASHQ_UNLESS_DEBUG=$DASHQ_UNLESS_DEBUG
+      DASHQ_UNLESS_DEBUG=-q
+      handle_python_package
+      DASHQ_UNLESS_DEBUG=$OLD_DASHQ_UNLESS_DEBUG
+  )
+
+  # libcloud >= 2.3.0 now requires python-requests 2.4.3 or higher, otherwise
+  # it throws
+  #   ImportError: No module named packages.urllib3.poolmanager
+  # when loaded. We only see this problem on ubuntu1404, because that is our
+  # only supported distribution that ships with a python-requests older than
+  # 2.4.3.
+  fpm_build $LIBCLOUD_DIR "$PYTHON2_PKG_PREFIX"-apache-libcloud "" python "" --iteration 2 --depends 'python-requests >= 2.4.3'
+  rm -rf $LIBCLOUD_DIR
+fi
 
 # Python 2 dependencies
 declare -a PIP_DOWNLOAD_SWITCHES=(--no-deps)
index fb4df6a79215ea3cfa86f0bd5cfc6c9233fa8233..4b18d037b6b30655714ed174fad84b665ebe7f9f 100755 (executable)
@@ -129,10 +129,7 @@ package_go_binary() {
     # Arvados SDK and the SDK has changed.
     declare -a checkdirs=(vendor)
     if grep -qr git.curoverse.com/arvados .; then
-        checkdirs+=(sdk/go)
-        if [[ "$prog" -eq "crunch-dispatch-slurm" ]]; then
-          checkdirs+=(lib/dispatchcloud)
-        fi
+        checkdirs+=(sdk/go lib)
     fi
     for dir in ${checkdirs[@]}; do
         cd "$GOPATH/src/git.curoverse.com/arvados.git/$dir"
index b89c8d9e5bc51c6f0acd1b1938ad1cd540192f15..636c0306ca94a7948b1e4a63302ae467ca7aea37 100755 (executable)
@@ -70,9 +70,11 @@ apps/workbench_integration (*)
 apps/workbench_benchmark
 apps/workbench_profile
 cmd/arvados-client
+cmd/arvados-server
 doc
 lib/cli
 lib/cmd
+lib/controller
 lib/crunchstat
 lib/dispatchcloud
 services/api
@@ -182,8 +184,8 @@ sanity_checks() {
     echo -n 'go: '
     go version \
         || fatal "No go binary. See http://golang.org/doc/install"
-    [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 8 ]] \
-        || fatal "Go >= 1.8 required. See http://golang.org/doc/install"
+    [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 10 ]] \
+        || fatal "Go >= 1.10 required. See http://golang.org/doc/install"
     echo -n 'gcc: '
     gcc --version | egrep ^gcc \
         || fatal "No gcc. Try: apt-get install build-essential"
@@ -246,9 +248,9 @@ sanity_checks() {
     if [[ "$NEED_SDK_R" = true ]]; then
       # R SDK stuff
       echo -n 'R: '
-      which R || fatal "No R. Try: apt-get install r-base"
+      which Rscript || fatal "No Rscript. Try: apt-get install r-base"
       echo -n 'testthat: '
-      R -q -e "library('testthat')" || fatal "No testthat. Try: apt-get install r-cran-testthat"
+      Rscript -e "library('testthat')" || fatal "No testthat. Try: apt-get install r-cran-testthat"
       # needed for roxygen2, needed for devtools, needed for R sdk
       pkg-config --exists libxml-2.0 || fatal "No libxml2. Try: apt-get install libxml2-dev"
       # needed for pkgdown, builds R SDK doc pages
@@ -270,6 +272,8 @@ declare -a failures
 declare -A skip
 declare -A testargs
 skip[apps/workbench_profile]=1
+# nodemanager_integration tests are not reliable, see #12061.
+skip[services/nodemanager_integration]=1
 
 while [[ -n "$1" ]]
 do
@@ -345,15 +349,19 @@ start_services() {
        rm -f "$WORKSPACE/tmp/api.pid"
     fi
     cd "$WORKSPACE" \
-        && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
+        && eval $(python sdk/python/tests/run_test_server.py start --auth admin || echo fail=1) \
         && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
         && export ARVADOS_TEST_API_INSTALLED="$$" \
+        && python sdk/python/tests/run_test_server.py start_controller \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
         && python sdk/python/tests/run_test_server.py start_ws \
-        && python sdk/python/tests/run_test_server.py start_nginx \
+        && eval $(python sdk/python/tests/run_test_server.py start_nginx || echo fail=1) \
         && (env | egrep ^ARVADOS)
+    if [[ -n "$fail" ]]; then
+       return 1
+    fi
 }
 
 stop_services() {
@@ -367,6 +375,7 @@ stop_services() {
         && python sdk/python/tests/run_test_server.py stop_ws \
         && python sdk/python/tests/run_test_server.py stop_keep-web \
         && python sdk/python/tests/run_test_server.py stop_keep_proxy \
+        && python sdk/python/tests/run_test_server.py stop_controller \
         && python sdk/python/tests/run_test_server.py stop
 }
 
@@ -404,6 +413,8 @@ do
     fi
 done
 
+rm -vf "${WORKSPACE}/tmp/"*.log
+
 setup_ruby_environment() {
     if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
         source "$HOME/.rvm/scripts/rvm"
@@ -489,6 +500,8 @@ setup_virtualenv() {
     local venvdest="$1"; shift
     if ! [[ -e "$venvdest/bin/activate" ]] || ! [[ -e "$venvdest/bin/pip" ]]; then
         virtualenv --setuptools "$@" "$venvdest" || fatal "virtualenv $venvdest failed"
+    elif [[ -n "$short" ]]; then
+        return
     fi
     if [[ $("$venvdest/bin/python" --version 2>&1) =~ \ 3\.[012]\. ]]; then
         # pip 8.0.0 dropped support for python 3.2, e.g., debian wheezy
@@ -506,64 +519,59 @@ export PERLLIB="$PERLINSTALLBASE/lib/perl5:${PERLLIB:+$PERLLIB}"
 export R_LIBS
 
 export GOPATH
-mkdir -p "$GOPATH/src/git.curoverse.com"
-rmdir -v --parents --ignore-fail-on-non-empty "$GOPATH/src/git.curoverse.com/arvados.git/tmp/GOPATH"
-for d in \
-    "$GOPATH/src/git.curoverse.com/arvados.git/arvados.git" \
-    "$GOPATH/src/git.curoverse.com/arvados.git"; do
-    [[ -d "$d" ]] && rmdir "$d"
-    [[ -h "$d" ]] && rm "$d"
-done
-ln -vsnfT "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git" \
-    || fatal "symlink failed"
-go get -v github.com/kardianos/govendor \
-    || fatal "govendor install failed"
-cd "$GOPATH/src/git.curoverse.com/arvados.git" \
-    || fatal
-# Remove cached source dirs in workdir. Otherwise, they won't qualify
-# as +missing or +external below, and we won't be able to detect that
-# they're missing from vendor/vendor.json.
-rm -r vendor/*/
-go get -v -d ...
-"$GOPATH/bin/govendor" sync \
-    || fatal "govendor sync failed"
-[[ -z $("$GOPATH/bin/govendor" list +unused +missing +external | tee /dev/stderr) ]] \
-    || fatal "vendor/vendor.json has unused or missing dependencies -- try:
-* govendor remove +unused
-* govendor add +missing +external
-"
-cd "$WORKSPACE"
-
+(
+    set -e
+    mkdir -p "$GOPATH/src/git.curoverse.com"
+    rmdir -v --parents --ignore-fail-on-non-empty "${temp}/GOPATH"
+    if [[ ! -h "$GOPATH/src/git.curoverse.com/arvados.git" ]]; then
+        for d in \
+            "$GOPATH/src/git.curoverse.com/arvados.git/tmp/GOPATH" \
+                "$GOPATH/src/git.curoverse.com/arvados.git/tmp" \
+                "$GOPATH/src/git.curoverse.com/arvados.git"; do
+            [[ -d "$d" ]] && rmdir "$d"
+        done
+    fi
+    for d in \
+        "$GOPATH/src/git.curoverse.com/arvados.git/arvados" \
+        "$GOPATH/src/git.curoverse.com/arvados.git"; do
+        [[ -h "$d" ]] && rm "$d"
+    done
+    ln -vsfT "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git"
+    go get -v github.com/kardianos/govendor
+    cd "$GOPATH/src/git.curoverse.com/arvados.git"
+    if [[ -n "$short" ]]; then
+        go get -v -d ...
+        "$GOPATH/bin/govendor" sync
+    else
+        # Remove cached source dirs in workdir. Otherwise, they will
+        # not qualify as +missing or +external below, and we won't be
+        # able to detect that they're missing from vendor/vendor.json.
+        rm -rf vendor/*/
+        go get -v -d ...
+        "$GOPATH/bin/govendor" sync
+        [[ -z $("$GOPATH/bin/govendor" list +unused +missing +external | tee /dev/stderr) ]] \
+            || fatal "vendor/vendor.json has unused or missing dependencies -- try:
+
+(export GOPATH=\"${GOPATH}\"; cd \$GOPATH/src/git.curoverse.com/arvados.git && \$GOPATH/bin/govendor add +missing +external && \$GOPATH/bin/govendor remove +unused)
+
+";
+    fi
+) || fatal "Go setup failed"
 
 setup_virtualenv "$VENVDIR" --python python2.7
 . "$VENVDIR/bin/activate"
 
 # Needed for run_test_server.py which is used by certain (non-Python) tests.
-pip freeze 2>/dev/null | egrep ^PyYAML= \
-    || pip install --no-cache-dir PyYAML >/dev/null \
+pip install --no-cache-dir PyYAML \
     || fatal "pip install PyYAML failed"
 
-# Preinstall libcloud, because nodemanager "pip install"
-# won't pick it up by default.
-pip freeze 2>/dev/null | egrep ^apache-libcloud==$LIBCLOUD_PIN \
-    || pip install --pre --ignore-installed --no-cache-dir apache-libcloud>=$LIBCLOUD_PIN >/dev/null \
-    || fatal "pip install apache-libcloud failed"
-
-# We need an unreleased (as of 2017-08-17) llfuse bugfix, otherwise our fuse test suite deadlocks.
-pip freeze | grep -x llfuse==1.2.0 || (
-    set -e
-    yes | pip uninstall llfuse || true
-    cython --version || fatal "no cython; try sudo apt-get install cython"
-    cd "$temp"
-    (cd python-llfuse 2>/dev/null || git clone https://github.com/curoverse/python-llfuse)
-    cd python-llfuse
-    git checkout 620722fd990ea642ddb8e7412676af482c090c0c
-    git checkout setup.py
-    sed -i -e "s:'1\\.2':'1.2.0':" setup.py
-    python setup.py build_cython
-    python setup.py install --force
-) || fatal "llfuse fork failed"
-pip freeze | grep -x llfuse==1.2.0 || fatal "error: installed llfuse 1.2.0 but '$(pip freeze | grep llfuse)' ???"
+# Preinstall libcloud if using a fork; otherwise nodemanager "pip
+# install" won't pick it up by default.
+if [[ -n "$LIBCLOUD_PIN_SRC" ]]; then
+    pip freeze 2>/dev/null | egrep ^apache-libcloud==$LIBCLOUD_PIN \
+        || pip install --pre --ignore-installed --no-cache-dir "$LIBCLOUD_PIN_SRC" >/dev/null \
+        || fatal "pip install apache-libcloud failed"
+fi
 
 # Deactivate Python 2 virtualenv
 deactivate
@@ -608,6 +616,12 @@ then
     gem install --user-install bundler || fatal 'Could not install bundler'
 fi
 
+# Jenkins config requires that glob tmp/*.log match something. Ensure
+# that happens even if we don't end up running services that set up
+# logging.
+mkdir -p "${WORKSPACE}/tmp/" || fatal "could not mkdir ${WORKSPACE}/tmp"
+touch "${WORKSPACE}/tmp/controller.log" || fatal "could not touch ${WORKSPACE}/tmp/controller.log"
+
 retry() {
     remain="${repeat}"
     while :
@@ -644,8 +658,9 @@ do_test() {
             ;;
     esac
     if [[ -z "${skip[$suite]}" && -z "${skip[$1]}" && \
-                (-z "${only}" || "${only}" == "${suite}" || \
-                 "${only}" == "${1}") ]]; then
+              (-z "${only}" || "${only}" == "${suite}" || \
+                   "${only}" == "${1}") ||
+                  "${only}" == "${2}" ]]; then
         retry do_test_once ${@}
     else
         title "Skipping ${1} tests"
@@ -802,7 +817,7 @@ do_install sdk/ruby ruby_sdk
 install_R_sdk() {
   if [[ "$NEED_SDK_R" = true ]]; then
     cd "$WORKSPACE/sdk/R" \
-       && R --quiet --vanilla --file=install_deps.R
+       && Rscript --vanilla install_deps.R
   fi
 }
 do_install sdk/R R_sdk
@@ -863,7 +878,7 @@ install_apiserver() {
     # is a postgresql superuser.
     cd "$WORKSPACE/services/api" \
         && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
-        && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.procpid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
+        && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
 
     mkdir -p "$WORKSPACE/services/api/tmp/pids"
 
@@ -896,8 +911,10 @@ do_install services/api apiserver
 declare -a gostuff
 gostuff=(
     cmd/arvados-client
+    cmd/arvados-server
     lib/cli
     lib/cmd
+    lib/controller
     lib/crunchstat
     lib/dispatchcloud
     sdk/go/arvados
@@ -981,7 +998,7 @@ do_test sdk/ruby ruby_sdk
 test_R_sdk() {
   if [[ "$NEED_SDK_R" = true ]]; then
     cd "$WORKSPACE/sdk/R" \
-        && R --quiet --file=run_test.R
+        && Rscript --vanilla run_test.R
   fi
 }
 
index b616b54bd95ea45e7faf852b7a926a00fa9e2830..4550ae53aced128d0698891c76d95a1730cae316 100644 (file)
@@ -5,24 +5,19 @@
 package main
 
 import (
-       "fmt"
-       "io"
        "os"
-       "regexp"
-       "runtime"
 
        "git.curoverse.com/arvados.git/lib/cli"
        "git.curoverse.com/arvados.git/lib/cmd"
 )
 
 var (
-       version                = "dev"
-       cmdVersion cmd.Handler = versionCmd{}
-       handler                = cmd.Multi(map[string]cmd.Handler{
-               "-e":        cmdVersion,
-               "version":   cmdVersion,
-               "-version":  cmdVersion,
-               "--version": cmdVersion,
+       version = "dev"
+       handler = cmd.Multi(map[string]cmd.Handler{
+               "-e":        cmd.Version(version),
+               "version":   cmd.Version(version),
+               "-version":  cmd.Version(version),
+               "--version": cmd.Version(version),
 
                "copy":     cli.Copy,
                "create":   cli.Create,
@@ -61,14 +56,6 @@ var (
        })
 )
 
-type versionCmd struct{}
-
-func (versionCmd) RunCommand(prog string, args []string, _ io.Reader, stdout, _ io.Writer) int {
-       prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
-       fmt.Fprintf(stdout, "%s %s (%s)\n", prog, version, runtime.Version())
-       return 0
-}
-
 func fixLegacyArgs(args []string) []string {
        flags, _ := cli.LegacyFlagSet()
        return cmd.SubcommandToFront(args, flags)
diff --git a/cmd/arvados-server/arvados-controller.service b/cmd/arvados-server/arvados-controller.service
new file mode 100644 (file)
index 0000000..e857074
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados controller
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+EnvironmentFile=-/etc/arvados/environment
+ExecStart=/usr/bin/arvados-controller
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/cmd/arvados-server/cmd.go b/cmd/arvados-server/cmd.go
new file mode 100644 (file)
index 0000000..1af3745
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "os"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/controller"
+)
+
+var (
+       version = "dev"
+       handler = cmd.Multi(map[string]cmd.Handler{
+               "version":   cmd.Version(version),
+               "-version":  cmd.Version(version),
+               "--version": cmd.Version(version),
+
+               "controller": controller.Command,
+       })
+)
+
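+// main dispatches to the handler table above; for example,
+// "arvados-server controller" invokes controller.Command.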
+func main() {
+       os.Exit(handler.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
+}
index eaa5410100992dfc8bc4482e2b6974d54b0ff8a0..079f7da27f46b52721849ae9539d6bbe4921dac0 100644 (file)
@@ -28,11 +28,60 @@ end
 file "sdk/R/arvados/index.html" do |t|
   `which R`
   if $? == 0
+    tgt = Dir.pwd
+    Dir.mkdir("sdk/R")
+    Dir.mkdir("sdk/R/arvados")
+    docfiles = []
     Dir.chdir("../sdk/R/") do
-      STDERR.puts `R --quiet --vanilla -e 'pkgdown::build_site()' 2>&1`
+      STDERR.puts `Rscript createDoc.R README.Rmd #{tgt}/sdk/R/README.md 2>&1`
+      Dir.entries("man").each do |rd|
+        if rd[-3..-1] == ".Rd"
+          htmlfile = "#{rd[0..-4]}.html"
+          `R CMD Rdconv -t html man/#{rd} > #{tgt}/sdk/R/arvados/#{htmlfile}`
+          docfiles << htmlfile
+        end
+      end
     end
     raise if $? != 0
-    cp_r("../sdk/R/docs", "sdk/R/arvados")
+
+    File.open("sdk/R/README.md", "r") do |rd|
+    File.open("sdk/R/index.html.md", "w") do |fn|
+      fn.write(<<-EOF
+---
+layout: default
+navsection: sdk
+navmenu: R
+title: "R SDK Overview"
+...
+
+#{rd.read.gsub(/^```$/, "~~~").gsub(/^```(\w)$/, "~~~\\1")}
+EOF
+        )
+      end
+    end
+
+    File.open("sdk/R/arvados/index.html.textile.liquid", "w") do |fn|
+      fn.write(<<-EOF
+---
+layout: default
+navsection: sdk
+navmenu: R
+title: "R Reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+EOF
+      )
+
+      docfiles.sort.each do |d|
+        fn.write("* \"#{d[0..-6]}\":#{d}\n")
+      end
+
+    end
   else
     puts "Warning: R not found, R documentation will not be generated".colorize(:light_red)
   end
@@ -51,8 +100,7 @@ end
 
 task :clean do
   rm_rf "sdk/python/arvados"
-  rm_rf "sdk/R/arvados"
-  rm_rf "../sdk/R/docs"
+  rm_rf "sdk/R"
 end
 
 require "zenweb/tasks"
index 011be51062e0bf2352d150531555dbde1292242d..075111d921602bb1a959a2fedaa3bbc747ebb863 100644 (file)
@@ -17,17 +17,6 @@ arvados_workbench_host: http://localhost
 exclude: ["Rakefile", "tmp", "vendor"]
 
 navbar:
-  #start:
-    #- Getting Started:
-      #- start/index.html.textile.liquid
-    #- Quickstart:
-      #- start/getting_started/publicproject.html.textile.liquid
-      #- start/getting_started/firstpipeline.html.textile.liquid
-    #- Common Use Cases:
-      #- start/getting_started/sharedata.html.textile.liquid
-    #- Next Steps:
-      #- start/getting_started/nextsteps.html.textile.liquid
-
   userguide:
     - Welcome:
       - user/index.html.textile.liquid
@@ -48,11 +37,10 @@ navbar:
       - user/tutorials/tutorial-keep-mount.html.textile.liquid
       - user/topics/keep.html.textile.liquid
       - user/topics/arv-copy.html.textile.liquid
+      - user/topics/storage-classes.html.textile.liquid
     - Running workflows at the command line:
       - user/cwl/cwl-runner.html.textile.liquid
       - user/cwl/cwl-run-options.html.textile.liquid
-      - user/topics/running-pipeline-command-line.html.textile.liquid
-      - user/topics/arv-run.html.textile.liquid
     - Working with git repositories:
       - user/tutorials/add-new-repository.html.textile.liquid
       - user/tutorials/git-arvados-guide.html.textile.liquid
@@ -62,29 +50,26 @@ navbar:
       - user/cwl/cwl-style.html.textile.liquid
       - user/cwl/cwl-extensions.html.textile.liquid
       - user/topics/arv-docker.html.textile.liquid
+    - Reference:
+      - user/topics/link-accounts.html.textile.liquid
+      - user/reference/cookbook.html.textile.liquid
+    - Arvados License:
+      - user/copying/copying.html.textile.liquid
+      - user/copying/agpl-3.0.html
+      - user/copying/LICENSE-2.0.html
+      - user/copying/by-sa-3.0.html
+    - Obsolete documentation:
+      - user/topics/running-pipeline-command-line.html.textile.liquid
+      - user/topics/arv-run.html.textile.liquid
       - user/tutorials/running-external-program.html.textile.liquid
       - user/topics/crunch-tools-overview.html.textile.liquid
       - user/tutorials/tutorial-firstscript.html.textile.liquid
       - user/tutorials/tutorial-submit-job.html.textile.liquid
       - user/topics/tutorial-parallel.html.textile.liquid
-    - Develop a web service:
-      - user/topics/arv-web.html.textile.liquid
-    - Reference:
-      - user/reference/cookbook.html.textile.liquid
       - user/topics/run-command.html.textile.liquid
       - user/reference/job-pipeline-ref.html.textile.liquid
       - user/examples/crunch-examples.html.textile.liquid
-    - Admin tools:
-      - user/topics/arvados-sync-groups.html.textile.liquid
-      - admin/change-account-owner.html.textile.liquid
-      - admin/merge-remote-account.html.textile.liquid
-    - Query the metadata database:
       - user/topics/tutorial-trait-search.html.textile.liquid
-    - Arvados License:
-      - user/copying/copying.html.textile.liquid
-      - user/copying/agpl-3.0.html
-      - user/copying/LICENSE-2.0.html
-      - user/copying/by-sa-3.0.html
   sdk:
     - Overview:
       - sdk/index.html.textile.liquid
@@ -92,10 +77,10 @@ navbar:
       - sdk/python/sdk-python.html.textile.liquid
       - sdk/python/example.html.textile.liquid
       - sdk/python/python.html.textile.liquid
-      - sdk/python/crunch-utility-libraries.html.textile.liquid
       - sdk/python/arvados-fuse.html.textile.liquid
       - sdk/python/events.html.textile.liquid
       - sdk/python/cookbook.html.textile.liquid
+      - sdk/python/crunch-utility-libraries.html.textile.liquid
     - CLI:
       - sdk/cli/install.html.textile.liquid
       - sdk/cli/index.html.textile.liquid
@@ -105,7 +90,8 @@ navbar:
       - sdk/go/index.html.textile.liquid
       - sdk/go/example.html.textile.liquid
     - R:
-      - sdk/R/index.html.textile.liquid
+      - sdk/R/index.html.md
+      - sdk/R/arvados/index.html.textile.liquid
     - Perl:
       - sdk/perl/index.html.textile.liquid
       - sdk/perl/example.html.textile.liquid
@@ -122,9 +108,6 @@ navbar:
       - api/requests.html.textile.liquid
       - api/methods.html.textile.liquid
       - api/resources.html.textile.liquid
-      - api/permission-model.html.textile.liquid
-      - api/storage.html.textile.liquid
-      - api/execution.html.textile.liquid
     - Permission and authentication:
       - api/methods/api_client_authorizations.html.textile.liquid
       - api/methods/api_clients.html.textile.liquid
@@ -155,26 +138,53 @@ navbar:
       - api/methods/humans.html.textile.liquid
       - api/methods/specimens.html.textile.liquid
       - api/methods/traits.html.textile.liquid
+  architecture:
+    - Topics:
+      - architecture/index.html.textile.liquid
+      - api/storage.html.textile.liquid
+      - api/execution.html.textile.liquid
+      - api/permission-model.html.textile.liquid
+  admin:
+    - Topics:
+      - admin/index.html.textile.liquid
+      - admin/upgrading.html.textile.liquid
+      - install/cheat_sheet.html.textile.liquid
+      - user/topics/arvados-sync-groups.html.textile.liquid
+      - admin/storage-classes.html.textile.liquid
+      - admin/activation.html.textile.liquid
+      - admin/migrating-providers.html.textile.liquid
+      - admin/merge-remote-account.html.textile.liquid
+      - admin/spot-instances.html.textile.liquid
+      - install/migrate-docker19.html.textile.liquid
   installguide:
     - Overview:
       - install/index.html.textile.liquid
     - Docker quick start:
       - install/arvbox.html.textile.liquid
+    - Arvados on Kubernetes:
+      - install/arvados-on-kubernetes.html.textile.liquid
     - Manual installation:
       - install/install-manual-prerequisites.html.textile.liquid
+      - install/install-components.html.textile.liquid
+    - Core:
       - install/install-postgresql.html.textile.liquid
-      - install/install-sso.html.textile.liquid
       - install/install-api-server.html.textile.liquid
-      - install/install-ws.html.textile.liquid
-      - install/install-arv-git-httpd.html.textile.liquid
-      - install/install-workbench-app.html.textile.liquid
-      - install/install-shell-server.html.textile.liquid
-      - install/create-standard-objects.html.textile.liquid
+    - Keep:
       - install/install-keepstore.html.textile.liquid
+      - install/configure-fs-storage.html.textile.liquid
+      - install/configure-s3-object-storage.html.textile.liquid
       - install/configure-azure-blob-storage.html.textile.liquid
       - install/install-keepproxy.html.textile.liquid
       - install/install-keep-web.html.textile.liquid
       - install/install-keep-balance.html.textile.liquid
+    - User interface:
+      - install/install-sso.html.textile.liquid
+      - install/install-workbench-app.html.textile.liquid
+      - install/install-composer.html.textile.liquid
+    - Additional services:
+      - install/install-ws.html.textile.liquid
+      - install/install-shell-server.html.textile.liquid
+      - install/install-arv-git-httpd.html.textile.liquid
     - Containers API support on SLURM:
       - install/crunch2-slurm/install-prerequisites.html.textile.liquid
       - install/crunch2-slurm/install-slurm.html.textile.liquid
@@ -186,8 +196,3 @@ navbar:
     - Jobs API support (deprecated):
       - install/install-crunch-dispatch.html.textile.liquid
       - install/install-compute-node.html.textile.liquid
-    - Helpful hints:
-      - install/copy_pipeline_from_curoverse.html.textile.liquid
-      - install/cheat_sheet.html.textile.liquid
-    - Migrating from Docker 1.9:
-      - install/migrate-docker19.html.textile.liquid
index 6eee4e0447c9715c3f88e3da07e003124ad8f001..abbe6f4c06adef5c7f8826d3e3430ea9386278e0 100644 (file)
@@ -11,3 +11,5 @@ Parameters to be passed to the container scheduler (e.g., SLURM) when running a
 table(table table-bordered table-condensed).
 |_. Key|_. Type|_. Description|_. Notes|
 |partitions|array of strings|The names of one or more compute partitions that may run this container. If not provided, the system will choose where to run the container.|Optional.|
+|preemptible|boolean|If true, the dispatcher will ask for a preemptible cloud node instance (e.g., an AWS Spot Instance) to run this container.|Optional. Default is false.|
+|max_run_time|integer|Maximum running time (in seconds) that this container will be allowed to run before being cancelled.|Optional. Default is 0 (no limit).|
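+
+As a sketch, these parameters could be set when creating a container request with the Python SDK (the image, command, and output path are placeholders, and a real request needs additional fields such as @mounts@ and @runtime_constraints@):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+api.container_requests().create(body={"container_request": {
+    "name": "preemptible example",
+    "state": "Committed",
+    "priority": 1,
+    "container_image": "arvados/jobs",   # placeholder image
+    "command": ["echo", "hello"],        # placeholder command
+    "output_path": "/out",
+    "scheduling_parameters": {
+        "preemptible": True,    # ask for a spot/preemptible node
+        "max_run_time": 3600    # cancel if still running after an hour
+    },
+}}).execute()
+</pre>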
diff --git a/doc/_includes/_events_py.liquid b/doc/_includes/_events_py.liquid
deleted file mode 100644 (file)
index 460fd42..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-import arvados
-import arvados.events
-
-# 'ev' is a dict containing the log table record describing the change.
-def on_message(ev):
-    if ev.get("event_type") == "create" and ev.get("object_kind") == "arvados#collection":
-        print "A new collection was created: %s" % ev["object_uuid"]
-
-api = arvados.api("v1")
-ws = arvados.events.subscribe(api, [], on_message)
-ws.run_forever()
diff --git a/doc/_includes/_example_sdk_go_imports.liquid b/doc/_includes/_example_sdk_go_imports.liquid
deleted file mode 100644 (file)
index 1285c4d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-import (
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-)
index 18347785cd07d018b66247af7a90807a6630e2ec..6a1a7318650ceeb0cfd83436b23c55120e759267 100644 (file)
@@ -49,3 +49,30 @@ On Red Hat-based systems, run:
 </notextile>
 
 Finally, reboot the system to make these changes effective.
+
+h2. Create a project for Docker images
+
+Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
+
+<notextile>
+<pre><code>~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
+~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
+~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$project_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+h2. Download and tag the latest arvados/jobs docker image
+
+In order to start workflows from Workbench, there needs to be a Docker image tagged @arvados/jobs:latest@. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as 'latest'.  In this example @$project_uuid@ should be the UUID of the "Arvados Standard Docker Images" project.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-keepdocker --project-uuid $project_uuid --pull arvados/jobs latest</span>
+</code></pre></notextile>
+
+If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
index b09f9ac58a868b04083e5aa7c5dcd7426729ce65..7d96ea011a60103ab54bd39f2c6610ec165d4397 100644 (file)
@@ -20,7 +20,9 @@ SPDX-License-Identifier: CC-BY-SA-3.0
         <!--<li {% if page.navsection == 'start' %} class="active" {% endif %}><a href="{{ site.baseurl }}/start/index.html">Getting&nbsp;Started</a></li>-->
         <li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/index.html">User&nbsp;Guide</a></li>
         <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/index.html">SDKs</a></li>
+        <li {% if page.navsection == 'architecture' %} class="active" {% endif %}><a href="{{ site.baseurl }}/architecture/index.html">Architecture</a></li>
         <li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/index.html">API</a></li>
+        <li {% if page.navsection == 'admin' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/index.html">Admin</a></li>
         <li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/index.html">Install</a></li>
         <li><a href="https://arvados.org" style="padding-left: 2em">arvados.org&nbsp;&raquo;</a></li>
       </ul>
index 3cacd0977a41454ba5e9bbcab1453bff0fbaa13b..7c6d36ec46c328970e6dace938c58a6a8a10853d 100644 (file)
@@ -21,6 +21,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
     <link href="{{ site.baseurl }}/css/font-awesome.css" rel="stylesheet">
     <link href="{{ site.baseurl }}/css/carousel-override.css" rel="stylesheet">
     <link href="{{ site.baseurl }}/css/button-override.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/images.css" rel="stylesheet">
     <style>
       html {
       height:100%;
diff --git a/doc/admin/activation.html.textile.liquid b/doc/admin/activation.html.textile.liquid
new file mode 100644 (file)
index 0000000..4a08e50
--- /dev/null
@@ -0,0 +1,229 @@
+---
+layout: default
+navsection: admin
+title: User activation
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how new users are created and activated.
+
+"Browser login and management of API tokens is described here.":{{site.baseurl}}/api/tokens.html
+
+h3. Authentication
+
+After completing the authentication process, a callback is made from the SSO server to the API server, providing a user record and @identity_url@ (despite the name, this is actually an Arvados user uuid).
+
+The API server searches for a user record with the @identity_url@ supplied by the SSO.  If found, that user account will be used, unless the account has @redirect_to_user_uuid@ set, in which case it will use the user in @redirect_to_user_uuid@ instead (this is used for the "link account":{{site.baseurl}}/user/topics/link-accounts.html feature).
+
+Next, it searches by email address for a "pre-activated account.":#pre-activated
+
+If no existing user record is found, a new user object will be created.
+
+A federated user follows a slightly different flow: a special token is presented, and the API server verifies the user's identity with the home cluster. This flow also results in a user object (representing the remote user) being created.
+
+h3. User setup
+
+If @auto_setup_new_users@ is true, as part of creating the new user object, the user is immediately set up with:
+
+* A @can_login@ permission link (email address &rarr; user uuid) which records the @identity_url_prefix@
+* Membership in the "All users" group (can read all users, all users can see new user)
+* A new git repo and @can_manage@ permission if @auto_setup_new_users_with_repository@ is true
+* @can_login@ permission to a shell node if @auto_setup_new_users_with_vm_uuid@ is set to the uuid of a vm
+
+Otherwise, an admin must explicitly invoke "setup" on the user via workbench or the API.
+
+h3. User activation
+
+A newly created user is inactive (@is_active@ is false) by default, unless @new_users_are_active@ is true.
+
+An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read.  This implies that if @auto_setup_new_users@ is true, an "inactive" user who has been set up may still be able to read objects shared with "All users", clone and push to the git repository, or log in to a VM.
+
+{% comment %}
+Maybe these services should check is_active.
+
+I believe that when this was originally designed, being able to access git and VM required an ssh key, and an inactive user could not register an ssh key because that required creating a record.  However, it is now possible to authenticate to shell VMs and http+git with just an API token.
+{% endcomment %}
+
+At this point, there are two ways a user can be activated.
+
+# An admin can set the @is_active@ field directly.  This runs @setup_on_activate@, which sets up the @oid_login_perm@ and group membership, but does not create a repository or VM login (even if @auto_setup_new_users_with_repository@ and/or @auto_setup_new_users_with_vm_uuid@ are set).
+# Self-activation using the @activate@ method of the users controller.
+
+h3. User agreements
+
+The @activate@ method of the users controller checks if the user @is_invited@ and whether the user has "signed" all the user agreements.
+
+@is_invited@ is true if any of these are true:
+* @is_active@ is true
+* @new_users_are_active@ is true
+* the user account has a permission link to read the system "all users" group.
+
+User agreements are accessed by getting a listing on the @user_agreements@ endpoint.  This returns a list of collection uuids.  This is executed as a system user, so it bypasses normal read permission checks.
+
+The available user agreements are represented in the Links table as
+
+<pre>
+{
+  "link_class": "signature",
+  "name": "require",
+  "tail_uuid": "*system user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+The collection contains the user agreement text file.
+
+Workbench checks @is_invited@.  If true, it displays the clickthrough agreements for the user to "sign".  If @is_invited@ is false, the user ends up at the "inactive user" page.
+
+The @user_agreements/sign@ endpoint creates a Link object:
+
+<pre>
+{
+  "link_class": "signature"
+  "name": "click",
+  "tail_uuid": "*user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+This is executed as a system user, so it bypasses the restriction that inactive users cannot create objects.
+
+The @user_agreements/signatures@ endpoint returns the list of Link objects that represent signatures by the current user (created by @sign@).
+
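+A sketch of checking agreement status with the Python SDK (the @user_agreements@ method names mirror the endpoints described above):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+required = api.user_agreements().list().execute()["items"]
+signed = api.user_agreements().signatures().execute()["items"]
+signed_uuids = set(link["head_uuid"] for link in signed)
+unsigned = [c for c in required if c["uuid"] not in signed_uuids]
+</pre>
+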
+h3. User profile
+
+After the user agreements check, Workbench checks whether the user profile is filled out.  The requirement to fill out the user profile is not enforced by the API server.
+
+h3(#pre-activated). Pre-activate user by email address
+
+You may create a user account for a user that has not yet logged in, and identify the user by email address.
+
+1. As an admin, create a user object:
+
+<pre>
+{
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. Create a link object, where @tail_uuid@ is the user's email address, @head_uuid@ is the user object created in the previous step, and @xxxxx@ is the value of @uuid_prefix@ of the SSO server.
+
+<pre>
+{
+  "link_class": "permission",
+  "name": "can_login",
+  "tail_uuid": "email address",
+  "head_uuid: "user uuid",
+  "properties": {
+    "identity_url_prefix": "xxxxx-tpzed-"
+  }
+}
+</pre>
+
+3. When the user logs in the first time, the email address will be recognized and the user will be associated with the linked user object.
+
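+A sketch of steps 1 and 2 with the Python SDK (the email, username, and uuid prefix are placeholders):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+user = api.users().create(body={"user": {
+    "email": "foo@example.com",
+    "username": "barney",
+    "is_active": True,
+}}).execute()
+api.links().create(body={"link": {
+    "link_class": "permission",
+    "name": "can_login",
+    "tail_uuid": "foo@example.com",
+    "head_uuid": user["uuid"],
+    "properties": {"identity_url_prefix": "xxxxx-tpzed-"},
+}}).execute()
+</pre>
+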
+h3. Pre-activate federated user
+
+1. As admin, create a user object with the @uuid@ of the federated user (this is the user's uuid on their home cluster):
+
+<pre>
+{
+  "uuid": "home1-tpzed-000000000000000",
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. When the user logs in, they will be associated with the existing user object.
+
+h3. Auto-activate federated users from trusted clusters
+
+In the API server config, configure @auto_activate_users_from@ with a list of one or more five-character cluster ids.  A federated user from one of the listed clusters who is active (@is_active@ is true) on the home cluster will be automatically set up and activated on this cluster.
+
+h3(#deactivating_users). Deactivating users
+
+Setting @is_active@ to false is not sufficient to lock out a user, because the user can call @activate@ to become active again.  Instead, use @unsetup@, which does the following (see the sketch after this list):
+
+* Delete oid_login_perms
+* Delete git repository permission links
+* Delete VM login permission links
+* Remove from "All users" group
+* Delete any "signatures"
+* Clear preferences / profile
+* Mark as inactive
+
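+A sketch of calling the @unsetup@ API method with the Python SDK (the uuid is a placeholder):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+api.users().unsetup(uuid="zzzzz-tpzed-xxxxxxxxxxxxxxx").execute()
+</pre>
+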
+{% comment %}
+Does not revoke @is_admin@, so you can't unsetup an admin unless you turn admin off first.
+
+"inactive" does not prevent user from reading things they previously had access to.
+
+Does not revoke API tokens.
+{% endcomment %}
+
+h3. Activation flows
+
+h4. Private instance
+
+Policy: users must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+</pre>
+
+# User is created.  Not set up.  @is_active@ is false.
+# Workbench checks @is_invited@ and finds it is false.  User gets "inactive user" page.
+# Admin goes to the user page and either clicks "setup user" or manually sets @is_active@ to true.
+# Clicking "setup user" sets up the user.  This includes adding the user to "All users", which qualifies the user as @is_invited@.
+# On refreshing workbench, the user is still inactive, but is able to self-activate after signing clickthrough agreements (if any).
+# Alternatively, directly setting @is_active@ to true also sets up the user, but workbench won't display clickthrough agreements (because the user is already active).
+
+h4. Federated instance
+
+Policy: users from other clusters in the federation are activated automatically; users from outside the federation must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+auto_activate_users_from: [home1]
+</pre>
+
+# A federated user arrives claiming to be from cluster 'home1'.
+# The API server authenticates the user as being from cluster 'home1'.
+# Because 'home1' is in @auto_activate_users_from@, the user is set up and activated.
+# User can immediately start using workbench.
+
+h4. Open instance
+
+Policy: anybody who shows up and signs the agreements is activated.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: false
+</pre>
+
+# User is created and auto-setup.  At this point, @is_active@ is false, but user has been added to "All users" group.
+# Workbench checks @is_invited@ and finds it is true, because the user is a member of "All users" group.
+# Workbench presents user with list of user agreements, user reads and clicks "sign" for each one.
+# Workbench tries to activate user.
+# User is activated.
+
+h4. Developer instance
+
+Policy: avoid wasting developers' time during development/testing.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: true
+</pre>
+
+# User is created, immediately auto-setup, and auto-activated.
+# User can immediately start using workbench.
diff --git a/doc/admin/change-account-owner.html.textile.liquid b/doc/admin/change-account-owner.html.textile.liquid
deleted file mode 100644 (file)
index d48572b..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: default
-navsection: userguide
-title: "Changing account ownership"
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-It is sometimes necessary to reassign an existing Arvados user account to a new Google account.
-
-Examples:
-* A user’s email address has changed from <code>person@old.example.com</code> to <code>person@new.example.com</code>.
-* A user who used to authenticate via LDAP is switching to Google login.
-
-This can be done by an administrator using Arvados APIs.
-
-First, determine the user’s existing UUID, e.g., @aaaaa-tpzed-abcdefghijklmno@.
-
-Ensure the new email address is not already associated with a different Arvados account. If it is, disassociate it by clearing that account’s @identity_url@ and @email@ fields.
-
-Clear the @identity_url@ field of the existing user record.
-
-Create a Link object with the following attributes (where @tail_uuid@ is the new email address, and @head_uuid@ is the existing user UUID):
-
-<notextile>
-<pre><code>{
-  "link_class":"permission",
-  "name":"can_login",
-  "tail_uuid":"<span class="userinput">person@new.example.com</span>",
-  "head_uuid":"<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
-  "properties":{
-    "identity_url_prefix":"https://www.google.com/"
-  }
-}
-</code></pre>
-</notextile>
-
-Have the user log in using their <code>person@new.example.com</code> Google account. You can verify this by checking that the @identity_url@ field has been populated.
diff --git a/doc/admin/index.html.textile.liquid b/doc/admin/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..97549ae
--- /dev/null
@@ -0,0 +1,13 @@
+---
+layout: default
+navsection: admin
+title: "Arvados admin overview"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This section describes how to administer an Arvados cluster.  Cluster admins should already be familiar with the "Arvados architecture.":{{site.baseurl}}/architecture/index.html  For instructions on installing and configuring an Arvados cluster, see the "install guide.":{{site.baseurl}}/install/index.html
index 1ce35e9d4f85ac3a24cffa028ca3025b6ea703e6..b69730c930e0d5ab50ecf57a3e5d285c3dde8fdb 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
-navsection: userguide
-title: "Merging a remote account"
+navsection: admin
+title: "Migrating a user to a federated account"
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
diff --git a/doc/admin/migrating-providers.html.textile.liquid b/doc/admin/migrating-providers.html.textile.liquid
new file mode 100644 (file)
index 0000000..9231dc2
--- /dev/null
@@ -0,0 +1,41 @@
+---
+layout: default
+navsection: admin
+title: "Migrating account providers"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to enable users to use more than one provider to log into the same Arvados account.  This can be used to migrate account providers, for example, from LDAP to Google.  In order to do this, users must be able to log into both the "old" and "new" providers.
+
+h2. Configure multiple providers in SSO
+
+In @application.yml@ for the SSO server, enable both @google_oauth2@ and @ldap@ providers:
+
+<pre>
+production:
+  google_oauth2_client_id: abcd
+  google_oauth2_client_secret: abcd
+
+  use_ldap:
+    title: Example LDAP
+    host: ldap.example.com
+    port: 636
+    method: ssl
+    base: "ou=Users, dc=example, dc=com"
+    uid: uid
+    username: uid
+</pre>
+
+Restart the SSO server after changing the configuration.
+
+h2. Link accounts
+
+Instruct users to go through the process of "linking accounts.":{{site.baseurl}}/user/topics/link-accounts.html
+
+After linking accounts, users can use the new provider to access their existing Arvados account.
+
+Once all users have migrated, the old account provider can be removed from the SSO configuration.
diff --git a/doc/admin/spot-instances.html.textile.liquid b/doc/admin/spot-instances.html.textile.liquid
new file mode 100644 (file)
index 0000000..1c61b60
--- /dev/null
@@ -0,0 +1,78 @@
+---
+layout: default
+navsection: admin
+title: Using AWS Spot instances
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to set up the system to take advantage of "Amazon's EC2 spot instances":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html.
+
+h3. Nodemanager
+
+Nodemanager should be configured with cloud sizes that include the @preemptible@ boolean parameter. For example, for every on-demand cloud node size, you could create a @.spot@ variant, like this:
+
+<pre>
+[Size m4.large]
+cores = 2
+scratch = 32000
+
+[Size m4.large.spot]
+cores = 2
+instance_type = m4.large
+preemptible = true
+scratch = 32000
+</pre>
+
+h3. Slurm dispatcher
+
+The @crunch-dispatch-slurm@ service needs a matching instance type configuration in @/etc/arvados/config.yml@, following the previous example:
+
+<pre>
+Clusters:
+  uuid_prefix:
+    InstanceTypes:
+    - Name: m4.large
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+    - Name: m4.large.spot
+      Preemptible: true
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+</pre>
+
+@InstanceType@ names should match those defined in nodemanager's config file, because it is @crunch-dispatch-slurm@'s job to select the instance type and communicate the decision to @nodemanager@ via Slurm.
+
+h3. API Server
+
+Container requests need the @preemptible@ scheduling parameter included to make the dispatcher request a spot instance. The API server configuration file includes an option that, when enabled, automatically assigns the @preemptible@ parameter to any new child container request that doesn't already set it. To activate this feature, add the following to the @application.yml@ file:
+
+<pre>
+preemptible_instances: true
+</pre>
+
+With this configuration active, child container requests should explicitly include @preemptible = false@ at creation time to avoid being scheduled onto spot instances.
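+
+For example, a child container request created through the Python SDK could opt out like this (a sketch; all other required container request fields are omitted):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+api.container_requests().create(body={"container_request": {
+    # ... other container request fields ...
+    "scheduling_parameters": {"preemptible": False},
+}}).execute()
+</pre>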
+
+h3. AWS Permissions
+
+When requesting spot instances, Amazon's API may return an authorization error depending on how users and permissions are set up on the account. If this is the case, check nodemanager's log for:
+
+<pre>
+BaseHTTPError: AuthFailure.ServiceLinkedRoleCreationNotPermitted: The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.
+</pre>
+
+The account needs to have a service-linked role created. This can be done by logging into the AWS account, going to _IAM Management_ &rarr; _Roles_, and creating the @AWSServiceRoleForEC2Spot@ role: click the @Create@ button, then select the @EC2@ service and the @EC2 - Spot Instances@ use case.
+
+h3. Cost Tracking
+
+Amazon's spot instance prices are declared at instance request time and capped by the maximum price the user is willing to pay per hour. By default, this maximum is the on-demand price of the instance type, and it is the setting nodemanager currently uses, since it does not attach any pricing data to the spot instance request.
+
+The real price of a spot instance at any given time is discovered at the end of each usage hour, depending on instance demand. For this reason, AWS provides a data feed subscription to get hourly logs, as described in "Amazon's User Guide":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html.
\ No newline at end of file
diff --git a/doc/admin/storage-classes.html.textile.liquid b/doc/admin/storage-classes.html.textile.liquid
new file mode 100644 (file)
index 0000000..1a6420d
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: admin
+title: Configuring storage classes
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Storage classes (also known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks.  This can be used to implement data storage policies such as moving data to archival storage.
+
+The storage classes for each volume are set in the per-volume "keepstore configuration":{{site.baseurl}}/install/install-keepstore.html
+
+<pre>
+Volumes:
+ - ... Volume configuration ...
+   #
+   # If no storage classes are specified, [default] is used.
+   #
+   StorageClasses: null
+
+ - ... Volume configuration ...
+   #
+   # Specify this volume is in the "archival" storage class.
+   #
+   StorageClasses: [archival]
+
+</pre>
+
+Names of storage classes are internal to the cluster and decided by the administrator.  Aside from "default", Arvados currently does not define any standard storage class names.
+
+h3. Using storage classes
+
+"Discussed in the user guide":{{site.baseurl}}/user/topics/storage-classes.html
+
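+As a sketch, a collection can request a class through its @storage_classes_desired@ attribute using the Python SDK (the uuid is a placeholder):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+api.collections().update(
+    uuid="zzzzz-4zz18-xxxxxxxxxxxxxxx",
+    body={"collection": {"storage_classes_desired": ["archival"]}}).execute()
+</pre>
+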
+h3. Storage management notes
+
+The "keep-balance":{{site.baseurl}}/install/install-keep-balance.html service is responsible for deciding which blocks should be placed on which keepstore volumes.  As part of the rebalancing behavior, it will determine where a block should go in order to satisfy the desired storage classes, and issue pull requests to copy the block from its original volume to the desired volume.  The block will subsequently be moved to trash on the original volume.
+
+If a block appears in multiple collections with different storage classes, the block will be stored in separate volumes for each storage class, even if that results in overreplication, unless there is a volume which has all the desired storage classes.
+
+If a collection has a desired storage class which is not available in any keepstore volume, the collection's blocks will remain in place, and an error will appear in the @keep-balance@ logs.
+
+This feature does not provide a hard guarantee on where data will be stored.  Data may be written to default storage and moved to the desired storage class later.  If controlling data locality is a hard requirement (such as legal restrictions on the location of data) we recommend setting up multiple Arvados clusters.
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
new file mode 100644 (file)
index 0000000..7a330a9
--- /dev/null
@@ -0,0 +1,250 @@
+---
+layout: default
+navsection: admin
+title: "Upgrading Arvados and Release notes"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+What you need to know and do in order to upgrade your Arvados installation.
+
+h2. General process
+
+# Wait for the cluster to be idle and stop Arvados services.
+# Install new packages using @apt-get upgrade@ or @yum upgrade@.
+# Package installation scripts will perform any necessary data migrations.
+# Consult upgrade notes below to see if any manual configuration updates are necessary.
+# Restart Arvados services.
+
+h2. Upgrade notes
+
+Some versions introduce changes that require special attention when upgrading: e.g., there is a new service to install, or there is a change to the default configuration that you might need to override in order to preserve the old behavior.
+
+{% comment %}
+Note to developers: Add new items at the top. Include the date, issue number, commit, and considerations/instructions for those about to upgrade.
+
+TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
+{% endcomment %}
+
+h3. 2018-04-05: v1.1.4 regression in arvados-cwl-runner for workflows that rely on implicit discovery of secondaryFiles
+
+h4. Secondary files missing from toplevel workflow inputs
+
+If a workflow input does not declare @secondaryFiles@ corresponding to the @secondaryFiles@ of workflow steps which use the input, the workflow would inconsistently succeed or fail depending on whether the input values were specified as local files or referenced an existing collection (and whether the existing collection contained the secondary files or not).  To ensure consistent behavior, the workflow is now required to declare in the top level workflow inputs any secondaryFiles that are expected by workflow steps.
+
+As an example, the following workflow will fail because the @toplevel_input@ does not declare the @secondaryFiles@ that are expected by @step_input@:
+
+<pre>
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  toplevel_input: File
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
+</pre>
+
+When run, this produces an error like this:
+
+<pre>
+cwltool ERROR: [step step1] Cannot make job: Missing required secondary file 'hello.txt.idx' from file object: {
+    "basename": "hello.txt",
+    "class": "File",
+    "location": "keep:ade9d0e032044bd7f58daaecc0d06bc6+51/hello.txt",
+    "size": 0,
+    "nameroot": "hello",
+    "nameext": ".txt",
+    "secondaryFiles": []
+}
+</pre>
+
+To fix this error, add the appropriate @secondaryFiles@ section to @toplevel_input@:
+
+<notextile>
+<pre><code>class: Workflow
+cwlVersion: v1.0
+inputs:
+  <span class="userinput">toplevel_input:
+    type: File
+    secondaryFiles:
+      - .idx</span>
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
+</code></pre>
+</notextile>
+
+h4. Secondary files on default file inputs
+
+Due to a bug in Arvados v1.1.4, @File@ inputs that have default values and also expect @secondaryFiles@ will fail to upload the default @secondaryFiles@.  As an example, the following case will fail:
+
+<pre>
+class: CommandLineTool
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    default:
+      class: File
+      location: hello.txt
+outputs: []
+baseCommand: echo
+</pre>
+
+When run, this produces an error like this:
+
+<pre>
+2018-05-03 10:58:47 cwltool ERROR: Unhandled error, try again with --debug for more information:
+  [Errno 2] File not found: u'hello.txt.idx'
+</pre>
+
+To fix this, manually upload the primary and secondary files to Keep and explicitly declare @secondaryFiles@ on the default primary file:
+
+<notextile>
+<pre><code>class: CommandLineTool
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    <span class="userinput">default:
+      class: File
+      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
+      secondaryFiles:
+       - class: File
+         location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt.idx</span>
+outputs: []
+baseCommand: echo
+</code></pre>
+</notextile>
+
+This bug will be fixed in an upcoming release of Arvados.
+
+h3. 2017-12-08: #11908 commit:8f987a9271 now requires a minimum of Postgres 9.4 (previously 9.3)
+* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
+* Ubuntu 16.04 (pg 9.5) does not require an upgrade
+* Ubuntu 14.04 (pg 9.3) requires an upgrade to Postgres 9.4: https://www.postgresql.org/download/linux/ubuntu/
+* CentOS 7 and RHEL7 (pg 9.2) require an upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
+*# Create a database backup using @pg_dump@
+*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
+*# Restore from the backup using @psql@
+
+h3. 2017-09-25: #12032 commit:68bdf4cbb now requires a minimum of Postgres 9.3 (previously 9.1)
+* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
+* Ubuntu 16.04 (pg 9.5) does not require an upgrade
+* Ubuntu 14.04 (pg 9.3) is compatible, however upgrading to Postgres 9.4 is recommended: https://www.postgresql.org/download/linux/ubuntu/
+* CentOS 7 and RHEL7 (pg 9.2) should upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
+*# Create a database backup using @pg_dump@
+*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
+*# Restore from the backup using @psql@
+
+h3. 2017-06-30: #11807 commit:55aafbb converts old "jobs" database records from YAML to JSON, making the upgrade process slower than usual.
+* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 commit:660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.
+* The conversion runs as a database migration, i.e., during the deb/rpm package upgrade process, while your API server is unavailable.
+* Expect it to take about 1 minute per 20K jobs that have ever been created/run.
+
+h3. 2017-06-05: #9005 commit:cb230b0 reduces service discovery overhead in keep-web requests.
+* When upgrading keep-web _or keepproxy_ to/past this version, make sure to update API server as well. Otherwise, a bad token in a request can cause keep-web to fail future requests until either keep-web restarts or API server gets upgraded.
+
+h3. 2017-04-12: #11349 commit:2c094e2 adds a "management" http server to nodemanager.
+* To enable it, add to your configuration file: <pre>[Manage]
+  address = 127.0.0.1
+  port = 8989</pre> (see example configuration files in source:services/nodemanager/doc or https://doc.arvados.org/install/install-nodemanager.html for more info)
+* The server responds to @http://{address}:{port}/status.json@ with a summary of how many nodes are in each state (booting, busy, shutdown, etc.)
+
+h3. 2017-03-23: #10766 commit:e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.
+* See http://doc.arvados.org/install/install-ws.html for install/upgrade instructions.
+* Remove the old puma server after the upgrade is complete. Example, with runit: <pre>
+$ sudo sv down /etc/sv/puma
+$ sudo rm -r /etc/sv/puma
+</pre> Example, with systemd: <pre>
+$ systemctl disable puma
+$ systemctl stop puma
+</pre>
+
+h3. 2017-03-06: #11168 commit:660a614 uses JSON instead of YAML to encode hashes and arrays in the database.
+* Aside from a slight performance improvement, this should have no externally visible effect.
+* Downgrading past this version is not supported, and is likely to cause errors. If this happens, the solution is to upgrade past this version.
+* After upgrading, make sure to restart puma and crunch-dispatch-* processes.
+
+h3. 2017-02-03: #10969 commit:74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.
+* If your compute nodes run a version of *docker older than 1.10* you must override the default by adding to your API server configuration (@/etc/arvados/api/application.yml@): <pre><code class="yaml">docker_image_formats: ["v1"]</code></pre>
+* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue #10969 for more detail.
+* *NOTE:* This does *not* include any support for migrating existing Docker images from v1 to v2 format. This will come later: for now, sites running Docker 1.9 or earlier should still *avoid upgrading Docker further than 1.9.*
+
+h3. 2016-09-27: several Debian and RPM packages -- keep-balance (commit:d9eec0b), keep-web (commit:3399e63), keepproxy (commit:6de67b6), and arvados-git-httpd (commit:9e27ddf) -- now enable their respective components using systemd. These components prefer YAML configuration files over command line flags (commit:3bbe1cd).
+* On Debian-based systems using systemd, services are enabled automatically when packages are installed.
+* On RedHat-based systems using systemd, unit files are installed but services must be enabled explicitly: e.g., <code>"sudo systemctl enable keep-web; sudo systemctl start keep-web"</code>.
+* The new systemd-supervised services will not start up successfully until configuration files are installed in /etc/arvados/: e.g., <code>"Sep 26 18:23:55 62751f5bb946 keep-web[74]: 2016/09/26 18:23:55 open /etc/arvados/keep-web/keep-web.yml: no such file or directory"</code>
+* To migrate from runit to systemd after installing the new packages, we recommend the following procedure:
+*# Bring down the runit service: "sv down /etc/sv/keep-web"
+*# Create a YAML configuration file (e.g., /etc/arvados/keep-web/keep-web.yml -- see "keep-web -help")
+*# Ensure the service is running correctly under systemd: "systemctl status keep-web" / "journalctl -u keep-web"
+*# Remove the runit service so it doesn't start at next boot
+* Affected services:
+** keep-balance - /etc/arvados/keep-balance/keep-balance.yml
+** keep-web - /etc/arvados/keep-web/keep-web.yml
+** keepproxy - /etc/arvados/keepproxy/keepproxy.yml
+** arvados-git-httpd - /etc/arvados/arv-git-httpd/arv-git-httpd.yml
+
+h3. 2016-05-31: commit:ae72b172c8 and commit:3aae316c25 install Python modules and scripts to different locations on the filesystem.
+* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection).  Now they get installed to a path under @/usr@.  This improves compatibility with other Python packages provided by the distribution.  See #9242 for more background.
+* If you simply import Python modules from scripts, or call Python tools relying on $PATH, you don't need to make any changes.  If you have hardcoded full paths to some of these files (e.g., in symbolic links or configuration files), you will need to update those paths after this upgrade.
+
+h3. 2016-04-25: commit:eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.
+* On each Debian-based compute node and shell node, run: @sudo apt-get install crunchrunner@
+* On each Red Hat-based compute node and shell node, run: @sudo yum install crunchrunner@
+
+h3. 2016-04-21: commit:3c88abd changes the Keep permission signature algorithm.
+* All software components that generate signatures must be upgraded together. These are: keepstore, API server, keep-block-check, and keep-rsync. For example, if keepstore < 0.1.20160421183420 but API server >= 0.1.20160421183420, clients will not be able to read or write data in Keep.
+* Jobs and client operations that are in progress during the upgrade (including arv-put's "resume cache") will fail.
+
+h3. 2015-01-05: commit:e1276d6e disables Workbench's "Getting Started" popup by default.
+* If you want new users to continue seeing this popup, set @enable_getting_started_popup: true@ in Workbench's @application.yml@ configuration.
+
+h3. 2015-12-03: commit:5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see #7751)
+* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge commit:346a558
+* Jobs will fail if the API server (in particular crunch-job from the arvados-cli gem) is upgraded without upgrading arvados-fuse on compute nodes.
+
+h3. 2015-11-11: commit:1e2ace5 changes recommended config for keep-web (see #5824)
+* proxy/dns/ssl config should be updated to route "https://download.uuid_prefix.arvadosapi.com/" requests to keep-web (alongside the existing "collections" routing)
+* keep-web command line adds @-attachment-only-host download.uuid_prefix.arvadosapi.com@
+* Workbench config adds @keep_web_download_url@
+* More info on the (still beta/non-TOC-linked) "keep-web doc page":http://doc.arvados.org/install/install-keep-web.html
+
+h3. 2015-11-04: commit:1d1c6de removes stopped containers (see #7444)
+* arvados-docker-cleaner removes _all_ docker containers as soon as they exit, effectively making @docker run@ default to @--rm@. If you run arvados-docker-cleaner on a host that does anything other than run crunch-jobs, and you still want to be able to use @docker start@, read the "new doc page":http://doc.arvados.org/install/install-compute-node.html to learn how to turn this off before upgrading.
+
+h3. 2015-11-04: commit:21006cf adds a keep-web service (see #5824)
+* Nothing relies on it yet, but early adopters can install it now by following http://doc.arvados.org/install/install-keep-web.html (it is not yet linked in the TOC).
index 998874763ec3ee39334e8499100c8e0282e2e322..3c7347dd60bd8c61a75f1a77929227da4750dea4 100644 (file)
@@ -1,6 +1,6 @@
 ---
 layout: default
-navsection: api
+navsection: architecture
 title: Computing with Crunch
 ...
 {% comment %}
@@ -13,8 +13,6 @@ Crunch is the name for the Arvados system for managing computation.  It provides
 
 h2. Container API
 
-Note: although the preferred API for Arvados going forward, the Container API may not yet be available on all installations.
-
 # To submit work, create a "container request":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.
 # The system will fulfill the container request by creating or reusing a "Container object":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field.  If the same request has been submitted in the past, it may reuse an existing container.  The reuse behavior can be suppressed with @use_existing: false@ in the container request.
 # The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as SLURM).
@@ -22,7 +20,7 @@ Note: although the preferred API for Arvados going forward, the Container API ma
 # When the container associated with the container request is completed, the container request will go into the @Final@ state.
 # The @output_uuid@ field of the container request contains the uuid of output collection produced by container request.
 
-!{{site.baseurl}}/images/Crunch_dispatch.svg!
+!(full-width){{site.baseurl}}/images/Crunch_dispatch.svg!
 
 h2. Job API (deprecated)
 
index 00c120d9f8f1be4aad90022b514fe37024618dc3..937ae706d66295055ffbca485c1b587bc5c40739 100644 (file)
@@ -98,7 +98,7 @@ table(table table-bordered table-condensed).
 |@is_a@|string|Arvados object type|@["head_uuid","is_a","arvados#collection"]@|
 |@exists@|string|Test if a subproperty is present.|@["properties","exists","my_subproperty"]@|
 
-h4. Filtering on subproperties
+h4(#subpropertyfilters). Filtering on subproperties
 
 Some record types have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs.  To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@.  You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@, for example @properties.<http://example.com/user_property>@.  Alternatively, you may provide a JSON-LD "@context" field, however at this time JSON-LD contexts are not interpreted by Arvados.
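+
+For example, to list collections whose @category@ property equals @sequencing@ (a sketch using the Python SDK; the property name is illustrative):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+matches = api.collections().list(
+    filters=[["properties.category", "=", "sequencing"]]).execute()
+</pre>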
 
index d753f0990f71facaa7580ac1a3bee8d1f69829a5..f761c665e57ad811085098c3145ec34ff0fd642b 100644 (file)
@@ -27,6 +27,7 @@ table(table table-bordered table-condensed).
 |_. Attribute|_. Type|_. Description|_. Example|
 |name|string|||
 |description|text|||
+|properties|hash|User-defined metadata; may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
 |portable_data_hash|string|The MD5 sum of the manifest text stripped of block hints other than the size hint.||
 |manifest_text|text|||
 |replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|
index 1c2550f723f5d8d96241ff12b9d5c09cf136e512..0e2e8ce7c6135490e61585594471080ce1ae1719 100644 (file)
@@ -29,7 +29,7 @@ table(table table-bordered table-condensed).
 |_. Attribute|_. Type|_. Description|_. Notes|
 |name|string|The name of the container_request.||
 |description|string|The description of the container_request.||
-|properties|hash|Client-defined structured data that does not affect how the container is run.||
+|properties|hash|User-defined metadata that does not affect how the container is run.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
 |state|string|The allowed states are "Uncommitted", "Committed", and "Final".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the "Final" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).|
 |requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container; if the parent container is cancelled, this container_request will be cancelled as well.|
 |container_uuid|string|The uuid of the container that satisfies this container_request. The system may return a preexisting Container that matches the container request criteria. See "Container reuse":#container_reuse for more details.|Container reuse is the default behavior, but may be disabled with @use_existing: false@ to always create a new container.|
index 2716056caac06ca0976ccd595b0cfa89ae17d438..e87bc51ad4a590b4102fd4f1047c9b878de466a2 100644 (file)
@@ -28,6 +28,7 @@ table(table table-bordered table-condensed).
 |group_class|string|Type of group. This does not affect behavior, but determines how the group is presented in the user interface. For example, @project@ indicates that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects.|@"project"@
 null|
 |description|text|||
+|properties|hash|User-defined metadata; may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
 |writable_by|array|List of UUID strings identifying Users and other Groups that have write permission for this Group.  Only users who are allowed to administer the Group will receive a full list.  Other users will receive a partial list that includes the Group's owner_uuid and (if applicable) their own user UUID.||
 |trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls.  May be untrashed.||
 |delete_at|datetime|If @delete_at@ is non-null and in the past, the group and all objects directly or indirectly owned by the group may be permanently deleted.||
@@ -49,7 +50,7 @@ table(table table-bordered table-condensed).
 |_. Argument |_. Type |_. Description |_. Location |_. Example |
 {background:#ccffcc}.|uuid|string|The UUID of the group in question.|path||
 |limit|integer (default 100)|Maximum number of items to return.|query||
-|order|string|Order in which to return matching items.  Sort within a resource type by prefixing the attribute with the resource name and a dot.|query|@"collections.modified_at desc"@|
+|order|array|Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. Sort within a resource type by prefixing the attribute with the resource name and a period.|query|@["collections.modified_at desc"]@|
 |filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@|
 |recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
 
index ec5d53010456bb36e239927bcdb563f1c6467e7e..04643443e680e4170df952aeb802f3dcf4eea9c7 100644 (file)
@@ -29,7 +29,7 @@ table(table table-bordered table-condensed).
 |tail_uuid|string|The origin or actor in the description or action (may be null).|
 |link_class|string|Type of link|
 |name|string|Primary value of the link.|
-|properties|hash|Additional information, expressed as a key&rarr;value hash. Key: string. Value: string, number, array, or hash.|
+|properties|hash|Additional information, expressed as a key&rarr;value hash. Key: string. Value: string, number, array, or hash.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters|
 
 h2. Link classes
 
index 1b51f01c632ff1e4387d94355f5c6138ec01504f..7ddc62519c1922ad48a254827078f7b0651065ea 100644 (file)
@@ -32,7 +32,8 @@ table(table table-bordered table-condensed).
 |job_uuid|string|The UUID of the job that this node is assigned to work on.  If you do not have permission to read the job, this will be null.||
 |first_ping_at|datetime|||
 |last_ping_at|datetime|||
-|info|hash|||
+|info|hash|Sensitive information about the node (only visible to admins), such as 'ping_secret' and 'ec2_instance_id'. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
+|properties|hash|Public information about the node, such as 'total_cpu_cores', 'total_ram_mb', and 'total_scratch_mb'.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
 
 h2. Methods
 
index 290125bd8cd7e87b7faec04dafc7edfb64bd5cc2..7ee179071aed638a04bddfc2194319c5e0cf6f6a 100644 (file)
@@ -1,6 +1,6 @@
 ---
 layout: default
-navsection: api
+navsection: architecture
 navmenu: Concepts
 title: "Permission model"
 ...
@@ -74,4 +74,4 @@ An Arvados site may be configured to allow users to browse resources without requiring log in.
 
 h2. Example
 
-!{{site.baseurl}}/images/Arvados_Permissions.svg!
+!(full-width){{site.baseurl}}/images/Arvados_Permissions.svg!
index c3ce2d6b675b7e44f7ce066835832ea68b396cf2..aa0ed21b9f86788f880721a63a28cca30e7448f8 100644 (file)
@@ -1,6 +1,6 @@
 ---
 layout: default
-navsection: api
+navsection: architecture
 title: Storage in Keep
 ...
 {% comment %}
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Keep clients are applications such as @arv-get@, @arv-put@ and @arv-mount@ which store and retrieve data from Keep.  In doing so, these programs interact with both the API server (which stores file metadata in form of Collection objects) and individual Keep servers (which store the actual data blocks).
 
-!{{site.baseurl}}/images/Keep_reading_writing_block.svg!
+!(full-width){{site.baseurl}}/images/Keep_reading_writing_block.svg!
 
 h2. Storing a file
 
@@ -23,7 +23,7 @@ h2. Storing a file
 # The client creates a "collection":{{site.baseurl}}/api/methods/collections.html and provides the @manifest_text@
 # The API server accepts the collection after validating the signed tokens (proof of knowledge) for each block.
 
-!{{site.baseurl}}/images/Keep_manifests.svg!
+!(full-width){{site.baseurl}}/images/Keep_manifests.svg!
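
To make step 6 concrete, a minimal sketch of creating a collection from a @manifest_text@ with the Python SDK. The manifest below references only the zero-length block (@d41d8cd98f00b204e9800998ecf8427e+0@), which does not require a permission signature, so no prior block upload is needed:

import arvados

api = arvados.api('v1')

# A one-file manifest: the collection contains a single empty file.
manifest = '. d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt\n'
collection = api.collections().create(
    body={'collection': {'manifest_text': manifest}},
).execute()
print(collection['uuid'], collection['portable_data_hash'])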
 
 h2. Fetching a file
 
@@ -34,7 +34,7 @@ h2. Fetching a file
 # The client sends the data block request to the keep server, along with the token signature from the API server, which proves to Keep servers that the client is permitted to read a given block.
 # The server provides the block data after validating the token signature for the block (if the server does not have the block, it returns a 404 and the client tries the next highest-priority server).
 
-!{{site.baseurl}}/images/Keep_rendezvous_hashing.svg!
+!(full-width){{site.baseurl}}/images/Keep_rendezvous_hashing.svg!
 
 Each @keep_service@ resource has an assigned uuid.  To determine priority assignments of blocks to servers, for each keep service compute the MD5 sum of the string concatenation of the block locator (hex-coded hash part only) and the service uuid, then sort the services by these values in descending order.  Blocks are preferentially placed on servers with the highest weight.
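
This sorting rule is a form of rendezvous hashing; a short self-contained Python sketch of the weight computation (the service uuids are hypothetical):

import hashlib

def prioritized_services(block_locator, service_uuids):
    # Use only the hex-coded hash part of the locator.
    block_hash = block_locator.split('+')[0]
    # Weight each service by MD5(block hash + service uuid), compared as
    # hex strings, sorted descending: the highest weight is tried first.
    return sorted(
        service_uuids,
        key=lambda uuid: hashlib.md5((block_hash + uuid).encode()).hexdigest(),
        reverse=True,
    )

print(prioritized_services(
    'acbd18db4cc2f85cedef654fccc4a4d8+3',
    ['zzzzz-bi6l4-000000000000000', 'zzzzz-bi6l4-111111111111111'],
))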
 
index 922df5ab9df5f95dbdfb2a189451d322c2e78d2f..3437003a1874dfef212c66a38a42b28999147686 100644 (file)
@@ -25,6 +25,10 @@ Browser-based applications can perform login via the following high-level flow:
 
 The "browser authentication process is documented in detail on the Arvados wiki.":https://dev.arvados.org/projects/arvados/wiki/Workbench_authentication_process
 
+h2. User activation
+
+"Creation and activation of new users is described here.":{{site.baseurl}}/admin/activation.html
+
 h2. Creating tokens via the API
 
 The browser login method above issues a new token.  Using that token, it is possible to make API calls to create additional tokens.  To do so, use the @create@ method of the "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.
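
A hedged sketch of that @create@ call with the Python SDK (the @scopes@ value is illustrative; @['all']@ requests an unrestricted token):

import arvados

api = arvados.api('v1')

# Create an additional token for the current user.
new_auth = api.api_client_authorizations().create(
    body={'api_client_authorization': {'scopes': ['all']}},
).execute()
print(new_auth['api_token'])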
diff --git a/doc/architecture/Arvados_arch.odg b/doc/architecture/Arvados_arch.odg
new file mode 100644 (file)
index 0000000..8b363c1
Binary files /dev/null and b/doc/architecture/Arvados_arch.odg differ
diff --git a/doc/architecture/index.html.textile.liquid b/doc/architecture/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..c7ea326
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: architecture
+title: "Arvados components"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+!(full-width){{site.baseurl}}/images/Arvados_arch.svg!
+
+h3. Services
+
+Located in @arvados/services@, except for Workbench, which is located in @arvados/apps/workbench@.
+
+table(table table-bordered table-condensed).
+|_. Component|_. Description|
+|api|The API server is the core of Arvados.  It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|
+|arv-git-httpd|Provides a git+http interface to Arvados-managed git repositories, with permissions and authentication based on an Arvados API token.|
+|crunch-dispatch-local|Gets compute requests submitted to the API server and executes them locally.|
+|crunch-dispatch-slurm|Gets compute requests submitted to the API server and submits them to slurm.|
+|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|
+|dockercleaner|Daemon for cleaning up Docker containers and images.|
+|fuse|Filesystem in Userspace (FUSE) filesystem driver for Keep.|
+|health|Health check proxy, contacts configured Arvados services at their health check endpoints and reports results.|
+|keep-balance|Performs storage utilization reporting, optimization, and garbage collection.  Moves data blocks to their optimum location, ensures correct replication and storage class, and trashes unreferenced blocks.|
+|keepproxy|Provides low-level access to keepstore services (block-level data access) for clients outside the internal (private) network.|
+|keepstore|Provides access to underlying storage (filesystem or object storage such as Amazon S3 or Azure Blob) with Arvados permissions.|
+|keep-web|Provides high-level WebDAV access to collections (file-level data access).|
+|login-sync|Synchronizes virtual machine users with Arvados users and permissions.|
+|nodemanager|Provides elastic computing by creating and destroying cloud-based virtual machines in response to compute demand.|
+|ws|Publishes API server change events over websockets.|
+|workbench|Web application providing user interface to Arvados services.|
+
+h3. Tools
+
+The @arv@ command is located in @arvados/sdk/ruby@, the @arv-*@ tools are located in @arvados/sdk/python@, and the rest are located in @arvados/tools@.
+
+table(table table-bordered table-condensed).
+|_. Component|_. Description |
+|arv|Provides command line access to the API, along with some general-purpose utilities.|
+|arv-copy|Copy a collection from one cluster to another.|
+|arv-get|Get files from a collection.|
+|arv-keepdocker|Upload Docker images from local Docker daemon to Keep.|
+|arv-ls|List files in a collection.|
+|arv-migrate-docker19|Migrate Docker images in Keep from v1 format (Docker 1.9 or earlier) to v2 format (Docker 1.10 or later).|
+|arv-normalize|Read manifest text on stdin and produce normalized manifest text on stdout.|
+|arv-put|Upload files to a collection.|
+|arv-ws|Print events from Arvados websocket event source.|
+|arvbash|Helpful @bash@ macros for using Arvados at the command line.|
+|arvbox|Dockerized Arvados environment for development and testing.|
+|crunchstat-summary|Read execution metrics (CPU %, RAM, network, etc.) collected from a compute container and produce a report.|
+|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|
+|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|
+|keep-rsync|Get lists of blocks from two clusters, and copy blocks which exist on the source cluster but are missing from the destination cluster.|
+|sync-groups|Take a CSV file listing (group, username) pairs and synchronize membership in Arvados groups.|
diff --git a/doc/css/images.css b/doc/css/images.css
new file mode 100644 (file)
index 0000000..0bd2ec7
--- /dev/null
@@ -0,0 +1,7 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+img.full-width {
+    width: 100%;
+}
diff --git a/doc/images/Arvados_arch.svg b/doc/images/Arvados_arch.svg
new file mode 100644 (file)
index 0000000..7680470
--- /dev/null
@@ -0,0 +1,514 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="280mm" height="210mm" viewBox="0 0 28000 21000" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+  <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+   <rect x="0" y="0" width="28000" height="21000"/>
+  </clipPath>
+  <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+   <rect x="28" y="21" width="27944" height="20958"/>
+  </clipPath>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_1" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="y" horiz-adv-x="1059" d="M 604,1 C 579,-64 553,-123 527,-175 500,-227 471,-272 438,-309 405,-346 369,-374 329,-394 289,-413 243,-423 191,-423 168,-423 147,-423 128,-423 109,-423 88,-420 67,-414 L 67,-279 C 80,-282 94,-284 110,-284 126,-284 140,-284 151,-284 204,-284 253,-264 298,-225 343,-186 383,-124 417,-38 L 434,5 5,1082 197,1082 425,484 C 432,466 440,442 451,412 461,382 471,352 482,322 492,292 501,265 509,241 517,217 522,202 523,196 525,203 530,218 538,240 545,261 554,285 564,312 573,339 583,366 593,393 603,420 611,444 618,464 L 830,1082 1020,1082 604,1 Z"/>
+   <glyph unicode="x" horiz-adv-x="1033" d="M 801,0 L 510,444 217,0 23,0 408,556 41,1082 240,1082 510,661 778,1082 979,1082 612,558 1002,0 801,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1535" d="M 1174,0 L 965,0 792,698 C 787,716 781,738 776,765 770,792 764,818 759,843 752,872 746,903 740,934 734,904 728,874 721,845 716,820 710,793 704,766 697,739 691,715 686,694 L 508,0 300,0 -3,1082 175,1082 358,347 C 363,332 367,313 372,291 377,268 381,246 386,225 391,200 396,175 401,149 406,174 412,199 418,223 423,244 429,265 434,286 439,307 444,325 448,339 L 644,1082 837,1082 1026,339 C 1031,322 1036,302 1041,280 1046,258 1051,237 1056,218 1061,195 1067,172 1072,149 1077,174 1083,199 1088,223 1093,244 1098,265 1103,288 1108,310 1112,330 1117,347 L 1308,1082 1484,1082 1174,0 Z"/>
+   <glyph unicode="v" horiz-adv-x="1059" d="M 613,0 L 400,0 7,1082 199,1082 437,378 C 442,363 447,346 454,325 460,304 466,282 473,259 480,236 486,215 492,194 497,173 502,155 506,141 510,155 515,173 522,194 528,215 534,236 541,258 548,280 555,302 562,323 569,344 575,361 580,376 L 826,1082 1017,1082 613,0 Z"/>
+   <glyph unicode="u" horiz-adv-x="901" d="M 314,1082 L 314,396 C 314,343 318,299 326,264 333,229 346,200 363,179 380,157 403,142 432,133 460,124 495,119 537,119 580,119 618,127 653,142 687,157 716,178 741,207 765,235 784,270 797,312 810,353 817,401 817,455 L 817,1082 997,1082 997,228 C 997,205 997,181 998,156 998,131 998,107 999,85 1000,62 1000,43 1001,27 1002,11 1002,3 1003,3 L 833,3 C 832,6 832,15 831,30 830,44 830,61 829,79 828,98 827,117 826,136 825,156 825,172 825,185 L 822,185 C 805,154 786,125 765,100 744,75 720,53 693,36 666,18 634,4 599,-6 564,-15 523,-20 476,-20 416,-20 364,-13 321,2 278,17 242,39 214,70 186,101 166,140 153,188 140,236 133,294 133,361 L 133,1082 314,1082 Z"/>
+   <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 527,1 499,-5 471,-10 442,-14 409,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 467,127 484,128 501,131 517,134 535,137 554,141 L 554,8 Z"/>
+   <glyph unicode="s" horiz-adv-x="927" d="M 950,299 C 950,248 940,203 921,164 901,124 872,91 835,64 798,37 752,16 698,2 643,-13 581,-20 511,-20 448,-20 392,-15 342,-6 291,4 247,20 209,41 171,62 139,91 114,126 88,161 69,203 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 550,117 585,120 618,125 650,130 678,140 701,153 724,166 743,183 756,205 769,226 775,253 775,285 775,318 767,345 752,366 737,387 715,404 688,418 661,432 628,444 589,455 550,465 507,476 460,489 417,500 374,513 331,527 288,541 250,560 216,583 181,606 153,634 132,668 111,702 100,745 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 763,842 752,866 736,885 720,904 701,919 678,931 655,942 630,951 602,956 573,961 544,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,785 282,761 297,742 311,723 331,707 357,694 382,681 413,669 449,660 485,650 525,640 568,629 597,622 626,614 656,606 686,597 715,587 744,576 772,564 799,550 824,535 849,519 870,500 889,478 908,456 923,430 934,401 945,372 950,338 950,299 Z"/>
+   <glyph unicode="r" horiz-adv-x="556" d="M 142,0 L 142,830 C 142,853 142,876 142,900 141,923 141,946 140,968 139,990 139,1011 138,1030 137,1049 137,1067 136,1082 L 306,1082 C 307,1067 308,1049 309,1030 310,1010 311,990 312,969 313,948 313,929 314,910 314,891 314,874 314,861 L 318,861 C 331,902 344,938 359,969 373,999 390,1024 409,1044 428,1063 451,1078 478,1088 505,1097 537,1102 575,1102 590,1102 604,1101 617,1099 630,1096 641,1094 648,1092 L 648,927 C 636,930 622,933 606,935 590,936 572,937 552,937 511,937 476,928 447,909 418,890 394,865 376,832 357,799 344,759 335,714 326,668 322,618 322,564 L 322,0 142,0 Z"/>
+   <glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,464 1046,388 1033,319 1020,250 998,190 967,140 936,90 895,51 844,23 793,-6 730,-20 655,-20 578,-20 510,-5 452,24 394,53 350,101 319,168 L 314,168 C 315,167 315,161 316,150 316,139 316,126 317,110 317,94 317,76 318,57 318,37 318,17 318,-2 L 318,-425 138,-425 138,864 C 138,891 138,916 138,940 137,964 137,986 136,1005 135,1025 135,1042 134,1056 133,1070 133,1077 132,1077 L 306,1077 C 307,1075 308,1068 309,1057 310,1045 311,1031 312,1014 313,998 314,980 315,961 316,943 316,925 316,908 L 320,908 C 337,943 356,972 377,997 398,1021 423,1041 450,1057 477,1072 508,1084 542,1091 575,1098 613,1101 655,1101 730,1101 793,1088 844,1061 895,1034 936,997 967,949 998,900 1020,842 1033,774 1046,705 1053,629 1053,546 Z M 864,542 C 864,609 860,668 852,720 844,772 830,816 811,852 791,888 765,915 732,934 699,953 658,962 609,962 569,962 531,956 496,945 461,934 430,912 404,880 377,848 356,804 341,748 326,691 318,618 318,528 318,451 324,387 337,334 350,281 368,238 393,205 417,172 447,149 483,135 519,120 560,113 607,113 657,113 699,123 732,142 765,161 791,189 811,226 830,263 844,308 852,361 860,414 864,474 864,542 Z"/>
+   <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 490,-20 422,-9 363,14 304,37 254,71 213,118 172,165 140,223 119,294 97,364 86,447 86,542 86,915 248,1102 571,1102 655,1102 728,1090 789,1067 850,1044 900,1009 939,962 978,915 1006,857 1025,787 1044,717 1053,635 1053,542 Z M 864,542 C 864,626 858,695 845,750 832,805 813,848 788,881 763,914 732,937 696,950 660,963 619,969 574,969 528,969 487,962 450,949 413,935 381,912 355,879 329,846 309,802 296,747 282,692 275,624 275,542 275,458 282,389 297,334 312,279 332,235 358,202 383,169 414,146 449,133 484,120 522,113 563,113 609,113 651,120 688,133 725,146 757,168 783,201 809,234 829,278 843,333 857,388 864,458 864,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="900" d="M 825,0 L 825,686 C 825,739 821,783 814,818 806,853 793,882 776,904 759,925 736,941 708,950 679,959 644,963 602,963 559,963 521,956 487,941 452,926 423,904 399,876 374,847 355,812 342,771 329,729 322,681 322,627 L 322,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 334,928 353,957 374,982 395,1007 419,1029 446,1047 473,1064 505,1078 540,1088 575,1097 616,1102 663,1102 723,1102 775,1095 818,1080 861,1065 897,1043 925,1012 953,981 974,942 987,894 1000,845 1006,788 1006,721 L 1006,0 825,0 Z"/>
+   <glyph unicode="m" horiz-adv-x="1456" d="M 768,0 L 768,686 C 768,739 765,783 758,818 751,853 740,882 725,904 709,925 688,941 663,950 638,959 607,963 570,963 532,963 498,956 467,941 436,926 410,904 389,876 367,847 350,812 339,771 327,729 321,681 321,627 L 321,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 333,928 350,957 369,982 388,1007 410,1029 435,1047 460,1064 488,1078 521,1088 553,1097 590,1102 633,1102 715,1102 780,1086 828,1053 875,1020 908,968 927,897 L 930,897 C 946,928 964,957 984,982 1004,1007 1027,1029 1054,1047 1081,1064 1111,1078 1144,1088 1177,1097 1215,1102 1258,1102 1313,1102 1360,1095 1400,1080 1439,1065 1472,1043 1497,1012 1522,981 1541,942 1553,894 1565,845 1571,788 1571,721 L 1571,0 1393,0 1393,686 C 1393,739 1390,783 1383,818 1376,853 1365,882 1350,904 1334,925 1313,941 1288,950 1263,959 1232,963 1195,963 1157,963 1123,956 1092,942 1061,927 1035,906 1014,878 992,850 975,815 964,773 952,731 946,682 946,627 L 946,0 768,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="927" d="M 816,0 L 450,494 318,385 318,0 138,0 138,1484 318,1484 318,557 793,1082 1004,1082 565,617 1027,0 816,0 Z"/>
+   <glyph unicode="j" horiz-adv-x="372" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 317,-132 C 317,-174 314,-212 307,-247 300,-283 287,-313 269,-339 251,-365 227,-386 196,-401 165,-416 125,-423 77,-423 54,-423 32,-423 11,-423 -11,-423 -31,-421 -50,-416 L -50,-277 C -41,-278 -31,-280 -19,-281 -7,-282 3,-283 12,-283 37,-283 58,-280 75,-273 91,-266 104,-256 113,-242 122,-227 129,-209 132,-187 135,-164 137,-138 137,-107 L 137,1082 317,1082 317,-132 Z"/>
+   <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+   <glyph unicode="h" horiz-adv-x="874" d="M 317,897 C 337,934 359,965 382,991 405,1016 431,1037 459,1054 487,1071 518,1083 551,1091 584,1098 622,1102 663,1102 732,1102 789,1093 834,1074 878,1055 913,1029 939,996 964,962 982,922 992,875 1001,828 1006,777 1006,721 L 1006,0 825,0 825,686 C 825,732 822,772 817,807 811,842 800,871 784,894 768,917 745,934 716,946 687,957 649,963 602,963 559,963 521,955 487,940 452,925 423,903 399,875 374,847 355,813 342,773 329,733 322,688 322,638 L 322,0 142,0 142,1484 322,1484 322,1098 C 322,1076 322,1054 321,1032 320,1010 320,990 319,971 318,952 317,937 316,924 315,911 315,902 314,897 L 317,897 Z"/>
+   <glyph unicode="g" horiz-adv-x="954" d="M 548,-425 C 486,-425 431,-419 383,-406 335,-393 294,-375 260,-352 226,-328 198,-300 177,-267 156,-234 140,-198 131,-158 L 312,-132 C 324,-182 351,-220 392,-248 433,-274 486,-288 553,-288 594,-288 631,-282 664,-271 697,-260 726,-241 749,-217 772,-191 790,-159 803,-119 816,-79 822,-30 822,27 L 822,201 820,201 C 807,174 790,148 771,123 751,98 727,75 699,56 670,37 637,21 600,10 563,-2 520,-8 472,-8 403,-8 345,4 296,27 247,50 207,84 176,130 145,176 122,233 108,302 93,370 86,449 86,539 86,626 93,704 108,773 122,842 145,901 178,950 210,998 252,1035 304,1061 355,1086 418,1099 492,1099 569,1099 635,1082 692,1047 748,1012 791,962 822,897 L 824,897 C 824,914 825,933 826,953 827,974 828,994 829,1012 830,1031 831,1046 832,1060 833,1073 835,1080 836,1080 L 1007,1080 C 1006,1074 1006,1064 1005,1050 1004,1035 1004,1018 1003,998 1002,978 1002,956 1002,932 1001,907 1001,882 1001,856 L 1001,30 C 1001,-121 964,-234 890,-311 815,-387 701,-425 548,-425 Z M 822,541 C 822,616 814,681 798,735 781,788 760,832 733,866 706,900 676,925 642,941 607,957 572,965 536,965 490,965 451,957 418,941 385,925 357,900 336,866 314,831 298,787 288,734 277,680 272,616 272,541 272,463 277,398 288,345 298,292 314,249 335,216 356,183 383,160 416,146 449,132 488,125 533,125 569,125 604,133 639,148 673,163 704,188 731,221 758,254 780,297 797,350 814,403 822,466 822,541 Z"/>
+   <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1243 185,1280 192,1314 199,1347 213,1377 233,1402 252,1427 279,1446 313,1461 347,1475 391,1482 445,1482 466,1482 489,1481 512,1479 535,1477 555,1474 572,1470 L 572,1333 C 561,1335 548,1337 533,1339 518,1340 504,1341 492,1341 465,1341 444,1337 427,1330 410,1323 396,1312 387,1299 377,1285 370,1268 367,1248 363,1228 361,1205 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,446 282,394 294,347 305,299 323,258 348,224 372,189 403,163 441,144 479,125 525,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 1008,206 992,176 972,146 951,115 924,88 890,64 856,39 814,19 763,4 712,-12 650,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,649 100,735 125,806 150,876 185,933 229,977 273,1021 324,1053 383,1073 442,1092 504,1102 571,1102 662,1102 738,1087 799,1058 860,1029 909,988 946,937 983,885 1009,824 1025,754 1040,684 1048,608 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 538,969 507,964 474,955 441,945 410,928 382,903 354,878 330,845 311,803 292,760 281,706 278,641 L 862,641 Z"/>
+   <glyph unicode="d" horiz-adv-x="954" d="M 821,174 C 788,105 744,55 689,25 634,-5 565,-20 484,-20 347,-20 247,26 183,118 118,210 86,349 86,536 86,913 219,1102 484,1102 566,1102 634,1087 689,1057 744,1027 788,979 821,914 L 823,914 C 823,921 823,931 823,946 822,960 822,975 822,991 821,1006 821,1021 821,1035 821,1049 821,1059 821,1065 L 821,1484 1001,1484 1001,219 C 1001,193 1001,168 1002,143 1002,119 1002,97 1003,77 1004,57 1004,40 1005,26 1006,11 1006,4 1007,4 L 835,4 C 834,11 833,20 832,32 831,44 830,58 829,73 828,89 827,105 826,123 825,140 825,157 825,174 L 821,174 Z M 275,542 C 275,467 280,403 289,350 298,297 313,253 334,219 355,184 381,159 413,143 445,127 484,119 530,119 577,119 619,127 656,142 692,157 722,182 747,217 771,251 789,296 802,351 815,406 821,474 821,554 821,631 815,696 802,749 789,802 771,844 746,877 721,910 691,933 656,948 620,962 579,969 532,969 488,969 450,961 418,946 386,931 359,906 338,872 317,838 301,794 291,740 280,685 275,619 275,542 Z"/>
+   <glyph unicode="c" horiz-adv-x="875" d="M 275,546 C 275,484 280,427 289,375 298,323 313,278 334,241 355,203 384,174 419,153 454,132 497,122 548,122 612,122 666,139 709,173 752,206 778,258 788,328 L 970,328 C 964,283 951,239 931,197 911,155 884,118 850,86 815,54 773,28 724,9 675,-10 618,-20 553,-20 468,-20 396,-6 337,23 278,52 230,91 193,142 156,192 129,251 112,320 95,388 87,462 87,542 87,615 93,679 105,735 117,790 134,839 156,881 177,922 203,957 232,986 261,1014 293,1037 328,1054 362,1071 398,1083 436,1091 474,1098 512,1102 551,1102 612,1102 666,1094 713,1077 760,1060 801,1038 836,1009 870,980 898,945 919,906 940,867 955,824 964,779 L 779,765 C 770,825 746,873 708,908 670,943 616,961 546,961 495,961 452,953 418,936 383,919 355,893 334,859 313,824 298,781 289,729 280,677 275,616 275,546 Z"/>
+   <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,150 316,132 315,113 314,94 313,77 312,61 311,45 310,31 309,19 308,8 307,2 306,2 L 132,2 C 133,8 133,18 134,32 135,47 135,64 136,84 137,104 137,126 138,150 138,174 138,199 138,225 L 138,1484 318,1484 318,1061 C 318,1041 318,1022 318,1004 317,985 317,969 316,955 315,938 315,923 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,615 859,679 850,732 841,785 826,829 805,864 784,898 758,923 726,939 694,955 655,963 609,963 562,963 520,955 484,940 447,925 417,900 393,866 368,832 350,787 337,732 324,677 318,609 318,529 318,452 324,387 337,334 350,281 368,239 393,206 417,173 447,149 483,135 519,120 560,113 607,113 651,113 689,121 721,136 753,151 780,176 801,210 822,244 838,288 849,343 859,397 864,463 864,540 Z"/>
+   <glyph unicode="a" horiz-adv-x="1060" d="M 414,-20 C 305,-20 224,9 169,66 114,124 87,203 87,303 87,375 101,434 128,480 155,526 190,562 234,588 277,614 327,632 383,642 439,652 496,657 554,657 L 797,657 797,717 C 797,762 792,800 783,832 774,863 759,889 740,908 721,928 697,942 668,951 639,960 604,965 565,965 530,965 499,963 471,958 443,953 419,944 398,931 377,918 361,900 348,878 335,855 327,827 323,793 L 135,810 C 142,853 154,892 173,928 192,963 218,994 253,1020 287,1046 330,1066 382,1081 433,1095 496,1102 569,1102 705,1102 807,1071 876,1009 945,946 979,856 979,738 L 979,272 C 979,219 986,179 1000,152 1014,125 1041,111 1080,111 1090,111 1100,112 1110,113 1120,114 1130,116 1139,118 L 1139,6 C 1116,1 1094,-3 1072,-6 1049,-9 1025,-10 1000,-10 966,-10 937,-5 913,4 888,13 868,26 853,45 838,63 826,86 818,113 810,140 805,171 803,207 L 797,207 C 778,172 757,141 734,113 711,85 684,61 653,42 622,22 588,7 549,-4 510,-15 465,-20 414,-20 Z M 455,115 C 512,115 563,125 606,146 649,167 684,194 713,226 741,259 762,294 776,332 790,371 797,408 797,443 L 797,531 600,531 C 556,531 514,528 475,522 435,517 400,506 370,489 340,472 316,449 299,418 281,388 272,349 272,300 272,241 288,195 320,163 351,131 396,115 455,115 Z"/>
+   <glyph unicode="W" horiz-adv-x="1906" d="M 1511,0 L 1283,0 1039,895 C 1032,920 1024,950 1016,985 1007,1020 1000,1053 993,1084 985,1121 977,1158 969,1196 960,1157 952,1120 944,1083 937,1051 929,1018 921,984 913,950 905,920 898,895 L 652,0 424,0 9,1409 208,1409 461,514 C 472,472 483,430 494,389 504,348 513,311 520,278 529,239 537,203 544,168 554,214 564,259 575,304 580,323 584,342 589,363 594,384 599,404 604,424 609,444 614,463 619,482 624,500 628,517 632,532 L 877,1409 1060,1409 1305,532 C 1309,517 1314,500 1319,482 1324,463 1329,444 1334,425 1339,405 1343,385 1348,364 1353,343 1357,324 1362,305 1373,260 1383,215 1393,168 1394,168 1397,180 1402,203 1407,226 1414,254 1422,289 1430,324 1439,361 1449,402 1458,442 1468,479 1478,514 L 1727,1409 1926,1409 1511,0 Z"/>
+   <glyph unicode="S" horiz-adv-x="1139" d="M 1272,389 C 1272,330 1261,275 1238,225 1215,175 1179,132 1131,96 1083,59 1023,31 950,11 877,-10 790,-20 690,-20 515,-20 378,11 280,72 182,133 120,222 93,338 L 278,375 C 287,338 302,305 321,275 340,245 367,219 400,198 433,176 473,159 522,147 571,135 629,129 697,129 754,129 806,134 853,144 900,153 941,168 975,188 1009,208 1036,234 1055,266 1074,297 1083,335 1083,379 1083,425 1073,462 1052,491 1031,520 1001,543 963,562 925,581 880,596 827,609 774,622 716,635 652,650 613,659 573,668 534,679 494,689 456,701 420,716 383,730 349,747 317,766 285,785 257,809 234,836 211,863 192,894 179,930 166,965 159,1006 159,1053 159,1120 173,1177 200,1225 227,1272 264,1311 312,1342 360,1373 417,1395 482,1409 547,1423 618,1430 694,1430 781,1430 856,1423 918,1410 980,1396 1032,1375 1075,1348 1118,1321 1152,1287 1178,1247 1203,1206 1224,1159 1239,1106 L 1051,1073 C 1042,1107 1028,1137 1011,1164 993,1191 970,1213 941,1231 912,1249 878,1263 837,1272 796,1281 747,1286 692,1286 627,1286 572,1280 528,1269 483,1257 448,1241 421,1221 394,1201 374,1178 363,1151 351,1124 345,1094 345,1063 345,1021 356,987 377,960 398,933 426,910 462,892 498,874 540,859 587,847 634,835 685,823 738,811 781,801 825,791 868,781 911,770 952,758 991,744 1030,729 1067,712 1102,693 1136,674 1166,650 1191,622 1216,594 1236,561 1251,523 1265,485 1272,440 1272,389 Z"/>
+   <glyph unicode="P" horiz-adv-x="1086" d="M 1258,985 C 1258,924 1248,867 1228,814 1207,761 1177,715 1137,676 1096,637 1046,606 985,583 924,560 854,549 773,549 L 359,549 359,0 168,0 168,1409 761,1409 C 844,1409 917,1399 979,1379 1041,1358 1093,1330 1134,1293 1175,1256 1206,1211 1227,1159 1248,1106 1258,1048 1258,985 Z M 1066,983 C 1066,1072 1039,1140 984,1187 929,1233 847,1256 738,1256 L 359,1256 359,700 746,700 C 856,700 937,724 989,773 1040,822 1066,892 1066,983 Z"/>
+   <glyph unicode="L" horiz-adv-x="900" d="M 168,0 L 168,1409 359,1409 359,156 1071,156 1071,0 168,0 Z"/>
+   <glyph unicode="I" horiz-adv-x="186" d="M 189,0 L 189,1409 380,1409 380,0 189,0 Z"/>
+   <glyph unicode="F" horiz-adv-x="1006" d="M 359,1253 L 359,729 1145,729 1145,571 359,571 359,0 168,0 168,1409 1169,1409 1169,1253 359,1253 Z"/>
+   <glyph unicode="E" horiz-adv-x="1112" d="M 168,0 L 168,1409 1237,1409 1237,1253 359,1253 359,801 1177,801 1177,647 359,647 359,156 1278,156 1278,0 168,0 Z"/>
+   <glyph unicode="C" horiz-adv-x="1297" d="M 792,1274 C 712,1274 641,1261 580,1234 518,1207 466,1169 425,1120 383,1071 351,1011 330,942 309,873 298,796 298,711 298,626 310,549 333,479 356,408 389,348 432,297 475,246 527,207 590,179 652,151 722,137 800,137 855,137 905,144 950,159 995,173 1035,193 1072,219 1108,245 1140,276 1169,312 1198,347 1223,387 1245,430 L 1401,352 C 1376,299 1344,250 1307,205 1270,160 1226,120 1176,87 1125,54 1068,28 1005,9 941,-10 870,-20 791,-20 677,-20 577,-2 492,35 406,71 334,122 277,187 219,252 176,329 147,418 118,507 104,605 104,711 104,821 119,920 150,1009 180,1098 224,1173 283,1236 341,1298 413,1346 498,1380 583,1413 681,1430 790,1430 940,1430 1065,1401 1166,1342 1267,1283 1341,1196 1388,1081 L 1207,1021 C 1194,1054 1176,1086 1153,1117 1130,1147 1102,1174 1068,1197 1034,1220 994,1239 949,1253 903,1267 851,1274 792,1274 Z"/>
+   <glyph unicode="A" horiz-adv-x="1350" d="M 1167,0 L 1006,412 364,412 202,0 4,0 579,1409 796,1409 1362,0 1167,0 Z M 768,1026 C 757,1053 747,1080 738,1107 728,1134 719,1159 712,1182 705,1204 699,1223 694,1238 689,1253 686,1262 685,1265 684,1262 681,1252 676,1237 671,1222 665,1203 658,1180 650,1157 641,1132 632,1105 622,1078 612,1051 602,1024 L 422,561 949,561 768,1026 Z"/>
+   <glyph unicode="3" horiz-adv-x="980" d="M 1049,389 C 1049,324 1039,267 1018,216 997,165 966,123 926,88 885,53 835,26 776,8 716,-11 648,-20 571,-20 484,-20 410,-9 351,13 291,34 242,63 203,99 164,134 135,175 116,221 97,266 84,313 78,362 L 264,379 C 269,342 279,308 294,277 308,246 327,220 352,198 377,176 407,159 443,147 479,135 522,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,447 851,489 828,521 805,552 776,577 742,595 707,612 670,624 630,630 589,636 552,639 518,639 L 416,639 416,795 514,795 C 548,795 583,799 620,806 657,813 690,825 721,844 751,862 776,887 796,918 815,949 825,989 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 109,1125 126,1179 153,1225 180,1271 214,1309 255,1340 296,1370 342,1393 395,1408 448,1423 504,1430 563,1430 642,1430 709,1420 766,1401 823,1381 869,1354 905,1321 941,1287 968,1247 985,1202 1002,1157 1010,1108 1010,1057 1010,1016 1004,977 993,941 982,905 964,873 940,844 916,815 886,791 849,770 812,749 767,734 715,723 L 715,719 C 772,713 821,700 863,681 905,661 940,636 967,607 994,578 1015,544 1029,507 1042,470 1049,430 1049,389 Z"/>
+   <glyph unicode="0" horiz-adv-x="980" d="M 1059,705 C 1059,570 1046,456 1021,364 995,271 960,197 916,140 871,83 819,42 759,17 699,-8 635,-20 567,-20 498,-20 434,-8 375,17 316,42 264,82 221,139 177,196 143,270 118,363 93,455 80,569 80,705 80,847 93,965 118,1058 143,1151 177,1225 221,1280 265,1335 317,1374 377,1397 437,1419 502,1430 573,1430 640,1430 704,1419 763,1397 822,1374 873,1335 917,1280 961,1225 996,1151 1021,1058 1046,965 1059,847 1059,705 Z M 876,705 C 876,817 869,910 856,985 843,1059 823,1118 797,1163 771,1207 739,1238 702,1257 664,1275 621,1284 573,1284 522,1284 478,1275 439,1256 400,1237 368,1206 342,1162 315,1117 295,1058 282,984 269,909 262,816 262,705 262,597 269,506 283,432 296,358 316,299 343,254 369,209 401,176 439,157 477,137 520,127 569,127 616,127 659,137 697,157 735,176 767,209 794,254 820,299 840,358 855,432 869,506 876,597 876,705 Z"/>
+   <glyph unicode="." horiz-adv-x="186" d="M 187,0 L 187,219 382,219 382,0 187,0 Z"/>
+   <glyph unicode="-" horiz-adv-x="504" d="M 91,464 L 91,624 591,624 591,464 91,464 Z"/>
+   <glyph unicode="," horiz-adv-x="212" d="M 385,219 L 385,51 C 385,16 384,-16 381,-46 378,-74 373,-101 366,-127 359,-151 351,-175 342,-197 332,-219 320,-241 307,-262 L 184,-262 C 214,-219 237,-175 254,-131 270,-87 278,-43 278,0 L 190,0 190,219 385,219 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_2" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="bold" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="x" horiz-adv-x="1139" d="M 819,0 L 567,392 313,0 14,0 410,559 33,1082 336,1082 567,728 797,1082 1102,1082 725,562 1124,0 819,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1615" d="M 436,255 L 645,1082 946,1082 1153,255 1337,1082 1597,1082 1313,0 1016,0 797,882 571,0 274,0 -6,1082 258,1082 436,255 Z"/>
+   <glyph unicode="v" horiz-adv-x="1139" d="M 565,227 L 836,1082 1130,1082 731,0 395,0 8,1082 305,1082 565,227 Z"/>
+   <glyph unicode="t" horiz-adv-x="636" d="M 420,-18 C 337,-18 274,5 229,50 184,95 162,163 162,254 L 162,892 25,892 25,1082 176,1082 264,1336 440,1336 440,1082 645,1082 645,892 440,892 440,330 C 440,277 450,239 470,214 490,189 521,176 563,176 580,176 596,177 610,180 624,183 640,186 657,190 L 657,16 C 622,5 586,-4 547,-10 508,-15 466,-18 420,-18 Z"/>
+   <glyph unicode="s" horiz-adv-x="980" d="M 1055,316 C 1055,264 1044,217 1023,176 1001,135 969,100 928,71 887,42 836,19 776,4 716,-12 648,-20 571,-20 502,-20 440,-15 385,-5 330,5 281,22 240,45 198,68 163,97 135,134 107,171 86,216 72,270 L 319,307 C 327,277 338,253 352,234 366,215 383,201 404,191 425,181 449,174 477,171 504,168 536,166 571,166 603,166 633,168 661,172 688,175 712,182 733,191 753,200 769,212 780,229 791,245 797,265 797,290 797,318 789,340 773,357 756,373 734,386 706,397 677,407 644,416 606,424 567,431 526,440 483,450 438,460 393,472 349,486 305,500 266,519 231,543 196,567 168,598 147,635 126,672 115,718 115,775 115,826 125,872 145,913 165,953 194,987 233,1016 272,1044 320,1066 377,1081 434,1096 499,1103 573,1103 632,1103 686,1098 737,1087 788,1076 833,1058 873,1035 913,1011 947,981 974,944 1001,907 1019,863 1030,811 L 781,785 C 776,811 768,833 756,850 744,867 729,880 712,890 694,900 673,907 650,911 627,914 601,916 573,916 506,916 456,908 423,891 390,874 373,845 373,805 373,780 380,761 394,746 407,731 427,719 452,710 477,700 506,692 541,685 575,678 612,669 653,659 703,648 752,636 801,622 849,607 892,588 930,563 967,538 998,505 1021,466 1044,427 1055,377 1055,316 Z"/>
+   <glyph unicode="r" horiz-adv-x="662" d="M 143,0 L 143,833 C 143,856 143,881 143,907 142,933 142,958 141,982 140,1006 139,1027 138,1046 137,1065 136,1075 135,1075 L 403,1075 C 404,1067 406,1054 407,1035 408,1016 410,995 411,972 412,950 414,927 415,905 416,883 416,865 416,851 L 420,851 C 434,890 448,926 462,957 476,988 493,1014 512,1036 531,1057 553,1074 580,1086 607,1097 640,1103 679,1103 696,1103 712,1102 729,1099 745,1096 757,1092 766,1088 L 766,853 C 748,857 730,861 712,864 693,867 671,868 646,868 576,868 522,840 483,783 444,726 424,642 424,531 L 424,0 143,0 Z"/>
+   <glyph unicode="p" horiz-adv-x="1059" d="M 1167,546 C 1167,464 1159,388 1143,319 1126,250 1101,190 1067,140 1033,90 990,51 938,23 885,-6 823,-20 752,-20 720,-20 688,-17 657,-10 625,-3 595,8 566,23 537,38 511,57 487,82 462,106 441,136 424,172 L 418,172 C 419,169 419,160 420,147 421,134 421,118 422,101 423,83 423,64 424,45 424,25 424,7 424,-10 L 424,-425 143,-425 143,833 C 143,888 142,938 141,981 139,1024 137,1058 135,1082 L 408,1082 C 409,1077 411,1068 413,1055 414,1042 416,1026 417,1009 418,992 418,974 419,955 420,936 420,920 420,906 L 424,906 C 458,977 505,1028 564,1059 623,1090 692,1105 770,1105 839,1105 898,1091 948,1063 998,1035 1039,996 1072,947 1104,898 1128,839 1144,771 1159,702 1167,627 1167,546 Z M 874,546 C 874,669 855,761 818,821 781,880 725,910 651,910 623,910 595,904 568,893 540,881 515,861 494,833 472,804 454,766 441,719 427,671 420,611 420,538 420,467 427,409 440,362 453,315 471,277 493,249 514,221 539,201 566,190 593,178 621,172 649,172 685,172 717,179 745,194 773,208 797,230 816,261 835,291 849,330 859,377 869,424 874,481 874,546 Z"/>
+   <glyph unicode="o" horiz-adv-x="1086" d="M 1171,542 C 1171,459 1160,384 1137,315 1114,246 1079,187 1033,138 987,88 930,49 861,22 792,-6 712,-20 621,-20 533,-20 455,-6 388,21 321,48 264,87 219,136 173,185 138,245 115,314 92,383 80,459 80,542 80,623 91,697 114,766 136,834 170,893 215,943 260,993 317,1032 386,1060 455,1088 535,1102 627,1102 724,1102 807,1088 876,1060 945,1032 1001,993 1045,944 1088,894 1120,835 1141,767 1161,698 1171,623 1171,542 Z M 877,542 C 877,671 856,764 814,822 772,880 711,909 631,909 548,909 485,880 441,821 397,762 375,669 375,542 375,477 381,422 393,375 404,328 421,290 442,260 463,230 489,208 519,194 549,179 582,172 618,172 659,172 696,179 729,194 761,208 788,230 810,260 832,290 849,328 860,375 871,422 877,477 877,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="1006" d="M 844,0 L 844,607 C 844,649 841,688 834,723 827,758 816,788 801,813 786,838 766,857 741,871 716,885 686,892 651,892 617,892 586,885 559,870 531,855 507,833 487,806 467,778 452,745 441,707 430,668 424,626 424,580 L 424,0 143,0 143,845 C 143,868 143,892 143,917 142,942 142,966 141,988 140,1010 139,1031 138,1048 137,1066 136,1075 135,1075 L 403,1075 C 404,1067 406,1055 407,1038 408,1021 410,1002 411,981 412,961 414,940 415,919 416,899 416,881 416,867 L 420,867 C 458,950 506,1010 563,1047 620,1084 689,1103 768,1103 833,1103 889,1092 934,1071 979,1050 1015,1020 1044,983 1072,946 1092,902 1105,851 1118,800 1124,746 1124,687 L 1124,0 844,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="292" d="M 143,0 L 143,1484 424,1484 424,0 143,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="1033" d="M 834,0 L 545,490 424,406 424,0 143,0 143,1484 424,1484 424,634 810,1082 1112,1082 732,660 1141,0 834,0 Z"/>
+   <glyph unicode="i" horiz-adv-x="292" d="M 143,1277 L 143,1484 424,1484 424,1277 143,1277 Z M 143,0 L 143,1082 424,1082 424,0 143,0 Z"/>
+   <glyph unicode="g" horiz-adv-x="1060" d="M 596,-434 C 525,-434 462,-427 408,-413 353,-398 307,-378 269,-353 230,-327 200,-296 177,-261 154,-225 138,-186 129,-143 L 410,-110 C 420,-153 442,-187 475,-212 508,-237 551,-249 604,-249 637,-249 668,-244 696,-235 723,-226 747,-210 767,-188 786,-165 802,-136 813,-99 824,-62 829,-17 829,37 829,56 829,75 829,94 829,113 829,131 830,147 831,166 831,184 831,201 L 829,201 C 796,131 751,80 692,49 633,18 562,2 481,2 412,2 353,16 304,43 254,70 213,107 180,156 147,204 123,262 108,329 92,396 84,469 84,550 84,633 92,709 109,777 126,844 151,902 186,951 220,1000 263,1037 316,1064 368,1090 430,1103 502,1103 574,1103 639,1088 696,1057 753,1026 797,977 829,908 L 834,908 C 834,922 835,939 836,957 837,976 838,994 839,1011 840,1029 842,1044 844,1058 845,1071 847,1078 848,1078 L 1114,1078 C 1113,1054 1111,1020 1110,977 1109,934 1108,885 1108,829 L 1108,32 C 1108,-47 1097,-115 1074,-173 1051,-231 1018,-280 975,-318 931,-357 877,-386 814,-405 750,-424 677,-434 596,-434 Z M 831,556 C 831,624 824,681 811,726 798,771 780,808 759,835 738,862 713,882 686,893 658,904 630,910 602,910 566,910 534,903 507,889 479,875 455,853 436,824 417,795 402,757 392,712 382,667 377,613 377,550 377,433 396,345 433,286 470,227 526,197 600,197 628,197 656,203 684,214 711,225 736,244 758,272 780,299 798,336 811,382 824,428 831,486 831,556 Z"/>
+   <glyph unicode="f" horiz-adv-x="663" d="M 473,892 L 473,0 193,0 193,892 35,892 35,1082 193,1082 193,1195 C 193,1236 198,1275 208,1310 218,1345 235,1375 259,1401 283,1427 315,1447 356,1462 397,1477 447,1484 508,1484 540,1484 572,1482 603,1479 634,1476 661,1472 686,1468 L 686,1287 C 674,1290 661,1292 646,1294 631,1295 617,1296 604,1296 578,1296 557,1293 540,1288 523,1283 509,1275 500,1264 490,1253 483,1240 479,1224 475,1207 473,1188 473,1167 L 473,1082 686,1082 686,892 473,892 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 586,-20 C 508,-20 438,-8 376,15 313,38 260,73 216,120 172,167 138,226 115,297 92,368 80,451 80,546 80,649 94,736 122,807 149,878 187,935 234,979 281,1022 335,1054 396,1073 457,1092 522,1102 590,1102 675,1102 748,1087 809,1057 869,1027 918,986 957,932 996,878 1024,814 1042,739 1060,664 1069,582 1069,491 L 1069,491 375,491 C 375,445 379,402 387,363 395,323 408,289 426,261 444,232 467,209 496,193 525,176 559,168 600,168 649,168 690,179 721,200 752,221 775,253 788,297 L 1053,274 C 1041,243 1024,211 1003,176 981,141 952,110 916,81 880,52 835,28 782,9 728,-10 663,-20 586,-20 Z M 586,925 C 557,925 531,920 506,911 481,901 459,886 441,865 422,844 407,816 396,783 385,750 378,710 377,663 L 797,663 C 792,750 771,816 734,860 697,903 648,925 586,925 Z"/>
+   <glyph unicode="c" horiz-adv-x="1007" d="M 594,-20 C 508,-20 433,-7 369,20 304,47 251,84 208,133 165,182 133,240 112,309 91,377 80,452 80,535 80,625 92,705 115,776 138,846 172,905 216,954 260,1002 314,1039 379,1064 443,1089 516,1102 598,1102 668,1102 730,1092 785,1073 839,1054 886,1028 925,995 964,963 996,924 1021,879 1045,834 1062,786 1071,734 L 788,734 C 780,787 760,830 728,861 696,893 651,909 592,909 517,909 462,878 427,816 392,754 375,664 375,546 375,297 449,172 596,172 649,172 694,188 730,221 766,253 788,302 797,366 L 1079,366 C 1072,315 1057,267 1034,220 1010,174 978,133 938,97 897,62 848,33 791,12 734,-9 668,-20 594,-20 Z"/>
+   <glyph unicode="a" horiz-adv-x="1112" d="M 393,-20 C 341,-20 295,-13 254,2 213,16 178,37 149,65 120,93 98,127 83,168 68,208 60,255 60,307 60,371 71,425 94,469 116,513 146,548 185,575 224,602 269,622 321,634 373,647 428,653 487,653 L 720,653 720,709 C 720,748 717,782 710,808 703,835 692,857 679,873 666,890 649,902 630,909 610,916 587,920 562,920 539,920 518,918 500,913 481,909 465,901 452,890 439,879 428,864 420,845 411,826 405,803 402,774 L 109,774 C 117,822 132,866 153,906 174,946 204,981 242,1010 279,1039 326,1062 381,1078 436,1094 500,1102 574,1102 641,1102 701,1094 754,1077 807,1060 851,1036 888,1003 925,970 953,929 972,881 991,833 1001,777 1001,714 L 1001,320 C 1001,295 1002,272 1005,252 1007,232 1011,215 1018,202 1024,188 1033,178 1045,171 1056,164 1071,160 1090,160 1111,160 1132,162 1152,166 L 1152,14 C 1135,10 1120,6 1107,3 1094,0 1080,-3 1067,-5 1054,-7 1040,-9 1025,-10 1010,-11 992,-12 972,-12 901,-12 849,5 816,40 782,75 762,126 755,193 L 749,193 C 712,126 664,73 606,36 547,-1 476,-20 393,-20 Z M 720,499 L 576,499 C 546,499 518,497 491,493 464,490 440,482 420,470 399,459 383,442 371,420 359,397 353,367 353,329 353,277 365,239 389,214 412,189 444,176 483,176 519,176 552,184 581,199 610,214 635,234 656,259 676,284 692,312 703,345 714,377 720,411 720,444 L 720,499 Z"/>
+   <glyph unicode="S" horiz-adv-x="1218" d="M 1286,406 C 1286,342 1274,284 1251,232 1228,179 1192,134 1143,97 1094,60 1031,31 955,11 878,-10 787,-20 682,-20 589,-20 506,-12 435,5 364,22 303,46 252,79 201,112 159,152 128,201 96,249 73,304 59,367 L 344,414 C 352,383 364,354 379,328 394,302 416,280 443,261 470,242 503,227 544,217 584,206 633,201 690,201 790,201 867,216 920,247 973,277 999,324 999,389 999,428 988,459 967,484 946,509 917,529 882,545 847,561 806,574 760,585 714,596 666,606 616,616 576,625 536,635 496,645 456,655 418,667 382,681 345,695 311,712 280,731 249,750 222,774 199,803 176,831 158,864 145,902 132,940 125,985 125,1036 125,1106 139,1166 167,1216 195,1266 234,1307 284,1339 333,1370 392,1393 461,1408 530,1423 605,1430 686,1430 778,1430 857,1423 923,1409 988,1394 1043,1372 1088,1343 1132,1314 1167,1277 1193,1233 1218,1188 1237,1136 1249,1077 L 963,1038 C 948,1099 919,1144 874,1175 829,1206 764,1221 680,1221 628,1221 585,1217 551,1208 516,1199 489,1186 469,1171 448,1156 434,1138 425,1118 416,1097 412,1076 412,1053 412,1018 420,990 437,968 454,945 477,927 507,912 537,897 573,884 615,874 656,863 702,853 752,842 796,833 840,823 883,813 926,802 968,790 1007,776 1046,762 1083,745 1117,725 1151,705 1181,681 1206,652 1231,623 1250,588 1265,548 1279,508 1286,461 1286,406 Z"/>
+   <glyph unicode="I" horiz-adv-x="292" d="M 137,0 L 137,1409 432,1409 432,0 137,0 Z"/>
+   <glyph unicode="E" horiz-adv-x="1139" d="M 137,0 L 137,1409 1245,1409 1245,1181 432,1181 432,827 1184,827 1184,599 432,599 432,228 1286,228 1286,0 137,0 Z"/>
+   <glyph unicode=")" horiz-adv-x="583" d="M 2,-425 C 55,-347 101,-270 139,-196 177,-120 208,-44 233,33 257,110 275,190 286,272 297,353 303,439 303,530 303,620 297,706 286,788 275,869 257,949 233,1026 208,1103 177,1180 139,1255 101,1330 55,1407 2,1484 L 283,1484 C 334,1410 379,1337 416,1264 453,1191 484,1116 509,1039 533,962 551,882 563,799 574,716 580,626 580,531 580,436 574,347 563,264 551,180 533,99 509,22 484,-55 453,-131 416,-204 379,-277 334,-351 283,-425 L 2,-425 Z"/>
+   <glyph unicode="(" horiz-adv-x="583" d="M 399,-425 C 348,-351 303,-277 266,-204 229,-131 198,-55 174,22 149,99 131,180 120,264 108,347 102,436 102,531 102,626 108,716 120,799 131,882 149,962 174,1039 198,1116 229,1191 266,1264 303,1337 348,1410 399,1484 L 680,1484 C 627,1407 581,1330 543,1255 505,1180 474,1103 450,1026 425,949 407,869 396,788 385,706 379,620 379,530 379,439 385,353 396,272 407,190 425,110 450,33 474,-44 505,-120 543,-196 581,-270 627,-347 680,-425 L 399,-425 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs class="TextShapeIndex">
+  <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23 id24 id25 id26 id27 id28 id29 id30 id31 id32 id33 id34 id35 id36 id37 id38 id39 id40 id41 id42"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+  <g id="bullet-char-template(57356)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+  </g>
+  <g id="bullet-char-template(57354)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+  </g>
+  <g id="bullet-char-template(10146)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10132)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10007)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+  </g>
+  <g id="bullet-char-template(10004)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+  </g>
+  <g id="bullet-char-template(9679)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+  </g>
+  <g id="bullet-char-template(8226)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+  </g>
+  <g id="bullet-char-template(8211)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+  </g>
+  <g id="bullet-char-template(61548)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+  </g>
+ </defs>
+ <defs class="TextEmbeddedBitmaps"/>
+ <g>
+  <g id="id2" class="Master_Slide">
+   <g id="bg-id2" class="Background"/>
+   <g id="bo-id2" class="BackgroundObjects"/>
+  </g>
+ </g>
+ <g class="SlideGroup">
+  <g>
+   <g id="container-id1">
+    <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+     <g class="Page">
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id3">
+        <rect class="BoundingBox" stroke="none" fill="none" x="16493" y="6587" width="2416" height="2289"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 16494,6588 L 18907,8874"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id4">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13572" y="1506" width="2036" height="1909"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,1507 C 15165,1507 15605,1919 15605,2459 15605,2999 15165,3412 14589,3412 14013,3412 13573,2999 13573,2459 13573,1919 14013,1507 14589,1507 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,1507 C 15165,1507 15605,1919 15605,2459 15605,2999 15165,3412 14589,3412 14013,3412 13573,2999 13573,2459 13573,1919 14013,1507 14589,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 14258,2005 C 14311,2005 14352,2076 14352,2169 14352,2262 14311,2333 14258,2333 14205,2333 14165,2262 14165,2169 14165,2076 14205,2005 14258,2005 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14258,2005 C 14311,2005 14352,2076 14352,2169 14352,2262 14311,2333 14258,2333 14205,2333 14165,2262 14165,2169 14165,2076 14205,2005 14258,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 14916,2005 C 14969,2005 15010,2076 15010,2169 15010,2262 14969,2333 14916,2333 14863,2333 14823,2262 14823,2169 14823,2076 14863,2005 14916,2005 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14916,2005 C 14969,2005 15010,2076 15010,2169 15010,2262 14969,2333 14916,2333 14863,2333 14823,2262 14823,2169 14823,2076 14863,2005 14916,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14031,2787 C 14389,3141 14789,3141 15147,2787"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id5">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7349" y="1506" width="2036" height="1909"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 8366,1507 C 8942,1507 9382,1919 9382,2459 9382,2999 8942,3412 8366,3412 7790,3412 7350,2999 7350,2459 7350,1919 7790,1507 8366,1507 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8366,1507 C 8942,1507 9382,1919 9382,2459 9382,2999 8942,3412 8366,3412 7790,3412 7350,2999 7350,2459 7350,1919 7790,1507 8366,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 8035,2005 C 8088,2005 8129,2076 8129,2169 8129,2262 8088,2333 8035,2333 7982,2333 7942,2262 7942,2169 7942,2076 7982,2005 8035,2005 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8035,2005 C 8088,2005 8129,2076 8129,2169 8129,2262 8088,2333 8035,2333 7982,2333 7942,2262 7942,2169 7942,2076 7982,2005 8035,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 8693,2005 C 8746,2005 8787,2076 8787,2169 8787,2262 8746,2333 8693,2333 8640,2333 8600,2262 8600,2169 8600,2076 8640,2005 8693,2005 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8693,2005 C 8746,2005 8787,2076 8787,2169 8787,2262 8746,2333 8693,2333 8640,2333 8600,2262 8600,2169 8600,2076 8640,2005 8693,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7808,2787 C 8166,3141 8566,3141 8924,2787"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id6">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12682" y="5570" width="4194" height="1400"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14779,6968 L 12683,6968 12683,5571 16874,5571 16874,6968 14779,6968 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14779,6968 L 12683,6968 12683,5571 16874,5571 16874,6968 14779,6968 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="13528" y="6441"><tspan fill="rgb(0,0,0)" stroke="none">Workbench</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id7">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5824" y="8618" width="4194" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7921,10270 L 5825,10270 5825,8619 10016,8619 10016,10270 7921,10270 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7921,10270 L 5825,10270 5825,8619 10016,8619 10016,10270 7921,10270 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="6784" y="9339"><tspan fill="rgb(0,0,0)" stroke="none">keepproxy</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="6850" y="9894"><tspan fill="rgb(0,0,0)" stroke="none">keep-web</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id8">
+        <rect class="BoundingBox" stroke="none" fill="none" x="22080" y="8492" width="4194" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 24177,10271 L 22081,10271 22081,8493 26272,8493 26272,10271 24177,10271 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24177,10271 L 22081,10271 22081,8493 26272,8493 26272,10271 24177,10271 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="22856" y="9554"><tspan fill="rgb(0,0,0)" stroke="none">arv-git-httpd</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id9">
+        <rect class="BoundingBox" stroke="none" fill="none" x="17635" y="8492" width="4194" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 19732,10271 L 17636,10271 17636,8493 21827,8493 21827,10271 19732,10271 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19732,10271 L 17636,10271 17636,8493 21827,8493 21827,10271 19732,10271 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="19008" y="9554"><tspan fill="rgb(0,0,0)" stroke="none">arv-ws</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id10">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5825" y="15730" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7604,18144 L 5826,18144 5826,15731 9382,15731 9382,18144 7604,18144 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7604,18144 L 5826,18144 5826,15731 9382,15731 9382,18144 7604,18144 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id11">
+        <rect class="BoundingBox" stroke="none" fill="none" x="6079" y="16111" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7858,18525 L 6080,18525 6080,16112 9636,16112 9636,18525 7858,18525 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7858,18525 L 6080,18525 6080,16112 9636,16112 9636,18525 7858,18525 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id12">
+        <rect class="BoundingBox" stroke="none" fill="none" x="6460" y="16492" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 8239,18906 L 6461,18906 6461,16493 10017,16493 10017,18906 8239,18906 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8239,18906 L 6461,18906 6461,16493 10017,16493 10017,18906 8239,18906 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="7149" y="17871"><tspan fill="rgb(0,0,0)" stroke="none">keepstore</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id13">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12556" y="15730" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14335,18144 L 12557,18144 12557,15731 16113,15731 16113,18144 14335,18144 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14335,18144 L 12557,18144 12557,15731 16113,15731 16113,18144 14335,18144 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id14">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12810" y="16111" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,18525 L 12811,18525 12811,16112 16367,16112 16367,18525 14589,18525 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,18525 L 12811,18525 12811,16112 16367,16112 16367,18525 14589,18525 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id15">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13191" y="16492" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14970,18906 L 13192,18906 13192,16493 16748,16493 16748,18906 14970,18906 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14970,18906 L 13192,18906 13192,16493 16748,16493 16748,18906 14970,18906 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="13671" y="17871"><tspan fill="rgb(0,0,0)" stroke="none">compute0...</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id16">
+        <rect class="BoundingBox" stroke="none" fill="none" x="15477" y="10143" width="5972" height="5972"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 15478,10144 L 21447,16113"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id17">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="6968" width="3" height="1527"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,6969 L 14589,8493"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id18">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7984" y="10270" width="3" height="5464"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 7985,10271 L 7985,15732"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id19">
+        <rect class="BoundingBox" stroke="none" fill="none" x="10016" y="17382" width="2543" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 10017,17383 L 12557,17383"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id20">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12047" y="13064" width="5210" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14652,14843 L 12048,14843 12048,13065 17255,13065 17255,14843 14652,14843 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14652,14843 L 12048,14843 12048,13065 17255,13065 17255,14843 14652,14843 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="12209" y="14126"><tspan fill="rgb(0,0,0)" stroke="none">crunch-dispatch-slurm</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id21">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="10143" width="3" height="2924"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,10144 L 14589,13065"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id22">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="14842" width="3" height="892"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,14843 L 14589,15732"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id23">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1582" y="12123" width="24872" height="107"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 1635,12176 L 1844,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 1978,12176 L 2187,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 2322,12176 L 2531,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 2665,12176 L 2874,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3009,12176 L 3218,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3352,12176 L 3561,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3696,12176 L 3904,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4039,12176 L 4248,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4383,12176 L 4591,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4726,12176 L 4935,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5069,12176 L 5278,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5413,12176 L 5622,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5756,12176 L 5965,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6100,12176 L 6309,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6443,12176 L 6652,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6787,12176 L 6995,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7130,12176 L 7339,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7473,12176 L 7682,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7817,12176 L 8026,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8160,12176 L 8369,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8504,12176 L 8713,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8847,12176 L 9056,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9191,12176 L 9399,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9534,12176 L 9743,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9878,12176 L 10086,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10221,12176 L 10430,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10564,12176 L 10773,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10908,12176 L 11117,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11251,12176 L 11460,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11595,12176 L 11804,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11938,12176 L 12147,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12282,12176 L 12490,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12625,12176 L 12834,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12969,12176 L 13177,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13312,12176 L 13521,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13655,12176 L 13864,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13999,12176 L 14208,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 14342,12176 L 14551,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 14686,12176 L 14895,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15029,12176 L 15238,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15373,12176 L 15581,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15716,12176 L 15925,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16059,12176 L 16268,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16403,12176 L 16612,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16746,12176 L 16955,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17090,12176 L 17299,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17433,12176 L 17642,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17777,12176 L 17986,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18120,12176 L 18329,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18464,12176 L 18672,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18807,12176 L 19016,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19150,12176 L 19359,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19494,12176 L 19703,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19837,12176 L 20046,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20181,12176 L 20390,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20524,12176 L 20733,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20868,12176 L 21076,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21211,12176 L 21420,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21555,12176 L 21763,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21898,12176 L 22107,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22241,12176 L 22450,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22585,12176 L 22794,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22928,12176 L 23137,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23272,12176 L 23481,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23615,12176 L 23824,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23959,12176 L 24167,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24302,12176 L 24511,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24645,12176 L 24854,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24989,12176 L 25198,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 25332,12176 L 25541,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 25676,12176 L 25885,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 26019,12176 L 26228,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 26363,12176 L 26400,12176"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id24">
+        <rect class="BoundingBox" stroke="none" fill="none" x="16366" y="9381" width="1273" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 16367,9382 L 17637,9382"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id25">
+        <rect class="BoundingBox" stroke="none" fill="none" x="22462" y="12936" width="3306" height="2417"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 L 22463,15049 C 22463,15213 23213,15351 24114,15351 25015,15351 25766,15213 25766,15049 L 25766,13238 C 25766,13074 25015,12937 24114,12937 L 24114,12937 Z M 22463,12937 L 22463,12937 Z M 25766,15351 L 25766,15351 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 L 22463,15049 C 22463,15213 23213,15351 24114,15351 25015,15351 25766,15213 25766,15049 L 25766,13238 C 25766,13074 25015,12937 24114,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22463,12937 L 22463,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 25766,15351 L 25766,15351 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 22463,13403 23213,13540 24114,13540 25015,13540 25766,13403 25766,13238 25766,13074 25015,12937 24114,12937 L 24114,12937 Z M 22463,12937 L 22463,12937 Z M 25766,15351 L 25766,15351 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 22463,13403 23213,13540 24114,13540 25015,13540 25766,13403 25766,13238 25766,13074 25015,12937 24114,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22463,12937 L 22463,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 25766,15351 L 25766,15351 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="23162" y="14466"><tspan fill="rgb(0,0,0)" stroke="none">git repos</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id26">
+        <rect class="BoundingBox" stroke="none" fill="none" x="23986" y="10270" width="3" height="2670"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 23987,10271 L 23987,12938"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id27">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="4301" width="3" height="1273"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,4302 L 14589,5572"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id28">
+        <rect class="BoundingBox" stroke="none" fill="none" x="9381" y="4809" width="3432" height="3686"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 12811,8493 L 9382,4810"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id29">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7984" y="4809" width="3" height="3813"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 7985,8620 L 7985,4810"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id30">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7350" y="3666" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="7956" y="4105"><tspan fill="rgb(0,0,0)" stroke="none">CLI user</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id31">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13319" y="3539" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="13831" y="3978"><tspan fill="rgb(0,0,0)" stroke="none">Web user</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id32">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5445" y="10651" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="5502" y="11090"><tspan fill="rgb(0,0,0)" stroke="none">Storage access</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id33">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1254" y="10524" width="2541" height="1398"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1783" y="10950"><tspan fill="rgb(0,0,0)" stroke="none">External </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1957" y="11344"><tspan fill="rgb(0,0,0)" stroke="none">facing </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1824" y="11738"><tspan fill="rgb(0,0,0)" stroke="none">services</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id34">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1123" y="12556" width="2811" height="1398"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1889" y="12982"><tspan fill="rgb(0,0,0)" stroke="none">Internal</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1756" y="13376"><tspan fill="rgb(0,0,0)" stroke="none">Services </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1106" y="13770"><tspan fill="rgb(0,0,0)" stroke="none">(private network)</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id35">
+        <rect class="BoundingBox" stroke="none" fill="none" x="17636" y="10525" width="3938" height="1017"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="17792" y="10957"><tspan fill="rgb(0,0,0)" stroke="none">Publish change events </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="18294" y="11351"><tspan fill="rgb(0,0,0)" stroke="none">over websockets</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id36">
+        <rect class="BoundingBox" stroke="none" fill="none" x="11508" y="10271" width="2855" height="1525"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11492" y="10760"><tspan fill="rgb(0,0,0)" stroke="none">Storage metadata,</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11801" y="11154"><tspan fill="rgb(0,0,0)" stroke="none">Compute jobs,</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11977" y="11548"><tspan fill="rgb(0,0,0)" stroke="none">Permissions</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id37">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5444" y="19033" width="5462" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="5526" y="19472"><tspan fill="rgb(0,0,0)" stroke="none">Content-addressed object storage</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id38">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12811" y="19033" width="4065" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="13074" y="19472"><tspan fill="rgb(0,0,0)" stroke="none">Elastic compute nodes</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id39">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1000" y="1127" width="5843" height="2033"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1190" y="2008"><tspan fill="rgb(0,0,0)" stroke="none">An Arvados cluster </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1595" y="2719"><tspan fill="rgb(0,0,0)" stroke="none">From 30000 feet</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id40">
+        <rect class="BoundingBox" stroke="none" fill="none" x="19795" y="15985" width="3814" height="3306"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 L 19796,18876 C 19796,19101 20662,19289 21701,19289 22740,19289 23607,19101 23607,18876 L 23607,16398 C 23607,16173 22740,15986 21701,15986 L 21701,15986 Z M 19796,15986 L 19796,15986 Z M 23607,19289 L 23607,19289 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 L 19796,18876 C 19796,19101 20662,19289 21701,19289 22740,19289 23607,19101 23607,18876 L 23607,16398 C 23607,16173 22740,15986 21701,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19796,15986 L 19796,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 23607,19289 L 23607,19289 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 19796,16624 20662,16811 21701,16811 22740,16811 23607,16624 23607,16398 23607,16173 22740,15986 21701,15986 L 21701,15986 Z M 19796,15986 L 19796,15986 Z M 23607,19289 L 23607,19289 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 19796,16624 20662,16811 21701,16811 22740,16811 23607,16624 23607,16398 23607,16173 22740,15986 21701,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19796,15986 L 19796,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 23607,19289 L 23607,19289 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="20377" y="18015"><tspan fill="rgb(0,0,0)" stroke="none">Postgres db</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id41">
+        <rect class="BoundingBox" stroke="none" fill="none" x="10016" y="9381" width="2924" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 10017,9382 L 12938,9382"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id42">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12810" y="8491" width="3559" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,10143 L 12811,10143 12811,8492 16367,8492 16367,10143 14589,10143 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,10143 L 12811,10143 12811,8492 16367,8492 16367,10143 14589,10143 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="14189" y="9489"><tspan fill="rgb(0,0,0)" stroke="none">API</tspan></tspan></tspan></text>
+       </g>
+      </g>
+     </g>
+    </g>
+   </g>
+  </g>
+ </g>
+</svg>
\ No newline at end of file
index 22e74ac561826f59b80380ceb338cc83d1d79092..ba5f433d29483c85240837af37c4d470dac0a98d 100644 (file)
@@ -44,29 +44,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
     </div>
     <div class="col-sm-6" style="border-left: solid; border-width: 1px">
-      <p><strong>Quickstart</strong> 
-      <p>
-        Try any pipeline from the <a href="https://cloud.curoverse.com/projects/public">list of public pipelines</a>. For instance, the <a href="http://curover.se/pathomap">Pathomap Pipeline</a> links to these <a href="https://dev.arvados.org/projects/arvados/wiki/pathomap_tutorial/">step-by-step instructions</a> for trying Arvados out right in your browser using Curoverse's <a href="http://lp.curoverse.com/beta-signup/">public Arvados instance</a>.
-      </p>
-        <!--<p>-->
-      <!--<ol>-->
-         <!--<li>-->
-           <!--Go to <a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}/</a>-->
-        <!--</li><li>-->
-          <!--Register with any Google account-->
-        <!--</li><li>-->
-        <!--Follow the Getting Started guide-->
-        <!--<br>-->
-        <!--<em>Tip: Don't see the guide? You can find it by clicking (in the upper-right corner) <span class="fa fa-lg fa-question-circle"></span> &gt; Getting Started)</em>-->
-        <!--</li>-->
-      <!--</ol>-->
-      <!--</p>-->
-      <p><strong>
-        Pipeline Developer Quickstart
-      </strong></p>
-      <p>
-      Want to port your pipeline to Arvados? Check out the step-by-step <a href="https://dev.arvados.org/projects/arvados/wiki/Port_a_Pipeline">Port-a-Pipeline</a> guide on the Arvados wiki.
-      </p>
       <p><strong>More in-depth guides
       </strong></p>
       <!--<p>-->
@@ -78,11 +55,17 @@ SPDX-License-Identifier: CC-BY-SA-3.0
       <p>
         <a href="{{ site.baseurl }}/sdk/index.html">SDK Reference</a> &mdash; Details about the accessing Arvados from various programming languages.
       </p>
+      <p>
+        <a href="{{ site.baseurl }}/architecture/index.html">Arvados Architecture</a> &mdash; Details about the the Arvados components and architecture.
+      </p>
       <p>
         <a href="{{ site.baseurl }}/api/index.html">API Reference</a> &mdash; Details about the the Arvados REST API.
       </p>
       <p>
-        <a href="{{ site.baseurl }}/install/index.html">Install Guide</a> &mdash; How to install Arvados on a cloud platform.
+        <a href="{{ site.baseurl }}/admin/index.html">Admin Guide</a> &mdash; Details about administering an Arvados cluster.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/install/index.html">Install Guide</a> &mdash; How to install Arvados.
       </p>
     </div>
   </div>
diff --git a/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid b/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
new file mode 100644 (file)
index 0000000..88b2d57
--- /dev/null
@@ -0,0 +1,62 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Google Kubernetes Engine
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+
+h3. Install tooling
+
+Install @gcloud@:
+
+* Follow the instructions at "https://cloud.google.com/sdk/downloads":https://cloud.google.com/sdk/downloads
+
+Install @kubectl@:
+
+<pre>
+$ gcloud components install kubectl
+</pre>
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Boot the GKE cluster
+
+This can be done via the "cloud console":https://console.cloud.google.com/kubernetes/ or via the command line:
+
+<pre>
+$ gcloud container clusters create <CLUSTERNAME> --zone us-central1-a --machine-type n1-standard-2 --cluster-version 1.10
+</pre>
+
+It takes a few minutes for the cluster to be initialized.
+
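+You can optionally check on the cluster's status while it boots:
+
+<pre>
+$ gcloud container clusters list
+</pre>
+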
+h3. Reserve a static IP
+
+Reserve a "static IP":https://console.cloud.google.com/networking/addresses in GCE. Make sure the IP is in the same region as your GKE cluster, and is of the "Regional" type.
+
+h3. Connect to the GKE cluster
+
+Via the web:
+* Click the "Connect" button next to your "GKE cluster"https://console.cloud.google.com/kubernetes/.
+* Execute the "Command-line access" command on your development machine.
+
+Alternatively, use this command:
+
+<pre>
+$ gcloud container clusters get-credentials <CLUSTERNAME> --zone us-central1-a --project <YOUR-PROJECT>
+</pre>
+
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid b/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
new file mode 100644 (file)
index 0000000..132b443
--- /dev/null
@@ -0,0 +1,34 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Minikube
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+
+h3. Install tooling
+
+Install @kubectl@:
+
+* Follow the instructions at "https://kubernetes.io/docs/tasks/tools/install-kubectl/":https://kubernetes.io/docs/tasks/tools/install-kubectl/
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Install Minikube
+
+Follow the instructions at "https://kubernetes.io/docs/setup/minikube/":https://kubernetes.io/docs/setup/minikube/
+
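+Then start a local cluster. A minimal sketch (the resource flags are illustrative; size them for your machine):
+
+<pre>
+$ minikube start --cpus 2 --memory 4096
+</pre>
+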
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes.html.textile.liquid b/doc/install/arvados-on-kubernetes.html.textile.liquid
new file mode 100644 (file)
index 0000000..01999f0
--- /dev/null
@@ -0,0 +1,133 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados on Kubernetes is implemented as a Helm Chart.
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+h2(#overview). Overview
+
+This Helm Chart provides a basic, small Arvados cluster.
+
+Current limitations, to be addressed in the future:
+
+* An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down.
+* No dynamic scaling of compute nodes (but you can adjust @values.yaml@ and "reload the Helm Chart":#reload)
+* All compute nodes are the same size
+* Compute nodes have no CPU/memory/disk constraints yet
+* No git server
+
+h2. Requirements
+
+* Kubernetes 1.10+ cluster with at least 3 nodes, 2 or more cores per node
+* @kubectl@ and @helm@ installed locally, and able to connect to your Kubernetes cluster
+
+If you do not have a Kubernetes cluster already set up, you can use "Google Kubernetes Engine":/install/arvados-on-kubernetes-GKE.html for multi-node development and testing or "another Kubernetes solution":https://kubernetes.io/docs/setup/pick-right-solution/. Minikube is not supported yet.
+
+h2(#helm). Initialize helm on the Kubernetes cluster
+
+If you already have helm running on the Kubernetes cluster, proceed directly to "Start the Arvados cluster":#Start below.
+
+<pre>
+$ helm init
+$ kubectl create serviceaccount --namespace kube-system tiller
+$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+$ kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+</pre>
+
+Test @helm@ by running
+
+<pre>
+$ helm ls
+</pre>
+
+There should be no errors; since no releases are installed yet, the command returns no output.
+
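+If @helm ls@ reports a connection error instead, confirm that the tiller pod is running (a generic check; the pod name carries a generated suffix):
+
+<pre>
+$ kubectl get pods --namespace kube-system | grep tiller
+</pre>
+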
+h2(#git). Clone the repository
+
+Clone the repository and navigate to the @arvados-kubernetes/charts/arvados@ directory:
+
+<pre>
+$ git clone https://github.com/curoverse/arvados-kubernetes.git
+$ cd arvados-kubernetes/charts/arvados
+</pre>
+
+h2(#Start). Start the Arvados cluster
+
+Next, determine the IP address that the Arvados cluster will use to expose its API, Workbench, etc. If you want this Arvados cluster to be reachable from places other than the local machine, the IP address will need to be routable as appropriate.
+
+<pre>
+$ ./cert-gen.sh <IP ADDRESS>
+</pre>
+
+The @values.yaml@ file contains a number of variables that can be modified. At a minimum, review and/or modify the values for
+
+<pre>
+  adminUserEmail
+  adminUserPassword
+  superUserSecret
+  anonymousUserSecret
+</pre>
+
+Now start the Arvados cluster:
+
+<pre>
+$ helm install --name arvados . --set externalIP=<IP ADDRESS>
+</pre>
+
+At this point, you can use @kubectl@ to see the Arvados cluster boot:
+
+<pre>
+$ kubectl get pods
+$ kubectl get svc
+</pre>
+
+After a few minutes, you can access Arvados Workbench at the IP address specified
+
+* https://&lt;IP ADDRESS&gt;
+
+with the username and password specified in the @values.yaml@ file.
+
+Alternatively, use the Arvados CLI tools or SDKs:
+
+Set the environment variables:
+
+<pre>
+$ export ARVADOS_API_TOKEN=<superUserSecret from values.yaml>
+$ export ARVADOS_API_HOST=<IP ADDRESS>:444
+$ export ARVADOS_API_HOST_INSECURE=true
+</pre>
+
+Test access with:
+
+<pre>
+$ arv user current
+</pre>
+
+h2(#reload). Reload
+
+If you make changes to the Helm Chart (e.g. to @values.yaml@), you can reload Arvados with
+
+<pre>
+$ helm upgrade arvados .
+</pre>
+
+h2. Shut down
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and <strong>all data stored on the Arvados cluster will be deleted</strong> when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+<pre>
+$ helm del arvados --purge
+</pre>
index bc9d164a083c5cd7de8b7aa0efad001a8b131de5..afff1f45424ca9e29272afe158782dcc09c597fb 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
-navsection: installguide
-title: Cheat Sheet
+navsection: admin
+title: User management
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
index 4ab6fcf5fa9bf3de1802a6f5e971ea3b8e746da0..8a0e7bfa077743b30329cd619ef4da6d1228c172 100644 (file)
@@ -9,11 +9,19 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-As an alternative to local and network-attached POSIX filesystems, Keepstore can store data in an Azure Storage container.
+Keepstore can store data in one or more Azure Storage containers.
 
-h2. Create a container
+h2. Set up VMs and Storage Accounts
 
-Normally, all keepstore services are configured to share a single Azure Storage container.
+Before configuring individual keepstore servers, it is good to have an idea of the keepstore servers' final layout. One key decision is the number of servers and the type of VM to run. Azure may change the bandwidth capacity of each VM type over time. Empirical saturation tests have shown that bandwidth is roughly proportional to the number of cores, with some exceptions. As a rule of thumb, it is better to invest resources in more cores than in memory or IOPS.
+
+Another decision is how many VMs should run keepstore. For example, there could be 8 VMs with one core each, one VM with 8 cores, or anything in between. Assuming the cloud resource cost is the same, distributing across more VMs reduces the impact of a faulty VM. The recommendation is to start with 2 VMs, each with at least 2 cores, and expand in pairs. The total number of VMs will be a function of the budget and of the pipeline traffic, sized to avoid saturation during periods of high usage. The Standard D v3 family is a balanced choice, making Standard_D2_v3 the 2-core option.
+
+There are many options for storage accounts; see the "Azure storage introduction":https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction for details. The storage type and access tier will be a function of the budget and the desired responsiveness. A balanced option is a general-purpose Standard storage account using Blob storage with the hot access tier.
+
+Keepstore can be configured to reflect the level of underlying redundancy the storage provides; this is called the data replication option. For example, LRS (Locally Redundant Storage) keeps 3 copies of the data. The desired redundancy can be provided either at the keepstore layer or at the storage account layer. Where the redundancy is handled, and which storage account data replication type to use (LRS, ZRS, GRS, or RA-GRS), involves trade-offs. Please read "Azure storage redundancy":https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy and decide what is best for your needs.
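+
+For example, a general-purpose storage account with LRS replication and the hot access tier can be created with the current @az@ CLI (the account and resource group names below are placeholders):
+
+<pre>
+$ az storage account create --name examplestorageacct --resource-group example-rg \
+    --sku Standard_LRS --kind StorageV2 --access-tier Hot
+</pre>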
+
+h2. Create a storage container
 
 Using the Azure web portal or command line tool, create or choose a storage account with a suitable redundancy profile and availability region. Use the storage account keys to create a new container.
 
@@ -34,34 +42,72 @@ azure storage container create exampleContainerName</span>
 </code></pre>
 </notextile>
 
+Note that Keepstore services may be configured to use multiple Azure Storage accounts and multiple containers within a storage account.
+
 h2. Configure keepstore
 
 Copy the primary storage account key to a file where it will be accessible to keepstore at startup time.
 
 <notextile>
-<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/sv/keepstore/exampleStorageAccountName.key &lt;&lt;EOF'
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/arvados/keepstore/azure_storage_account_key.txt &lt;&lt;EOF'
 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
 EOF</span>
-~$ <span class="userinput">sudo chmod 0400 /etc/sv/keepstore/exampleStorageAccountName.key</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/azure_storage_account_key.txt</span>
 </code></pre>
 </notextile>
 
-In your keepstore startup script, instead of specifying a local storage using @-volume /path@ or discovering mount points automatically, use @-azure-*@ arguments to specify the storage container:
+Next, edit the @Volumes@ section of the @keepstore.yml@ config file:
 
-<notextile>
-<pre><code>#!/bin/sh
+<pre>
+Volumes:
+- # The volume type; this indicates Azure blob storage
+  Type: Azure
 
-exec 2&gt;&amp;1
-exec keepstore \
- -azure-storage-account-key-file <span class="userinput">/etc/sv/keepstore/exampleStorageAccountName.key</span> \
- -azure-storage-account-name <span class="userinput">exampleStorageAccountName</span> \
- -azure-storage-container-volume <span class="userinput">exampleContainerName</span>
-</code></pre>
-</notextile>
+  # How much replication is performed by the underlying container.
+  # This is used to inform replication decisions at the Keep layer.
+  AzureReplication: 3
 
-Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
+  # The storage container to use for the backing store.
+  ContainerName: exampleContainerName
 
-<notextile>
-<pre><code>2015/10/26 21:06:24 Using volume azure-storage-container:"exampleContainerName" (writable=true)
-</code></pre>
-</notextile>
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # Amount of time to wait for a response before failing the request
+  RequestTimeout: 2m0s
+
+  # The storage account name, used for authentication
+  StorageAccountName: exampleStorageAccountName
+
+  # The storage account secret key, used for authentication
+  StorageAccountKeyFile: /etc/arvados/keepstore/azure_storage_account_key.txt
+
+  # The cloud environment to use.  If blank, use the default cloud
+  # environment.  See below for an example of an alternate cloud environment.
+  StorageBaseURL: ""
+
+  # Storage classes to associate with this volume.  See "Storage
+  # classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+- # Example configuration to use Azure China.
+  #
+  # The alternate cloud environment to use.
+  # Note that cloud environments are different from regions.  A
+  # cloud environment is an entirely separate instance of Azure with
+  # separate accounts, requiring separate credentials.
+  #
+  StorageBaseURL: core.chinacloudapi.cn
+  StorageAccountKeyFile: /etc/arvados/keepstore/azure_cn_storage_account_key.txt
+  StorageAccountName: cn-account-name
+  ContainerName: exampleChinaContainerName
+
+  # The rest are the same as above
+  Type: Azure
+  AzureReplication: 3
+  ReadOnly: false
+  RequestTimeout: 10m0s
+  StorageClasses: null
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
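+
+For example, on a systemd-managed install (assuming the service unit is named @keepstore@; adjust for your init system):
+
+<pre>
+$ sudo systemctl restart keepstore
+$ sudo journalctl -u keepstore -f
+</pre>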
diff --git a/doc/install/configure-fs-storage.html.textile.liquid b/doc/install/configure-fs-storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..ddd54c3
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: installguide
+title: Filesystem storage
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore can store data in local and network-attached POSIX filesystems.
+
+h2. Setting up filesystem mounts
+
+Volumes are configured in the @Volumes@ section of the configuration file.  You may provide multiple volumes for a single keepstore process to manage multiple disks.  Keepstore distributes blocks among volumes in round-robin fashion.
+
+<pre>
+Volumes:
+- # The volume type; this indicates a filesystem directory.
+  Type: Directory
+
+  # The directory that will be used as the backing store.
+  Root: /mnt/local-disk
+
+  # How much replication is performed by the underlying filesystem.
+  # (for example, a network filesystem may provide its own replication).
+  # This is used to inform replication decisions at the Keep layer.
+  DirectoryReplication: 1
+
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # When true, read and write operations (for whole 64MiB blocks) on
+  # an individual volume will queued and issued serially.  When
+  # false, read and write operations will be issued concurrently.
+  #
+  # May improve throughput if you experience contention when there are
+  # multiple requests to the same volume.
+  #
+  # When using SSDs, RAID, or a parallel network filesystem, you probably
+  # don't want this.
+  Serialize: false
+
+  # Storage classes to associate with this volume.  See "Storage
+  # classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+  # Example of a second volume section
+- DirectoryReplication: 2
+  ReadOnly: false
+  Root: /mnt/network-disk
+  Serialize: false
+  StorageClasses: null
+  Type: Directory
+</pre>
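+
+Each @Root@ directory must exist and be writable by the user keepstore runs as. A minimal sketch matching the example above (assuming a dedicated @keepstore@ user; adjust the user and paths to your install):
+
+<pre>
+$ sudo mkdir -p /mnt/local-disk /mnt/network-disk
+$ sudo chown keepstore:keepstore /mnt/local-disk /mnt/network-disk
+</pre>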
diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..88172fa
--- /dev/null
@@ -0,0 +1,112 @@
+---
+layout: default
+navsection: installguide
+title: Configure S3 object storage
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, or Ceph RADOS.
+
+h2. Configure keepstore
+
+Copy the "access key" and "secret key" to files where they will be accessible to keepstore at startup time.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/arvados/keepstore/aws_s3_access_key.txt &lt;&lt;EOF'
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+EOF</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/aws_s3_access_key.txt</span>
+</code></pre>
+</notextile>
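+
+The secret key goes into its own file in the same way, matching the @SecretKeyFile@ path used in the configuration below:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/arvados/keepstore/aws_s3_secret_key.txt &lt;&lt;EOF'
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+EOF</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/aws_s3_secret_key.txt</span>
+</code></pre>
+</notextile>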
+
+Next, edit the @Volumes@ section of the @keepstore.yml@ config file.
+
+h3. Example config for Amazon S3
+
+<pre>
+Volumes:
+- # The volume type; this indicates object storage compatible with the S3 API
+  Type: S3
+
+  # Storage provider.  If blank, uses Amazon S3 by default.
+  # See below for an example alternate configuration for Google Cloud
+  # Storage.
+  Endpoint: ""
+
+  # The bucket to use for the backing store.
+  Bucket: example-bucket-name
+
+  # The region where the bucket is located.
+  Region: us-east-1
+
+  # The credentials to use to access the bucket.
+  AccessKeyFile: /etc/arvados/keepstore/aws_s3_access_key.txt
+  SecretKeyFile: /etc/arvados/keepstore/aws_s3_secret_key.txt
+
+  # Maximum time to wait making the initial connection to the backend before
+  # failing the request.
+  ConnectTimeout: 1m0s
+
+  # Page size for s3 "list bucket contents" requests
+  IndexPageSize: 1000
+
+  # True if the region requires a LocationConstraint declaration
+  LocationConstraint: false
+
+  # Maximum eventual consistency latency
+  RaceWindow: 24h0m0s
+
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # Maximum time to wait for a complete response from the backend before
+  # failing the request.
+  ReadTimeout: 2m0s
+
+  # How much replication is performed by the underlying bucket.
+  # This is used to inform replication decisions at the Keep layer.
+  S3Replication: 2
+
+  # Storage classes to associate with this volume.  See
+  # "Storage classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+  # Enable deletion (garbage collection) even when TrashLifetime is
+  # zero.  WARNING: eventual consistency may result in race conditions
+  # that can cause data loss.  Do not enable this unless you know what
+  # you are doing.
+  UnsafeDelete: false
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
+
+h3. Example config for Google Cloud Storage
+
+See previous section for documentation of configuration fields.
+
+<pre>
+Volumes:
+- # Example configuration using alternate storage provider
+  # Configuration for Google Cloud Storage
+  Endpoint: https://storage.googleapis.com
+  Region: ""
+
+  AccessKeyFile: /etc/arvados/keepstore/gce_s3_access_key.txt
+  SecretKeyFile: /etc/arvados/keepstore/gce_s3_secret_key.txt
+  Bucket: example-bucket-name
+  ConnectTimeout: 1m0s
+  IndexPageSize: 1000
+  LocationConstraint: false
+  RaceWindow: 24h0m0s
+  ReadOnly: false
+  ReadTimeout: 2m0s
+  S3Replication: 2
+  StorageClasses: null
+  UnsafeDelete: false
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
diff --git a/doc/install/create-standard-objects.html.textile.liquid b/doc/install/create-standard-objects.html.textile.liquid
deleted file mode 100644 (file)
index 8ac3fb0..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Create standard objects
-
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-In these steps we use the Arvados CLI tools on the <strong>shell server</strong> to create a few Arvados objects. The CLI tools require an ARVADOS_API_TOKEN environment variable with a valid admin token. If you haven't already done so, set that up as shown in the "API token guide":../user/reference/api-tokens.html.
-
-h3. Arvados repository
-
-Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
-
-<notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
-~$ <span class="userinput">all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"</span>
-~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
-~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
-</code></pre></notextile>
-
-Create a link object to make the repository object readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
-
-<notextile>
-<pre><code>~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
-<span class="userinput">{
- "tail_uuid":"$all_users_group_uuid",
- "head_uuid":"$repo_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF</span>
-</code></pre></notextile>
-
-In a couple of minutes, your arvados-git-sync cron job will create an empty repository on your git server. Seed it with the real arvados repository. If your git credential helpers were configured correctly when you "set up your shell server":install-shell-server.html, the "git push" command will use your API token instead of prompting you for a username and password.
-
-<notextile>
-<pre><code>~$ <span class="userinput">cd /tmp</span>
-/tmp$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git</span>
-/tmp <span class="userinput">git --git-dir arvados.git push https://git.<b>uuid_prefix.your.domain</b>/arvados.git '*:*'</span>
-</code></pre>
-</notextile>
-
-If you did not set up a HTTPS service, you can push to <code>git@git.uuid_prefix.your.domain:arvados.git</code> using your SSH key, or by logging in to your git server and using sudo.
-
-<notextile>
-<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
-git@gitserver:~$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git /tmp/arvados.git</span>
-git@gitserver:~$ <span class="userinput">cd /tmp/arvados.git</span>
-git@gitserver:/tmp/arvados.git$ <span class="userinput">gitolite push /var/lib/arvados/git/repositories/<b>your_arvados_repo_uuid</b>.git '*:*'</span>
-</code></pre>
-</notextile>
-
-h3. Default project for docker images
-
-Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
-
-<notextile>
-<pre><code>~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
-~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
-~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
-<span class="userinput">{
- "tail_uuid":"$all_users_group_uuid",
- "head_uuid":"$project_uuid",
- "link_class":"permission",
- "name":"can_read"
-}
-EOF</span>
-</code></pre></notextile>
-
-h3. Download and tag the latest arvados/jobs docker image
-
-The @arvados-cwl-runner@ needs access to an arvados/jobs image that is tagged as 'latest'. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as 'latest'.
-
-<notextile>
-<pre><code>~$ <span class="userinput">arv-keepdocker --pull arvados/jobs latest</span>
-</code></pre></notextile>
-
-If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
index 9784266e68b4df509e0ce55f3db8118bbd77d99e..4b3f4ec0b01fe016def2d2dbaf7e92e95b04787f 100644 (file)
@@ -48,7 +48,7 @@ Set up crunch-dispatch-slurm's configuration directory:
 
 <notextile>
 <pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
-~$ <span class="userinput">sudo install -d -o -root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
+~$ <span class="userinput">sudo install -d -o root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
 </code></pre>
 </notextile>
 
@@ -63,7 +63,7 @@ Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authentic
 
 This is the only configuration required by crunch-dispatch-slurm.  The subsections below describe optional configuration flags you can set inside the main configuration object.
 
-h3. Client::KeepServiceURIs
+h3(#KeepServiceURIs). Client::KeepServiceURIs
 
 Override Keep service discovery with a predefined list of Keep URIs. This can be useful if the compute nodes run a local keepstore that should handle all Keep traffic. Example:
 
@@ -76,7 +76,7 @@ Override Keep service discovery with a predefined list of Keep URIs. This can be
 </code></pre>
 </notextile>
 
-h3. PollPeriod
+h3(#PollPeriod). PollPeriod
 
 crunch-dispatch-slurm polls the API server periodically for new containers to run.  The @PollPeriod@ option controls how often this poll happens.  Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@.  For example:
 
@@ -85,7 +85,7 @@ crunch-dispatch-slurm polls the API server periodically for new containers to ru
 </code></pre>
 </notextile>
 
-h3. PrioritySpread
+h3(#PrioritySpread). PrioritySpread
 
 crunch-dispatch-slurm adjusts the "nice" values of its SLURM jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
 * If non-Arvados jobs run on your SLURM cluster, and your Arvados containers are waiting too long in the SLURM queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
@@ -99,11 +99,9 @@ The smallest usable value is @1@. The default value of @10@ is used if this opti
 </code></pre>
 </notextile>
 
+h3(#SbatchArguments). SbatchArguments
 
-
-h3. SbatchArguments
-
-When crunch-dispatch-slurm invokes @sbatch@, you can add switches to the command by specifying @SbatchArguments@.  You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @SbatchArguments@ to an array of strings.  For example:
+When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArguments@.  You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @SbatchArguments@ to an array of strings.  For example:
 
 <notextile>
 <pre><code class="userinput">SbatchArguments:
@@ -111,7 +109,9 @@ When crunch-dispatch-slurm invokes @sbatch@, you can add switches to the command
 </code></pre>
 </notextile>
 
-h3. CrunchRunCommand: Dispatch to SLURM cgroups
+Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line.  Arguments specified through Arvados are added after the arguments listed in SbatchArguments.  This means, for example, that an Arvados container that specifies @partitions@ in @scheduling_parameters@ will override an occurrence of @--partition@ in SbatchArguments.  As a result, for container parameters that can be specified through Arvados, SbatchArguments can be used to specify defaults but not to enforce specific policy.
+
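+For example, with the configuration below, containers are submitted to the (hypothetical) @compute@ partition by default, but a container that requests a different partition via @scheduling_parameters@ is dispatched there instead:
+
+<notextile>
+<pre><code class="userinput">SbatchArguments:
+- <b>"--partition=compute"</b>
+</code></pre>
+</notextile>
+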
+h3(#CrunchRunCommand-cgroups). CrunchRunCommand: Dispatch to SLURM cgroups
 
 If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups.  This provides consistent enforcement of resource constraints.  To do this, use a crunch-dispatch-slurm configuration like the following:
 
@@ -122,7 +122,7 @@ If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunc
 </code></pre>
 </notextile>
 
-The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected.  The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM.
+The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected.  The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM.  When doing this, you should also set "ReserveExtraRAM":#ReserveExtraRAM .
 
 {% include 'notebox_begin' %}
 
@@ -132,7 +132,7 @@ You can work around this issue by disabling the Docker daemon's systemd integrat
 
 {% include 'notebox_end' %}
 
-h3. CrunchRunCommand: Using host networking for containers
+h3(#CrunchRunCommand-network). CrunchRunCommand: Using host networking for containers
 
 Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups.  This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net".   If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster.  Be aware this reduces container isolation, which may be a security risk.
 
@@ -144,7 +144,7 @@ Older Linux kernels (prior to 3.18) have bugs in network namespace handling whic
 </code></pre>
 </notextile>
 
-h3. MinRetryPeriod: Rate-limit repeated attempts to start containers
+h3(#MinRetryPeriod). MinRetryPeriod: Rate-limit repeated attempts to start containers
 
 If SLURM is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to SLURM more than once in the given time span.
 
@@ -153,6 +153,15 @@ If SLURM is unable to run a container, the dispatcher will submit it again after
 </code></pre>
 </notextile>
 
+h3(#ReserveExtraRAM). ReserveExtraRAM: Extra RAM for jobs
+
+Extra RAM to reserve (in bytes) on each SLURM job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@.  If not provided, the default value is zero.  This is helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process.  In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.
+
+<notextile>
+<pre><code class="userinput">ReserveExtraRAM: <b>268435456</b>
+</code></pre>
+</notextile>
+
 h2. Restart the dispatcher
 
 {% include 'notebox_begin' %}
index c69d18b8e4bd2b0b8e3a19802982fdc284eb0e42..e1593a430a9f89b369e1c67e73f41a6705aa6ce4 100644 (file)
@@ -9,8 +9,6 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-h2(#slurm). Set up SLURM
-
 On the API server, install SLURM and munge, and generate a munge key.
 
 On Debian-based systems:
index a9b2971087ea46ceb4cf71afd11f40bd36c58159..216810de47174a32d5e913c5faebda29ee90a8b8 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Installation overview
+title: Installation options
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -9,9 +9,21 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados components run on GNU/Linux systems, and do not depend on any particular cloud operating stack.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
+Arvados components run on GNU/Linux systems and support multiple cloud operating stacks.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
 
-Arvados components can be installed and configured in a number of different ways.  Step-by-step instructions are available to perform a production installation from packages with manual configuration.  This method assumes you have several (virtual) machines at your disposal for running the various Arvados components.
+Arvados components can be installed and configured in a number of different ways.
 
-* "Docker quick start":arvbox.html
-* "Manual installation":install-manual-prerequisites.html
+<div class="offset1">
+table(table table-bordered table-condensed).
+|||\5=. Appropriate for|
+||_. Ease of setup|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados System Testing|
+|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
+|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
+|"Manual installation":install-manual-prerequisites.html|Complicated|yes|yes|yes|no|no|
+|"Cloud demo":https://cloud.curoverse.com by Veritas Genetics|N/A ^3^|yes|yes|no|no|no|
+|"Cluster Operation Subscription":https://curoverse.com/products by Veritas Genetics|N/A ^3^|yes|yes|yes|yes|yes|
+</div>
+
+* ^1^ Assumes a Kubernetes cluster is available
+* ^2^ Arvados on Kubernetes is under development and not yet ready for production use
+* ^3^ No installation necessary; run and managed by Veritas Genetics
index 2a4d103c7bfd84ea9ecead8515715edd664fcd4d..7f39bf51d2ebafcde977f03890a56d75c49943ee 100644 (file)
@@ -19,6 +19,7 @@ The git hosting setup involves three components.
 It is not strictly necessary to deploy _both_ SSH and HTTPS access, but we recommend deploying both:
 * SSH is a more appropriate way to authenticate from a user's workstation because it does not require managing tokens on the client side;
 * HTTPS is a more appropriate way to authenticate from a shell VM because it does not depend on SSH agent forwarding (SSH clients' agent forwarding features tend to behave as if the remote machine is fully trusted).
+* HTTPS is also used by Arvados Composer to access git repositories from the browser.
 
 The HTTPS instructions given below will not work if you skip the SSH setup steps.
 
@@ -338,3 +339,47 @@ Restart Nginx to make the Nginx and API server configuration changes take effect
 <pre><code>gitserver:~$ <span class="userinput">sudo nginx -s reload</span>
 </code></pre>
 </notextile>
+
+h2. Clone Arvados repository
+
+Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
+
+<notextile>
+<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+~$ <span class="userinput">all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
+~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
+</code></pre></notextile>
+
+Create a link object to make the repository object readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
+
+<notextile>
+<pre><code>~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+In a couple of minutes, your arvados-git-sync cron job will create an empty repository on your git server. Seed it with the real arvados repository. If your git credential helpers were configured correctly when you "set up your shell server":install-shell-server.html, the "git push" command will use your API token instead of prompting you for a username and password.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd /tmp</span>
+/tmp$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git</span>
+/tmp$ <span class="userinput">git --git-dir arvados.git push https://git.<b>uuid_prefix.your.domain</b>/arvados.git '*:*'</span>
+</code></pre>
+</notextile>
+
+If you did not set up an HTTPS service, you can push to <code>git@git.uuid_prefix.your.domain:arvados.git</code> using your SSH key, or by logging in to your git server and using sudo.
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
+git@gitserver:~$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git /tmp/arvados.git</span>
+git@gitserver:~$ <span class="userinput">cd /tmp/arvados.git</span>
+git@gitserver:/tmp/arvados.git$ <span class="userinput">gitolite push /var/lib/arvados/git/repositories/<b>your_arvados_repo_uuid</b>.git '*:*'</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/install-components.html.textile.liquid b/doc/install/install-components.html.textile.liquid
new file mode 100644 (file)
index 0000000..b21c4bd
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: installguide
+title: Choosing which components to install
+...
+
+Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality).  It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture to understand how these components interact.
+
+table(table table-bordered table-condensed).
+|\3=. *Core*|
+|"Postgres database":install-postgresql.html |Stores data for the API server.|Required.|
+|"API server":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
+|\3=. *Keep (storage)*|
+|"Keepstore":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|
+|"Keepproxy":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|
+|"Keep-web":install-keep-web.html |Gateway service providing read/write HTTP and WebDAV support on top of Keep.|Required to be able to download files from Keep over plain HTTP in Workbench.|
+|"Keep-balance":install-keep-balance.html |Storage cluster maintenance daemon responsible for moving blocks to their optimal server location, adjusting block replication levels, and trashing unreferenced blocks.|Required to free deleted data from underlying storage, and to ensure proper replication and block distribution (including support for storage classes).|
+|\3=. *User interface*|
+|"Single Sign On server":install-sso.html |Login server.|Required for web based login to Workbench.|
+|"Workbench":install-workbench-app.html |Primary graphical user interface for working with file collections and running containers.|Optional.  Depends on API server, SSO server, keep-web, websockets server.|
+|"Workflow Composer":install-composer.html |Graphical user interface for editing Common Workflow Language workflows.|Optional.  Depends on git server (arv-git-httpd).|
+|\3=. *Additional services*|
+|"Websockets server":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|
+|"Shell server":install-shell-server.html |Synchronize (create/delete/configure) Unix shell accounts with Arvados users.|Optional.|
+|"Git server":install-arv-git-httpd.html |Arvados-hosted git repositories, with Arvados-token based authentication.|Optional, but required by Workflow Composer.|
+|\3=. *Crunch (running containers)*|
+|"crunch-dispatch-slurm":crunch2-slurm/install-prerequisites.html |Run analysis workflows using Docker containers distributed across a SLURM cluster.|Optional if you wish to use Arvados for data management only.|
+|"Node Manager":install-nodemanager.html |Allocate and free cloud VM instances on demand based on workload.|Optional, not needed for a static SLURM cluster (such as on-premise HPC).|
diff --git a/doc/install/install-composer.html.textile.liquid b/doc/install/install-composer.html.textile.liquid
new file mode 100644 (file)
index 0000000..9bd25ed
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: installguide
+title: Install Composer
+...
+
+Arvados Composer is a single-page JavaScript application for building Common Workflow Language (CWL) workflows.
+
+h2. Prerequisites
+
+In addition to Arvados core services, Composer requires "Arvados hosted git repositories":install-arv-git-httpd.html which are used for storing workflow files.
+
+h2. Install
+
+Composer may be installed on the same host as Workbench, or on a different host.  Composer communicates directly with the Arvados API server.  It does not require its own backend and should be served as a static file.
+
+On a Debian-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-composer</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-composer</span>
+</code></pre>
+</notextile>
+
+h2. Configure
+
+h3. composer.yml
+
+Edit @/etc/arvados/composer/composer.yml@ and set @apiEndPoint@ to your API server:
+
+<pre>
+apiEndPoint: https://zzzzz.arvadosapi.com
+</pre>
+
+h3. Nginx
+
+Add Composer to your Nginx configuration.  This example will host Composer at @/composer@.
+
+<pre>
+location /composer {
+  root   /var/www/arvados-composer;
+  index  index.html;
+}
+</pre>
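+
+Then reload Nginx so the new location takes effect:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>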
+
+h3. Workbench link to composer
+
+Edit the workbench @application.yml@ and set @composer_url@ to the location from which it is served.
+
+<pre>
+production:
+  composer_url: 'https://workbench.zzzzz.arvadosapi.com/composer'
+</pre>
index 4c735a1eec1ec286b2652f6ee5282920c48cc797..3a8dce078dd092bfe687639f912415b2553bf14c 100644 (file)
@@ -57,12 +57,7 @@ h3. Create a keep-balance token
 
 Create an Arvados superuser token for use by keep-balance. *On the API server*, run:
 
-<notextile>
-<pre><code>apiserver:~$ <span class="userinput">cd /var/www/arvados-api/current</span>
-apiserver:/var/www/arvados-api/current$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production bundle exec script/create_superuser_token.rb</span>
-zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
-</code></pre>
-</notextile>
+{% include 'create_superuser_token' %}
 
 h3. Update keepstore configuration files
 
index fe690a5eda8880b67f21fca6c2242e8bf62afead..9f580c0f8b2af0f0244c1ae1570c4346d33cd6ac 100644 (file)
@@ -103,7 +103,18 @@ Note: if the Web uploader is failing to upload data and there are no logs from k
 
 h3. Tell the API server about the Keepproxy server
 
-The API server needs to be informed about the presence of your Keepproxy server. Please execute the following commands on your <strong>shell server</strong>.
+The API server needs to be informed about the presence of your Keepproxy server.
+
+First, if you don't already have an admin token, create a superuser token:
+
+{% include 'create_superuser_token' %}
+
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
 
 <notextile>
 <pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
@@ -117,3 +128,13 @@ The API server needs to be informed about the presence of your Keepproxy server.
 }
 EOF</span>
 </code></pre></notextile>
+
+h3. Testing keepproxy
+
+Log into a host on an external network, outside your private Arvados network.  The host should be able to contact your keepproxy server (eg keep.$uuid_prefix.arvadosapi.com), but not your keepstore servers (eg keep[0-9].$uuid_prefix.arvadosapi.com).
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  For an example, see "Testing keep.":install-keepstore.html#testing on the keepstore install page.
index 2b4ee930fa8d0fc99532c24ad8aa43db12b227ee..64a710f9126fe7aa905817b3fb1fae162407a603 100644 (file)
@@ -9,7 +9,11 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-We are going to install two Keepstore servers. By convention, we use the following hostname pattern:
+Keepstore provides access to underlying storage for reading and writing content-addressed blocks, with enforcement of Arvados permissions.  Keepstore supports a variety of cloud object storage services and POSIX filesystems for its backing store.
+
+We recommend starting off with two Keepstore servers.  Exact server specifications will be site and workload specific, but in general keepstore will be I/O bound and should be set up to maximize aggregate bandwidth with compute nodes.  To increase capacity (either space or throughput) it is straightforward to add additional servers, or (in cloud environments) to increase the machine size of the existing servers.
+
+By convention, we use the following hostname pattern:
 
 <div class="offset1">
 table(table table-bordered table-condensed).
@@ -18,7 +22,7 @@ table(table table-bordered table-condensed).
 |keep1.@uuid_prefix@.your.domain|
 </div>
 
-Because the Keepstore servers are not directly accessible from the internet, these hostnames only need to resolve on the local network.
+Keepstore servers should not be directly accessible from the Internet (they are accessed via "keepproxy":install-keepproxy.html), so the hostnames only need to resolve on the private network.
 
 h2. Install Keepstore
 
@@ -39,150 +43,207 @@ On Red Hat-based systems:
 Verify that Keepstore is functional:
 
 <notextile>
-<pre><code>~$ <span class="userinput">keepstore -h</span>
-2016/07/01 14:06:21 keepstore starting, pid 32339
-Usage of ./keepstore:
-  -azure-max-get-bytes int
-       Maximum bytes to request in a single GET request. If smaller than 67108864, use multiple concurrent range requests to retrieve a block. (default 67108864)
-  -azure-storage-account-key-file string
-       File containing the account key used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-account-name string
-       Azure storage account name used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-container-volume value
-       Use the given container as a storage volume. Can be given multiple times. (default [])
-  -azure-storage-replication int
-       Replication level to report to clients when data is stored in an Azure container. (default 3)
-  -blob-signature-ttl int
-       Lifetime of blob permission signatures in seconds. Modifying the ttl will invalidate all existing signatures. See services/api/config/application.default.yml. (default 1209600)
-  -blob-signing-key-file string
-       File containing the secret key for generating and verifying blob permission signatures.
-  -data-manager-token-file string
-       File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
-  -enforce-permissions
-       Enforce permission signatures on requests.
-  -listen string
-       Listening address, in the form "host:port". e.g., 10.0.1.24:8000. Omit the host part to listen on all interfaces. (default ":25107")
-  -max-buffers int
-       Maximum RAM to use for data buffers, given in multiples of block size (64 MiB). When this limit is reached, HTTP requests requiring buffers (like GET and PUT) will wait for buffer space to be released. (default 128)
-  -max-requests int
-       Maximum concurrent requests. When this limit is reached, new requests will receive 503 responses. Note: this limit does not include idle connections from clients using HTTP keepalive, so it does not strictly limit the number of concurrent connections. (default 2 * max-buffers)
-  -never-delete
-       If true, nothing will be deleted. Warning: the relevant features in keepstore and data manager have not been extensively tested. You should leave this option alone unless you can afford to lose data. (default true)
-  -permission-key-file string
-       Synonym for -blob-signing-key-file.
-  -permission-ttl int
-       Synonym for -blob-signature-ttl.
-  -pid fuser -k pidfile
-       Path to write pid file during startup. This file is kept open and locked with LOCK_EX until keepstore exits, so fuser -k pidfile is one way to shut down. Exit immediately if there is an error opening, locking, or writing the pid file.
-  -readonly
-       Do not write, delete, or touch anything on the following volumes.
-  -s3-access-key-file string
-       File containing the access key used for subsequent -s3-bucket-volume arguments.
-  -s3-bucket-volume value
-       Use the given bucket as a storage volume. Can be given multiple times. (default [])
-  -s3-endpoint string
-       Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use "https://storage.googleapis.com".
-  -s3-region string
-       AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are ["ap-southeast-1" "eu-west-1" "us-gov-west-1" "sa-east-1" "cn-north-1" "ap-northeast-1" "ap-southeast-2" "eu-central-1" "us-east-1" "us-west-1" "us-west-2"].
-  -s3-replication int
-       Replication level reported to clients for subsequent -s3-bucket-volume arguments. (default 2)
-  -s3-secret-key-file string
-       File containing the secret key used for subsequent -s3-bucket-volume arguments.
-  -s3-unsafe-delete
-       EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.
-  -serialize
-       Serialize read and write operations on the following volumes.
-  -trash-check-interval duration
-       Time duration at which the emptyTrash goroutine will check and delete expired trashed blocks. Default is one day. (default 24h0m0s)
-  -trash-lifetime duration
-       Time duration after a block is trashed during which it can be recovered using an /untrash request
-  -volume value
-       Local storage directory. Can be given more than once to add multiple directories. If none are supplied, the default is to use all directories named "keep" that exist in the top level directory of a mount point at startup time. Can be a comma-separated list, but this is deprecated: use multiple -volume arguments instead. (default [])
-  -volumes value
-       Deprecated synonym for -volume. (default [])
+<pre><code>~$ <span class="userinput">keepstore --version</span>
 </code></pre>
 </notextile>
 
-h3. Prepare storage volumes
-
-{% include 'notebox_begin' %}
-This section uses a local filesystem as a backing store. If you are using Azure Storage, follow the setup instructions on the "Azure Blob Storage":configure-azure-blob-storage.html page instead.
-{% include 'notebox_end' %}
-
-There are two ways to specify a set of local directories where keepstore should store its data files.
-# Implicitly, by creating a directory called @keep@ at the top level of each filesystem you intend to use, and omitting @-volume@ arguments.
-# Explicitly, by providing a @-volume@ argument for each directory.
-
-For example, if there are filesystems mounted at @/mnt@ and @/mnt2@:
-
-<notextile>
-<pre><code>~$ <span class="userinput">mkdir /mnt/keep /mnt2/keep</span>
-~$ <span class="userinput">keepstore</span>
-2015/05/08 13:44:26 keepstore starting, pid 2765
-2015/05/08 13:44:26 Using volume [UnixVolume /mnt/keep] (writable=true)
-2015/05/08 13:44:26 Using volume [UnixVolume /mnt2/keep] (writable=true)
-2015/05/08 13:44:26 listening at :25107
-</code></pre>
-</notextile>
-
-Equivalently:
-
-<notextile>
-<pre><code>~$ <span class="userinput">mkdir /mnt/keep /mnt2/keep</span>
-~$ <span class="userinput">keepstore -volume=/mnt/keep -volume=/mnt2/keep</span>
-2015/05/08 13:44:26 keepstore starting, pid 2765
-2015/05/08 13:44:26 Using volume [UnixVolume /mnt/keep] (writable=true)
-2015/05/08 13:44:26 Using volume [UnixVolume /mnt2/keep] (writable=true)
-2015/05/08 13:44:26 listening at :25107
-</code></pre>
-</notextile>
+h3. Create config file
+
+By default, keepstore will look for its configuration file at @/etc/arvados/keepstore/keepstore.yml@.
+
+You can override the configuration file location using the @-config@ command line option to keepstore.
+
+The following is a sample configuration file:
+
+<pre>
+# Duration for which new permission signatures (returned in PUT
+# responses) will be valid.  This should be equal to the API
+# server's blob_signature_ttl configuration entry.
+BlobSignatureTTL: 336h0m0s
+
+# Local file containing the secret blob signing key (used to generate
+# and verify blob signatures).  The contents of the key file must be
+# identical to the API server's blob_signing_key configuration entry.
+BlobSigningKeyFile: ""
+
+# Print extra debug logging
+Debug: false
+
+# Maximum number of concurrent block deletion operations (per
+# volume) when emptying trash. Default is 1.
+EmptyTrashWorkers: 1
+
+# Enable trash and delete features. If false, trash lists will be
+# accepted but blocks will not be trashed or deleted.
+# Keepstore does not delete data on its own.  The keep-balance
+# service determines which blocks are candidates for deletion
+# and instructs the keepstore to move those blocks to the trash.
+EnableDelete: true
+
+# Local port to listen on. Can be 'address:port' or ':port', where
+# 'address' is a host IP address or name and 'port' is a port number
+# or name.
+Listen: :25107
+
+# Format of request/response and error logs: "json" or "text".
+LogFormat: json
+
+# The secret key that must be provided by monitoring services
+# wishing to access the health check endpoint (/_health).
+ManagementToken: ""
+
+# Maximum RAM to use for data buffers, given in multiples of block
+# size (64 MiB). When this limit is reached, HTTP requests requiring
+# buffers (like GET and PUT) will wait for buffer space to be
+# released.
+#
+# It should be set such that MaxBuffers * 64MiB + 10% fits
+# comfortably in memory. On a host dedicated to running keepstore,
+# divide total memory by 88MiB to suggest a suitable value. For example,
+# if grep MemTotal /proc/meminfo reports MemTotal: 7125440 kB,
+# compute 7125440 / (88 * 1024)=79 and configure MaxBuffers: 79
+MaxBuffers: 128
+
+# Maximum concurrent requests. When this limit is reached, new
+# requests will receive 503 responses. Note: this limit does not
+# include idle connections from clients using HTTP keepalive, so it
+# does not strictly limit the number of concurrent connections. If
+# omitted or zero, the default is 2 * MaxBuffers.
+MaxRequests: 0
+
+# Path to write PID file during startup. This file is kept open and
+# locked with LOCK_EX until keepstore exits, so "fuser -k pidfile" is
+# one way to shut down. Exit immediately if there is an error
+# opening, locking, or writing the PID file.
+PIDFile: ""
+
+# Maximum number of concurrent pull operations. Default is 1, i.e.,
+# pull lists are processed serially.  A pull operation copies a block
+# from another keepstore server.
+PullWorkers: 1
+
+# Honor read requests only if a valid signature is provided.  This
+# should be true, except for development use and when migrating from
+# a very old version.
+RequireSignatures: true
+
+# Local file containing the Arvados API token used by keep-balance
+# or data manager.  Delete, trash, and index requests are honored
+# only for this token.
+SystemAuthTokenFile: ""
+
+# Path to server certificate file in X509 format. Enables TLS mode.
+#
+# Example: /var/lib/acme/live/keep0.example.com/fullchain
+TLSCertificateFile: ""
+
+# Path to server key file in X509 format. Enables TLS mode.
+#
+# The key pair is read from disk during startup, and whenever SIGHUP
+# is received.
+#
+# Example: /var/lib/acme/live/keep0.example.com/privkey
+TLSKeyFile: ""
+
+# How often to check for (and delete) trashed blocks whose
+# TrashLifetime has expired.
+TrashCheckInterval: 24h0m0s
+
+# Time duration after a block is trashed during which it can be
+# recovered using an /untrash request.
+TrashLifetime: 336h0m0s
+
+# Maximum number of concurrent trash operations (moving a block to the
+# trash, or permanently deleting it). Default is 1, i.e., trash lists
+# are processed serially.  If individual trash operations have high
+# latency (eg some cloud platforms) you should increase this.
+TrashWorkers: 1
+</pre>
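+
+As a quick sanity check for the @MaxBuffers@ rule of thumb above, you can compute a suggested value directly from @/proc/meminfo@ (a sketch of the same arithmetic given in the comment):
+
+<notextile>
+<pre><code>~$ <span class="userinput">awk '/MemTotal/ { printf "MaxBuffers: %d\n", $2 / (88 * 1024) }' /proc/meminfo</span>
+MaxBuffers: 79
+</code></pre>
+</notextile>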
+
+h3. Notes on storage management
+
+On its own, a keepstore server never deletes data.  The "keep-balance":install-keep-balance.html service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash.
+
+When a block is newly written, it is protected from deletion for the duration given in @BlobSignatureTTL@.  During this time, it cannot be trashed.
+
+If keep-balance instructs keepstore to trash a block that is older than @BlobSignatureTTL@, and @EnableDelete@ is true, the block will be moved to "trash".  A block in the trash is no longer accessible by read requests, but has not yet been permanently deleted.  Blocks in the trash may be recovered using the "untrash" API endpoint.  Blocks are permanently deleted after they have been in the trash for the duration given in @TrashLifetime@.
+
+Keep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their "storage class":{{site.baseurl}}/admin/storage-classes.html and "rendezvous hashing order":{{site.baseurl}}/api/storage.html).  Pulling a block makes a copy.  If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed on the original server.
+
+h3. Configure storage volumes
+
+Available storage volume types include POSIX filesystems and cloud object storage.
+
+* To use a POSIX filesystem, including both local filesystems (ext4, xfs) and network file system such as GPFS or Lustre, follow the setup instructions on "Filesystem storage":configure-fs-storage.html
+* If you are using S3-compatible object storage (including Amazon S3, Google Cloud Storage, and Ceph RADOS), follow the setup instructions on "S3 Object Storage":configure-s3-object-storage.html
+* If you are using Azure Blob Storage, follow the setup instructions on "Azure Blob Storage":configure-azure-blob-storage.html
 
 h3. Run keepstore as a supervised service
 
 Install runit to supervise the keepstore daemon.  {% include 'install_runit' %}
 
-Install this script as the run script for the keepstore service, modifying it as directed below.
+Install this script as the run script @/etc/sv/keepstore/run@ for the keepstore service:
 
 <notextile>
 <pre><code>#!/bin/sh
 
 exec 2>&1
-exec GOGC=10 keepstore \
- -enforce-permissions=true \
- -blob-signing-key-file=<span class="userinput">/etc/keepstore/blob-signing.key</span> \
- -max-buffers=<span class="userinput">100</span> \
- -serialize=true \
- -never-delete=false \
- -volume=<span class="userinput">/mnt/keep</span> \
- -volume=<span class="userinput">/mnt2/keep</span>
+GOGC=10 exec keepstore -config /etc/arvados/keepstore/keepstore.yml
 </code></pre>
 </notextile>
 
-p(#max-buffers). The @-max-buffers@ argument limits keepstore's memory usage. It should be set such that @max-buffers * 64MiB + 10%@ fits comfortably in memory. On a host dedicated to running keepstore, divide total memory by 88MiB to suggest a suitable value. For example, if @grep MemTotal /proc/meminfo@ reports @MemTotal: 7125440 kB@, compute 7125440&divide;(88&times;1024)=79 and configure @-max-buffers=79@.
-
-If you want access control on your Keepstore server(s), you must specify the @-enforce-permissions@ flag and provide a signing key. The @-blob-signing-key-file@ argument should be a file containing a long random alphanumeric string with no internal line breaks (it is also possible to use a socket or FIFO: keepstore reads it only once, at startup). This key must be the same as the @blob_signing_key@ configured in the "API server's":install-api-server.html configuration file, @/etc/arvados/api/application.yml@.
-
-The @-serialize=true@ (default: @false@) argument limits keepstore to one reader/writer process per storage partition. This avoids thrashing by allowing the storage device underneath the storage partition to do read/write operations sequentially. Enabling @-serialize@ can improve Keepstore performance if the storage partitions map 1:1 to physical disks that are dedicated to Keepstore, particularly so for mechanical disks. In some cloud environments, enabling @-serialize@ has also also proven to be beneficial for performance, but YMMV. If your storage partition(s) are backed by network or RAID storage that can handle many simultaneous reader/writer processes without thrashing, you probably do not want to set @-serialize@.
-
 h3. Set up additional servers
 
 Repeat the above sections to prepare volumes and bring up supervised services on each Keepstore server you are setting up.
 
 h3. Tell the API server about the Keepstore servers
 
-The API server needs to be informed about the presence of your Keepstore servers. For each of the Keepstore servers you have created, please execute the following commands on your <strong>shell server</strong>.
+The API server needs to be informed about the presence of your Keepstore servers.
+
+First, if you don't already have an admin token, create a superuser token:
+
+{% include 'create_superuser_token' %}
 
-Make sure to update the @service_host@ value to match each of your Keepstore servers.
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
+
+Use this command to register each keepstore server you have installed.  Make sure to update the @service_host@ value.
 
 <notextile>
 <pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
 ~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
 ~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
 <span class="userinput">{
- "service_host":"<strong>keep0.$prefix.your.domain</strong>",
+ "service_host":"<strong>keep0.$uuid_prefix.your.domain</strong>",
  "service_port":25107,
  "service_ssl_flag":false,
  "service_type":"disk"
 }
 EOF</span>
 </code></pre></notextile>
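+
+You can list the registered services to verify each registration (a quick check):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep_service list</span>
+</code></pre>
+</notextile>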
+
+h3(#testing). Testing keep
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections:
+
+<pre>
+$ echo "hello world!" > hello.txt
+
+$ arv-put --portable-data-hash hello.txt
+2018-07-12 13:35:25 arvados.arv_put[28702] INFO: Creating new cache file at /home/example/.cache/arvados/arv-put/1571ec0adb397c6a18d5c74cc95b3a2a
+0M / 0M 100.0% 2018-07-12 13:35:27 arvados.arv_put[28702] INFO:
+
+2018-07-12 13:35:27 arvados.arv_put[28702] INFO: Collection saved as 'Saved at 2018-07-12 17:35:25 UTC by example@example'
+59389a8f9ee9d399be35462a0f92541c+53
+
+$ arv-get 59389a8f9ee9d399be35462a0f92541c+53/hello.txt
+hello world!
+</pre>
index 5296b6bc141bce9b0187228a1a53318bbc07fabe..e0cc4b8581e65a1a38292f1953418db394f92bee 100644 (file)
@@ -9,13 +9,17 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+h2. Supported Cloud and HPC platforms
+
+Arvados can run in a variety of configurations.  For compute scheduling, Arvados supports HPC clusters using @slurm@, and elastic cloud computing on AWS, Google Cloud Platform and Azure.  For storage, Arvados can store blocks on regular file systems such as ext4 or xfs, on network file systems such as GPFS, or on object storage such as Azure Blob Storage, Amazon S3, and other object stores that support the S3 API, including Google Cloud Storage and Ceph.
+
 h2. Hardware (or virtual machines)
 
 This guide assumes you have seven systems available in the same network subnet:
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-|_Function_|_Number of nodes_|
+|_. Function|_. Number of nodes|
 |Arvados API, Crunch dispatcher, Git, Websockets and Workbench|1|
 |Arvados Compute node|1|
 |Arvados Keepproxy and Keep-web server|1|
@@ -29,7 +33,7 @@ The number of Keepstore, shell and compute nodes listed above is a minimum. In a
 h2. Supported GNU/Linux distributions
 
 table(table table-bordered table-condensed).
-|_Distribution_|_State_|_Last supported version_|
+|_. Distribution|_. State|_. Last supported version|
 |CentOS 7|Supported|Latest|
 |Debian 8 ("jessie")|Supported|Latest|
 |Debian 9 ("stretch")|Supported|Latest|
@@ -69,7 +73,7 @@ First, register the Curoverse signing key in apt's database:
 Configure apt to retrieve packages from the Arvados package repository. This command depends on your OS vendor and version:
 
 table(table table-bordered table-condensed).
-|OS version|Command|
+|_. OS version|_. Command|
 |Debian 8 ("jessie")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ jessie main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Ubuntu 14.04 ("trusty")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
@@ -124,7 +128,7 @@ By convention, we use the following hostname pattern:
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-|_Function_|_Hostname_|
+|_. Function|_. Hostname|
 |Arvados API|@uuid_prefix@.your.domain|
 |Arvados Git server|git.@uuid_prefix@.your.domain|
 |Arvados Keepproxy server|keep.@uuid_prefix@.your.domain|
index 09c6b5cb1b8e0b84069a958902278f5d125c6457..5e1b1aec5a3b796b8785eef1cab221382788e063 100644 (file)
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados Node Manager provides elastic computing for Arvados and SLURM by creating and destroying virtual machines on demand.  Node Manager currently supports Amazon Web Services (AWS), Google Cloud Platform (GCP) and Microsoft Azure.
 
-Note: node manager is only required for elastic computing cloud environments.  Fixed size clusters do not require node manager.
+Note: node manager is only required for elastic computing cloud environments.  Fixed size clusters (such as on-premise HPC) do not require node manager.
 
 h2. Install
 
@@ -527,11 +527,11 @@ subscription_id = 00000000-0000-0000-0000-000000000000
 
 # The following directions are based on
 # https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/
+# and updated for v2 of the Azure cli tool.
 #
-# azure config mode arm
-# azure ad app create --name "<Your Application Display Name>" --home-page "<https://YourApplicationHomePage>" --identifier-uris "<https://YouApplicationUri>" --password <Your_Password>
-# azure ad sp create "<Application_Id>"
-# azure role assignment create --objectId "<Object_Id>" -o Owner -c /subscriptions/{subscriptionId}/
+# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://<Your_Application_Uri>" --password <Your_Password>
+# az ad sp create --id "<Application_Id>"
+# az role assignment create --assignee "<Application_Id>" --role Owner --resource-group "<Your_Azure_Arvados_Resource_Group>"
 #
 # Use <Application_Id> for "key" and the <Your_Password> for "secret"
 #
index 599730926a0f29bf17b4ef73c6aa2a0a6ee75cef..aabe6629d939c36b782eedc2185d665f485aa3b2 100644 (file)
@@ -9,16 +9,18 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Two Arvados Rails servers store data in a PostgreSQL database: the SSO server, and the API server.  The API server requires at least version *9.3* of PostgreSQL.  Beyond that, you have the flexibility to deploy PostgreSQL any way that the Rails servers will be able to connect to it.  Our recommended deployment strategy is:
+Two Arvados Rails servers store data in a PostgreSQL database: the SSO server, and the API server.  The API server requires at least version *9.4* of PostgreSQL.  Beyond that, you have the flexibility to deploy PostgreSQL any way that the Rails servers will be able to connect to it.  Our recommended deployment strategy is:
 
 * Install PostgreSQL on the same host as the SSO server, and dedicate that install to hosting the SSO database.  This provides the best security for the SSO server, because the database does not have to accept any client connections over the network.  Typical load on the SSO server is light enough that deploying both it and its database on the same host does not compromise performance.
 * If you want to provide the most scalability for your Arvados cluster, install PostgreSQL for the API server on a dedicated host.  This gives you the most flexibility to avoid resource contention, and tune performance separately for the API server and its database.  If performance is less of a concern for your installation, you can install PostgreSQL on the API server host directly, as with the SSO server.
 
 Find the section for your distribution below, and follow it to install PostgreSQL on each host where you will deploy it.  Then follow the steps in the later section(s) to set up PostgreSQL for the Arvados service(s) that need it.
 
-h2. Install PostgreSQL 9.3+
+It is important to make sure that autovacuum is enabled for the PostgreSQL database that backs the API server. Autovacuum is enabled by default since PostgreSQL 8.3.
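+
+You can confirm this from @psql@ (a quick check; assumes local access as the @postgres@ user):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -u postgres psql -c 'SHOW autovacuum;'</span>
+ autovacuum
+------------
+ on
+(1 row)
+</code></pre>
+</notextile>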
 
-The API server requires at least version *9.3* of PostgreSQL.
+h2. Install PostgreSQL 9.4+
+
+The API server requires at least version *9.4* of PostgreSQL.
 
 h3(#centos7). CentOS 7
 {% assign rh_version = "7" %}
@@ -39,7 +41,9 @@ h3(#centos7). CentOS 7
 
 h3(#debian). Debian or Ubuntu
 
-Debian 8 (Jessie) and Ubuntu 14.04 (Trusty) and later versions include a sufficiently recent version of Postgres.
+Debian 8 (Jessie) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of Postgres.
+
+Ubuntu 14.04 (Trusty) requires an updated PostgreSQL version; see "the PostgreSQL Ubuntu repository":https://www.postgresql.org/download/linux/ubuntu/
 
 # Install PostgreSQL:
   <notextile><pre>~$ <span class="userinput">sudo apt-get install postgresql</span></pre></notextile>
index 688850c2922869dbffa8e2c0a902c81c9dd1382c..7b7e2a83cf1e730d90a6892cfe06639d2c2e2eb5 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
-navsection: installguide
-title: Migrating Docker images
+navsection: admin
+title: Migrating from Docker 1.9
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
diff --git a/doc/sdk/R/index.html.textile.liquid b/doc/sdk/R/index.html.textile.liquid
deleted file mode 100644 (file)
index 7b788a1..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: default
-navsection: sdk
-navmenu: R
-title: "R Reference"
-
-no_nav_left: true
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-notextile. <iframe src="arvados/" style="width:100%; height:100%; border:none" />
index 75e01d96073f13b226ee15431dbf36cc486c16ac..a06d518666683e44d6838c01dd4c8f2d0a56da8a 100644 (file)
@@ -18,6 +18,11 @@ h3. Installation
 
 Use @go get git.curoverse.com/arvados.git/sdk/go/arvadosclient@.  The go tools will fetch the relevant code and dependencies for you.
 
-<notextile>{% code 'example_sdk_go_imports' as go %}</notextile>
+{% codeblock as go %}
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+{% endcodeblock %}
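+
+A minimal sketch of connecting with the client follows (assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment; exact signatures may vary between SDK versions):
+
+{% codeblock as go %}
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+func main() {
+	// MakeArvadosClient reads ARVADOS_API_HOST and ARVADOS_API_TOKEN
+	// from the environment.
+	arv, err := arvadosclient.MakeArvadosClient()
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Fetch the user record associated with the current token.
+	var user map[string]interface{}
+	if err := arv.Call("GET", "users", "", "current", nil, &user); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(user["uuid"])
+}
+{% endcodeblock %}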
 
 If you need pre-release client code, you can use the latest version from the repo by following "these instructions.":https://dev.arvados.org/projects/arvados/wiki/Go#Using-Go-with-Arvados
index 64019bba33848a2bf561d6776a4139ec5b23cddd..3e2631512bd5b44cfa78ea8603d079c14229c66a 100644 (file)
@@ -11,6 +11,8 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+{% include 'pipeline_deprecation_notice' %}
+
 Several utility libraries are included with Arvados. They are intended to make it quicker and easier to write your own crunch scripts.
 
 * "Python SDK extras":#pythonsdk
@@ -224,5 +226,3 @@ On qr1hi.arvadosapi.com, the binary distribution @picard-tools-1.82.zip@ is avai
  ...
 }
 </pre>
-
-
index 9960e668361aca9ac7e0645390b7f7d87cae3764..afbec20d950c518dd29c7882a503f88d1f530712 100644 (file)
@@ -12,6 +12,18 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados applications can subscribe to a live event stream from the database.  Events are described in the "Log resource.":{{site.baseurl}}/api/methods/logs.html
 
-<notextile>
-{% code 'events_py' as python %}
-</notextile>
+{% codeblock as python %}
+#!/usr/bin/env python
+
+import arvados
+import arvados.events
+
+# 'ev' is a dict containing the log table record describing the change.
+def on_message(ev):
+    if ev.get("event_type") == "create" and ev.get("object_kind") == "arvados#collection":
+        print "A new collection was created: %s" % ev["object_uuid"]
+
+api = arvados.api("v1")
+ws = arvados.events.subscribe(api, [], on_message)
+ws.run_forever()
+{% endcodeblock %}
index cf25639b14defda47456d6610458285a06aaecce..f9ecf7a5343b6210ceaf613c796af535a114adb1 100644 (file)
@@ -38,6 +38,11 @@ hints:
     enableReuse: false
   cwltool:Secrets:
     secrets: [input1, input2]
+  cwltool:TimeLimit:
+    timelimit: 14400
+  arv:WorkflowRunnerResources:
+    ramMin: 2048
+    coresMin: 2
 </pre>
 
 The one exception to this is @arv:APIRequirement@, see note below.
@@ -111,3 +116,21 @@ Indicate that one or more input parameters are "secret".  Must be applied at the
 table(table table-bordered table-condensed).
 |_. Field |_. Type |_. Description |
 |secrets|array<string>|Input parameters which are considered "secret".  Must be strings.|
+
+
+h2. cwltool:TimeLimit
+
+Set an upper limit on the execution time of a CommandLineTool or ExpressionTool.  A tool execution which exceeds the time limit may be preemptively terminated and considered failed.  May also be used by batch systems to make scheduling decisions.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|timelimit|int|Execution time limit in seconds. If set to zero, no limit is enforced.|
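+
+For example, to terminate a tool that runs longer than four hours (matching the value shown in the combined hints example above):
+
+<pre>
+hints:
+  cwltool:TimeLimit:
+    timelimit: 14400
+</pre>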
+
+h2. arv:WorkflowRunnerResources
+
+Specify resource requirements for the workflow runner process (arvados-cwl-runner) that manages a workflow run.  Must be applied to the top level workflow.  Will also be set implicitly when using @--submit-runner-ram@ on the command line along with @--create-workflow@ or @--update-workflow@.  Use this to adjust the runner's allocation if the workflow runner is getting "out of memory" exceptions or being killed by the out-of-memory (OOM) killer.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|ramMin|int|RAM, in mebibytes, to reserve for the arvados-cwl-runner process. Default 1 GiB|
+|coresMin|int|Number of cores to reserve to the arvados-cwl-runner process. Default 1 core.|
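+
+For example, the hint above with @ramMin: 2048@ corresponds roughly to registering the workflow with (a sketch):
+
+<pre>
+$ arvados-cwl-runner --create-workflow --submit-runner-ram=2048 myworkflow.cwl
+</pre>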
index b894e3d5e9f821797a1353f585491f9ca5f0f24c..f428d912cef64dcc98d042d6eaf3b0473c3efa3a 100644 (file)
@@ -9,13 +9,10 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-_If you are new to Arvados, please try the Quickstart on <a href="http://doc.arvados.org">the documentation homepage</a> instead of this detailed User Guide._
-
 This guide provides a reference for using Arvados to solve big data bioinformatics problems, including:
 
 * Robust storage of very large files, such as whole genome sequences, using the "Arvados Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html content-addressable cluster file system.
 * Running compute-intensive genomic analysis pipelines, such as alignment and variant calls using the "Arvados Crunch":{{site.baseurl}}/user/tutorials/intro-crunch.html cluster compute engine.
-* Storing and querying metadata about genome sequence files, such as human subjects and their phenotypic traits using the "Arvados Metadata Database.":{{site.baseurl}}/user/topics/tutorial-trait-search.html
 * Accessing, organizing, and sharing data, pipelines and results using the "Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html web application.
 
 The examples in this guide use the public Arvados instance located at <a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}</a>.  If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
index c9f74b5aa840deb25cd7557805609eca8200fb33..9a609039b4903420f2cd1aeedee530d4a07f82f4 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
-navsection: userguide
-title: "Using arvados-sync-groups"
+navsection: admin
+title: "Synchronizing external groups"
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-The @arvados-sync-groups@ tool allows to synchronize remote groups into Arvados from an external source.
+The @arvados-sync-groups@ tool synchronizes groups in Arvados with an external source.
 
 h1. Using arvados-sync-groups
 
diff --git a/doc/user/topics/link-accounts.html.textile.liquid b/doc/user/topics/link-accounts.html.textile.liquid
new file mode 100644 (file)
index 0000000..3854bf6
--- /dev/null
@@ -0,0 +1,38 @@
+---
+layout: default
+navsection: userguide
+title: "Linking alternate login accounts"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to link additional login accounts to the same Arvados account.  This can be used to migrate login accounts, for example, from one Google account to another.  It can also be used to migrate login providers, for example from LDAP to Google.  In order to do this, you must be able to log into both the "old" and "new" accounts.
+
+h2. Link accounts
+
+Follow this process to link the "new" login to the "old" login.
+
+# Log in using the "old" account
+# Under the users menu, choose *Link account*
+# On the link accounts page, press the button *Add another login to this account*
+# Follow login instructions from the login provider (eg Google)
+# You will be returned to the *Link accounts* confirmation page.
+# Press the *Link account* button to confirm.
+# After the accounts are linked, you will be returned to the dashboard.
+# Both the "old" and "new" logins will now log in to the same Arvados account.
+
+h2. Link accounts (alternate flow)
+
+You can also link accounts starting with logging into the "new" account first.
+
+# Log in using the "new" account
+# Under the users menu, choose *Link account* (if the user is inactive, there will be a link on the inactive user page)
+# On the link accounts page, press the button *Use this login to access another account*
+# Follow login instructions from the login provider (eg Google)
+# You will be returned to the *Link accounts* confirmation page.
+# Press the *Link account* button to confirm.
+# After the accounts are linked, you will be returned to the dashboard.
+# Both the "old" and "new" logins will now log in to the same Arvados account.
diff --git a/doc/user/topics/storage-classes.html.textile.liquid b/doc/user/topics/storage-classes.html.textile.liquid
new file mode 100644 (file)
index 0000000..96c8083
--- /dev/null
@@ -0,0 +1,53 @@
+---
+layout: default
+navsection: userguide
+title: Using storage classes
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Storage classes (also known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks.  This can be used to implement data storage policies such as moving data to archival storage.
+
+Names of storage classes are internal to the cluster and decided by the administrator.  Aside from "default", Arvados currently does not define any standard storage class names.
+
+h3. arv-put
+
+You may specify the desired storage class for a collection uploaded using @arv-put@:
+
+<pre>
+$ arv-put --storage-classes=hot myfile.txt
+</pre>
+
+h3. arvados-cwl-runner
+
+You may also specify the desired storage class for the final output collection produced by @arvados-cwl-runner@:
+
+<pre>
+$ arvados-cwl-runner --storage-classes=hot myworkflow.cwl myinput.yml
+</pre>
+
+(Note: intermediate collections produced by a workflow run will have the "default" storage class.)
+
+h3. arv command line
+
+You may set the storage class on an existing collection by setting the "storage_classes_desired" field of a Collection.  For example, at the command line:
+
+<pre>
+$ arv collection update --uuid zzzzz-4zz18-dhhm0ay8k8cqkvg --collection '{"storage_classes_desired": ["archival"]}'
+</pre>
+
+By setting "storage_classes_desired" to "archival", the blocks that make up the collection will be preferentially moved to keepstore volumes which are configured with the "archival" storage class.
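+
+The same update can be made with the Python SDK (a minimal sketch; assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment):
+
+<pre>
+import arvados
+
+api = arvados.api("v1")
+# Ask keep-balance to move this collection's blocks to "archival" volumes.
+api.collections().update(uuid="zzzzz-4zz18-dhhm0ay8k8cqkvg",
+                         body={"storage_classes_desired": ["archival"]}).execute()
+</pre>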
+
+h3. Storage class notes
+
+Collection blocks will be in the "default" storage class if not otherwise specified.
+
+Currently, a collection may only have one desired storage class.
+
+Any user with write access to a collection may set any storage class on that collection.
index 7015d413f9b0a63d0535a22e2e5576979501b613..d396802f72e696f2f6b1f10615b40db46d61d974 100644 (file)
@@ -9,23 +9,27 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+{% include 'notebox_begin_warning' %}
+The humans, specimens, and traits tables are deprecated and will be removed in a future release.  The recommended way to store and search on user-defined metadata is to use the "properties" field of Arvados resources.
+{% include 'notebox_end' %}
+
 This tutorial introduces the Arvados Metadata Database.  The Metadata Database stores information about files in Keep.  This example will use the Python SDK to find public WGS (Whole Genome Sequencing) data for people who have reported a certain medical condition.
 
 {% include 'tutorial_expectations' %}
 
 In the tutorial examples, three angle brackets (&gt;&gt;&gt;) will be used to denote code to enter at the interactive Python prompt.
 
-Start by running Python.  
+Start by running Python.
 
 <notextile>
 <pre><code>~$ <span class="userinput">python</span>
-Python 2.7.3 (default, Jan  2 2013, 13:56:14) 
+Python 2.7.3 (default, Jan  2 2013, 13:56:14)
 [GCC 4.7.2] on linux2
 Type "help", "copyright", "credits" or "license" for more information.
 &gt;&gt;&gt;
 </code></pre>
 </notextile>
-      
+
 If everything is set up correctly, you will be able to import the arvados SDK.
 
 notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">import arvados</span></code></pre>
@@ -248,7 +252,7 @@ After the jobs have completed, check output file sizes.
   job_uuid = job[collection_uuid]['uuid']
   job_output = arvados.api('v1').jobs().get(uuid=job_uuid).execute()['output']
   output_files = arvados.api('v1').collections().get(uuid=job_output).execute()['files']
-  # Test the output size.  If greater than zero, that means 'grep' found the variant 
+  # Test the output size.  If greater than zero, that means 'grep' found the variant
   if output_files[0][2] > 0:
     print("%s has variant rs1126809" % (pgpid[collection_uuid]))
   else:
index 2cc71e68a8e749e76d558b6c98e9e2c26a23f2db..8c65cf7acf1b6dd7bc02660464be06ea07cc3daa 100644 (file)
@@ -11,6 +11,9 @@ import (
        "fmt"
        "io"
        "io/ioutil"
+       "path/filepath"
+       "regexp"
+       "runtime"
        "sort"
        "strings"
 )
@@ -25,6 +28,14 @@ func (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, std
        return f(prog, args, stdin, stdout, stderr)
 }
 
+type Version string
+
+func (v Version) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
+       fmt.Fprintf(stdout, "%s %s (%s)\n", prog, v, runtime.Version())
+       return 0
+}
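+
+// For example, a caller might expose the same Version under several
+// aliases (a hypothetical sketch; the real registrations live in the
+// cmd/arvados-client and cmd/arvados-server packages):
+//
+//	var handler = Multi{
+//		"version":   Version("1.2.0"),
+//		"-version":  Version("1.2.0"),
+//		"--version": Version("1.2.0"),
+//	}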
+
 // Multi is a Handler that looks up its first argument in a map, and
 // invokes the resulting Handler with the remaining args.
 //
@@ -41,17 +52,21 @@ func (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, std
 type Multi map[string]Handler
 
 func (m Multi) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
-       if len(args) < 1 {
+       _, basename := filepath.Split(prog)
+       basename = strings.TrimPrefix(basename, "arvados-")
+       basename = strings.TrimPrefix(basename, "crunch-")
+       if cmd, ok := m[basename]; ok {
+               return cmd.RunCommand(prog, args, stdin, stdout, stderr)
+       } else if len(args) < 1 {
                fmt.Fprintf(stderr, "usage: %s command [args]\n", prog)
                m.Usage(stderr)
                return 2
-       }
-       if cmd, ok := m[args[0]]; !ok {
-               fmt.Fprintf(stderr, "unrecognized command %q\n", args[0])
+       } else if cmd, ok = m[args[0]]; ok {
+               return cmd.RunCommand(prog+" "+args[0], args[1:], stdin, stdout, stderr)
+       } else {
+               fmt.Fprintf(stderr, "%s: unrecognized command %q\n", prog, args[0])
                m.Usage(stderr)
                return 2
-       } else {
-               return cmd.RunCommand(prog+" "+args[0], args[1:], stdin, stdout, stderr)
        }
 }
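 
 // Example (a hedged sketch, not part of this file): given
 //
 //	m := Multi{"controller": controllerCmd}
 //
 // invoking the program through a symlink named "arvados-controller"
 // dispatches directly to controllerCmd via the basename check above,
 // while "arvados-server controller ..." takes the args[0] branch.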
 
index d8a4861572341046dab556ade76a0cb4f2ffe342..2fc50985f194c8caa2e7ba332ce7d94bfb7189c9 100644 (file)
@@ -42,6 +42,16 @@ func (s *CmdSuite) TestHello(c *check.C) {
        c.Check(stderr.String(), check.Equals, "")
 }
 
+func (s *CmdSuite) TestHelloViaProg(c *check.C) {
+       defer cmdtest.LeakCheck(c)()
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := testCmd.RunCommand("/usr/local/bin/echo", []string{"hello", "world"}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(exited, check.Equals, 0)
+       c.Check(stdout.String(), check.Equals, "hello world\n")
+       c.Check(stderr.String(), check.Equals, "")
+}
+
 func (s *CmdSuite) TestUsage(c *check.C) {
        defer cmdtest.LeakCheck(c)()
        stdout := bytes.NewBuffer(nil)
@@ -49,7 +59,7 @@ func (s *CmdSuite) TestUsage(c *check.C) {
        exited := testCmd.RunCommand("prog", []string{"nosuchcommand", "hi"}, bytes.NewReader(nil), stdout, stderr)
        c.Check(exited, check.Equals, 2)
        c.Check(stdout.String(), check.Equals, "")
-       c.Check(stderr.String(), check.Matches, `(?ms)^unrecognized command "nosuchcommand"\n.*echo.*\n`)
+       c.Check(stderr.String(), check.Matches, `(?ms)^prog: unrecognized command "nosuchcommand"\n.*echo.*\n`)
 }
 
 func (s *CmdSuite) TestSubcommandToFront(c *check.C) {
diff --git a/lib/controller/cmd.go b/lib/controller/cmd.go
new file mode 100644 (file)
index 0000000..94eb258
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/service"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
+
+func newHandler(cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+       return &Handler{Cluster: cluster, NodeProfile: np}
+}
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
new file mode 100644 (file)
index 0000000..a1a69a8
--- /dev/null
@@ -0,0 +1,164 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+       "io"
+       "net"
+       "net/http"
+       "net/url"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+type Handler struct {
+       Cluster     *arvados.Cluster
+       NodeProfile *arvados.NodeProfile
+
+       setupOnce    sync.Once
+       handlerStack http.Handler
+       proxyClient  *arvados.Client
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       h.setupOnce.Do(h.setup)
+       if req.Method != "GET" && req.Method != "HEAD" {
+               // http.ServeMux returns 301 with a cleaned path if
+               // the incoming request has a double slash. Some
+               // clients (including the Go standard library) change
+               // the request method to GET when following a 301
+               // redirect if the original method was not HEAD
+               // (RFC7231 6.4.2 specifically allows this in the case
+               // of POST). Thus "POST //foo" gets misdirected to
+               // "GET /foo". To avoid this, eliminate double slashes
+               // before passing the request to ServeMux.
+               for strings.Contains(req.URL.Path, "//") {
+                       req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
+               }
+       }
+       h.handlerStack.ServeHTTP(w, req)
+}
+
+func (h *Handler) CheckHealth() error {
+       h.setupOnce.Do(h.setup)
+       _, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       return err
+}
+
+func (h *Handler) setup() {
+       mux := http.NewServeMux()
+       mux.Handle("/_health/", &health.Handler{
+               Token:  h.Cluster.ManagementToken,
+               Prefix: "/_health/",
+       })
+       mux.Handle("/", http.HandlerFunc(h.proxyRailsAPI))
+       h.handlerStack = mux
+
+       // Changing the global isn't the right way to do this, but a
+       // proper solution would conflict with an impending 13493
+       // merge anyway, so this will do for now.
+       arvados.InsecureHTTPClient.CheckRedirect = func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }
+}
+
+// headers that shouldn't be forwarded when proxying. See
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
+var dropHeaders = map[string]bool{
+       "Connection":          true,
+       "Keep-Alive":          true,
+       "Proxy-Authenticate":  true,
+       "Proxy-Authorization": true,
+       "TE":                true,
+       "Trailer":           true,
+       "Transfer-Encoding": true,
+       "Upgrade":           true,
+}
+
+func (h *Handler) proxyRailsAPI(w http.ResponseWriter, reqIn *http.Request) {
+       urlOut, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       urlOut = &url.URL{
+               Scheme:   urlOut.Scheme,
+               Host:     urlOut.Host,
+               Path:     reqIn.URL.Path,
+               RawPath:  reqIn.URL.RawPath,
+               RawQuery: reqIn.URL.RawQuery,
+       }
+
+       // Copy headers from incoming request, then add/replace proxy
+       // headers like Via and X-Forwarded-For.
+       hdrOut := http.Header{}
+       for k, v := range reqIn.Header {
+               if !dropHeaders[k] {
+                       hdrOut[k] = v
+               }
+       }
+       xff := reqIn.RemoteAddr
+       if xffIn := reqIn.Header.Get("X-Forwarded-For"); xffIn != "" {
+               xff = xffIn + "," + xff
+       }
+       hdrOut.Set("X-Forwarded-For", xff)
+       if hdrOut.Get("X-Forwarded-Proto") == "" {
+               hdrOut.Set("X-Forwarded-Proto", reqIn.URL.Scheme)
+       }
+       hdrOut.Add("Via", reqIn.Proto+" arvados-controller")
+
+       ctx := reqIn.Context()
+       if timeout := h.Cluster.HTTPRequestTimeout; timeout > 0 {
+               var cancel context.CancelFunc
+               ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Duration(timeout)))
+               defer cancel()
+       }
+
+       reqOut := (&http.Request{
+               Method: reqIn.Method,
+               URL:    urlOut,
+               Host:   reqIn.Host,
+               Header: hdrOut,
+               Body:   reqIn.Body,
+       }).WithContext(ctx)
+       resp, err := arvados.InsecureHTTPClient.Do(reqOut)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       for k, v := range resp.Header {
+               for _, v := range v {
+                       w.Header().Add(k, v)
+               }
+       }
+       w.WriteHeader(resp.StatusCode)
+       n, err := io.Copy(w, resp.Body)
+       if err != nil {
+               httpserver.Logger(reqIn).WithError(err).WithField("bytesCopied", n).Error("error copying response body")
+       }
+}
+
+// For now, findRailsAPI always uses the rails API running on this
+// node.
+func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, error) {
+       hostport := np.RailsAPI.Listen
+       if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
+               // ":12345" => connect to indicated port on localhost
+               hostport = "localhost" + hostport
+       } else if _, _, err := net.SplitHostPort(hostport); err == nil {
+               // "[::1]:12345" => connect to indicated address & port
+       } else {
+               return nil, err
+       }
+       proto := "http"
+       if np.RailsAPI.TLS {
+               proto = "https"
+       }
+       return url.Parse(proto + "://" + hostport)
+}
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
new file mode 100644 (file)
index 0000000..eb947ea
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "os"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&HandlerSuite{})
+
+type HandlerSuite struct {
+       cluster *arvados.Cluster
+       handler http.Handler
+}
+
+func (s *HandlerSuite) SetUpTest(c *check.C) {
+       s.cluster = &arvados.Cluster{
+               ClusterID: "zzzzz",
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": {
+                               Controller: arvados.SystemServiceInstance{Listen: ":"},
+                               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true},
+                       },
+               },
+       }
+       node := s.cluster.NodeProfiles["*"]
+       s.handler = newHandler(s.cluster, &node)
+}
+
+func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
+       req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var dd arvados.DiscoveryDocument
+       err := json.Unmarshal(resp.Body.Bytes(), &dd)
+       c.Check(err, check.IsNil)
+       c.Check(dd.BlobSignatureTTL, check.Not(check.Equals), int64(0))
+       c.Check(dd.BlobSignatureTTL > 0, check.Equals, true)
+       c.Check(len(dd.Resources), check.Not(check.Equals), 0)
+       c.Check(len(dd.Schemas), check.Not(check.Equals), 0)
+}
+
+func (s *HandlerSuite) TestRequestTimeout(c *check.C) {
+       s.cluster.HTTPRequestTimeout = arvados.Duration(time.Nanosecond)
+       req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusInternalServerError)
+       var jresp httpserver.ErrorResponse
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Assert(len(jresp.Errors), check.Equals, 1)
+       c.Check(jresp.Errors[0], check.Matches, `.*context deadline exceeded`)
+}
+
+func (s *HandlerSuite) TestProxyWithoutToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+       jresp := map[string]interface{}{}
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Check(jresp["errors"], check.FitsTypeOf, []interface{}{})
+}
+
+func (s *HandlerSuite) TestProxyWithToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var u arvados.User
+       err := json.Unmarshal(resp.Body.Bytes(), &u)
+       c.Check(err, check.IsNil)
+       c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *HandlerSuite) TestProxyWithTokenInRequestBody(c *check.C) {
+       req := httptest.NewRequest("POST", "/arvados/v1/users/current", strings.NewReader(url.Values{
+               "_method":   {"GET"},
+               "api_token": {arvadostest.ActiveToken},
+       }.Encode()))
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var u arvados.User
+       err := json.Unmarshal(resp.Body.Bytes(), &u)
+       c.Check(err, check.IsNil)
+       c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *HandlerSuite) TestProxyNotFound(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/xyzzy", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+       jresp := map[string]interface{}{}
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Check(jresp["errors"], check.FitsTypeOf, []interface{}{})
+}
+
+func (s *HandlerSuite) TestProxyRedirect(c *check.C) {
+       req := httptest.NewRequest("GET", "https://example.org:1234/login?return_to=foo", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusFound)
+       c.Check(resp.Header().Get("Location"), check.Matches, `https://example\.org:1234/auth/joshid\?return_to=foo&?`)
+}
index 2ca405060390c65df2f961f7c7a83e5a278d0687..1c36d6cf5bb770cb447b6f7f177d39c5ff7ef469 100644 (file)
@@ -8,6 +8,7 @@ import (
        "errors"
        "log"
        "os/exec"
+       "sort"
        "strings"
        "time"
 
@@ -15,11 +16,17 @@ import (
 )
 
 var (
-       ErrConstraintsNotSatisfiable  = errors.New("constraints not satisfiable by any configured instance type")
        ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
        discountConfiguredRAMPercent  = 5
 )
 
+// ConstraintsNotSatisfiableError includes a list of available instance types
+// to be reported back to the user.
+type ConstraintsNotSatisfiableError struct {
+       error
+       AvailableTypes []arvados.InstanceType
+}
+
 // ChooseInstanceType returns the cheapest available
 // arvados.InstanceType big enough to run ctr.
 func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
@@ -40,20 +47,35 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
        needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
        needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
 
-       err = ErrConstraintsNotSatisfiable
+       ok := false
        for _, it := range cc.InstanceTypes {
                switch {
-               case err == nil && it.Price > best.Price:
-               case it.Scratch < needScratch:
-               case it.RAM < needRAM:
+               case ok && it.Price > best.Price:
+               case int64(it.Scratch) < needScratch:
+               case int64(it.RAM) < needRAM:
                case it.VCPUs < needVCPUs:
+               case it.Preemptible != ctr.SchedulingParameters.Preemptible:
                case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs):
                        // Equal price, but worse specs
                default:
                        // Lower price || (same price && better specs)
                        best = it
-                       err = nil
+                       ok = true
+               }
+       }
+       if !ok {
+               availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
+               for _, t := range cc.InstanceTypes {
+                       availableTypes = append(availableTypes, t)
+               }
+               sort.Slice(availableTypes, func(a, b int) bool {
+                       return availableTypes[a].Price < availableTypes[b].Price
+               })
+               err = ConstraintsNotSatisfiableError{
+                       errors.New("constraints not satisfiable by any configured instance type"),
+                       availableTypes,
                }
+               return
        }
        return
 }
index 0c02a0e3e1be45bfeb6b2371287a4ce664de1d98..91c6bb1049fb381d9070e747b1f076eec2f95dbc 100644 (file)
@@ -11,7 +11,7 @@ import (
 
 var _ = check.Suite(&NodeSizeSuite{})
 
-const GiB = int64(1 << 30)
+const GiB = arvados.ByteSize(1 << 30)
 
 type NodeSizeSuite struct{}
 
@@ -27,12 +27,12 @@ func (*NodeSizeSuite) TestChooseNotConfigured(c *check.C) {
 
 func (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {
        checkUnsatisfiable := func(ctr *arvados.Container) {
-               _, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: []arvados.InstanceType{
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: "small1"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: "small2"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: "small4", Scratch: GiB},
+               _, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: map[string]arvados.InstanceType{
+                       "small1": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: "small1"},
+                       "small2": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: "small2"},
+                       "small4": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: "small4", Scratch: GiB},
                }}, ctr)
-               c.Check(err, check.Equals, ErrConstraintsNotSatisfiable)
+               c.Check(err, check.FitsTypeOf, ConstraintsNotSatisfiableError{})
        }
 
        for _, rc := range []arvados.RuntimeConstraints{
@@ -43,40 +43,40 @@ func (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {
                checkUnsatisfiable(&arvados.Container{RuntimeConstraints: rc})
        }
        checkUnsatisfiable(&arvados.Container{
-               Mounts:             map[string]arvados.Mount{"/tmp": {Kind: "tmp", Capacity: 2 * GiB}},
+               Mounts:             map[string]arvados.Mount{"/tmp": {Kind: "tmp", Capacity: int64(2 * GiB)}},
                RuntimeConstraints: arvados.RuntimeConstraints{RAM: 12345, VCPUs: 1},
        })
 }
 
 func (*NodeSizeSuite) TestChoose(c *check.C) {
-       for _, menu := range [][]arvados.InstanceType{
+       for _, menu := range []map[string]arvados.InstanceType{
                {
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "best":   {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
                },
                {
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
-                       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
                },
                {
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
-                       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
                },
                {
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: "small"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: "nearly"},
-                       {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: "small"},
+                       "nearly": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: "nearly"},
+                       "best":   {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
                },
        } {
                best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
                        Mounts: map[string]arvados.Mount{
-                               "/tmp": {Kind: "tmp", Capacity: 2 * GiB},
+                               "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
                        },
                        RuntimeConstraints: arvados.RuntimeConstraints{
                                VCPUs:        2,
@@ -91,3 +91,31 @@ func (*NodeSizeSuite) TestChoose(c *check.C) {
                c.Check(best.Scratch >= 2*GiB, check.Equals, true)
        }
 }
+
+func (*NodeSizeSuite) TestChoosePreemptable(c *check.C) {
+       menu := map[string]arvados.InstanceType{
+               "costly":      {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: "costly"},
+               "almost best": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "almost best"},
+               "best":        {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Preemptible: true, Name: "best"},
+               "small":       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Preemptible: true, Name: "small"},
+       }
+       best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
+               Mounts: map[string]arvados.Mount{
+                       "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+               },
+               RuntimeConstraints: arvados.RuntimeConstraints{
+                       VCPUs:        2,
+                       RAM:          987654321,
+                       KeepCacheRAM: 123456789,
+               },
+               SchedulingParameters: arvados.SchedulingParameters{
+                       Preemptible: true,
+               },
+       })
+       c.Check(err, check.IsNil)
+       c.Check(best.Name, check.Equals, "best")
+       c.Check(best.RAM >= 1234567890, check.Equals, true)
+       c.Check(best.VCPUs >= 2, check.Equals, true)
+       c.Check(best.Scratch >= 2*GiB, check.Equals, true)
+       c.Check(best.Preemptible, check.Equals, true)
+}
diff --git a/lib/service/cmd.go b/lib/service/cmd.go
new file mode 100644 (file)
index 0000000..4584939
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package service provides a cmd.Handler that brings up a system service.
+package service
+
+import (
+       "flag"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/Sirupsen/logrus"
+       "github.com/coreos/go-systemd/daemon"
+)
+
+type Handler interface {
+       http.Handler
+       CheckHealth() error
+}
+
+type NewHandlerFunc func(*arvados.Cluster, *arvados.NodeProfile) Handler
+
+type command struct {
+       newHandler NewHandlerFunc
+       svcName    arvados.ServiceName
+}
+
+// Command returns a cmd.Handler that loads site config, calls
+// newHandler with the current cluster and node configs, and brings up
+// an http server with the returned handler.
+//
+// The handler is wrapped with server middleware (adding X-Request-ID
+// headers, logging requests/responses, etc).
+func Command(svcName arvados.ServiceName, newHandler NewHandlerFunc) cmd.Handler {
+       return &command{
+               newHandler: newHandler,
+               svcName:    svcName,
+       }
+}
+
+func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       log := logrus.New()
+       log.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: rfc3339NanoFixed,
+       }
+       log.Out = stderr
+
+       var err error
+       defer func() {
+               if err != nil {
+                       log.WithError(err).Info("exiting")
+               }
+       }()
+       flags := flag.NewFlagSet("", flag.ContinueOnError)
+       flags.SetOutput(stderr)
+       configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+       nodeProfile := flags.String("node-profile", "", "`Name` of NodeProfiles config entry to use (if blank, use $ARVADOS_NODE_PROFILE or hostname reported by OS)")
+       err = flags.Parse(args)
+       if err == flag.ErrHelp {
+               err = nil
+               return 0
+       } else if err != nil {
+               return 2
+       }
+       cfg, err := arvados.GetConfig(*configFile)
+       if err != nil {
+               return 1
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return 1
+       }
+       profileName := *nodeProfile
+       if profileName == "" {
+               profileName = os.Getenv("ARVADOS_NODE_PROFILE")
+       }
+       profile, err := cluster.GetNodeProfile(profileName)
+       if err != nil {
+               return 1
+       }
+       listen := profile.ServicePorts()[c.svcName]
+       if listen == "" {
+               err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
+               return 1
+       }
+       handler := c.newHandler(cluster, profile)
+       if err = handler.CheckHealth(); err != nil {
+               return 1
+       }
+       srv := &httpserver.Server{
+               Server: http.Server{
+                       Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+               },
+               Addr: listen,
+       }
+       err = srv.Start()
+       if err != nil {
+               return 1
+       }
+       log.WithFields(logrus.Fields{
+               "Listen":  srv.Addr,
+               "Service": c.svcName,
+       }).Info("listening")
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.WithError(err).Errorf("error notifying init daemon")
+       }
+       err = srv.Wait()
+       if err != nil {
+               return 1
+       }
+       return 0
+}
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
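+
+// Example invocation (a sketch, assuming the default config path; the
+// controller registers this handler through cmd/arvados-server):
+//
+//	arvados-server controller -config /etc/arvados/config.yml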
index f2cae86f546a41b632a7e11c534b6affcc5cddd4..1cc676875a49f4b6c0783ea8223927d06a4e232a 100644 (file)
@@ -1,6 +1,5 @@
 # Generated by roxygen2: do not edit by hand
 
-S3method(print,Arvados)
 S3method(print,ArvadosFile)
 S3method(print,Collection)
 S3method(print,Subcollection)
@@ -8,3 +7,5 @@ export(Arvados)
 export(ArvadosFile)
 export(Collection)
 export(Subcollection)
+export(generateAPI)
+export(listAll)
index 8b0e92b2374762133cae5953d32b3531853c3088..0ec2d115295749067ceb4ee105245aad73df149f 100644 (file)
-source("./R/RESTService.R")
-source("./R/HttpRequest.R")
-source("./R/HttpParser.R")
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
 
-#' Arvados
+#' users.get
+#' 
+#' users.get is a method defined in Arvados class.
+#' 
+#' @usage arv$users.get(uuid)
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.get
+NULL
+
+#' users.create
+#' 
+#' users.create is a method defined in Arvados class.
+#' 
+#' @usage arv$users.create(user, ensure_unique_name = "false")
+#' @param user User object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return User object.
+#' @name users.create
+NULL
+
+#' users.update
+#' 
+#' users.update is a method defined in Arvados class.
+#' 
+#' @usage arv$users.update(user, uuid)
+#' @param user User object.
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.update
+NULL
+
+#' users.delete
+#' 
+#' users.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$users.delete(uuid)
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.delete
+NULL
+
+#' users.current
+#' 
+#' users.current is a method defined in Arvados class.
+#' 
+#' @usage arv$users.current(NULL)
+#' @return User object.
+#' @name users.current
+NULL
+
+#' users.system
+#' 
+#' users.system is a method defined in Arvados class.
+#' 
+#' @usage arv$users.system(NULL)
+#' @return User object.
+#' @name users.system
+NULL
+
+#' users.activate
+#' 
+#' users.activate is a method defined in Arvados class.
+#' 
+#' @usage arv$users.activate(uuid)
+#' @param uuid 
+#' @return User object.
+#' @name users.activate
+NULL
+
+#' users.setup
+#' 
+#' users.setup is a method defined in Arvados class.
+#' 
+#' @usage arv$users.setup(user = NULL, openid_prefix = NULL,
+#'     repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+#' @param user 
+#' @param openid_prefix 
+#' @param repo_name 
+#' @param vm_uuid 
+#' @param send_notification_email 
+#' @return User object.
+#' @name users.setup
+NULL
+
+#' users.unsetup
+#' 
+#' users.unsetup is a method defined in Arvados class.
+#' 
+#' @usage arv$users.unsetup(uuid)
+#' @param uuid 
+#' @return User object.
+#' @name users.unsetup
+NULL
+
+#' users.update_uuid
+#' 
+#' users.update_uuid is a method defined in Arvados class.
+#' 
+#' @usage arv$users.update_uuid(uuid, new_uuid)
+#' @param uuid 
+#' @param new_uuid 
+#' @return User object.
+#' @name users.update_uuid
+NULL
+
+#' users.list
+#' 
+#' users.list is a method defined in Arvados class.
+#' 
+#' @usage arv$users.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return UserList object.
+#' @name users.list
+NULL
+
+#' api_client_authorizations.get
+#' 
+#' api_client_authorizations.get is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.get(uuid)
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.get
+NULL
+
+#' api_client_authorizations.create
+#' 
+#' api_client_authorizations.create is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.create(apiclientauthorization,
+#'     ensure_unique_name = "false")
+#' @param apiClientAuthorization ApiClientAuthorization object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.create
+NULL
+
+#' api_client_authorizations.update
+#' 
+#' api_client_authorizations.update is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.update(apiclientauthorization,
+#'     uuid)
+#' @param apiClientAuthorization ApiClientAuthorization object.
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.update
+NULL
+
+#' api_client_authorizations.delete
+#' 
+#' api_client_authorizations.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.delete(uuid)
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.delete
+NULL
+
+#' api_client_authorizations.create_system_auth
+#' 
+#' api_client_authorizations.create_system_auth is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
+#'     scopes = NULL)
+#' @param api_client_id 
+#' @param scopes 
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.create_system_auth
+NULL
+
+#' api_client_authorizations.current
+#' 
+#' api_client_authorizations.current is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.current(NULL)
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.current
+NULL
+
+#' api_client_authorizations.list
+#' 
+#' api_client_authorizations.list is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ApiClientAuthorizationList object.
+#' @name api_client_authorizations.list
+NULL
+
+#' containers.get
+#' 
+#' containers.get is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.get(uuid)
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.get
+NULL
+
+#' containers.create
+#' 
+#' containers.create is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.create(container,
+#'     ensure_unique_name = "false")
+#' @param container Container object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Container object.
+#' @name containers.create
+NULL
+
+#' containers.update
+#' 
+#' containers.update is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.update(container,
+#'     uuid)
+#' @param container Container object.
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.update
+NULL
+
+#' containers.delete
+#' 
+#' containers.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.delete(uuid)
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.delete
+NULL
+
+#' containers.auth
+#' 
+#' containers.auth is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.auth(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.auth
+NULL
+
+#' containers.lock
+#' 
+#' containers.lock is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.lock(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.lock
+NULL
+
+#' containers.unlock
+#' 
+#' containers.unlock is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.unlock(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.unlock
+NULL
+
+#' containers.secret_mounts
+#' 
+#' containers.secret_mounts is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.secret_mounts(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.secret_mounts
+NULL
+
+#' containers.current
+#' 
+#' containers.current is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.current(NULL)
+#' @return Container object.
+#' @name containers.current
+NULL
+
+#' containers.list
+#' 
+#' containers.list is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ContainerList object.
+#' @name containers.list
+NULL
+
+#' api_clients.get
+#' 
+#' api_clients.get is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.get(uuid)
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.get
+NULL
+
+#' api_clients.create
+#' 
+#' api_clients.create is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.create(apiclient,
+#'     ensure_unique_name = "false")
+#' @param apiClient ApiClient object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ApiClient object.
+#' @name api_clients.create
+NULL
+
+#' api_clients.update
+#' 
+#' api_clients.update is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.update(apiclient,
+#'     uuid)
+#' @param apiClient ApiClient object.
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.update
+NULL
+
+#' api_clients.delete
+#' 
+#' api_clients.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.delete(uuid)
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.delete
+NULL
+
+#' api_clients.list
+#' 
+#' api_clients.list is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ApiClientList object.
+#' @name api_clients.list
+NULL
+
+#' authorized_keys.get
+#' 
+#' authorized_keys.get is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.get(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.get
+NULL
+
+#' authorized_keys.create
+#' 
+#' authorized_keys.create is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.create(authorizedkey,
+#'     ensure_unique_name = "false")
+#' @param authorizedKey AuthorizedKey object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.create
+NULL
+
+#' authorized_keys.update
+#' 
+#' authorized_keys.update is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.update(authorizedkey,
+#'     uuid)
+#' @param authorizedKey AuthorizedKey object.
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.update
+NULL
+
+#' authorized_keys.delete
+#' 
+#' authorized_keys.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.delete(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.delete
+NULL
+
+#' authorized_keys.list
+#' 
+#' authorized_keys.list is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return AuthorizedKeyList object.
+#' @name authorized_keys.list
+NULL
+
+#' container_requests.get
+#' 
+#' container_requests.get is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.get(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.get
+NULL
+
+#' container_requests.create
+#' 
+#' container_requests.create is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.create(containerrequest,
+#'     ensure_unique_name = "false")
+#' @param containerRequest ContainerRequest object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ContainerRequest object.
+#' @name container_requests.create
+NULL
+
+#' container_requests.update
+#' 
+#' container_requests.update is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.update(containerrequest,
+#'     uuid)
+#' @param containerRequest ContainerRequest object.
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.update
+NULL
+
+#' container_requests.delete
+#' 
+#' container_requests.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.delete(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.delete
+NULL
+
+#' container_requests.list
+#' 
+#' container_requests.list is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ContainerRequestList object.
+#' @name container_requests.list
+NULL
+
+#' collections.get
+#' 
+#' collections.get is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.get(uuid)
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.get
+NULL
+
+#' collections.create
+#' 
+#' collections.create is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.create(collection,
+#'     ensure_unique_name = "false")
+#' @param collection Collection object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Collection object.
+#' @name collections.create
+NULL
+
+#' collections.update
+#' 
+#' collections.update is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.update(collection,
+#'     uuid)
+#' @param collection Collection object.
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.update
+NULL
+
+#' collections.delete
+#' 
+#' collections.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.delete(uuid)
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.delete
+NULL
+
+#' collections.provenance
+#' 
+#' collections.provenance is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.provenance(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.provenance
+NULL
+
+#' collections.used_by
+#' 
+#' collections.used_by is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.used_by(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.used_by
+NULL
+
+#' collections.trash
+#' 
+#' collections.trash is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.trash(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.trash
+NULL
+
+#' collections.untrash
+#' 
+#' collections.untrash is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.untrash(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.untrash
+NULL
+
+#' collections.list
+#' 
+#' collections.list is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include collections whose is_trashed attribute is true.
+#' @return CollectionList object.
+#' @name collections.list
+NULL
+
+#' humans.get
+#' 
+#' humans.get is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.get(uuid)
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.get
+NULL
+
+#' humans.create
+#' 
+#' humans.create is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.create(human, ensure_unique_name = "false")
+#' @param human Human object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Human object.
+#' @name humans.create
+NULL
+
+#' humans.update
+#' 
+#' humans.update is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.update(human, uuid)
+#' @param human Human object.
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.update
+NULL
+
+#' humans.delete
+#' 
+#' humans.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.delete(uuid)
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.delete
+NULL
+
+#' humans.list
+#' 
+#' humans.list is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return HumanList object.
+#' @name humans.list
+NULL
+
+#' job_tasks.get
+#' 
+#' job_tasks.get is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.get(uuid)
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.get
+NULL
+
+#' job_tasks.create
+#' 
+#' job_tasks.create is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.create(jobtask, ensure_unique_name = "false")
+#' @param jobTask JobTask object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return JobTask object.
+#' @name job_tasks.create
+NULL
+
+#' job_tasks.update
+#' 
+#' job_tasks.update is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.update(jobtask, uuid)
+#' @param jobTask JobTask object.
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.update
+NULL
+
+#' job_tasks.delete
+#' 
+#' job_tasks.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.delete(uuid)
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.delete
+NULL
+
+#' job_tasks.list
+#' 
+#' job_tasks.list is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return JobTaskList object.
+#' @name job_tasks.list
+NULL
+
+#' links.get
+#' 
+#' links.get is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.get
+NULL
+
+#' links.create
+#' 
+#' links.create is a method defined in Arvados class.
+#' 
+#' @usage arv$links.create(link, ensure_unique_name = "false")
+#' @param link Link object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Link object.
+#' @name links.create
+NULL
+
+#' links.update
+#' 
+#' links.update is a method defined in Arvados class.
+#' 
+#' @usage arv$links.update(link, uuid)
+#' @param link Link object.
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.update
+NULL
+
+#' links.delete
+#' 
+#' links.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$links.delete(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.delete
+NULL
+
+#' links.list
+#' 
+#' links.list is a method defined in Arvados class.
+#' 
+#' @usage arv$links.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return LinkList object.
+#' @name links.list
+NULL
+
+#' links.get_permissions
+#' 
+#' links.get_permissions is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get_permissions(uuid)
+#' @param uuid 
+#' @return Link object.
+#' @name links.get_permissions
+NULL
+
+#' jobs.get
+#' 
+#' jobs.get is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.get(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.get
+NULL
+
+#' jobs.create
+#' 
+#' jobs.create is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.create(job, ensure_unique_name = "false",
+#'     find_or_create = "false", filters = NULL,
+#'     minimum_script_version = NULL, exclude_script_versions = NULL)
+#' @param job Job object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @param find_or_create 
+#' @param filters 
+#' @param minimum_script_version 
+#' @param exclude_script_versions 
+#' @return Job object.
+#' @name jobs.create
+NULL
+
+#' jobs.update
+#' 
+#' jobs.update is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.update(job, uuid)
+#' @param job Job object.
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.update
+NULL
+
+#' jobs.delete
+#' 
+#' jobs.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.delete(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.delete
+NULL
+
+#' jobs.queue
+#' 
+#' jobs.queue is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.queue(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return Job object.
+#' @name jobs.queue
+NULL
+
+#' jobs.queue_size
+#' 
+#' jobs.queue_size is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.queue_size(NULL)
+#' @return Job object.
+#' @name jobs.queue_size
+NULL
+
+#' jobs.cancel
+#' 
+#' jobs.cancel is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.cancel(uuid)
+#' @param uuid 
+#' @return Job object.
+#' @name jobs.cancel
+NULL
+
+#' jobs.lock
+#' 
+#' jobs.lock is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.lock(uuid)
+#' @param uuid 
+#' @return Job object.
+#' @name jobs.lock
+NULL
+
+#' jobs.list
+#' 
+#' jobs.list is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.list(filters = NULL, where = NULL,
+#'     order = NULL, select = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return JobList object.
+#' @name jobs.list
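+#' @examples
+#' \dontrun{
+#' # Hypothetical sketch: fetch the ten most recently created jobs.
+#' recentJobs <- arv$jobs.list(order = list("created_at desc"),
+#'                             limit = "10")
+#' }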
+NULL
+
+#' keep_disks.get
+#' 
+#' keep_disks.get is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.get(uuid)
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.get
+NULL
+
+#' keep_disks.create
+#' 
+#' keep_disks.create is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.create(keepdisk,
+#'     ensure_unique_name = "false")
+#' @param keepdisk KeepDisk object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return KeepDisk object.
+#' @name keep_disks.create
+NULL
+
+#' keep_disks.update
+#' 
+#' keep_disks.update is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.update(keepdisk,
+#'     uuid)
+#' @param keepdisk KeepDisk object.
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.update
+NULL
+
+#' keep_disks.delete
+#' 
+#' keep_disks.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.delete(uuid)
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.delete
+NULL
+
+#' keep_disks.ping
+#' 
+#' keep_disks.ping is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.ping(uuid = NULL,
+#'     ping_secret, node_uuid = NULL, filesystem_uuid = NULL,
+#'     service_host = NULL, service_port, service_ssl_flag)
+#' @param uuid 
+#' @param ping_secret 
+#' @param node_uuid 
+#' @param filesystem_uuid 
+#' @param service_host 
+#' @param service_port 
+#' @param service_ssl_flag 
+#' @return KeepDisk object.
+#' @name keep_disks.ping
+NULL
+
+#' keep_disks.list
+#' 
+#' keep_disks.list is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return KeepDiskList object.
+#' @name keep_disks.list
+NULL
+
+#' keep_services.get
+#' 
+#' keep_services.get is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.get(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.get
+NULL
+
+#' keep_services.create
+#' 
+#' keep_services.create is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.create(keepservice,
+#'     ensure_unique_name = "false")
+#' @param keepservice KeepService object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return KeepService object.
+#' @name keep_services.create
+NULL
+
+#' keep_services.update
+#' 
+#' keep_services.update is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.update(keepservice,
+#'     uuid)
+#' @param keepservice KeepService object.
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.update
+NULL
+
+#' keep_services.delete
+#' 
+#' keep_services.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.delete(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.delete
+NULL
+
+#' keep_services.accessible
+#' 
+#' keep_services.accessible is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.accessible()
+#' @return KeepService object.
+#' @name keep_services.accessible
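+#' @examples
+#' \dontrun{
+#' # Sketch: keep services the current client can reach.
+#' services <- arv$keep_services.accessible()
+#' }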
+NULL
+
+#' keep_services.list
+#' 
+#' keep_services.list is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return KeepServiceList object.
+#' @name keep_services.list
+NULL
+
+#' pipeline_templates.get
+#' 
+#' pipeline_templates.get is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.get(uuid)
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.get
+NULL
+
+#' pipeline_templates.create
+#' 
+#' pipeline_templates.create is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.create(pipelinetemplate,
+#'     ensure_unique_name = "false")
+#' @param pipelinetemplate PipelineTemplate object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.create
+NULL
+
+#' pipeline_templates.update
+#' 
+#' pipeline_templates.update is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.update(pipelinetemplate,
+#'     uuid)
+#' @param pipelinetemplate PipelineTemplate object.
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.update
+NULL
+
+#' pipeline_templates.delete
+#' 
+#' pipeline_templates.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.delete(uuid)
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.delete
+NULL
+
+#' pipeline_templates.list
+#' 
+#' pipeline_templates.list is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return PipelineTemplateList object.
+#' @name pipeline_templates.list
+NULL
+
+#' pipeline_instances.get
+#' 
+#' pipeline_instances.get is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.get(uuid)
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.get
+NULL
+
+#' pipeline_instances.create
+#' 
+#' pipeline_instances.create is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.create(pipelineinstance,
+#'     ensure_unique_name = "false")
+#' @param pipelineinstance PipelineInstance object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.create
+NULL
+
+#' pipeline_instances.update
+#' 
+#' pipeline_instances.update is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.update(pipelineinstance,
+#'     uuid)
+#' @param pipelineinstance PipelineInstance object.
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.update
+NULL
+
+#' pipeline_instances.delete
+#' 
+#' pipeline_instances.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.delete(uuid)
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.delete
+NULL
+
+#' pipeline_instances.cancel
+#' 
+#' pipeline_instances.cancel is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.cancel(uuid)
+#' @param uuid 
+#' @return PipelineInstance object.
+#' @name pipeline_instances.cancel
+NULL
+
+#' pipeline_instances.list
+#' 
+#' pipeline_instances.list is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return PipelineInstanceList object.
+#' @name pipeline_instances.list
+NULL
+
+#' nodes.get
+#' 
+#' nodes.get is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.get(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.get
+NULL
+
+#' nodes.create
+#' 
+#' nodes.create is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.create(node, ensure_unique_name = "false",
+#'     assign_slot = NULL)
+#' @param node Node object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @param assign_slot Assign slot and hostname.
+#' @return Node object.
+#' @name nodes.create
+NULL
+
+#' nodes.update
+#' 
+#' nodes.update is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.update(node, uuid, assign_slot = NULL)
+#' @param node Node object.
+#' @param uuid The UUID of the Node in question.
+#' @param assign_slot Assign slot and hostname.
+#' @return Node object.
+#' @name nodes.update
+NULL
+
+#' nodes.delete
+#' 
+#' nodes.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.delete(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.delete
+NULL
+
+#' nodes.ping
+#' 
+#' nodes.ping is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.ping(uuid, ping_secret)
+#' @param uuid 
+#' @param ping_secret 
+#' @return Node object.
+#' @name nodes.ping
+NULL
+
+#' nodes.list
+#' 
+#' nodes.list is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return NodeList object.
+#' @name nodes.list
+NULL
+
+#' repositories.get
+#' 
+#' repositories.get is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.get(uuid)
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.get
+NULL
+
+#' repositories.create
+#' 
+#' repositories.create is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.create(repository,
+#'     ensure_unique_name = "false")
+#' @param repository Repository object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Repository object.
+#' @name repositories.create
+NULL
+
+#' repositories.update
+#' 
+#' repositories.update is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.update(repository,
+#'     uuid)
+#' @param repository Repository object.
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.update
+NULL
+
+#' repositories.delete
+#' 
+#' repositories.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.delete(uuid)
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.delete
+NULL
+
+#' repositories.get_all_permissions
+#' 
+#' repositories.get_all_permissions is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.get_all_permissions()
+#' @return Repository object.
+#' @name repositories.get_all_permissions
+NULL
+
+#' repositories.list
+#' 
+#' repositories.list is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return RepositoryList object.
+#' @name repositories.list
+NULL
+
+#' specimens.get
+#' 
+#' specimens.get is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.get(uuid)
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.get
+NULL
+
+#' specimens.create
+#' 
+#' specimens.create is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.create(specimen,
+#'     ensure_unique_name = "false")
+#' @param specimen Specimen object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Specimen object.
+#' @name specimens.create
+NULL
+
+#' specimens.update
+#' 
+#' specimens.update is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.update(specimen,
+#'     uuid)
+#' @param specimen Specimen object.
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.update
+NULL
+
+#' specimens.delete
+#' 
+#' specimens.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.delete(uuid)
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.delete
+NULL
+
+#' specimens.list
+#' 
+#' specimens.list is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return SpecimenList object.
+#' @name specimens.list
+NULL
+
+#' logs.get
+#' 
+#' logs.get is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.get(uuid)
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.get
+NULL
+
+#' logs.create
+#' 
+#' logs.create is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.create(log, ensure_unique_name = "false")
+#' @param log Log object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Log object.
+#' @name logs.create
+NULL
+
+#' logs.update
+#' 
+#' logs.update is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.update(log, uuid)
+#' @param log Log object.
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.update
+NULL
+
+#' logs.delete
+#' 
+#' logs.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.delete(uuid)
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.delete
+NULL
+
+#' logs.list
+#' 
+#' logs.list is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.list(filters = NULL, where = NULL,
+#'     order = NULL, select = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return LogList object.
+#' @name logs.list
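+#' @examples
+#' \dontrun{
+#' # Hypothetical sketch: recent "update" event logs; the event types
+#' # available depend on how your cluster records events.
+#' updateLogs <- arv$logs.list(
+#'     filters = list(list("event_type", "=", "update")),
+#'     limit = "20")
+#' }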
+NULL
+
+#' traits.get
+#' 
+#' traits.get is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.get(uuid)
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.get
+NULL
+
+#' traits.create
+#' 
+#' traits.create is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.create(trait, ensure_unique_name = "false")
+#' @param trait Trait object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Trait object.
+#' @name traits.create
+NULL
+
+#' traits.update
+#' 
+#' traits.update is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.update(trait, uuid)
+#' @param trait Trait object.
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.update
+NULL
+
+#' traits.delete
+#' 
+#' traits.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.delete(uuid)
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.delete
+NULL
+
+#' traits.list
+#' 
+#' traits.list is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return TraitList object.
+#' @name traits.list
+NULL
+
+#' virtual_machines.get
+#' 
+#' virtual_machines.get is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.get(uuid)
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.get
+NULL
+
+#' virtual_machines.create
+#' 
+#' virtual_machines.create is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.create(virtualmachine,
+#'     ensure_unique_name = "false")
+#' @param virtualmachine VirtualMachine object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return VirtualMachine object.
+#' @name virtual_machines.create
+NULL
+
+#' virtual_machines.update
+#' 
+#' virtual_machines.update is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.update(virtualmachine,
+#'     uuid)
+#' @param virtualmachine VirtualMachine object.
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.update
+NULL
+
+#' virtual_machines.delete
+#' 
+#' virtual_machines.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.delete(uuid)
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.delete
+NULL
+
+#' virtual_machines.logins
+#' 
+#' virtual_machines.logins is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.logins(uuid)
+#' @param uuid 
+#' @return VirtualMachine object.
+#' @name virtual_machines.logins
+NULL
+
+#' virtual_machines.get_all_logins
+#' 
+#' virtual_machines.get_all_logins is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.get_all_logins()
+#' @return VirtualMachine object.
+#' @name virtual_machines.get_all_logins
+NULL
+
+#' virtual_machines.list
+#' 
+#' virtual_machines.list is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return VirtualMachineList object.
+#' @name virtual_machines.list
+NULL
+
+#' workflows.get
+#' 
+#' workflows.get is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.get(uuid)
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.get
+NULL
+
+#' workflows.create
+#' 
+#' workflows.create is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.create(workflow,
+#'     ensure_unique_name = "false")
+#' @param workflow Workflow object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Workflow object.
+#' @name workflows.create
+NULL
+
+#' workflows.update
+#' 
+#' workflows.update is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.update(workflow,
+#'     uuid)
+#' @param workflow Workflow object.
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.update
+NULL
+
+#' workflows.delete
+#' 
+#' workflows.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.delete(uuid)
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.delete
+NULL
+
+#' workflows.list
+#' 
+#' workflows.list is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return WorkflowList object.
+#' @name workflows.list
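+#' @examples
+#' \dontrun{
+#' # Sketch: workflows whose name starts with "Demo".
+#' demoWorkflows <- arv$workflows.list(
+#'     filters = list(list("name", "like", "Demo%")))
+#' }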
+NULL
+
+#' groups.get
+#' 
+#' groups.get is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.get(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.get
+NULL
+
+#' groups.create
+#' 
+#' groups.create is a method defined in Arvados class.
 #' 
-#' Arvados class gives users ability to manipulate collections and projects.
+#' @usage arv$groups.create(group, ensure_unique_name = "false")
+#' @param group Group object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Group object.
+#' @name groups.create
+NULL
+
+#' groups.update
+#' 
+#' groups.update is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.update(group, uuid)
+#' @param group Group object.
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.update
+NULL
+
+#' groups.delete
+#' 
+#' groups.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.delete(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.delete
+NULL
+
+#' groups.contents
+#' 
+#' groups.contents is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.contents(filters = NULL,
+#'     where = NULL, order = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact",
+#'     include_trash = NULL, uuid = NULL, recursive = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @param uuid 
+#' @param recursive Include contents from child groups recursively.
+#' @return Group object.
+#' @name groups.contents
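+#' @examples
+#' \dontrun{
+#' # Sketch: recursively list the contents of a project group;
+#' # "xxxxx-j7d0g-xxxxxxxxxxxxxxx" is a placeholder UUID.
+#' contents <- arv$groups.contents(uuid = "xxxxx-j7d0g-xxxxxxxxxxxxxxx",
+#'                                 recursive = TRUE)
+#' }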
+NULL
+
+#' groups.trash
+#' 
+#' groups.trash is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.trash(uuid)
+#' @param uuid 
+#' @return Group object.
+#' @name groups.trash
+NULL
+
+#' groups.untrash
+#' 
+#' groups.untrash is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.untrash(uuid)
+#' @param uuid 
+#' @return Group object.
+#' @name groups.untrash
+NULL
+
+#' groups.list
+#' 
+#' groups.list is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @return GroupList object.
+#' @name groups.list
+NULL
+
+#' user_agreements.get
+#' 
+#' user_agreements.get is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.get(uuid)
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.get
+NULL
+
+#' user_agreements.create
+#' 
+#' user_agreements.create is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.create(useragreement,
+#'     ensure_unique_name = "false")
+#' @param useragreement UserAgreement object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return UserAgreement object.
+#' @name user_agreements.create
+NULL
+
+#' user_agreements.update
+#' 
+#' user_agreements.update is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.update(useragreement,
+#'     uuid)
+#' @param useragreement UserAgreement object.
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.update
+NULL
+
+#' user_agreements.delete
+#' 
+#' user_agreements.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.delete(uuid)
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.delete
+NULL
+
+#' user_agreements.signatures
+#' 
+#' user_agreements.signatures is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.signatures()
+#' @return UserAgreement object.
+#' @name user_agreements.signatures
+NULL
+
+#' user_agreements.sign
+#' 
+#' user_agreements.sign is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.sign()
+#' @return UserAgreement object.
+#' @name user_agreements.sign
+NULL
+
+#' user_agreements.list
+#' 
+#' user_agreements.list is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return UserAgreementList object.
+#' @name user_agreements.list
+NULL
+
+#' user_agreements.new
+#' 
+#' user_agreements.new is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.new()
+#' @return UserAgreement object.
+#' @name user_agreements.new
+NULL
+
+#' projects.get
+#' 
+#' projects.get is equivalent to the groups.get method.
+#' 
+#' @usage arv$projects.get(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.get
+NULL
+
+#' projects.create
+#' 
+#' projects.create wraps the groups.create method, setting the group_class attribute to "project".
+#' 
+#' @usage arv$projects.create(group, ensure_unique_name = "false")
+#' @param group Group object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Group object.
+#' @name projects.create
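+#' @examples
+#' \dontrun{
+#' # Sketch: create a project; the wrapper fills in group_class.
+#' newProject <- arv$projects.create(list(name = "Example project",
+#'                                        description = "Created from R"))
+#' }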
+NULL
+
+#' projects.update
+#' 
+#' projects.update wraps the groups.update method, setting the group_class attribute to "project".
+#' 
+#' @usage arv$projects.update(group, uuid)
+#' @param group Group object.
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.update
+NULL
+
+#' projects.delete
+#' 
+#' projects.delete is equivalent to the groups.delete method.
+#' 
+#' @usage arv$projects.delete(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.delete
+NULL
+
+#' projects.list
 #' 
+#' projects.list wraps the groups.list method, restricting results with a group_class = "project" filter.
+#' 
+#' @usage arv$projects.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @return GroupList object.
+#' @name projects.list
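+#' @examples
+#' \dontrun{
+#' # Sketch: projects whose name starts with "Example".
+#' projects <- arv$projects.list(
+#'     filters = list(list("name", "like", "Example%")))
+#' }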
+NULL
+
+#' Arvados
+#'
+#' The Arvados class gives users the ability to access the Arvados REST API.
+#'
 #' @section Usage:
 #' \preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
 #'
 #' @section Arguments:
 #' \describe{
-#'   \item{authToken}{Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.}
-#'   \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
-#'   \item{numRetries}{Number which specifies how many times to retry failed service requests.}
+#'     \item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable is used.}
+#'     \item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable is used.}
+#'     \item{numRetries}{Number of times to retry failed service requests.}
 #' }
-#' 
+#'
 #' @section Methods:
 #' \describe{
-#'   \item{getToken()}{Returns authentification token currently in use.}
-#'   \item{getHostName()}{Returns host name currently in use.}
-#'   \item{getNumRetries()}{Returns number which specifies how many times to retry failed service requests.}
-#'   \item{setNumRetries(newNumOfRetries)}{Sets number which specifies how many times to retry failed service requests.}
-#'   \item{getCollection(uuid)}{Get collection with specified UUID.}
-#'   \item{listCollections(filters = NULL, limit = 100, offset = 0)}{Returns list of collections based on filters parameter.}
-#'   \item{listAllCollections(filters = NULL)}{Lists all collections, based on filters parameter, even if the number of items is greater than maximum API limit.}
-#'   \item{deleteCollection(uuid)}{Deletes collection with specified UUID.}
-#'   \item{updateCollection(uuid, newContent)}{Updates collection with specified UUID.}
-#'   \item{createCollection(content)}{Creates new collection.}
-#'   \item{getProject(uuid)}{Get project with specified UUID.}
-#'   \item{listProjects(filters = NULL, limit = 100, offset = 0)}{Returns list of projects based on filters parameter.}
-#'   \item{listAllProjects(filters = NULL)}{Lists all projects, based on filters parameter, even if the number of items is greater than maximum API limit.}
-#'   \item{deleteProject(uuid)}{Deletes project with specified UUID.}
-#'   \item{updateProject(uuid, newContent)}{Updates project with specified UUID.}
-#'   \item{createProject(content)}{Creates new project.}
+#'     \item{}{\code{\link{api_client_authorizations.create}}}
+#'     \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
+#'     \item{}{\code{\link{api_client_authorizations.current}}}
+#'     \item{}{\code{\link{api_client_authorizations.delete}}}
+#'     \item{}{\code{\link{api_client_authorizations.get}}}
+#'     \item{}{\code{\link{api_client_authorizations.list}}}
+#'     \item{}{\code{\link{api_client_authorizations.update}}}
+#'     \item{}{\code{\link{api_clients.create}}}
+#'     \item{}{\code{\link{api_clients.delete}}}
+#'     \item{}{\code{\link{api_clients.get}}}
+#'     \item{}{\code{\link{api_clients.list}}}
+#'     \item{}{\code{\link{api_clients.update}}}
+#'     \item{}{\code{\link{authorized_keys.create}}}
+#'     \item{}{\code{\link{authorized_keys.delete}}}
+#'     \item{}{\code{\link{authorized_keys.get}}}
+#'     \item{}{\code{\link{authorized_keys.list}}}
+#'     \item{}{\code{\link{authorized_keys.update}}}
+#'     \item{}{\code{\link{collections.create}}}
+#'     \item{}{\code{\link{collections.delete}}}
+#'     \item{}{\code{\link{collections.get}}}
+#'     \item{}{\code{\link{collections.list}}}
+#'     \item{}{\code{\link{collections.provenance}}}
+#'     \item{}{\code{\link{collections.trash}}}
+#'     \item{}{\code{\link{collections.untrash}}}
+#'     \item{}{\code{\link{collections.update}}}
+#'     \item{}{\code{\link{collections.used_by}}}
+#'     \item{}{\code{\link{container_requests.create}}}
+#'     \item{}{\code{\link{container_requests.delete}}}
+#'     \item{}{\code{\link{container_requests.get}}}
+#'     \item{}{\code{\link{container_requests.list}}}
+#'     \item{}{\code{\link{container_requests.update}}}
+#'     \item{}{\code{\link{containers.auth}}}
+#'     \item{}{\code{\link{containers.create}}}
+#'     \item{}{\code{\link{containers.current}}}
+#'     \item{}{\code{\link{containers.delete}}}
+#'     \item{}{\code{\link{containers.get}}}
+#'     \item{}{\code{\link{containers.list}}}
+#'     \item{}{\code{\link{containers.lock}}}
+#'     \item{}{\code{\link{containers.secret_mounts}}}
+#'     \item{}{\code{\link{containers.unlock}}}
+#'     \item{}{\code{\link{containers.update}}}
+#'     \item{}{\code{\link{groups.contents}}}
+#'     \item{}{\code{\link{groups.create}}}
+#'     \item{}{\code{\link{groups.delete}}}
+#'     \item{}{\code{\link{groups.get}}}
+#'     \item{}{\code{\link{groups.list}}}
+#'     \item{}{\code{\link{groups.trash}}}
+#'     \item{}{\code{\link{groups.untrash}}}
+#'     \item{}{\code{\link{groups.update}}}
+#'     \item{}{\code{\link{humans.create}}}
+#'     \item{}{\code{\link{humans.delete}}}
+#'     \item{}{\code{\link{humans.get}}}
+#'     \item{}{\code{\link{humans.list}}}
+#'     \item{}{\code{\link{humans.update}}}
+#'     \item{}{\code{\link{jobs.cancel}}}
+#'     \item{}{\code{\link{jobs.create}}}
+#'     \item{}{\code{\link{jobs.delete}}}
+#'     \item{}{\code{\link{jobs.get}}}
+#'     \item{}{\code{\link{jobs.list}}}
+#'     \item{}{\code{\link{jobs.lock}}}
+#'     \item{}{\code{\link{jobs.queue}}}
+#'     \item{}{\code{\link{jobs.queue_size}}}
+#'     \item{}{\code{\link{jobs.update}}}
+#'     \item{}{\code{\link{job_tasks.create}}}
+#'     \item{}{\code{\link{job_tasks.delete}}}
+#'     \item{}{\code{\link{job_tasks.get}}}
+#'     \item{}{\code{\link{job_tasks.list}}}
+#'     \item{}{\code{\link{job_tasks.update}}}
+#'     \item{}{\code{\link{keep_disks.create}}}
+#'     \item{}{\code{\link{keep_disks.delete}}}
+#'     \item{}{\code{\link{keep_disks.get}}}
+#'     \item{}{\code{\link{keep_disks.list}}}
+#'     \item{}{\code{\link{keep_disks.ping}}}
+#'     \item{}{\code{\link{keep_disks.update}}}
+#'     \item{}{\code{\link{keep_services.accessible}}}
+#'     \item{}{\code{\link{keep_services.create}}}
+#'     \item{}{\code{\link{keep_services.delete}}}
+#'     \item{}{\code{\link{keep_services.get}}}
+#'     \item{}{\code{\link{keep_services.list}}}
+#'     \item{}{\code{\link{keep_services.update}}}
+#'     \item{}{\code{\link{links.create}}}
+#'     \item{}{\code{\link{links.delete}}}
+#'     \item{}{\code{\link{links.get}}}
+#'     \item{}{\code{\link{links.get_permissions}}}
+#'     \item{}{\code{\link{links.list}}}
+#'     \item{}{\code{\link{links.update}}}
+#'     \item{}{\code{\link{logs.create}}}
+#'     \item{}{\code{\link{logs.delete}}}
+#'     \item{}{\code{\link{logs.get}}}
+#'     \item{}{\code{\link{logs.list}}}
+#'     \item{}{\code{\link{logs.update}}}
+#'     \item{}{\code{\link{nodes.create}}}
+#'     \item{}{\code{\link{nodes.delete}}}
+#'     \item{}{\code{\link{nodes.get}}}
+#'     \item{}{\code{\link{nodes.list}}}
+#'     \item{}{\code{\link{nodes.ping}}}
+#'     \item{}{\code{\link{nodes.update}}}
+#'     \item{}{\code{\link{pipeline_instances.cancel}}}
+#'     \item{}{\code{\link{pipeline_instances.create}}}
+#'     \item{}{\code{\link{pipeline_instances.delete}}}
+#'     \item{}{\code{\link{pipeline_instances.get}}}
+#'     \item{}{\code{\link{pipeline_instances.list}}}
+#'     \item{}{\code{\link{pipeline_instances.update}}}
+#'     \item{}{\code{\link{pipeline_templates.create}}}
+#'     \item{}{\code{\link{pipeline_templates.delete}}}
+#'     \item{}{\code{\link{pipeline_templates.get}}}
+#'     \item{}{\code{\link{pipeline_templates.list}}}
+#'     \item{}{\code{\link{pipeline_templates.update}}}
+#'     \item{}{\code{\link{projects.create}}}
+#'     \item{}{\code{\link{projects.delete}}}
+#'     \item{}{\code{\link{projects.get}}}
+#'     \item{}{\code{\link{projects.list}}}
+#'     \item{}{\code{\link{projects.update}}}
+#'     \item{}{\code{\link{repositories.create}}}
+#'     \item{}{\code{\link{repositories.delete}}}
+#'     \item{}{\code{\link{repositories.get}}}
+#'     \item{}{\code{\link{repositories.get_all_permissions}}}
+#'     \item{}{\code{\link{repositories.list}}}
+#'     \item{}{\code{\link{repositories.update}}}
+#'     \item{}{\code{\link{specimens.create}}}
+#'     \item{}{\code{\link{specimens.delete}}}
+#'     \item{}{\code{\link{specimens.get}}}
+#'     \item{}{\code{\link{specimens.list}}}
+#'     \item{}{\code{\link{specimens.update}}}
+#'     \item{}{\code{\link{traits.create}}}
+#'     \item{}{\code{\link{traits.delete}}}
+#'     \item{}{\code{\link{traits.get}}}
+#'     \item{}{\code{\link{traits.list}}}
+#'     \item{}{\code{\link{traits.update}}}
+#'     \item{}{\code{\link{user_agreements.create}}}
+#'     \item{}{\code{\link{user_agreements.delete}}}
+#'     \item{}{\code{\link{user_agreements.get}}}
+#'     \item{}{\code{\link{user_agreements.list}}}
+#'     \item{}{\code{\link{user_agreements.new}}}
+#'     \item{}{\code{\link{user_agreements.sign}}}
+#'     \item{}{\code{\link{user_agreements.signatures}}}
+#'     \item{}{\code{\link{user_agreements.update}}}
+#'     \item{}{\code{\link{users.activate}}}
+#'     \item{}{\code{\link{users.create}}}
+#'     \item{}{\code{\link{users.current}}}
+#'     \item{}{\code{\link{users.delete}}}
+#'     \item{}{\code{\link{users.get}}}
+#'     \item{}{\code{\link{users.list}}}
+#'     \item{}{\code{\link{users.setup}}}
+#'     \item{}{\code{\link{users.system}}}
+#'     \item{}{\code{\link{users.unsetup}}}
+#'     \item{}{\code{\link{users.update}}}
+#'     \item{}{\code{\link{users.update_uuid}}}
+#'     \item{}{\code{\link{virtual_machines.create}}}
+#'     \item{}{\code{\link{virtual_machines.delete}}}
+#'     \item{}{\code{\link{virtual_machines.get}}}
+#'     \item{}{\code{\link{virtual_machines.get_all_logins}}}
+#'     \item{}{\code{\link{virtual_machines.list}}}
+#'     \item{}{\code{\link{virtual_machines.logins}}}
+#'     \item{}{\code{\link{virtual_machines.update}}}
+#'     \item{}{\code{\link{workflows.create}}}
+#'     \item{}{\code{\link{workflows.delete}}}
+#'     \item{}{\code{\link{workflows.get}}}
+#'     \item{}{\code{\link{workflows.list}}}
+#'     \item{}{\code{\link{workflows.update}}}
 #' }
 #'
 #' @name Arvados
@@ -41,196 +2154,3539 @@ source("./R/HttpParser.R")
 #' \dontrun{
 #' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
 #'
-#' collection <- arv$getCollection("uuid")
+#' collection <- arv$collections.get("uuid")
 #'
-#' collectionList <- arv$listCollections(list(list("name", "like", "Test%")))
-#' collectionList <- arv$listAllCollections(list(list("name", "like", "Test%")))
+#' collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
+#' collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
 #'
-#' deletedCollection <- arv$deleteCollection("uuid")
+#' deletedCollection <- arv$collections.delete("uuid")
 #'
-#' updatedCollection <- arv$updateCollection("uuid", list(name = "New name",
-#'                                                        description = "New description"))
+#' updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
+#'                                             "uuid")
 #'
-#' createdCollection <- arv$createCollection(list(name = "Example",
-#'                                                description = "This is a test collection"))
+#' createdCollection <- arv$collections.create(list(name = "Example",
+#'                                                  description = "This is a test collection"))
 #' }
 NULL
 
 #' @export
 Arvados <- R6::R6Class(
 
-    "Arvados",
-
-    public = list(
-
-        initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
-        {
-            if(!is.null(hostName))
-               Sys.setenv(ARVADOS_API_HOST = hostName)
-
-            if(!is.null(authToken))
-                Sys.setenv(ARVADOS_API_TOKEN = authToken)
-
-            hostName  <- Sys.getenv("ARVADOS_API_HOST");
-            token     <- Sys.getenv("ARVADOS_API_TOKEN");
-
-            if(hostName == "" | token == "")
-                stop(paste("Please provide host name and authentification token",
-                           "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
-                           "environment variables."))
-
-            private$numRetries  <- numRetries
-            private$REST  <- RESTService$new(token, hostName,
-                                             HttpRequest$new(), HttpParser$new(),
-                                             numRetries)
-
-            private$token <- private$REST$token
-            private$host  <- private$REST$hostName
-        },
-
-        getToken          = function() private$REST$token,
-        getHostName       = function() private$REST$hostName,
-        getWebDavHostName = function() private$REST$getWebDavHostName(),
-        getRESTService    = function() private$REST,
-        setRESTService    = function(newRESTService) private$REST <- newRESTService,
-
-        getNumRetries = function() private$REST$numRetries,
-        setNumRetries = function(newNumOfRetries)
-        {
-            private$REST$setNumRetries(newNumOfRetries)
-        },
-
-        getCollection = function(uuid)
-        {
-            collection <- private$REST$getResource("collections", uuid)
-            collection
-        },
-
-        listCollections = function(filters = NULL, limit = 100, offset = 0)
-        {
-            if(!is.null(filters))
-                names(filters) <- c("collection")
-
-            collections <- private$REST$listResources("collections", filters,
-                                                      limit, offset)
-            collections
-        },
-
-        listAllCollections = function(filters = NULL)
-        {
-            if(!is.null(filters))
-                names(filters) <- c("collection")
-
-            collectionURL <- paste0(private$host, "collections")
-            allCollection <- private$REST$fetchAllItems(collectionURL, filters)
-            allCollection
-        },
-
-        deleteCollection = function(uuid)
-        {
-            removedCollection <- private$REST$deleteResource("collections", uuid)
-            removedCollection
-        },
-
-        updateCollection = function(uuid, newContent)
-        {
-            body <- list(list())
-            names(body) <- c("collection")
-            body$collection <- newContent
-
-            updatedCollection <- private$REST$updateResource("collections",
-                                                             uuid, body)
-            updatedCollection
-        },
-
-        createCollection = function(content)
-        {
-            body <- list(list())
-            names(body) <- c("collection")
-            body$collection <- content
-
-            newCollection <- private$REST$createResource("collections", body)
-            newCollection
-        },
-
-        getProject = function(uuid)
-        {
-            project <- private$REST$getResource("groups", uuid)
-            project
-        },
-
-        createProject = function(content)
-        {
-            body <- list(list())
-            names(body) <- c("group")
-            body$group <- c("group_class" = "project", content)
-
-            newProject <- private$REST$createResource("groups", body)
-            newProject
-        },
-
-        updateProject = function(uuid, newContent)
-        {
-            body <- list(list())
-            names(body) <- c("group")
-            body$group <- newContent
-
-            updatedProject <- private$REST$updateResource("groups", uuid, body)
-            updatedProject
-        },
-
-        listProjects = function(filters = NULL, limit = 100, offset = 0)
-        {
-            if(!is.null(filters))
-                names(filters) <- c("groups")
-
-            filters[[length(filters) + 1]] <- list("group_class", "=", "project")
-
-            projects <- private$REST$listResources("groups", filters, limit, offset)
-            projects
-        },
-
-        listAllProjects = function(filters = NULL)
-        {
-            if(!is.null(filters))
-                names(filters) <- c("groups")
-
-            filters[[length(filters) + 1]] <- list("group_class", "=", "project")
-
-            projectURL <- paste0(private$host, "groups")
-
-            result <- private$REST$fetchAllItems(projectURL, filters)
-            result
-        },
-
-        deleteProject = function(uuid)
-        {
-            removedProject <- private$REST$deleteResource("groups", uuid)
-            removedProject
-        }
-    ),
-
-    private = list(
-
-        token      = NULL,
-        host       = NULL,
-        REST       = NULL,
-        numRetries = NULL
-    ),
-
-    cloneable = FALSE
-)
+       "Arvados",
 
-#' print.Arvados
-#'
-#' Custom print function for Arvados class
-#'
-#' @param x Instance of Arvados class
-#' @param ... Optional arguments.
-#' @export 
-print.Arvados = function(x, ...)
-{
-    cat(paste0("Type:  ", "\"", "Arvados",       "\""), sep = "\n")
-    cat(paste0("Host:  ", "\"", x$getHostName(), "\""), sep = "\n")
-    cat(paste0("Token: ", "\"", x$getToken(),    "\""), sep = "\n")
-}
+       public = list(
+
+               initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
+               {
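+                       # Explicit constructor arguments take precedence: they
+                       # are written to the ARVADOS_* environment variables
+                       # first, then read back, so explicit and
+                       # environment-based configuration share one code path.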
+                       if(!is.null(hostName))
+                               Sys.setenv(ARVADOS_API_HOST = hostName)
+
+                       if(!is.null(authToken))
+                               Sys.setenv(ARVADOS_API_TOKEN = authToken)
+
+                       hostName <- Sys.getenv("ARVADOS_API_HOST")
+                       token    <- Sys.getenv("ARVADOS_API_TOKEN")
+
+                       if(hostName == "" || token == "")
+                               stop(paste("Please provide a host name and authentication token",
+                                                  "or set the ARVADOS_API_HOST and ARVADOS_API_TOKEN",
+                                                  "environment variables."))
+
+                       private$token <- token
+                       private$host  <- paste0("https://", hostName, "/arvados/v1/")
+                       private$numRetries <- numRetries
+                       private$REST <- RESTService$new(token, hostName,
+                                                       HttpRequest$new(), HttpParser$new(),
+                                                       numRetries)
+
+               },
+
+               projects.get = function(uuid)
+               {
+                       self$groups.get(uuid)
+               },
+
+               projects.create = function(group, ensure_unique_name = "false")
+               {
+                       group <- c("group_class" = "project", group)
+                       self$groups.create(group, ensure_unique_name)
+               },
+
+               projects.update = function(group, uuid)
+               {
+                       group <- c("group_class" = "project", group)
+                       self$groups.update(group, uuid)
+               },
+
+               projects.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL)
+               {
+                       filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+                       self$groups.list(filters, where, order, select, distinct,
+                                        limit, offset, count, include_trash)
+               },
+
+               projects.delete = function(uuid)
+               {
+                       self$groups.delete(uuid)
+               },
+
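+               # The generated resource methods below share one pattern:
+               # build the endpoint URL, attach an OAuth2 token header,
+               # execute the HTTP verb through the injected HttpRequest
+               # object, parse the JSON response, and stop() when the
+               # server reports errors.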
+               users.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.create = function(user, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("users")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(user) > 0)
+                               body <- jsonlite::toJSON(list(user = user), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.update = function(user, uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(user) > 0)
+                               body <- jsonlite::toJSON(list(user = user), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.current = function()
+               {
+                       endPoint <- stringr::str_interp("users/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.system = function()
+               {
+                       endPoint <- stringr::str_interp("users/system")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.activate = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/activate")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.setup = function(user = NULL, openid_prefix = NULL,
+                       repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+               {
+                       endPoint <- stringr::str_interp("users/setup")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(user = user, openid_prefix = openid_prefix,
+                                                         repo_name = repo_name, vm_uuid = vm_uuid,
+                                                         send_notification_email = send_notification_email)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
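+               # Sketch of a setup call (illustrative; the argument values are
+               # placeholders, and every parameter here defaults to NULL or "false"):
+               #
+               #   arv$users.setup(user = list(email = "new.user@example.com"),
+               #                   send_notification_email = "true")
+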
+               users.unsetup = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/unsetup")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.update_uuid = function(uuid, new_uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/update_uuid")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(new_uuid = new_uuid)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("users")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
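+               # Illustrative listing call (not generated). Filters use the standard
+               # Arvados syntax of (attribute, operator, operand) triples; an admin
+               # token is assumed:
+               #
+               #   active <- arv$users.list(
+               #       filters = list(list("is_active", "=", "true")), limit = "50")
+               #   sapply(active$items, function(u) u$uuid)
+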
+               api_client_authorizations.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.create = function(apiclientauthorization,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(apiclientauthorization) > 0)
+                               body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.update = function(apiclientauthorization, uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(apiclientauthorization) > 0)
+                               body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.create_system_auth = function(api_client_id = NULL, scopes = NULL)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/create_system_auth")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(api_client_id = api_client_id,
+                                                         scopes = scopes)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.current = function()
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
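+               # Illustrative calls (not generated): inspect the token record this
+               # client is using via the `current` endpoint, then list the caller's
+               # other authorizations:
+               #
+               #   me <- arv$api_client_authorizations.current()
+               #   arv$api_client_authorizations.list(limit = "10")
+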
+               containers.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.create = function(container, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("containers")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(container) > 0)
+                               body <- jsonlite::toJSON(list(container = container), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.update = function(container, uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(container) > 0)
+                               body <- jsonlite::toJSON(list(container = container), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.auth = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/auth")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.lock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/lock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.unlock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/unlock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
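+               # Illustrative lock/unlock cycle (not generated). These endpoints are
+               # normally driven by a dispatcher holding suitable credentials; the
+               # uuid below is a placeholder:
+               #
+               #   arv$containers.lock("zzzzz-dz642-xxxxxxxxxxxxxxx")
+               #   arv$containers.unlock("zzzzz-dz642-xxxxxxxxxxxxxxx")
+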
+               containers.secret_mounts = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/secret_mounts")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.current = function()
+               {
+                       endPoint <- stringr::str_interp("containers/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("containers")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
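+               # Illustrative query (not generated): listing queued containers with
+               # the standard filter syntax:
+               #
+               #   queued <- arv$containers.list(
+               #       filters = list(list("state", "=", "Queued")), limit = "20")
+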
+               api_clients.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.create = function(apiclient, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("api_clients")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(apiclient) > 0)
+                               body <- jsonlite::toJSON(list(apiclient = apiclient), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.update = function(apiclient, uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(apiclient) > 0)
+                               body <- jsonlite::toJSON(list(apiclient = apiclient), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("api_clients")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
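+               # Illustrative call (not generated): api_clients is an
+               # administrative resource, so a plain listing is the typical use:
+               #
+               #   arv$api_clients.list(limit = "20")
+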
+               authorized_keys.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.create = function(authorizedkey,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("authorized_keys")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.update = function(authorizedkey, uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("authorized_keys")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
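+               # Sketch of registering an SSH key (illustrative; the field names
+               # are assumed from the authorized_keys schema and the values are
+               # placeholders):
+               #
+               #   arv$authorized_keys.create(list(
+               #       name = "workstation",
+               #       public_key = "ssh-rsa AAAA... user@host",
+               #       authorized_user_uuid = "zzzzz-tpzed-xxxxxxxxxxxxxxx"))
+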
+               container_requests.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.create = function(containerrequest,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("container_requests")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.update = function(containerrequest, uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("container_requests")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
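+               # Sketch of submitting work (illustrative; field names are assumed
+               # from the container_requests schema and all values are placeholders):
+               #
+               #   arv$container_requests.create(list(
+               #       name = "example", state = "Committed",
+               #       container_image = "arvados/jobs",
+               #       command = list("echo", "hello"),
+               #       output_path = "/out", cwd = "/tmp",
+               #       runtime_constraints = list(vcpus = 1, ram = 1073741824)))
+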
+               collections.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.create = function(collection, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("collections")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(collection) > 0)
+                               body <- jsonlite::toJSON(list(collection = collection), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.update = function(collection, uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(collection) > 0)
+                               body <- jsonlite::toJSON(list(collection = collection), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.provenance = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/provenance")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.used_by = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/used_by")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.trash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/trash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.untrash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/untrash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
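+               # Illustrative trash/untrash round trip (not generated); the uuid is
+               # a placeholder:
+               #
+               #   arv$collections.trash("zzzzz-4zz18-xxxxxxxxxxxxxxx")
+               #   arv$collections.untrash("zzzzz-4zz18-xxxxxxxxxxxxxxx")
+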
+               collections.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact", include_trash = NULL)
+               {
+                       endPoint <- stringr::str_interp("collections")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count,
+                                                         include_trash = include_trash)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
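+               # Illustrative listing (not generated): include_trash extends the
+               # results to trashed collections; among the list methods above, only
+               # collections.list accepts it:
+               #
+               #   arv$collections.list(limit = "20", include_trash = "true")
+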
+               humans.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.create = function(human, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("humans")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(human) > 0)
+                               body <- jsonlite::toJSON(list(human = human), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.update = function(human, uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(human) > 0)
+                               body <- jsonlite::toJSON(list(human = human), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("humans")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
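+               # Illustrative fetch (not generated): humans follows the same
+               # generated CRUD pattern as the resources above; the uuid is a
+               # placeholder:
+               #
+               #   arv$humans.get("zzzzz-7a9it-xxxxxxxxxxxxxxx")
+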
+               job_tasks.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.create = function(jobtask, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("job_tasks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(jobtask) > 0)
+                               body <- jsonlite::toJSON(list(jobtask = jobtask), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.update = function(jobtask, uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(jobtask) > 0)
+                               body <- jsonlite::toJSON(list(jobtask = jobtask), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("job_tasks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
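+
+               # Illustrative sketch (assumes `arv` from the note above; the
+               # nested-list filter syntax is an assumption, not verified here):
+               #   arv$job_tasks.list(filters = list(list("job_uuid", "=", job_uuid)))
+               #   task <- arv$job_tasks.get(task_uuid)     # hypothetical uuid variable
+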
+
+               links.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.create = function(link, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("links")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.update = function(link, uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("links")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.get_permissions = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("permissions/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
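+
+               # Note: unlike the other link wrappers, links.get_permissions targets
+               # the "permissions/${uuid}" endpoint (per the method body above), so
+               # the uuid names the object whose sharing links are listed, e.g.:
+               #   perms <- arv$links.get_permissions(object_uuid)  # hypothetical uuid
+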
+
+               jobs.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.create = function(job, ensure_unique_name = "false",
+                       find_or_create = "false", filters = NULL,
+                       minimum_script_version = NULL, exclude_script_versions = NULL)
+               {
+                       endPoint <- stringr::str_interp("jobs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         find_or_create = find_or_create, filters = filters,
+                                                         minimum_script_version = minimum_script_version,
+                                                         exclude_script_versions = exclude_script_versions)
+                       
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.update = function(job, uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.queue = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("jobs/queue")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.queue_size = function()
+               {
+                       endPoint <- stringr::str_interp("jobs/queue_size")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.cancel = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}/cancel")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.lock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}/lock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("jobs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
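+
+               # Illustrative sketch: beyond CRUD, jobs expose queue inspection and
+               # explicit state transitions via sub-endpoints, e.g.:
+               #   arv$jobs.queue_size()        # GET jobs/queue_size
+               #   arv$jobs.lock(job_uuid)      # POST jobs/${uuid}/lock (hypothetical uuid)
+               #   arv$jobs.cancel(job_uuid)    # POST jobs/${uuid}/cancel
+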
+
+               keep_disks.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.create = function(keepdisk, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("keep_disks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.update = function(keepdisk, uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.ping = function(uuid = NULL, ping_secret,
+                       node_uuid = NULL, filesystem_uuid = NULL,
+                       service_host = NULL, service_port, service_ssl_flag)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/ping")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(uuid = uuid, ping_secret = ping_secret,
+                                                         node_uuid = node_uuid, filesystem_uuid = filesystem_uuid,
+                                                         service_host = service_host, service_port = service_port,
+                                                         service_ssl_flag = service_ssl_flag)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("keep_disks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
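+
+               # Illustrative sketch: ping_secret, service_port and service_ssl_flag
+               # have no defaults above, so keep_disks.ping must supply them, e.g.
+               # (all values placeholders):
+               #   arv$keep_disks.ping(ping_secret = secret, service_port = 25107,
+               #                       service_ssl_flag = "true")
+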
+
+               keep_services.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.create = function(keepservice,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("keep_services")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.update = function(keepservice, uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.accessible = function()
+               {
+                       endPoint <- stringr::str_interp("keep_services/accessible")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("keep_services")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
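+
+               # Illustrative sketch: keep_services.accessible narrows the listing to
+               # services the calling client can reach, e.g.:
+               #   svcs <- arv$keep_services.accessible()
+               #   svcs$items[[1]]$service_host   # hypothetical field access
+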
+
+               pipeline_templates.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.create = function(pipelinetemplate,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.update = function(pipelinetemplate, uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
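+
+               # Illustrative sketch (field names are assumptions, not verified here):
+               #   tmpl <- list(name = "demo-template", components = list())
+               #   created <- arv$pipeline_templates.create(tmpl)
+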
+
+               pipeline_instances.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.create = function(pipelineinstance,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.update = function(pipelineinstance, uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.cancel = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}/cancel")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
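+
+               # Illustrative sketch: a running instance is stopped through the
+               # dedicated cancel sub-endpoint rather than an update, e.g.:
+               #   arv$pipeline_instances.cancel(instance_uuid)  # hypothetical uuid
+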
+
+               nodes.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.create = function(node, ensure_unique_name = "false",
+                       assign_slot = NULL)
+               {
+                       endPoint <- stringr::str_interp("nodes")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         assign_slot = assign_slot)
+                       
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.update = function(node, uuid, assign_slot = NULL)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(assign_slot = assign_slot)
+                       
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.ping = function(uuid, ping_secret)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}/ping")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ping_secret = ping_secret)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("nodes")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.create = function(repository, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("repositories")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(repository) > 0)
+                               body <- jsonlite::toJSON(list(repository = repository), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.update = function(repository, uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(repository) > 0)
+                               body <- jsonlite::toJSON(list(repository = repository), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.get_all_permissions = function()
+               {
+                       endPoint <- stringr::str_interp("repositories/get_all_permissions")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("repositories")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.create = function(specimen, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("specimens")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(specimen) > 0)
+                               body <- jsonlite::toJSON(list(specimen = specimen), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.update = function(specimen, uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(specimen) > 0)
+                               body <- jsonlite::toJSON(list(specimen = specimen), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("specimens")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.create = function(log, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("logs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(log) > 0)
+                               body <- jsonlite::toJSON(list(log = log), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.update = function(log, uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(log) > 0)
+                               body <- jsonlite::toJSON(list(log = log), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("logs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.create = function(trait, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("traits")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(trait) > 0)
+                               body <- jsonlite::toJSON(list(trait = trait), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.update = function(trait, uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(trait) > 0)
+                               body <- jsonlite::toJSON(list(trait = trait), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("traits")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.create = function(virtualmachine,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("virtual_machines")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(virtualmachine) > 0)
+                               body <- jsonlite::toJSON(list(virtualmachine = virtualmachine), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.update = function(virtualmachine, uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(virtualmachine) > 0)
+                               body <- jsonlite::toJSON(list(virtualmachine = virtualmachine), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.logins = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}/logins")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.get_all_logins = function()
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/get_all_logins")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("virtual_machines")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.create = function(workflow, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("workflows")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(workflow) > 0)
+                               body <- jsonlite::toJSON(list(workflow = workflow), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.update = function(workflow, uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(workflow) > 0)
+                               body <- jsonlite::toJSON(list(workflow = workflow), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("workflows")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.create = function(group, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("groups")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(group) > 0)
+                               body <- jsonlite::toJSON(list(group = group), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.update = function(group, uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(group) > 0)
+                               body <- jsonlite::toJSON(list(group = group), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.contents = function(filters = NULL,
+                       where = NULL, order = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL, uuid = NULL, recursive = NULL)
+               {
+                       endPoint <- stringr::str_interp("groups/contents")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, distinct = distinct, limit = limit,
+                                                         offset = offset, count = count, include_trash = include_trash,
+                                                         uuid = uuid, recursive = recursive)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.trash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}/trash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.untrash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}/untrash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL)
+               {
+                       endPoint <- stringr::str_interp("groups")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count,
+                                                         include_trash = include_trash)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.create = function(useragreement,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("user_agreements")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(useragreement) > 0)
+                               body <- jsonlite::toJSON(list(useragreement = useragreement), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.update = function(useragreement, uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(useragreement) > 0)
+                               body <- jsonlite::toJSON(list(useragreement = useragreement), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.signatures = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/signatures")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.sign = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/sign")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("user_agreements")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.new = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/new")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               getHostName = function() private$host,
+               getToken = function() private$token,
+               setRESTService = function(newREST) private$REST <- newREST,
+               getRESTService = function() private$REST
+       ),
+
+       private = list(
+
+               token = NULL,
+               host = NULL,
+               REST = NULL,
+               numRetries = NULL
+       ),
+
+       cloneable = FALSE
+)
index e28ba9606cfebd95a89a436b7ac9953c98d63fb8..8f737831c4634cc09a3121a86e04dcbf0361946b 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("./R/util.R")
 
 #' ArvadosFile
index ed5b4f4b968d0b9a9b1b570682af587942c29ea2..e23da138329786cba49e3a8001479461dd30be77 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("./R/Subcollection.R")
 source("./R/ArvadosFile.R")
 source("./R/RESTService.R")
@@ -53,12 +57,12 @@ Collection <- R6::R6Class(
 
     public = list(
 
-        api  = NULL,
-        uuid = NULL,
+               uuid = NULL,
+               api  = NULL,
 
-        initialize = function(api, uuid)
+               initialize = function(api, uuid) 
         {
-            self$api <- api
+            self$api <- api
             private$REST <- api$getRESTService()
 
             self$uuid <- uuid
@@ -69,6 +73,9 @@ Collection <- R6::R6Class(
 
         add = function(content, relativePath = "")
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             if(relativePath == ""  ||
                relativePath == "." ||
                relativePath == "./")
@@ -87,7 +94,6 @@ Collection <- R6::R6Class(
             if("ArvadosFile"   %in% class(content) ||
                "Subcollection" %in% class(content))
             {
-
                 if(content$getName() == "")
                     stop("Content has invalid name.")
 
@@ -104,6 +110,9 @@ Collection <- R6::R6Class(
 
         create = function(fileNames, relativePath = "")
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             if(relativePath == ""  ||
                relativePath == "." ||
                relativePath == "./")
@@ -149,6 +158,9 @@ Collection <- R6::R6Class(
 
         remove = function(paths)
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             if(is.character(paths))
             {
                 sapply(paths, function(filePath)
@@ -179,6 +191,9 @@ Collection <- R6::R6Class(
 
         move = function(content, newLocation)
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             content <- trimFromEnd(content, "/")
 
             elementToMove <- self$get(content)
@@ -191,15 +206,42 @@ Collection <- R6::R6Class(
 
         getFileListing = function()
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             content <- private$REST$getCollectionContent(self$uuid)
             content[order(tolower(content))]
         },
 
         get = function(relativePath)
         {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
             private$tree$getElement(relativePath)
         },
 
+               toJSON = function() 
+        {
+                       fields <- sapply(private$classFields, function(field)
+                       {
+                               self[[field]]
+                       }, USE.NAMES = TRUE)
+                       
+                       jsonlite::toJSON(list("collection" = 
+                     Filter(Negate(is.null), fields)), auto_unbox = TRUE)
+               },
+
+               isEmpty = function() {
+                       fields <- sapply(private$classFields,
+                                        function(field) self[[field]])
+
+                       if(any(sapply(fields, function(field) !is.null(field) && field != "")))
+                               FALSE
+                       else
+                               TRUE
+               },
+
         getRESTService = function() private$REST,
         setRESTService = function(newRESTService) private$REST <- newRESTService
     ),
@@ -208,7 +250,20 @@ Collection <- R6::R6Class(
 
         REST        = NULL,
         tree        = NULL,
-        fileContent = NULL
+        fileContent = NULL,
+        classFields = NULL,
+
+        generateCollectionTreeStructure = function()
+        {
+            if(is.null(self$uuid))
+                stop("Collection uuid is not defined.")
+
+            if(is.null(private$REST))
+                stop("REST service is not defined.")
+
+            private$fileContent <- private$REST$getCollectionContent(self$uuid)
+            private$tree <- CollectionTree$new(private$fileContent, self)
+        }
     ),
 
     cloneable = FALSE
index 91e4ec86459dc8e4ad8891d59cbdb80d771a4013..8686f88c1a8a3c55b695351b9993df55939d0f1a 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("./R/Subcollection.R")
 source("./R/ArvadosFile.R")
 source("./R/util.R")
index 5df8287fdce7b85f2b83003ac7e55720afc39645..8ce68f3837f158486534c6adc55e4ff23e9386e1 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 HttpParser <- R6::R6Class(
 
     "HttrParser",
index f8ad0a60cdad56febb96f1b6b795d7f6cc8e1358..95dd375debe5ce076638c55de49a57db1f2d8f0d 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("./R/util.R")
 
 HttpRequest <- R6::R6Class(
@@ -15,82 +19,45 @@ HttpRequest <- R6::R6Class(
             self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE")
         },
 
-        execute = function(verb, url, headers = NULL, body = NULL, query = NULL,
-                           limit = NULL, offset = NULL, retryTimes = 0)
+        exec = function(verb, url, headers = NULL, body = NULL, queryParams = NULL,
+                        retryTimes = 0)
         {
             if(!(verb %in% self$validVerbs))
                 stop("Http verb is not valid.")
 
-            headers  <- httr::add_headers(unlist(headers))
-            urlQuery <- self$createQuery(query, limit, offset)
+            urlQuery <- self$createQuery(queryParams)
             url      <- paste0(url, urlQuery)
 
+            config <- httr::add_headers(unlist(headers))
+            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
+                config$options <- list(ssl_verifypeer = FALSE)
+
             # times = 1 regular call + numberOfRetries
             response <- httr::RETRY(verb, url = url, body = body,
-                                    config = headers, times = retryTimes + 1)
+                                    config = config, times = retryTimes + 1)
         },
 
-        createQuery = function(filters, limit, offset)
-        {
-            finalQuery <- NULL
-
-            finalQuery <- c(finalQuery, private$createFiltersQuery(filters))
-            finalQuery <- c(finalQuery, private$createLimitQuery(limit))
-            finalQuery <- c(finalQuery, private$createOffsetQuery(offset))
-
-            finalQuery <- finalQuery[!is.null(finalQuery)]
-            finalQuery <- paste0(finalQuery, collapse = "&")
-
-            if(finalQuery != "")
-                finalQuery <- paste0("/?", finalQuery)
-
-            finalQuery
-        }
-    ),
-
-    private = list(
-
-        createFiltersQuery = function(filters)
+        createQuery = function(queryParams)
         {
-            if(!is.null(filters))
-            {
-                filters <- RListToPythonList(filters, ",")
-                encodedQuery <- URLencode(filters, reserved = T, repeated = T)
-
-                return(paste0("filters=", encodedQuery))
-            }
-
-            return(NULL)
-        },
+            queryParams <- Filter(Negate(is.null), queryParams)
 
-        createLimitQuery = function(limit)
-        {
-            if(!is.null(limit))
+            query <- sapply(queryParams, function(param)
             {
-                limit <- suppressWarnings(as.numeric(limit))
+                if(is.list(param) || length(param) > 1)
+                    param <- RListToPythonList(param, ",")
 
-                if(is.na(limit))
-                    stop("Limit must be a numeric type.")
-                
-                return(paste0("limit=", limit))
-            }
+                URLencode(as.character(param), reserved = T, repeated = T)
 
-            return(NULL)
-        },
+            }, USE.NAMES = TRUE)
 
-        createOffsetQuery = function(offset)
-        {
-            if(!is.null(offset))
+            if(length(query) > 0)
             {
-                offset <- suppressWarnings(as.numeric(offset))
+                query <- paste0(names(query), "=", query, collapse = "&")
 
-                if(is.na(offset))
-                    stop("Offset must be a numeric type.")
-                
-                return(paste0("offset=", offset))
+                return(paste0("/?", query))
             }
 
-            return(NULL)
+            return("")
         }
     ),
 
index 12e6591ee8f4b3e3cafa6f187bf85886eb6e4914..ac65d0df3f37b6baa6031bc8cbab71b163e27a76 100644 (file)
@@ -1,30 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 RESTService <- R6::R6Class(
 
     "RESTService",
 
     public = list(
 
-        hostName   = NULL,
         token      = NULL,
         http       = NULL,
         httpParser = NULL,
         numRetries = NULL,
 
-        initialize = function(token, hostName,
+        initialize = function(token, rawHost,
                               http, httpParser,
                               numRetries     = 0,
                               webDavHostName = NULL)
         {
-            version <- "v1"
-
-            self$token       <- token
-            self$hostName    <- paste0("https://", hostName,
-                                       "/arvados/", version, "/")
-            self$http        <- http
-            self$httpParser  <- httpParser
-            self$numRetries  <- numRetries
+            self$token      <- token
+            self$http       <- http
+            self$httpParser <- httpParser
+            self$numRetries <- numRetries
 
-            private$rawHostName    <- hostName
+            private$rawHostName    <- rawHost
             private$webDavHostName <- webDavHostName
         },
 
@@ -42,8 +41,8 @@ RESTService <- R6::R6Class(
 
                 headers <- list(Authorization = paste("OAuth2", self$token))
 
-                serverResponse <- self$http$execute("GET", discoveryDocumentURL, headers,
-                                                    retryTimes = self$numRetries)
+                serverResponse <- self$http$exec("GET", discoveryDocumentURL, headers,
+                                                 retryTimes = self$numRetries)
 
                 discoveryDocument <- self$httpParser$parseJSONResponse(serverResponse)
                 private$webDavHostName <- discoveryDocument$keepWebServiceUrl
@@ -55,126 +54,6 @@ RESTService <- R6::R6Class(
             private$webDavHostName
         },
 
-        getResource = function(resource, uuid)
-        {
-            resourceURL <- paste0(self$hostName, resource, "/", uuid)
-            headers <- list(Authorization = paste("OAuth2", self$token))
-
-            serverResponse <- self$http$execute("GET", resourceURL, headers,
-                                                retryTimes = self$numRetries)
-
-            resource <- self$httpParser$parseJSONResponse(serverResponse)
-
-            if(!is.null(resource$errors))
-                stop(resource$errors)
-
-            resource
-        },
-
-        listResources = function(resource, filters = NULL, limit = 100, offset = 0)
-        {
-            resourceURL <- paste0(self$hostName, resource)
-            headers <- list(Authorization = paste("OAuth2", self$token))
-            body <- NULL
-
-            serverResponse <- self$http$execute("GET", resourceURL, headers, body,
-                                                filters, limit, offset,
-                                                self$numRetries)
-
-            resources <- self$httpParser$parseJSONResponse(serverResponse)
-
-            if(!is.null(resources$errors))
-                stop(resources$errors)
-
-            resources
-        },
-
-        fetchAllItems = function(resourceURL, filters)
-        {
-            headers <- list(Authorization = paste("OAuth2", self$token))
-
-            offset <- 0
-            itemsAvailable <- .Machine$integer.max
-            items <- c()
-            while(length(items) < itemsAvailable)
-            {
-                serverResponse <- self$http$execute(verb       = "GET",
-                                                    url        = resourceURL,
-                                                    headers    = headers,
-                                                    body       = NULL,
-                                                    query      = filters,
-                                                    limit      = NULL,
-                                                    offset     = offset,
-                                                    retryTimes = self$numRetries)
-
-                parsedResponse <- self$httpParser$parseJSONResponse(serverResponse)
-
-                if(!is.null(parsedResponse$errors))
-                    stop(parsedResponse$errors)
-
-                items          <- c(items, parsedResponse$items)
-                offset         <- length(items)
-                itemsAvailable <- parsedResponse$items_available
-            }
-
-            items
-        },
-
-        deleteResource = function(resource, uuid)
-        {
-            collectionURL <- paste0(self$hostName, resource, "/", uuid)
-            headers <- list("Authorization" = paste("OAuth2", self$token),
-                            "Content-Type"  = "application/json")
-
-            serverResponse <- self$http$execute("DELETE", collectionURL, headers,
-                                                retryTimes = self$numRetries)
-
-            removedResource <- self$httpParser$parseJSONResponse(serverResponse)
-
-            if(!is.null(removedResource$errors))
-                stop(removedResource$errors)
-
-            removedResource
-        },
-
-        updateResource = function(resource, uuid, newContent)
-        {
-            resourceURL <- paste0(self$hostName, resource, "/", uuid)
-            headers <- list("Authorization" = paste("OAuth2", self$token),
-                            "Content-Type"  = "application/json")
-
-            newContent <- jsonlite::toJSON(newContent, auto_unbox = T)
-
-            serverResponse <- self$http$execute("PUT", resourceURL, headers, newContent,
-                                                retryTimes = self$numRetries)
-
-            updatedResource <- self$httpParser$parseJSONResponse(serverResponse)
-
-            if(!is.null(updatedResource$errors))
-                stop(updatedResource$errors)
-
-            updatedResource
-        },
-
-        createResource = function(resource, content)
-        {
-            resourceURL <- paste0(self$hostName, resource)
-            headers <- list("Authorization" = paste("OAuth2", self$token),
-                            "Content-Type"  = "application/json")
-
-            content <- jsonlite::toJSON(content, auto_unbox = T)
-
-            serverResponse <- self$http$execute("POST", resourceURL, headers, content,
-                                                retryTimes = self$numRetries)
-
-            newResource <- self$httpParser$parseJSONResponse(serverResponse)
-
-            if(!is.null(newResource$errors))
-                stop(newResource$errors)
-
-            newResource
-        },
-
         create = function(files, uuid)
         {
             sapply(files, function(filePath)
@@ -189,8 +68,8 @@ RESTService <- R6::R6Class(
                               uuid, "/", relativePath);
             headers <- list(Authorization = paste("OAuth2", self$token)) 
 
-            serverResponse <- self$http$execute("DELETE", fileURL, headers,
-                                                retryTimes = self$numRetries)
+            serverResponse <- self$http$exec("DELETE", fileURL, headers,
+                                             retryTimes = self$numRetries)
 
             if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                 stop(paste("Server code:", serverResponse$status_code))
@@ -207,8 +86,8 @@ RESTService <- R6::R6Class(
             headers <- list("Authorization" = paste("OAuth2", self$token),
                            "Destination" = toURL)
 
-            serverResponse <- self$http$execute("MOVE", fromURL, headers,
-                                                retryTimes = self$numRetries)
+            serverResponse <- self$http$exec("MOVE", fromURL, headers,
+                                             retryTimes = self$numRetries)
 
             if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                 stop(paste("Server code:", serverResponse$status_code))
@@ -223,8 +102,8 @@ RESTService <- R6::R6Class(
 
             headers <- list("Authorization" = paste("OAuth2", self$token))
 
-            response <- self$http$execute("PROPFIND", collectionURL, headers,
-                                          retryTimes = self$numRetries)
+            response <- self$http$exec("PROPFIND", collectionURL, headers,
+                                       retryTimes = self$numRetries)
 
             if(all(response == ""))
                 stop("Response is empty, request may be misconfigured")
@@ -244,8 +123,8 @@ RESTService <- R6::R6Class(
 
             headers <- list("Authorization" = paste("OAuth2", self$token))
 
-            response <- self$http$execute("PROPFIND", subcollectionURL, headers,
-                                          retryTimes = self$numRetries)
+            response <- self$http$exec("PROPFIND", subcollectionURL, headers,
+                                       retryTimes = self$numRetries)
 
             if(all(response == ""))
                 stop("Response is empty, request may be misconfigured")
@@ -281,8 +160,8 @@ RESTService <- R6::R6Class(
             if(!(contentType %in% self$httpParser$validContentTypes))
                 stop("Invalid contentType. Please use text or raw.")
 
-            serverResponse <- self$http$execute("GET", fileURL, headers,
-                                                retryTimes = self$numRetries)
+            serverResponse <- self$http$exec("GET", fileURL, headers,
+                                             retryTimes = self$numRetries)
 
             if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                 stop(paste("Server code:", serverResponse$status_code))
@@ -298,8 +177,8 @@ RESTService <- R6::R6Class(
                             "Content-Type" = contentType)
             body <- content
 
-            serverResponse <- self$http$execute("PUT", fileURL, headers, body,
-                                                retryTimes = self$numRetries)
+            serverResponse <- self$http$exec("PUT", fileURL, headers, body,
+                                             retryTimes = self$numRetries)
 
             if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                 stop(paste("Server code:", serverResponse$status_code))
@@ -335,8 +214,8 @@ RESTService <- R6::R6Class(
                             "Content-Type" = contentType)
             body <- NULL
 
-            serverResponse <- self$http$execute("PUT", fileURL, headers, body,
-                                                retryTimes = self$numRetries)
+            serverResponse <- self$http$exec("PUT", fileURL, headers, body,
+                                             retryTimes = self$numRetries)
 
             if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                 stop(paste("Server code:", serverResponse$status_code))
index 863e3f768f778880d7396a159a66f04e57d48fe6..60714a4ad835b9bc201fb780bb38b5fb8a81461c 100644 (file)
@@ -1,9 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("./R/util.R")
 
 #' Subcollection
 #' 
 #' Subcollection class represents a folder inside Arvados collection.
-#' It is essentially a composite of ArvadosFiles and other Subcollections.
+#' It is essentially a composite of ArvadosFiles and other Subcollections.
 #' 
 #' @section Usage:
 #' \preformatted{subcollection = Subcollection$new(name)}
diff --git a/sdk/R/R/autoGenAPI.R b/sdk/R/R/autoGenAPI.R
new file mode 100644 (file)
index 0000000..3e8c2fa
--- /dev/null
@@ -0,0 +1,575 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+getAPIDocument <- function(){
+    url <- "https://4xphq.arvadosapi.com/discovery/v1/apis/arvados/v1/rest"
+    serverResponse <- httr::RETRY("GET", url = url)
+
+    httr::content(serverResponse, as = "parsed", type = "application/json")
+}
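+
+# A minimal usage sketch (shape of the parsed document as used below): the
+# discovery document is fetched from the hardcoded 4xphq host above and parsed
+# into a nested list whose $resources element is keyed by resource name.
+#
+#   doc <- getAPIDocument()
+#   names(doc$resources)                       # e.g. "collections", "groups", ...
+#   names(doc$resources$collections$methods)   # e.g. "get", "list", "create", ...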
+
+#' @export
+generateAPI <- function()
+{
+    #TODO: Consider passing discovery document URL as parameter.
+    #TODO: Consider passing location where to create new files.
+    discoveryDocument <- getAPIDocument()
+
+    methodResources <- discoveryDocument$resources
+    resourceNames   <- names(methodResources)
+
+    methodDoc <- genMethodsDoc(methodResources, resourceNames)
+    classDoc <- genAPIClassDoc(methodResources, resourceNames)
+    arvadosAPIHeader <- genAPIClassHeader()
+    arvadosProjectMethods <- genProjectMethods()
+    arvadosClassMethods <- genClassContent(methodResources, resourceNames)
+    arvadosAPIFooter <- genAPIClassFooter()
+
+    arvadosClass <- c(methodDoc,
+                      classDoc,
+                      arvadosAPIHeader,
+                      arvadosProjectMethods,
+                      arvadosClassMethods,
+                      arvadosAPIFooter)
+
+    fileConn <- file("./R/Arvados.R", "w")
+    writeLines(unlist(arvadosClass), fileConn)
+    close(fileConn)
+    NULL
+}
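+
+# Usage is a single call; as a sketch (run from the R package root, e.g.
+# arvados/sdk/R, so that ./R/Arvados.R resolves correctly):
+#
+#   generateAPI()   # rewrites ./R/Arvados.R from the live discovery document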
+
+genAPIClassHeader <- function()
+{
+    c("Arvados <- R6::R6Class(",
+      "",
+      "\t\"Arvados\",",
+      "",
+      "\tpublic = list(",
+      "",
+      "\t\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)",
+      "\t\t{",
+      "\t\t\tif(!is.null(hostName))",
+      "\t\t\t\tSys.setenv(ARVADOS_API_HOST = hostName)",
+      "",
+      "\t\t\tif(!is.null(authToken))",
+      "\t\t\t\tSys.setenv(ARVADOS_API_TOKEN = authToken)",
+      "",
+      "\t\t\thostName <- Sys.getenv(\"ARVADOS_API_HOST\")",
+      "\t\t\ttoken    <- Sys.getenv(\"ARVADOS_API_TOKEN\")",
+      "",
+      "\t\t\tif(hostName == \"\" | token == \"\")",
+      "\t\t\t\tstop(paste(\"Please provide host name and authentication token\",",
+      "\t\t\t\t\t\t   \"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\",",
+      "\t\t\t\t\t\t   \"environment variables.\"))",
+      "",
+      "\t\t\tprivate$token <- token",
+      "\t\t\tprivate$host  <- paste0(\"https://\", hostName, \"/arvados/v1/\")",
+      "\t\t\tprivate$numRetries <- numRetries",
+      "\t\t\tprivate$REST <- RESTService$new(token, hostName,",
+      "\t\t\t                                HttpRequest$new(), HttpParser$new(),",
+      "\t\t\t                                numRetries)",
+      "",
+      "\t\t},\n")
+}
+
+genProjectMethods <- function()
+{
+    c("\t\tprojects.get = function(uuid)",
+      "\t\t{",
+      "\t\t\tself$groups.get(uuid)",
+      "\t\t},",
+      "",
+      "\t\tprojects.create = function(group, ensure_unique_name = \"false\")",
+      "\t\t{",
+      "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+      "\t\t\tself$groups.create(group, ensure_unique_name)",
+      "\t\t},",
+      "",
+      "\t\tprojects.update = function(group, uuid)",
+      "\t\t{",
+      "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+      "\t\t\tself$groups.update(group, uuid)",
+      "\t\t},",
+      "",
+      "\t\tprojects.list = function(filters = NULL, where = NULL,",
+      "\t\t\torder = NULL, select = NULL, distinct = NULL,",
+      "\t\t\tlimit = \"100\", offset = \"0\", count = \"exact\",",
+      "\t\t\tinclude_trash = NULL)",
+      "\t\t{",
+      "\t\t\tfilters[[length(filters) + 1]] <- list(\"group_class\", \"=\", \"project\")",
+      "\t\t\tself$groups.list(filters, where, order, select, distinct,",
+      "\t\t\t                 limit, offset, count, include_trash)",
+      "\t\t},",
+      "",
+      "\t\tprojects.delete = function(uuid)",
+      "\t\t{",
+      "\t\t\tself$groups.delete(uuid)",
+      "\t\t},",
+      "")
+}
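+
+# The generated projects.* methods above are thin wrappers over groups.*; as a
+# sketch of what a call expands to at runtime:
+#
+#   arv$projects.create(list(name = "My project"))
+#   # is equivalent to
+#   arv$groups.create(list(group_class = "project", name = "My project"))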
+
+genClassContent <- function(methodResources, resourceNames)
+{
+    arvadosMethods <- Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+
+        functions <- Map(function(methodMetaData, methodName)
+        {
+            #NOTE: Index, show and destroy are aliases for the preferred names
+            # "list", "get" and "delete". Until they are removed from discovery
+            # document we will filter them here.
+            if(methodName %in% c("index", "show", "destroy"))
+               return(NULL)
+
+            methodName <- paste0(resourceName, ".", methodName)
+            createMethod(methodName, methodMetaData)
+
+        }, resource$methods, methodNames)
+
+        unlist(unname(functions))
+
+    }, methodResources, resourceNames)
+
+    arvadosMethods
+}
+
+genAPIClassFooter <- function()
+{
+    c("\t\tgetHostName = function() private$host,",
+      "\t\tgetToken = function() private$token,",
+      "\t\tsetRESTService = function(newREST) private$REST <- newREST,",
+      "\t\tgetRESTService = function() private$REST",
+      "\t),",
+      "",
+      "\tprivate = list(",
+      "",
+      "\t\ttoken = NULL,",
+      "\t\thost = NULL,",
+      "\t\tREST = NULL,",
+      "\t\tnumRetries = NULL",
+      "\t),",
+      "",
+      "\tcloneable = FALSE",
+      ")")
+}
+
+createMethod <- function(name, methodMetaData)
+{
+    args      <- getMethodArguments(methodMetaData)
+    signature <- getMethodSignature(name, args)
+    body      <- getMethodBody(methodMetaData)
+
+    c(signature,
+      "\t\t{",
+          body,
+      "\t\t},\n")
+}
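+
+# For illustration (hypothetical discovery entry): a method with httpMethod
+# "GET" and path "collections/{uuid}" would make createMethod emit roughly:
+#
+#   collections.get = function(uuid)
+#   {
+#       endPoint <- stringr::str_interp("collections/${uuid}")
+#       url <- paste0(private$host, endPoint)
+#       ...
+#   },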
+
+getMethodArguments <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+    requestArgs <- NULL
+
+    if(!is.null(request))
+    {
+        resourceName <- tolower(request$properties[[1]][[1]])
+
+        if(request$required)
+            requestArgs <- resourceName
+        else
+            requestArgs <- paste(resourceName, "=", "NULL")
+    }
+
+    argNames <- names(methodMetaData$parameters)
+
+    args <- sapply(argNames, function(argName)
+    {
+        arg <- methodMetaData$parameters[[argName]]
+
+        if(!arg$required)
+        {
+            if(!is.null(arg$default))
+                return(paste0(argName, " = ", "\"", arg$default, "\""))
+            else
+                return(paste(argName, "=", "NULL"))
+        }
+
+        argName
+    })
+
+    c(requestArgs, args)
+}
+
+getMethodSignature <- function(methodName, args)
+{
+    collapsedArgs <- paste0(args, collapse = ", ")
+    lineLengthLimit <- 40
+
+    if(nchar(collapsedArgs) > lineLengthLimit)
+    {
+        return(paste0("\t\t",
+                      formatArgs(paste(methodName, "= function("),
+                                 "\t", args, ")", lineLengthLimit)))
+    }
+    else
+    {
+        return(paste0("\t\t", methodName, " = function(", collapsedArgs, ")"))
+    }
+}
+
+getMethodBody <- function(methodMetaData)
+{
+    url              <- getRequestURL(methodMetaData)
+    headers          <- getRequestHeaders()
+    requestQueryList <- getRequestQueryList(methodMetaData)
+    requestBody      <- getRequestBody(methodMetaData)
+    request          <- getRequest(methodMetaData)
+    response         <- getResponse(methodMetaData)
+    errorCheck       <- getErrorCheckingCode()
+    returnStatement  <- getReturnObject()
+
+    body <- c(url,
+              headers,
+              requestQueryList, "",
+              requestBody, "",
+              request, response, "",
+              errorCheck, "",
+              returnStatement)
+
+    paste0("\t\t\t", body)
+}
+
+getRequestURL <- function(methodMetaData)
+{
+    endPoint <- methodMetaData$path
+    endPoint <- stringr::str_replace_all(endPoint, "\\{", "${")
+    url <- c(paste0("endPoint <- stringr::str_interp(\"", endPoint, "\")"),
+             paste0("url <- paste0(private$host, endPoint)"))
+    url
+}
+
+getRequestHeaders <- function()
+{
+    c("headers <- list(Authorization = paste(\"OAuth2\", private$token), ",
+      "                \"Content-Type\" = \"application/json\")")
+}
+
+getRequestQueryList <- function(methodMetaData)
+{
+    queryArgs <- names(Filter(function(arg) arg$location == "query",
+                        methodMetaData$parameters))
+
+    if(length(queryArgs) == 0)
+        return("queryArgs <- NULL")
+
+    queryArgs <- sapply(queryArgs, function(arg) paste0(arg, " = ", arg))
+    collapsedArgs <- paste0(queryArgs, collapse = ", ")
+
+    lineLengthLimit <- 40
+
+    if(nchar(collapsedArgs) > lineLengthLimit)
+        return(formatArgs("queryArgs <- list(", "\t\t\t\t  ", queryArgs, ")",
+                          lineLengthLimit))
+    else
+        return(paste0("queryArgs <- list(", collapsedArgs, ")"))
+}
+
+getRequestBody <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+
+    if(is.null(request) || !request$required)
+        return("body <- NULL")
+
+    resourceName <- tolower(request$properties[[1]][[1]])
+
+    requestParameterName <- names(request$properties)[1]
+
+    c(paste0("if(length(", resourceName, ") > 0)"),
+      paste0("\tbody <- jsonlite::toJSON(list(", resourceName, " = ", resourceName, "), "),
+             "\t                         auto_unbox = TRUE)",
+      "else",
+      "\tbody <- NULL")
+}
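+
+# For a required request object named "collection", the template above renders
+# (modulo indentation) in the generated method as:
+#
+#   if(length(collection) > 0)
+#       body <- jsonlite::toJSON(list(collection = collection),
+#                                auto_unbox = TRUE)
+#   else
+#       body <- NULL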
+
+getRequest <- function(methodMetaData)
+{
+    method <- methodMetaData$httpMethod
+    c(paste0("response <- private$REST$http$exec(\"", method, "\", url, headers, body,"),
+      "                                   queryArgs, private$numRetries)")
+}
+
+getResponse <- function(methodMetaData)
+{
+    "resource <- private$REST$httpParser$parseJSONResponse(response)"
+}
+
+getErrorCheckingCode <- function()
+{
+    c("if(!is.null(resource$errors))",
+      "\tstop(resource$errors)")
+}
+
+getReturnObject <- function()
+{
+    "resource"
+}
+
+#NOTE: Arvados class documentation:
+
+genMethodsDoc <- function(methodResources, resourceNames)
+{
+    methodsDoc <- unlist(unname(Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+
+        methodDoc <- Map(function(methodMetaData, methodName)
+        {
+            #NOTE: Index, show and destroy are aliases for the preferred names
+            # "list", "get" and "delete". Until they are removed from discovery
+            # document we will filter them here.
+            if(methodName %in% c("index", "show", "destroy"))
+               return(NULL)
+
+            methodName <- paste0(resourceName, ".", methodName)
+            getMethodDoc(methodName, methodMetaData)
+
+        }, resource$methods, methodNames)
+
+        unlist(unname(methodDoc))
+
+    }, methodResources, resourceNames)))
+
+    projectDoc <- genProjectMethodsDoc()
+    
+    c(methodsDoc, projectDoc)
+}
+
+genAPIClassDoc <- function(methodResources, resourceNames)
+{
+    c("#' Arvados",
+      "#'",
+      "#' The Arvados class gives users the ability to access the Arvados REST API.",
+      "#'",
+      "#' @section Usage:",
+      "#' \\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}",
+      "#'",
+      "#' @section Arguments:",
+      "#' \\describe{",
+      "#' \t\\item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}",
+      "#' \t\\item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable will be used.}",
+      "#' \t\\item{numRetries}{Number which specifies how many times to retry failed service requests.}",
+      "#' }",
+      "#'",
+      "#' @section Methods:",
+      "#' \\describe{",
+      getAPIClassMethodList(methodResources, resourceNames),
+      "#' }",
+      "#'",
+      "#' @name Arvados",
+      "#' @examples",
+      "#' \\dontrun{",
+      "#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")",
+      "#'",
+      "#' collection <- arv$collections.get(\"uuid\")",
+      "#'",
+      "#' collectionList <- arv$collections.list(list(list(\"name\", \"like\", \"Test%\")))",
+      "#' collectionList <- listAll(arv$collections.list, list(list(\"name\", \"like\", \"Test%\")))",
+      "#'",
+      "#' deletedCollection <- arv$collections.delete(\"uuid\")",
+      "#'",
+      "#' updatedCollection <- arv$collections.update(list(name = \"New name\", description = \"New description\"),",
+      "#'                                             \"uuid\")",
+      "#'",
+      "#' createdCollection <- arv$collections.create(list(name = \"Example\",",
+      "#'                                                  description = \"This is a test collection\"))",
+      "#' }",
+      "NULL",
+      "",
+      "#' @export")
+}
+
+getAPIClassMethodList <- function(methodResources, resourceNames)
+{
+    methodList <- unlist(unname(Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+        paste0(resourceName,
+               ".",
+               methodNames[!(methodNames %in% c("index", "show", "destroy"))])
+
+    }, methodResources, resourceNames)))
+    
+    hardcodedMethods <- c("projects.create", "projects.get",
+                          "projects.list", "projects.update", "projects.delete")
+    paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}") 
+}
+
+getMethodDoc <- function(methodName, methodMetaData)
+{
+    name        <- paste("#' @name", methodName)
+    usage       <- getMethodUsage(methodName, methodMetaData)
+    description <- paste("#'", methodName, "is a method defined in Arvados class.")
+    params      <- getMethodDescription(methodMetaData)
+    returnValue <- paste("#' @return", methodMetaData$response[["$ref"]], "object.")
+
+    c(paste("#'", methodName),
+      "#' ",
+      description,
+      "#' ",
+      usage,
+      params,
+      returnValue,
+      name,
+      "NULL",
+      "")
+}
+
+getMethodUsage <- function(methodName, methodMetaData)
+{
+    lineLengthLimit <- 40
+    args <- getMethodArguments(methodMetaData)
+    c(formatArgs(paste0("#' @usage arv$", methodName,
+                        "("), "#' \t", args, ")", lineLengthLimit))
+}
+
+getMethodDescription <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+    requestDoc <- NULL
+
+    if(!is.null(request))
+    {
+        requestDoc <- unname(unlist(sapply(request$properties, function(prop)
+                             {
+                                 className <- sapply(prop, function(ref) ref)
+                                 objectName <- paste0(tolower(substr(className, 1, 1)),
+                                                      substr(className, 2, nchar(className)))
+                                 paste("#' @param", objectName, className, "object.") 
+                             })))
+    }
+
+    argNames <- names(methodMetaData$parameters)
+
+    argsDoc <- unname(unlist(sapply(argNames, function(argName)
+    {
+        arg <- methodMetaData$parameters[[argName]]
+        argDescription <- arg$description
+        paste("#' @param", argName, argDescription) 
+    })))
+
+    c(requestDoc, argsDoc)
+}
+
+genProjectMethodsDoc <- function()
+{
+    #TODO: Manually update this documentation to reflect changes in discovery document.
+    c("#' projects.get",
+    "#' ",
+    "#' projects.get is equivalent to the groups.get method.",
+    "#' ",
+    "#' @usage arv$projects.get(uuid)",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.get",
+    "NULL",
+    "",
+    "#' projects.create",
+    "#' ",
+    "#' projects.create wraps the groups.create method by setting the group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.create(group, ensure_unique_name = \"false\")",
+    "#' @param group Group object.",
+    "#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+    "#' @return Group object.",
+    "#' @name projects.create",
+    "NULL",
+    "",
+    "#' projects.update",
+    "#' ",
+    "#' projects.update wraps the groups.update method by setting the group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.update(group, uuid)",
+    "#' @param group Group object.",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.update",
+    "NULL",
+    "",
+    "#' projects.delete",
+    "#' ",
+    "#' projects.delete is equivalent to the groups.delete method.",
+    "#' ",
+    "#' @usage arv$projects.delete(uuid)",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.delete",
+    "NULL",
+    "",
+    "#' projects.list",
+    "#' ",
+    "#' projects.list wraps the groups.list method by setting the group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.list(filters = NULL,",
+    "#'        where = NULL, order = NULL, distinct = NULL,",
+    "#'        limit = \"100\", offset = \"0\", count = \"exact\",",
+    "#'        include_trash = NULL, uuid = NULL, recursive = NULL)",
+    "#' @param filters ",
+    "#' @param where ",
+    "#' @param order ",
+    "#' @param distinct ",
+    "#' @param limit ",
+    "#' @param offset ",
+    "#' @param count ",
+    "#' @param include_trash Include items whose is_trashed attribute is true.",
+    "#' @param uuid ",
+    "#' @param recursive Include contents from child groups recursively.",
+    "#' @return Group object.",
+    "#' @name projects.list",
+    "NULL",
+    "")
+}
+
+#NOTE: Utility functions:
+
+# This function is used to split very long lines of code into smaller chunks.
+# This is usually the case when we pass a lot of named arguments to a function.
+formatArgs <- function(prependAtStart, prependToEachSplit,
+                       args, appendAtEnd, lineLength)
+{
+    if(length(args) > 1)
+    {
+        args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",") 
+    }
+
+    args[1] <- paste0(prependAtStart, args[1])
+    args[length(args)] <- paste0(args[length(args)], appendAtEnd)
+
+    argsLength <- length(args)
+    argLines <- list()
+    index <- 1
+
+    while(index <= argsLength)
+    {
+        line <- args[index]
+        index <- index + 1
+
+        while(nchar(line) < lineLength && index <= argsLength)
+        {
+            line <- paste(line, args[index])
+            index <- index + 1
+        }
+
+        argLines <- c(argLines, line)
+    }
+    
+    argLines <- unlist(argLines)
+    argLinesLen <- length(argLines)
+
+    if(argLinesLen > 1)
+        argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen]) 
+
+    argLines
+}
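+
+# Example (assumed inputs):
+#
+#   formatArgs("foo <- list(", "\t", c("a = 1", "b = 2", "c = 3"), ")", 20)
+#
+# returns c("foo <- list(a = 1, b = 2,", "\tc = 3)"): arguments are packed onto
+# lines of roughly lineLength characters, and each continuation line is
+# prefixed with prependToEachSplit.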
index d9af8b057bbb00b42611894fe2186aa6f87b0caf..f796cb7b87eca67b3de28d5929221d792554b047 100644 (file)
@@ -1,3 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+#' listAll
+#'
+#' List all resources even if the number of items is greater than maximum API limit.
+#'
+#' @param fn Arvados method used to retrieve items from REST service.
+#' @param ... Optional arguments which will be passed to fn.
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#' cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test%")))
+#' }
+#' @export 
+listAll <- function(fn, ...)
+{
+    offset <- 0
+    itemsAvailable <- .Machine$integer.max
+    items <- c()
+
+    while(length(items) < itemsAvailable)
+    {
+        serverResponse <- fn(offset = offset, ...)
+
+        if(!is.null(serverResponse$errors))
+            stop(serverResponse$errors)
+
+        items          <- c(items, serverResponse$items)
+        offset         <- length(items)
+        itemsAvailable <- serverResponse$items_available
+    }
+
+    items
+}
+
+
+#NOTE: Package private functions
+
 trimFromStart <- function(sample, trimCharacters)
 {
     if(startsWith(sample, trimCharacters))
@@ -49,6 +89,5 @@ splitToPathAndName = function(path)
     nameAndPath$name <- components[length(components)]
     nameAndPath$path <- trimFromStart(paste0(components[-length(components)], collapse = "/"),
                                       "/")
-
     nameAndPath
 }
diff --git a/sdk/R/R/zzz.R b/sdk/R/R/zzz.R
new file mode 100644 (file)
index 0000000..c98f803
--- /dev/null
@@ -0,0 +1,10 @@
+.onLoad <- function(libName, pkgName)
+{
+    minAllowedRVersion <- "3.3.0"
+    currentRVersion <- getRversion()
+
+    if(currentRVersion < minAllowedRVersion)
+        print(paste0("Minimum R version required to run ", pkgName, " is ",
+                     minAllowedRVersion, ". Your current version is ",
+                     toString(currentRVersion), ". Please update R and try again."))
+}
index 098bfe19a5759d345ee9f325ee23f4b3a000e3bd..be34b2fdb1dabd3531ca74e696a8240528418520 100644 (file)
@@ -1,3 +1,7 @@
+[comment]: # (Copyright (c) The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
+
 ## R SDK for Arvados
 
 This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections.
@@ -5,294 +9,320 @@ The API is not final and feedback is solicited from users on ways in which it co
 
 ### Installation
 
-```install.packages("ArvadosR", repos=c("http://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)```
+```{r include=FALSE}
+knitr::opts_chunk$set(eval=FALSE)
+```
+
+```{r}
+install.packages("ArvadosR", repos=c("http://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)
+```
 
 Note: on Linux, you may have to install supporting packages.
 
 On Centos 7, this is:
 
-```yum install libxml2-devel openssl-devel curl-devel```
+```{bash}
+yum install libxml2-devel openssl-devel curl-devel
+```
 
 On Debian, this is:
 
-```apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev```
+```{bash}
+apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
+```
+
+The minimum R version required to run ArvadosR is 3.3.0.
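+
+A quick way to check your version from R (the package performs a similar check
+at load time and prints a message if your R is too old):
+
+```{r}
+getRversion() >= "3.3.0"
+```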
 
 
 ### Usage
 
 #### Initializing API
 
-```{r include=FALSE}
-knitr::opts_chunk$set(eval = FALSE)
-```
-
 * Load Library and Initialize API:
 
-    ```{r}
-    library('ArvadosR')
-    # use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
-    arv <- Arvados$new()
+```{r}
+library('ArvadosR')
+# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
+arv <- Arvados$new()
 
-    # provide them explicitly
-    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-    ```
+# provide them explicitly
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+```
 
-    Optionally, add numRetries parameter to specify number of times to retry failed service requests.
-    Default is 0.
+Optionally, add the numRetries parameter to specify the number of times to retry failed service requests.
+The default is 0.
 
-    ```{r}
-    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
-    ```
+```{r}
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
+```
 
-    This parameter can be set at any time using setNumRetries
+This parameter can be set at any time using setNumRetries:
 
-    ```{r}
-    arv$setNumRetries(5)
-    ```
+```{r}
+arv$setNumRetries(5)
+```
 
 
 #### Working with collections
 
 * Get a collection:
 
-    ```{r}
-    collection <- arv$getCollection("uuid")
-    ```
+```{r}
+collection <- arv$collections.get("uuid")
+```
 
 * List collections:
 
-    ```{r}
-    # offset of 0 and default limit of 100
-    collectionList <- arv$listCollections(list(list("name", "like", "Test%")))
+```{r}
+# offset of 0 and default limit of 100
+collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
 
-    collectionList <- arv$listCollections(list(list("name", "like", "Test%")), limit = 10, offset = 2)
-    ```
+collectionList <- arv$collections.list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
+```
 
-    ```{r}
-    # count of total number of items (may be more than returned due to paging)
-    collectionList$items_available
+```{r}
+# count of total number of items (may be more than returned due to paging)
+collectionList$items_available
 
-    # items which match the filter criteria
-    collectionList$items
-    ```
+# items which match the filter criteria
+collectionList$items
+```
 
 * List all collections even if the number of items is greater than maximum API limit:
 
-    ```{r}
-    collectionList <- arv$listAllCollections(list(list("name", "like", "Test%")))
-    ```
+```{r}
+collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
+```
 
 * Delete a collection:
 
-    ```{r}
-    deletedCollection <- arv$deleteCollection("uuid")
-    ```
+```{r}
+deletedCollection <- arv$collections.delete("uuid")
+```
 
 * Update a collection's metadata:
 
-    ```{r}
-    updatedCollection <- arv$updateCollection("uuid", list(name = "New name", description = "New description"))
-    ```
+```{r}
+updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"), "uuid")
+```
 
 * Create collection:
 
-    ```{r}
-    createdCollection <- arv$createCollection(list(name = "Example", description = "This is a test collection"))
-    ```
+```{r}
+newCollection <- arv$collections.create(list(name = "Example", description = "This is a test collection"))
+```
 
 
 #### Manipulating collection content
 
 * Create collection object:
 
-    ```{r}
-    collection <- Collection$new(arv, "uuid")
-    ```
+```{r}
+collection <- Collection$new(arv, "uuid")
+```
 
 * Get list of files:
 
-    ```{r}
-    files <- collection$getFileListing()
-    ```
+```{r}
+files <- collection$getFileListing()
+```
 
 * Get ArvadosFile or Subcollection from internal tree-like structure:
 
-    ```{r}
-    arvadosFile <- collection$get("location/to/my/file.cpp")
-    ```
+```{r}
+arvadosFile <- collection$get("location/to/my/file.cpp")
+```
 
     or
 
-    ```{r}
-    arvadosSubcollection <- collection$get("location/to/my/directory/")
-    ```
+```{r}
+arvadosSubcollection <- collection$get("location/to/my/directory/")
+```
 
 * Read a table:
 
-    ```{r}
-    arvadosFile   <- collection$get("myinput.txt")
-    arvConnection <- arvadosFile$connection("r")
-    mytable       <- read.table(arvConnection)
-    ```
+```{r}
+arvadosFile   <- collection$get("myinput.txt")
+arvConnection <- arvadosFile$connection("r")
+mytable       <- read.table(arvConnection)
+```
 
 * Write a table:
 
-    ```{r}
-    arvadosFile   <- collection$create("myoutput.txt")
-    arvConnection <- arvadosFile$connection("w")
-    write.table(mytable, arvConnection)
-    arvadosFile$flush()
-    ```
+```{r}
+arvadosFile   <- collection$create("myoutput.txt")
+arvConnection <- arvadosFile$connection("w")
+write.table(mytable, arvConnection)
+arvadosFile$flush()
+```
 
 * Write to existing file (override current content of the file):
 
-    ```{r}
-    arvadosFile <- collection$get("location/to/my/file.cpp")
-    arvadosFile$write("This is new file content")
-    ```
+```{r}
+arvadosFile <- collection$get("location/to/my/file.cpp")
+arvadosFile$write("This is new file content")
+```
 
 * Read whole file or just a portion of it:
 
-    ```{r}
-    fileContent <- arvadosFile$read()
-    fileContent <- arvadosFile$read("text")
-    fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
-    ```
+```{r}
+fileContent <- arvadosFile$read()
+fileContent <- arvadosFile$read("text")
+fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
+```
 
 * Get ArvadosFile or Subcollection size:
 
-    ```{r}
-    size <- arvadosFile$getSizeInBytes()
-    ```
+```{r}
+size <- arvadosFile$getSizeInBytes()
+```
 
     or
 
-    ```{r}
-    size <- arvadosSubcollection$getSizeInBytes()
-    ```
+```{r}
+size <- arvadosSubcollection$getSizeInBytes()
+```
 
 * Create new file in a collection:
 
-    ```{r}
-    collection$create(fileNames, optionalRelativePath)
-    ```
+```{r}
+collection$create(fileNames, optionalRelativePath)
+```
 
     Example:
 
-    ```{r}
-    mainFile <- collection$create("main.cpp", "cpp/src/")
-    fileList <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
-    ```
+```{r}
+mainFile <- collection$create("main.cpp", "cpp/src/")
+fileList <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
+```
 
 * Add existing ArvadosFile or Subcollection to a collection:
 
-    ```{r}
-    folder <- Subcollection$new("src")
-    file   <- ArvadosFile$new("main.cpp")
-    folder$add(file)
-    ```
+```{r}
+folder <- Subcollection$new("src")
+file   <- ArvadosFile$new("main.cpp")
+folder$add(file)
+```
 
-    ```{r}
-    collection$add(folder, "cpp")
-    ```
+```{r}
+collection$add(folder, "cpp")
+```
 
-    This examples will add file "main.cpp" in "./cpp/src/" folder if folder exists.
-    If subcollection contains more files or folders they will be added recursively.
+This example will add the file "main.cpp" to the "./cpp/src/" folder, if the folder exists.
+If the subcollection contains more files or folders, they will be added recursively.
 
 * Delete file from a collection:
 
-    ```{r}
-    collection$remove("location/to/my/file.cpp")
-    ```
+```{r}
+collection$remove("location/to/my/file.cpp")
+```
 
-    You can remove both Subcollection and ArvadosFile.
-    If subcollection contains more files or folders they will be removed recursively.
+You can remove both Subcollections and ArvadosFiles.
+If the subcollection contains more files or folders, they will be removed recursively.
 
-    You can also remove multiple files at once:
+You can also remove multiple files at once:
 
-    ```{r}
-    collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
-    ```
+```{r}
+collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
+```
 
 * Delete file or folder from a Subcollection:
 
-    ```{r}
-    subcollection <- collection$get("mySubcollection/")
-    subcollection$remove("fileInsideSubcollection.exe")
-    subcollection$remove("folderInsideSubcollection/")
-    ```
+```{r}
+subcollection <- collection$get("mySubcollection/")
+subcollection$remove("fileInsideSubcollection.exe")
+subcollection$remove("folderInsideSubcollection/")
+```
 
 * Move file or folder inside collection:
 
-    Directley from collection
+Directly from the collection:
 
-    ```{r}
-    collection$move("folder/file.cpp", "file.cpp")
-    ```
+```{r}
+collection$move("folder/file.cpp", "file.cpp")
+```
 
-    Or from file
+Or from a file:
 
-    ```{r}
-    file <- collection$get("location/to/my/file.cpp")
-    file$move("newDestination/file.cpp")
-    ```
+```{r}
+file <- collection$get("location/to/my/file.cpp")
+file$move("newDestination/file.cpp")
+```
 
-    Or from subcollection
+Or from a subcollection:
 
-    ```{r}
-    subcollection <- collection$get("location/to/folder")
-    subcollection$move("newDestination/folder")
-    ```
+```{r}
+subcollection <- collection$get("location/to/folder")
+subcollection$move("newDestination/folder")
+```
 
-    Make sure to include new file name in destination.
-    In second example file$move("newDestination/") will not work.
+Make sure to include the new file name in the destination.
+In the second example, file$move("newDestination/") will not work.
 
 #### Working with Arvados projects
 
 * Get a project:
 
-    ```{r}
-    project <- arv$getProject("uuid")
-    ```
+```{r}
+project <- arv$projects.get("uuid")
+```
 
 * List projects:
 
-    ```{r}
-    # list subprojects of a project
-    projects <- arv$listProjects(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
+```{r}
+# list subprojects of a project
+projects <- arv$projects.list(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
 
-    # list projects which have names beginning with Example
-    arv$listProjects(list(list("name","like","Example%")))
-    ```
+# list projects which have names beginning with Example
+examples <- arv$projects.list(list(list("name","like","Example%")))
+```
 
 * List all projects even if the number of items is greater than maximum API limit:
 
-    ```{r}
-    collectionList <- arv$listAllProjects(list(list("name","like","Example%")))
-    ```
+```{r}
+projects <- listAll(arv$projects.list, list(list("name","like","Example%")))
+```
 
 * Delete a project:
 
-    ```{r}
-    deletedProject <- arv$deleteProject("uuid")
-    ```
+```{r}
+deletedProject <- arv$projects.delete("uuid")
+```
 
 * Update project:
 
-    ```{r}
-    updatedProject <- arv$updateProject("uuid", list(name = "new_name", description = "new description"))
-    ```
+```{r}
+updatedProject <- arv$projects.update(list(name = "new_name", description = "new description"), "uuid")
+```
 
 * Create project:
 
-    ```{r}
-    createdProject <- arv$createProject(list(name = "project_name", description = "project description"))
-    ```
+```{r}
+newProject <- arv$projects.create(list(name = "project_name", description = "project description"))
+```
+
+#### Help
+
+* View the help page of Arvados classes by putting ? before the class name:
+
+```{r}
+?Arvados
+?Collection
+?Subcollection
+?ArvadosFile
+```
+
+* View the help page of any method defined in the Arvados class by putting ? before the method name:
+
+```{r}
+?collections.update
+?jobs.get
+```
 
 ### Building the ArvadosR package
 
-  ```
-  cd arvados/sdk && R CMD build R
-  ```
+```{bash}
+cd arvados/sdk && R CMD build R
+```
 
 This will create a tarball of the ArvadosR package in the current directory.
diff --git a/sdk/R/createDoc.R b/sdk/R/createDoc.R
new file mode 100644 (file)
index 0000000..5decab9
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Run this script with: Rscript createDoc.R input.Rmd output.html
+
+require(knitr) # required for knitting from rmd to md
+require(markdown) # required for md to html
+
+args <- commandArgs(TRUE)
+
+if(length(args) != 2)
+    stop("Please provide 2 arguments corresponding to input and output file!")
+
+inputFile <- args[[1]] # .Rmd file
+outputFile <- args[[2]] # .html file
+
+# Knit the .Rmd input; the rendered result is written directly to the output file
+knitr::knit(inputFile, outputFile)
+
+# Alternative two-step path (currently disabled): knit to a temporary .md file,
+# then convert it to HTML with markdown::markdownToHTML
+#tempMdFile <- tempfile("tempREADME", fileext = "md")
+#knitr::knit(inputFile, tempMdFile)
+#markdown::markdownToHTML(tempMdFile, outputFile)
index a54a9a23facdda1769520e7d653b976c293ce4bd..593129bb3ceeb18bbc6cb2520529fd5067b823b1 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
 if (!requireNamespace("devtools")) {
   install.packages("devtools")
@@ -5,14 +9,11 @@ if (!requireNamespace("devtools")) {
 if (!requireNamespace("roxygen2")) {
   install.packages("roxygen2")
 }
-
-# These install from github so install known-good versions instead of
-# letting any push to master break our build.
-if (!requireNamespace("pkgload")) {
-  devtools::install_github("r-lib/pkgload", ref="7a97de62adf1793c03e73095937e4655baad79c9")
+if (!requireNamespace("knitr")) {
+  install.packages("knitr")
 }
-if (!requireNamespace("pkgdown")) {
-  devtools::install_github("r-lib/pkgdown", ref="897ffbc016549c11c4263cb5d1f6e9f5c99efb45")
+if (!requireNamespace("markdown")) {
+  install.packages("markdown")
 }
 
 devtools::install_dev_deps()
index 3c66437f04149ed833d6202d2cbe2f68d38bc5fc..95a2e5561fa9ce21b1d5bd236489595b8c2c034d 100644 (file)
@@ -4,7 +4,7 @@
 \alias{Arvados}
 \title{Arvados}
 \description{
-Arvados class gives users ability to manipulate collections and projects.
+The Arvados class gives users the ability to access the Arvados REST API.
 }
 \section{Usage}{
 
@@ -14,31 +14,175 @@ Arvados class gives users ability to manipulate collections and projects.
 \section{Arguments}{
 
 \describe{
-  \item{authToken}{Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.}
-  \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
-  \item{numRetries}{Number which specifies how many times to retry failed service requests.}
+       \item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}
+       \item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable will be used.}
+       \item{numRetries}{Number of times to retry failed service requests.}
 }
 }
 
 \section{Methods}{
 
 \describe{
-  \item{getToken()}{Returns authentification token currently in use.}
-  \item{getHostName()}{Returns host name currently in use.}
-  \item{getNumRetries()}{Returns number which specifies how many times to retry failed service requests.}
-  \item{setNumRetries(newNumOfRetries)}{Sets number which specifies how many times to retry failed service requests.}
-  \item{getCollection(uuid)}{Get collection with specified UUID.}
-  \item{listCollections(filters = NULL, limit = 100, offset = 0)}{Returns list of collections based on filters parameter.}
-  \item{listAllCollections(filters = NULL)}{Lists all collections, based on filters parameter, even if the number of items is greater than maximum API limit.}
-  \item{deleteCollection(uuid)}{Deletes collection with specified UUID.}
-  \item{updateCollection(uuid, newContent)}{Updates collection with specified UUID.}
-  \item{createCollection(content)}{Creates new collection.}
-  \item{getProject(uuid)}{Get project with specified UUID.}
-  \item{listProjects(filters = NULL, limit = 100, offset = 0)}{Returns list of projects based on filters parameter.}
-  \item{listAllProjects(filters = NULL)}{Lists all projects, based on filters parameter, even if the number of items is greater than maximum API limit.}
-  \item{deleteProject(uuid)}{Deletes project with specified UUID.}
-  \item{updateProject(uuid, newContent)}{Updates project with specified UUID.}
-  \item{createProject(content)}{Creates new project.}
+       \item{}{\code{\link{api_client_authorizations.create}}}
+       \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
+       \item{}{\code{\link{api_client_authorizations.current}}}
+       \item{}{\code{\link{api_client_authorizations.delete}}}
+       \item{}{\code{\link{api_client_authorizations.get}}}
+       \item{}{\code{\link{api_client_authorizations.list}}}
+       \item{}{\code{\link{api_client_authorizations.update}}}
+       \item{}{\code{\link{api_clients.create}}}
+       \item{}{\code{\link{api_clients.delete}}}
+       \item{}{\code{\link{api_clients.get}}}
+       \item{}{\code{\link{api_clients.list}}}
+       \item{}{\code{\link{api_clients.update}}}
+       \item{}{\code{\link{authorized_keys.create}}}
+       \item{}{\code{\link{authorized_keys.delete}}}
+       \item{}{\code{\link{authorized_keys.get}}}
+       \item{}{\code{\link{authorized_keys.list}}}
+       \item{}{\code{\link{authorized_keys.update}}}
+       \item{}{\code{\link{collections.create}}}
+       \item{}{\code{\link{collections.delete}}}
+       \item{}{\code{\link{collections.get}}}
+       \item{}{\code{\link{collections.list}}}
+       \item{}{\code{\link{collections.provenance}}}
+       \item{}{\code{\link{collections.trash}}}
+       \item{}{\code{\link{collections.untrash}}}
+       \item{}{\code{\link{collections.update}}}
+       \item{}{\code{\link{collections.used_by}}}
+       \item{}{\code{\link{container_requests.create}}}
+       \item{}{\code{\link{container_requests.delete}}}
+       \item{}{\code{\link{container_requests.get}}}
+       \item{}{\code{\link{container_requests.list}}}
+       \item{}{\code{\link{container_requests.update}}}
+       \item{}{\code{\link{containers.auth}}}
+       \item{}{\code{\link{containers.create}}}
+       \item{}{\code{\link{containers.current}}}
+       \item{}{\code{\link{containers.delete}}}
+       \item{}{\code{\link{containers.get}}}
+       \item{}{\code{\link{containers.list}}}
+       \item{}{\code{\link{containers.lock}}}
+       \item{}{\code{\link{containers.secret_mounts}}}
+       \item{}{\code{\link{containers.unlock}}}
+       \item{}{\code{\link{containers.update}}}
+       \item{}{\code{\link{groups.contents}}}
+       \item{}{\code{\link{groups.create}}}
+       \item{}{\code{\link{groups.delete}}}
+       \item{}{\code{\link{groups.get}}}
+       \item{}{\code{\link{groups.list}}}
+       \item{}{\code{\link{groups.trash}}}
+       \item{}{\code{\link{groups.untrash}}}
+       \item{}{\code{\link{groups.update}}}
+       \item{}{\code{\link{humans.create}}}
+       \item{}{\code{\link{humans.delete}}}
+       \item{}{\code{\link{humans.get}}}
+       \item{}{\code{\link{humans.list}}}
+       \item{}{\code{\link{humans.update}}}
+       \item{}{\code{\link{jobs.cancel}}}
+       \item{}{\code{\link{jobs.create}}}
+       \item{}{\code{\link{jobs.delete}}}
+       \item{}{\code{\link{jobs.get}}}
+       \item{}{\code{\link{jobs.list}}}
+       \item{}{\code{\link{jobs.lock}}}
+       \item{}{\code{\link{jobs.queue}}}
+       \item{}{\code{\link{jobs.queue_size}}}
+       \item{}{\code{\link{jobs.update}}}
+       \item{}{\code{\link{job_tasks.create}}}
+       \item{}{\code{\link{job_tasks.delete}}}
+       \item{}{\code{\link{job_tasks.get}}}
+       \item{}{\code{\link{job_tasks.list}}}
+       \item{}{\code{\link{job_tasks.update}}}
+       \item{}{\code{\link{keep_disks.create}}}
+       \item{}{\code{\link{keep_disks.delete}}}
+       \item{}{\code{\link{keep_disks.get}}}
+       \item{}{\code{\link{keep_disks.list}}}
+       \item{}{\code{\link{keep_disks.ping}}}
+       \item{}{\code{\link{keep_disks.update}}}
+       \item{}{\code{\link{keep_services.accessible}}}
+       \item{}{\code{\link{keep_services.create}}}
+       \item{}{\code{\link{keep_services.delete}}}
+       \item{}{\code{\link{keep_services.get}}}
+       \item{}{\code{\link{keep_services.list}}}
+       \item{}{\code{\link{keep_services.update}}}
+       \item{}{\code{\link{links.create}}}
+       \item{}{\code{\link{links.delete}}}
+       \item{}{\code{\link{links.get}}}
+       \item{}{\code{\link{links.get_permissions}}}
+       \item{}{\code{\link{links.list}}}
+       \item{}{\code{\link{links.update}}}
+       \item{}{\code{\link{logs.create}}}
+       \item{}{\code{\link{logs.delete}}}
+       \item{}{\code{\link{logs.get}}}
+       \item{}{\code{\link{logs.list}}}
+       \item{}{\code{\link{logs.update}}}
+       \item{}{\code{\link{nodes.create}}}
+       \item{}{\code{\link{nodes.delete}}}
+       \item{}{\code{\link{nodes.get}}}
+       \item{}{\code{\link{nodes.list}}}
+       \item{}{\code{\link{nodes.ping}}}
+       \item{}{\code{\link{nodes.update}}}
+       \item{}{\code{\link{pipeline_instances.cancel}}}
+       \item{}{\code{\link{pipeline_instances.create}}}
+       \item{}{\code{\link{pipeline_instances.delete}}}
+       \item{}{\code{\link{pipeline_instances.get}}}
+       \item{}{\code{\link{pipeline_instances.list}}}
+       \item{}{\code{\link{pipeline_instances.update}}}
+       \item{}{\code{\link{pipeline_templates.create}}}
+       \item{}{\code{\link{pipeline_templates.delete}}}
+       \item{}{\code{\link{pipeline_templates.get}}}
+       \item{}{\code{\link{pipeline_templates.list}}}
+       \item{}{\code{\link{pipeline_templates.update}}}
+       \item{}{\code{\link{projects.create}}}
+       \item{}{\code{\link{projects.delete}}}
+       \item{}{\code{\link{projects.get}}}
+       \item{}{\code{\link{projects.list}}}
+       \item{}{\code{\link{projects.update}}}
+       \item{}{\code{\link{repositories.create}}}
+       \item{}{\code{\link{repositories.delete}}}
+       \item{}{\code{\link{repositories.get}}}
+       \item{}{\code{\link{repositories.get_all_permissions}}}
+       \item{}{\code{\link{repositories.list}}}
+       \item{}{\code{\link{repositories.update}}}
+       \item{}{\code{\link{specimens.create}}}
+       \item{}{\code{\link{specimens.delete}}}
+       \item{}{\code{\link{specimens.get}}}
+       \item{}{\code{\link{specimens.list}}}
+       \item{}{\code{\link{specimens.update}}}
+       \item{}{\code{\link{traits.create}}}
+       \item{}{\code{\link{traits.delete}}}
+       \item{}{\code{\link{traits.get}}}
+       \item{}{\code{\link{traits.list}}}
+       \item{}{\code{\link{traits.update}}}
+       \item{}{\code{\link{user_agreements.create}}}
+       \item{}{\code{\link{user_agreements.delete}}}
+       \item{}{\code{\link{user_agreements.get}}}
+       \item{}{\code{\link{user_agreements.list}}}
+       \item{}{\code{\link{user_agreements.new}}}
+       \item{}{\code{\link{user_agreements.sign}}}
+       \item{}{\code{\link{user_agreements.signatures}}}
+       \item{}{\code{\link{user_agreements.update}}}
+       \item{}{\code{\link{users.activate}}}
+       \item{}{\code{\link{users.create}}}
+       \item{}{\code{\link{users.current}}}
+       \item{}{\code{\link{users.delete}}}
+       \item{}{\code{\link{users.get}}}
+       \item{}{\code{\link{users.list}}}
+       \item{}{\code{\link{users.setup}}}
+       \item{}{\code{\link{users.system}}}
+       \item{}{\code{\link{users.unsetup}}}
+       \item{}{\code{\link{users.update}}}
+       \item{}{\code{\link{users.update_uuid}}}
+       \item{}{\code{\link{virtual_machines.create}}}
+       \item{}{\code{\link{virtual_machines.delete}}}
+       \item{}{\code{\link{virtual_machines.get}}}
+       \item{}{\code{\link{virtual_machines.get_all_logins}}}
+       \item{}{\code{\link{virtual_machines.list}}}
+       \item{}{\code{\link{virtual_machines.logins}}}
+       \item{}{\code{\link{virtual_machines.update}}}
+       \item{}{\code{\link{workflows.create}}}
+       \item{}{\code{\link{workflows.delete}}}
+       \item{}{\code{\link{workflows.get}}}
+       \item{}{\code{\link{workflows.list}}}
+       \item{}{\code{\link{workflows.update}}}
 }
 }
 
@@ -46,17 +190,17 @@ Arvados class gives users ability to manipulate collections and projects.
 \dontrun{
 arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
 
-collection <- arv$getCollection("uuid")
+collection <- arv$collections.get("uuid")
 
-collectionList <- arv$listCollections(list(list("name", "like", "Test\%")))
-collectionList <- arv$listAllCollections(list(list("name", "like", "Test\%")))
+collectionList <- arv$collections.list(list(list("name", "like", "Test\%")))
+collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test\%")))
 
-deletedCollection <- arv$deleteCollection("uuid")
+deletedCollection <- arv$collections.delete("uuid")
 
-updatedCollection <- arv$updateCollection("uuid", list(name = "New name",
-                                                       description = "New description"))
+updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
+                                            "uuid")
 
-createdCollection <- arv$createCollection(list(name = "Example",
-                                               description = "This is a test collection"))
+createdCollection <- arv$collections.create(list(name = "Example",
+                                                 description = "This is a test collection"))
 }
 }
diff --git a/sdk/R/man/api_client_authorizations.create.Rd b/sdk/R/man/api_client_authorizations.create.Rd
new file mode 100644 (file)
index 0000000..e322419
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.create}
+\alias{api_client_authorizations.create}
+\title{api_client_authorizations.create}
+\usage{
+arv$api_client_authorizations.create(apiClientAuthorization,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{apiClientAuthorization}{ApiClientAuthorization object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.create_system_auth.Rd b/sdk/R/man/api_client_authorizations.create_system_auth.Rd
new file mode 100644 (file)
index 0000000..3eb172a
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.create_system_auth}
+\alias{api_client_authorizations.create_system_auth}
+\title{api_client_authorizations.create_system_auth}
+\usage{
+arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
+       scopes = NULL)
+}
+\arguments{
+\item{api_client_id}{}
+
+\item{scopes}{}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.create_system_auth is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.current.Rd b/sdk/R/man/api_client_authorizations.current.Rd
new file mode 100644 (file)
index 0000000..c286237
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.current}
+\alias{api_client_authorizations.current}
+\title{api_client_authorizations.current}
+\usage{
+arv$api_client_authorizations.current(NULL)
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.delete.Rd b/sdk/R/man/api_client_authorizations.delete.Rd
new file mode 100644 (file)
index 0000000..054cc79
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.delete}
+\alias{api_client_authorizations.delete}
+\title{api_client_authorizations.delete}
+\usage{
+arv$api_client_authorizations.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.get.Rd b/sdk/R/man/api_client_authorizations.get.Rd
new file mode 100644 (file)
index 0000000..3f5b630
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.get}
+\alias{api_client_authorizations.get}
+\title{api_client_authorizations.get}
+\usage{
+arv$api_client_authorizations.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.list.Rd b/sdk/R/man/api_client_authorizations.list.Rd
new file mode 100644 (file)
index 0000000..7c8ae69
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.list}
+\alias{api_client_authorizations.list}
+\title{api_client_authorizations.list}
+\usage{
+arv$api_client_authorizations.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ApiClientAuthorizationList object.
+}
+\description{
+api_client_authorizations.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.update.Rd b/sdk/R/man/api_client_authorizations.update.Rd
new file mode 100644 (file)
index 0000000..e6380cc
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.update}
+\alias{api_client_authorizations.update}
+\title{api_client_authorizations.update}
+\usage{
+arv$api_client_authorizations.update(apiClientAuthorization,
+       uuid)
+}
+\arguments{
+\item{apiClientAuthorization}{ApiClientAuthorization object.}
+
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.create.Rd b/sdk/R/man/api_clients.create.Rd
new file mode 100644 (file)
index 0000000..2601168
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.create}
+\alias{api_clients.create}
+\title{api_clients.create}
+\usage{
+arv$api_clients.create(apiClient,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{apiClient}{ApiClient object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.delete.Rd b/sdk/R/man/api_clients.delete.Rd
new file mode 100644 (file)
index 0000000..90eaa99
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.delete}
+\alias{api_clients.delete}
+\title{api_clients.delete}
+\usage{
+arv$api_clients.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.get.Rd b/sdk/R/man/api_clients.get.Rd
new file mode 100644 (file)
index 0000000..4a1b6c0
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.get}
+\alias{api_clients.get}
+\title{api_clients.get}
+\usage{
+arv$api_clients.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.list.Rd b/sdk/R/man/api_clients.list.Rd
new file mode 100644 (file)
index 0000000..0679c9c
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.list}
+\alias{api_clients.list}
+\title{api_clients.list}
+\usage{
+arv$api_clients.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ApiClientList object.
+}
+\description{
+api_clients.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.update.Rd b/sdk/R/man/api_clients.update.Rd
new file mode 100644 (file)
index 0000000..a37e533
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.update}
+\alias{api_clients.update}
+\title{api_clients.update}
+\usage{
+arv$api_clients.update(apiClient,
+       uuid)
+}
+\arguments{
+\item{apiClient}{ApiClient object.}
+
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.create.Rd b/sdk/R/man/authorized_keys.create.Rd
new file mode 100644 (file)
index 0000000..e0d226a
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.create}
+\alias{authorized_keys.create}
+\title{authorized_keys.create}
+\usage{
+arv$authorized_keys.create(authorizedKey,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{authorizedKey}{AuthorizedKey object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.delete.Rd b/sdk/R/man/authorized_keys.delete.Rd
new file mode 100644 (file)
index 0000000..db1f0e7
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.delete}
+\alias{authorized_keys.delete}
+\title{authorized_keys.delete}
+\usage{
+arv$authorized_keys.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.get.Rd b/sdk/R/man/authorized_keys.get.Rd
new file mode 100644 (file)
index 0000000..31a2dd3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.get}
+\alias{authorized_keys.get}
+\title{authorized_keys.get}
+\usage{
+arv$authorized_keys.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.list.Rd b/sdk/R/man/authorized_keys.list.Rd
new file mode 100644 (file)
index 0000000..cd19bc6
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.list}
+\alias{authorized_keys.list}
+\title{authorized_keys.list}
+\usage{
+arv$authorized_keys.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+AuthorizedKeyList object.
+}
+\description{
+authorized_keys.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.update.Rd b/sdk/R/man/authorized_keys.update.Rd
new file mode 100644 (file)
index 0000000..65d93d0
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.update}
+\alias{authorized_keys.update}
+\title{authorized_keys.update}
+\usage{
+arv$authorized_keys.update(authorizedKey,
+       uuid)
+}
+\arguments{
+\item{authorizedKey}{AuthorizedKey object.}
+
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.create.Rd b/sdk/R/man/collections.create.Rd
new file mode 100644 (file)
index 0000000..af8e398
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.create}
+\alias{collections.create}
+\title{collections.create}
+\usage{
+arv$collections.create(collection,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{collection}{Collection object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.delete.Rd b/sdk/R/man/collections.delete.Rd
new file mode 100644 (file)
index 0000000..28b3543
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.delete}
+\alias{collections.delete}
+\title{collections.delete}
+\usage{
+arv$collections.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.get.Rd b/sdk/R/man/collections.get.Rd
new file mode 100644 (file)
index 0000000..3878aaf
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.get}
+\alias{collections.get}
+\title{collections.get}
+\usage{
+arv$collections.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.list.Rd b/sdk/R/man/collections.list.Rd
new file mode 100644 (file)
index 0000000..87f6f78
--- /dev/null
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.list}
+\alias{collections.list}
+\title{collections.list}
+\usage{
+arv$collections.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact", include_trash = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include collections whose is_trashed attribute is true.}
+}
+\value{
+CollectionList object.
+}
+\description{
+collections.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.provenance.Rd b/sdk/R/man/collections.provenance.Rd
new file mode 100644 (file)
index 0000000..001a7b4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.provenance}
+\alias{collections.provenance}
+\title{collections.provenance}
+\usage{
+arv$collections.provenance(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.provenance is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.trash.Rd b/sdk/R/man/collections.trash.Rd
new file mode 100644 (file)
index 0000000..4862109
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.trash}
+\alias{collections.trash}
+\title{collections.trash}
+\usage{
+arv$collections.trash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.trash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.untrash.Rd b/sdk/R/man/collections.untrash.Rd
new file mode 100644 (file)
index 0000000..c41bc3d
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.untrash}
+\alias{collections.untrash}
+\title{collections.untrash}
+\usage{
+arv$collections.untrash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.untrash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.update.Rd b/sdk/R/man/collections.update.Rd
new file mode 100644 (file)
index 0000000..c9b201c
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.update}
+\alias{collections.update}
+\title{collections.update}
+\usage{
+arv$collections.update(collection,
+       uuid)
+}
+\arguments{
+\item{collection}{Collection object.}
+
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.used_by.Rd b/sdk/R/man/collections.used_by.Rd
new file mode 100644 (file)
index 0000000..53b8e49
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.used_by}
+\alias{collections.used_by}
+\title{collections.used_by}
+\usage{
+arv$collections.used_by(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.used_by is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.create.Rd b/sdk/R/man/container_requests.create.Rd
new file mode 100644 (file)
index 0000000..e114d32
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.create}
+\alias{container_requests.create}
+\title{container_requests.create}
+\usage{
+arv$container_requests.create(containerRequest,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{containerRequest}{ContainerRequest object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.delete.Rd b/sdk/R/man/container_requests.delete.Rd
new file mode 100644 (file)
index 0000000..905739b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.delete}
+\alias{container_requests.delete}
+\title{container_requests.delete}
+\usage{
+arv$container_requests.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.get.Rd b/sdk/R/man/container_requests.get.Rd
new file mode 100644 (file)
index 0000000..54fe5d4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.get}
+\alias{container_requests.get}
+\title{container_requests.get}
+\usage{
+arv$container_requests.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.list.Rd b/sdk/R/man/container_requests.list.Rd
new file mode 100644 (file)
index 0000000..9c2412b
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.list}
+\alias{container_requests.list}
+\title{container_requests.list}
+\usage{
+arv$container_requests.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ContainerRequestList object.
+}
+\description{
+container_requests.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.update.Rd b/sdk/R/man/container_requests.update.Rd
new file mode 100644 (file)
index 0000000..063417b
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.update}
+\alias{container_requests.update}
+\title{container_requests.update}
+\usage{
+arv$container_requests.update(containerRequest,
+       uuid)
+}
+\arguments{
+\item{containerRequest}{ContainerRequest object.}
+
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.auth.Rd b/sdk/R/man/containers.auth.Rd
new file mode 100644 (file)
index 0000000..a594d2f
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.auth}
+\alias{containers.auth}
+\title{containers.auth}
+\usage{
+arv$containers.auth(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Container object.
+}
+\description{
+containers.auth is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.create.Rd b/sdk/R/man/containers.create.Rd
new file mode 100644 (file)
index 0000000..4ce25bb
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.create}
+\alias{containers.create}
+\title{containers.create}
+\usage{
+arv$containers.create(container,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{container}{Container object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Container object.
+}
+\description{
+containers.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.current.Rd b/sdk/R/man/containers.current.Rd
new file mode 100644 (file)
index 0000000..0f6ad4e
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.current}
+\alias{containers.current}
+\title{containers.current}
+\usage{
+arv$containers.current(NULL)
+}
+\value{
+Container object.
+}
+\description{
+containers.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.delete.Rd b/sdk/R/man/containers.delete.Rd
new file mode 100644 (file)
index 0000000..e2e3cd7
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.delete}
+\alias{containers.delete}
+\title{containers.delete}
+\usage{
+arv$containers.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.get.Rd b/sdk/R/man/containers.get.Rd
new file mode 100644 (file)
index 0000000..05d97d3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.get}
+\alias{containers.get}
+\title{containers.get}
+\usage{
+arv$containers.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.list.Rd b/sdk/R/man/containers.list.Rd
new file mode 100644 (file)
index 0000000..d445796
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.list}
+\alias{containers.list}
+\title{containers.list}
+\usage{
+arv$containers.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ContainerList object.
+}
+\description{
+containers.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.lock.Rd b/sdk/R/man/containers.lock.Rd
new file mode 100644 (file)
index 0000000..72bcdf0
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.lock}
+\alias{containers.lock}
+\title{containers.lock}
+\usage{
+arv$containers.lock(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Container object.
+}
+\description{
+containers.lock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.secret_mounts.Rd b/sdk/R/man/containers.secret_mounts.Rd
new file mode 100644 (file)
index 0000000..d0f8444
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.secret_mounts}
+\alias{containers.secret_mounts}
+\title{containers.secret_mounts}
+\usage{
+arv$containers.secret_mounts(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Container object.
+}
+\description{
+containers.secret_mounts is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.unlock.Rd b/sdk/R/man/containers.unlock.Rd
new file mode 100644 (file)
index 0000000..5c41f20
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.unlock}
+\alias{containers.unlock}
+\title{containers.unlock}
+\usage{
+arv$containers.unlock(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Container object.
+}
+\description{
+containers.unlock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.update.Rd b/sdk/R/man/containers.update.Rd
new file mode 100644 (file)
index 0000000..3a85726
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.update}
+\alias{containers.update}
+\title{containers.update}
+\usage{
+arv$containers.update(container,
+       uuid)
+}
+\arguments{
+\item{container}{Container object.}
+
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.contents.Rd b/sdk/R/man/groups.contents.Rd
new file mode 100644 (file)
index 0000000..26647df
--- /dev/null
@@ -0,0 +1,38 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.contents}
+\alias{groups.contents}
+\title{groups.contents}
+\usage{
+arv$groups.contents(filters = NULL,
+       where = NULL, order = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact",
+       include_trash = NULL, uuid = NULL, recursive = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+
+\item{uuid}{}
+
+\item{recursive}{Include contents from child groups recursively.}
+}
+\value{
+Group object.
+}
+\description{
+groups.contents is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.create.Rd b/sdk/R/man/groups.create.Rd
new file mode 100644 (file)
index 0000000..8719603
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.create}
+\alias{groups.create}
+\title{groups.create}
+\usage{
+arv$groups.create(group, ensure_unique_name = "false")
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Group object.
+}
+\description{
+groups.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.delete.Rd b/sdk/R/man/groups.delete.Rd
new file mode 100644 (file)
index 0000000..1b4a0d9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.delete}
+\alias{groups.delete}
+\title{groups.delete}
+\usage{
+arv$groups.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.get.Rd b/sdk/R/man/groups.get.Rd
new file mode 100644 (file)
index 0000000..28a1872
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.get}
+\alias{groups.get}
+\title{groups.get}
+\usage{
+arv$groups.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.list.Rd b/sdk/R/man/groups.list.Rd
new file mode 100644 (file)
index 0000000..7699f3e
--- /dev/null
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.list}
+\alias{groups.list}
+\title{groups.list}
+\usage{
+arv$groups.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact", include_trash = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+}
+\value{
+GroupList object.
+}
+\description{
+groups.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.trash.Rd b/sdk/R/man/groups.trash.Rd
new file mode 100644 (file)
index 0000000..c529618
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.trash}
+\alias{groups.trash}
+\title{groups.trash}
+\usage{
+arv$groups.trash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Group object.
+}
+\description{
+groups.trash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.untrash.Rd b/sdk/R/man/groups.untrash.Rd
new file mode 100644 (file)
index 0000000..014190c
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.untrash}
+\alias{groups.untrash}
+\title{groups.untrash}
+\usage{
+arv$groups.untrash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Group object.
+}
+\description{
+groups.untrash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.update.Rd b/sdk/R/man/groups.update.Rd
new file mode 100644 (file)
index 0000000..47abde7
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.update}
+\alias{groups.update}
+\title{groups.update}
+\usage{
+arv$groups.update(group, uuid)
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.create.Rd b/sdk/R/man/humans.create.Rd
new file mode 100644 (file)
index 0000000..44c9aa3
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.create}
+\alias{humans.create}
+\title{humans.create}
+\usage{
+arv$humans.create(human, ensure_unique_name = "false")
+}
+\arguments{
+\item{human}{Human object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Human object.
+}
+\description{
+humans.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.delete.Rd b/sdk/R/man/humans.delete.Rd
new file mode 100644 (file)
index 0000000..ae66b8e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.delete}
+\alias{humans.delete}
+\title{humans.delete}
+\usage{
+arv$humans.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.get.Rd b/sdk/R/man/humans.get.Rd
new file mode 100644 (file)
index 0000000..820c562
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.get}
+\alias{humans.get}
+\title{humans.get}
+\usage{
+arv$humans.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.list.Rd b/sdk/R/man/humans.list.Rd
new file mode 100644 (file)
index 0000000..a8db4c7
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.list}
+\alias{humans.list}
+\title{humans.list}
+\usage{
+arv$humans.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+HumanList object.
+}
+\description{
+humans.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.update.Rd b/sdk/R/man/humans.update.Rd
new file mode 100644 (file)
index 0000000..83956a2
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.update}
+\alias{humans.update}
+\title{humans.update}
+\usage{
+arv$humans.update(human, uuid)
+}
+\arguments{
+\item{human}{Human object.}
+
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.create.Rd b/sdk/R/man/job_tasks.create.Rd
new file mode 100644 (file)
index 0000000..2da0b0c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.create}
+\alias{job_tasks.create}
+\title{job_tasks.create}
+\usage{
+arv$job_tasks.create(jobTask, ensure_unique_name = "false")
+}
+\arguments{
+\item{jobTask}{JobTask object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.delete.Rd b/sdk/R/man/job_tasks.delete.Rd
new file mode 100644 (file)
index 0000000..b78a38e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.delete}
+\alias{job_tasks.delete}
+\title{job_tasks.delete}
+\usage{
+arv$job_tasks.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.get.Rd b/sdk/R/man/job_tasks.get.Rd
new file mode 100644 (file)
index 0000000..07d2054
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.get}
+\alias{job_tasks.get}
+\title{job_tasks.get}
+\usage{
+arv$job_tasks.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.list.Rd b/sdk/R/man/job_tasks.list.Rd
new file mode 100644 (file)
index 0000000..51c4b49
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.list}
+\alias{job_tasks.list}
+\title{job_tasks.list}
+\usage{
+arv$job_tasks.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+JobTaskList object.
+}
+\description{
+job_tasks.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.update.Rd b/sdk/R/man/job_tasks.update.Rd
new file mode 100644 (file)
index 0000000..42d10bd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.update}
+\alias{job_tasks.update}
+\title{job_tasks.update}
+\usage{
+arv$job_tasks.update(jobTask, uuid)
+}
+\arguments{
+\item{jobTask}{JobTask object.}
+
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.cancel.Rd b/sdk/R/man/jobs.cancel.Rd
new file mode 100644 (file)
index 0000000..7399d28
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.cancel}
+\alias{jobs.cancel}
+\title{jobs.cancel}
+\usage{
+arv$jobs.cancel(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.cancel is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.create.Rd b/sdk/R/man/jobs.create.Rd
new file mode 100644 (file)
index 0000000..4c4d61a
--- /dev/null
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.create}
+\alias{jobs.create}
+\title{jobs.create}
+\usage{
+arv$jobs.create(job, ensure_unique_name = "false",
+       find_or_create = "false", filters = NULL,
+       minimum_script_version = NULL, exclude_script_versions = NULL)
+}
+\arguments{
+\item{job}{Job object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+
+\item{find_or_create}{}
+
+\item{filters}{}
+
+\item{minimum_script_version}{}
+
+\item{exclude_script_versions}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.delete.Rd b/sdk/R/man/jobs.delete.Rd
new file mode 100644 (file)
index 0000000..7f75608
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.delete}
+\alias{jobs.delete}
+\title{jobs.delete}
+\usage{
+arv$jobs.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.get.Rd b/sdk/R/man/jobs.get.Rd
new file mode 100644 (file)
index 0000000..072b613
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.get}
+\alias{jobs.get}
+\title{jobs.get}
+\usage{
+arv$jobs.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.list.Rd b/sdk/R/man/jobs.list.Rd
new file mode 100644 (file)
index 0000000..53055f5
--- /dev/null
@@ -0,0 +1,33 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.list}
+\alias{jobs.list}
+\title{jobs.list}
+\usage{
+arv$jobs.list(filters = NULL, where = NULL,
+       order = NULL, select = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+JobList object.
+}
+\description{
+jobs.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.lock.Rd b/sdk/R/man/jobs.lock.Rd
new file mode 100644 (file)
index 0000000..3c2e232
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.lock}
+\alias{jobs.lock}
+\title{jobs.lock}
+\usage{
+arv$jobs.lock(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.lock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.queue.Rd b/sdk/R/man/jobs.queue.Rd
new file mode 100644 (file)
index 0000000..a9deaa9
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.queue}
+\alias{jobs.queue}
+\title{jobs.queue}
+\usage{
+arv$jobs.queue(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.queue is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.queue_size.Rd b/sdk/R/man/jobs.queue_size.Rd
new file mode 100644 (file)
index 0000000..2185820
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.queue_size}
+\alias{jobs.queue_size}
+\title{jobs.queue_size}
+\usage{
+arv$jobs.queue_size(NULL)
+}
+\value{
+Job object.
+}
+\description{
+jobs.queue_size is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.update.Rd b/sdk/R/man/jobs.update.Rd
new file mode 100644 (file)
index 0000000..666d7fd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.update}
+\alias{jobs.update}
+\title{jobs.update}
+\usage{
+arv$jobs.update(job, uuid)
+}
+\arguments{
+\item{job}{Job object.}
+
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.create.Rd b/sdk/R/man/keep_disks.create.Rd
new file mode 100644 (file)
index 0000000..524c5b6
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.create}
+\alias{keep_disks.create}
+\title{keep_disks.create}
+\usage{
+arv$keep_disks.create(keepDisk,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{keepDisk}{KeepDisk object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.delete.Rd b/sdk/R/man/keep_disks.delete.Rd
new file mode 100644 (file)
index 0000000..80f39f3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.delete}
+\alias{keep_disks.delete}
+\title{keep_disks.delete}
+\usage{
+arv$keep_disks.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.get.Rd b/sdk/R/man/keep_disks.get.Rd
new file mode 100644 (file)
index 0000000..1b511fe
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.get}
+\alias{keep_disks.get}
+\title{keep_disks.get}
+\usage{
+arv$keep_disks.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.list.Rd b/sdk/R/man/keep_disks.list.Rd
new file mode 100644 (file)
index 0000000..fdb599f
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.list}
+\alias{keep_disks.list}
+\title{keep_disks.list}
+\usage{
+arv$keep_disks.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+KeepDiskList object.
+}
+\description{
+keep_disks.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.ping.Rd b/sdk/R/man/keep_disks.ping.Rd
new file mode 100644 (file)
index 0000000..6ae5595
--- /dev/null
@@ -0,0 +1,31 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.ping}
+\alias{keep_disks.ping}
+\title{keep_disks.ping}
+\usage{
+arv$keep_disks.ping(uuid = NULL,
+       ping_secret, node_uuid = NULL, filesystem_uuid = NULL,
+       service_host = NULL, service_port, service_ssl_flag)
+}
+\arguments{
+\item{uuid}{}
+
+\item{ping_secret}{}
+
+\item{node_uuid}{}
+
+\item{filesystem_uuid}{}
+
+\item{service_host}{}
+
+\item{service_port}{}
+
+\item{service_ssl_flag}{}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.ping is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.update.Rd b/sdk/R/man/keep_disks.update.Rd
new file mode 100644 (file)
index 0000000..1ca3363
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.update}
+\alias{keep_disks.update}
+\title{keep_disks.update}
+\usage{
+arv$keep_disks.update(keepdisk,
+       uuid)
+}
+\arguments{
+\item{keepdisk}{KeepDisk object.}
+
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.accessible.Rd b/sdk/R/man/keep_services.accessible.Rd
new file mode 100644 (file)
index 0000000..3caae2f
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.accessible}
+\alias{keep_services.accessible}
+\title{keep_services.accessible}
+\usage{
+arv$keep_services.accessible(NULL)
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.accessible is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.create.Rd b/sdk/R/man/keep_services.create.Rd
new file mode 100644 (file)
index 0000000..59c43ab
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.create}
+\alias{keep_services.create}
+\title{keep_services.create}
+\usage{
+arv$keep_services.create(keepservice,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{keepservice}{KeepService object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.delete.Rd b/sdk/R/man/keep_services.delete.Rd
new file mode 100644 (file)
index 0000000..726771e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.delete}
+\alias{keep_services.delete}
+\title{keep_services.delete}
+\usage{
+arv$keep_services.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.get.Rd b/sdk/R/man/keep_services.get.Rd
new file mode 100644 (file)
index 0000000..065cf84
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.get}
+\alias{keep_services.get}
+\title{keep_services.get}
+\usage{
+arv$keep_services.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.list.Rd b/sdk/R/man/keep_services.list.Rd
new file mode 100644 (file)
index 0000000..22aa3aa
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.list}
+\alias{keep_services.list}
+\title{keep_services.list}
+\usage{
+arv$keep_services.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+KeepServiceList object.
+}
+\description{
+keep_services.list is a method defined in Arvados class.
+}
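+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Fetch the first page of Keep services registered on the cluster.
+services <- arv$keep_services.list(limit = "20")
+}
+}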
diff --git a/sdk/R/man/keep_services.update.Rd b/sdk/R/man/keep_services.update.Rd
new file mode 100644 (file)
index 0000000..2680a5c
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.update}
+\alias{keep_services.update}
+\title{keep_services.update}
+\usage{
+arv$keep_services.update(keepservice,
+       uuid)
+}
+\arguments{
+\item{keepservice}{KeepService object.}
+
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.create.Rd b/sdk/R/man/links.create.Rd
new file mode 100644 (file)
index 0000000..06b012e
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.create}
+\alias{links.create}
+\title{links.create}
+\usage{
+arv$links.create(link, ensure_unique_name = "false")
+}
+\arguments{
+\item{link}{Link object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Link object.
+}
+\description{
+links.create is a method defined in Arvados class.
+}
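+% Editorial example (not roxygen output): the UUIDs and link fields below
+% are hypothetical; assumes a configured `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Grant a user read access to a collection via a permission link.
+link <- list(link_class = "permission",
+             name       = "can_read",
+             tail_uuid  = "aaaaa-tpzed-xxxxxxxxxxxxxxx",  # user (agent)
+             head_uuid  = "aaaaa-4zz18-xxxxxxxxxxxxxxx")  # collection (object)
+arv$links.create(link)
+}
+}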
diff --git a/sdk/R/man/links.delete.Rd b/sdk/R/man/links.delete.Rd
new file mode 100644 (file)
index 0000000..3a78b7f
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.delete}
+\alias{links.delete}
+\title{links.delete}
+\usage{
+arv$links.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.get.Rd b/sdk/R/man/links.get.Rd
new file mode 100644 (file)
index 0000000..bf26271
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.get}
+\alias{links.get}
+\title{links.get}
+\usage{
+arv$links.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.get_permissions.Rd b/sdk/R/man/links.get_permissions.Rd
new file mode 100644 (file)
index 0000000..982dbb9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.get_permissions}
+\alias{links.get_permissions}
+\title{links.get_permissions}
+\usage{
+arv$links.get_permissions(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Link object.
+}
+\description{
+links.get_permissions is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.list.Rd b/sdk/R/man/links.list.Rd
new file mode 100644 (file)
index 0000000..540fdc1
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.list}
+\alias{links.list}
+\title{links.list}
+\usage{
+arv$links.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+LinkList object.
+}
+\description{
+links.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.update.Rd b/sdk/R/man/links.update.Rd
new file mode 100644 (file)
index 0000000..398b6fd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.update}
+\alias{links.update}
+\title{links.update}
+\usage{
+arv$links.update(link, uuid)
+}
+\arguments{
+\item{link}{Link object.}
+
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/listAll.Rd b/sdk/R/man/listAll.Rd
new file mode 100644 (file)
index 0000000..2084b47
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/util.R
+\name{listAll}
+\alias{listAll}
+\title{listAll}
+\usage{
+listAll(fn, ...)
+}
+\arguments{
+\item{fn}{Arvados method used to retrieve items from REST service.}
+
+\item{...}{Optional arguments which will be passed to fn.}
+}
+\description{
+List all resources even if the number of items is greater than maximum API limit.
+}
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test\%")))
+}
+}
diff --git a/sdk/R/man/logs.create.Rd b/sdk/R/man/logs.create.Rd
new file mode 100644 (file)
index 0000000..a575e5f
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.create}
+\alias{logs.create}
+\title{logs.create}
+\usage{
+arv$logs.create(log, ensure_unique_name = "false")
+}
+\arguments{
+\item{log}{Log object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Log object.
+}
+\description{
+logs.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.delete.Rd b/sdk/R/man/logs.delete.Rd
new file mode 100644 (file)
index 0000000..63d6a0b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.delete}
+\alias{logs.delete}
+\title{logs.delete}
+\usage{
+arv$logs.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.get.Rd b/sdk/R/man/logs.get.Rd
new file mode 100644 (file)
index 0000000..d3053d1
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.get}
+\alias{logs.get}
+\title{logs.get}
+\usage{
+arv$logs.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.list.Rd b/sdk/R/man/logs.list.Rd
new file mode 100644 (file)
index 0000000..58dbdb7
--- /dev/null
@@ -0,0 +1,33 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.list}
+\alias{logs.list}
+\title{logs.list}
+\usage{
+arv$logs.list(filters = NULL, where = NULL,
+       order = NULL, select = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+LogList object.
+}
+\description{
+logs.list is a method defined in Arvados class.
+}
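+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client; the event_type filter is illustrative.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Fetch the 25 most recent "create" events, newest first.
+logs <- arv$logs.list(filters = list(list("event_type", "=", "create")),
+                      order = "created_at desc", limit = "25")
+}
+}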
diff --git a/sdk/R/man/logs.update.Rd b/sdk/R/man/logs.update.Rd
new file mode 100644 (file)
index 0000000..efd670c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.update}
+\alias{logs.update}
+\title{logs.update}
+\usage{
+arv$logs.update(log, uuid)
+}
+\arguments{
+\item{log}{Log object.}
+
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.create.Rd b/sdk/R/man/nodes.create.Rd
new file mode 100644 (file)
index 0000000..eb73e69
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.create}
+\alias{nodes.create}
+\title{nodes.create}
+\usage{
+arv$nodes.create(node, ensure_unique_name = "false",
+       assign_slot = NULL)
+}
+\arguments{
+\item{node}{Node object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+
+\item{assign_slot}{Assign a slot number and hostname to the node.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.delete.Rd b/sdk/R/man/nodes.delete.Rd
new file mode 100644 (file)
index 0000000..0591ded
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.delete}
+\alias{nodes.delete}
+\title{nodes.delete}
+\usage{
+arv$nodes.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Node in question.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.get.Rd b/sdk/R/man/nodes.get.Rd
new file mode 100644 (file)
index 0000000..dcd7b12
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.get}
+\alias{nodes.get}
+\title{nodes.get}
+\usage{
+arv$nodes.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Node in question.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.list.Rd b/sdk/R/man/nodes.list.Rd
new file mode 100644 (file)
index 0000000..7ccfad6
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.list}
+\alias{nodes.list}
+\title{nodes.list}
+\usage{
+arv$nodes.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+NodeList object.
+}
+\description{
+nodes.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.ping.Rd b/sdk/R/man/nodes.ping.Rd
new file mode 100644 (file)
index 0000000..e77d2b5
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.ping}
+\alias{nodes.ping}
+\title{nodes.ping}
+\usage{
+arv$nodes.ping(uuid, ping_secret)
+}
+\arguments{
+\item{uuid}{}
+
+\item{ping_secret}{}
+}
+\value{
+Node object.
+}
+\description{
+nodes.ping is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.update.Rd b/sdk/R/man/nodes.update.Rd
new file mode 100644 (file)
index 0000000..f87245f
--- /dev/null
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.update}
+\alias{nodes.update}
+\title{nodes.update}
+\usage{
+arv$nodes.update(node, uuid, assign_slot = NULL)
+}
+\arguments{
+\item{node}{Node object.}
+
+\item{uuid}{The UUID of the Node in question.}
+
+\item{assign_slot}{Assign a slot number and hostname to the node.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.update is a method defined in Arvados class.
+}
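+% Editorial example (not roxygen output): the node fields and UUID are
+% hypothetical; assumes an admin `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("admin Arvados token", "example.arvadosapi.com")
+# Update a node record and ask the API server to assign a slot/hostname.
+arv$nodes.update(list(info = list(slurm_state = "idle")),
+                 "aaaaa-7ekkf-xxxxxxxxxxxxxxx", assign_slot = "true")
+}
+}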
diff --git a/sdk/R/man/pipeline_instances.cancel.Rd b/sdk/R/man/pipeline_instances.cancel.Rd
new file mode 100644 (file)
index 0000000..026de81
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.cancel}
+\alias{pipeline_instances.cancel}
+\title{pipeline_instances.cancel}
+\usage{
+arv$pipeline_instances.cancel(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.cancel is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.create.Rd b/sdk/R/man/pipeline_instances.create.Rd
new file mode 100644 (file)
index 0000000..9ee5586
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.create}
+\alias{pipeline_instances.create}
+\title{pipeline_instances.create}
+\usage{
+arv$pipeline_instances.create(pipelineinstance,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{pipelineinstance}{PipelineInstance object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.delete.Rd b/sdk/R/man/pipeline_instances.delete.Rd
new file mode 100644 (file)
index 0000000..7297da5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.delete}
+\alias{pipeline_instances.delete}
+\title{pipeline_instances.delete}
+\usage{
+arv$pipeline_instances.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.get.Rd b/sdk/R/man/pipeline_instances.get.Rd
new file mode 100644 (file)
index 0000000..e500df5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.get}
+\alias{pipeline_instances.get}
+\title{pipeline_instances.get}
+\usage{
+arv$pipeline_instances.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.list.Rd b/sdk/R/man/pipeline_instances.list.Rd
new file mode 100644 (file)
index 0000000..407f944
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.list}
+\alias{pipeline_instances.list}
+\title{pipeline_instances.list}
+\usage{
+arv$pipeline_instances.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+PipelineInstanceList object.
+}
+\description{
+pipeline_instances.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.update.Rd b/sdk/R/man/pipeline_instances.update.Rd
new file mode 100644 (file)
index 0000000..4a66660
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.update}
+\alias{pipeline_instances.update}
+\title{pipeline_instances.update}
+\usage{
+arv$pipeline_instances.update(pipelineinstance,
+       uuid)
+}
+\arguments{
+\item{pipelineinstance}{PipelineInstance object.}
+
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.create.Rd b/sdk/R/man/pipeline_templates.create.Rd
new file mode 100644 (file)
index 0000000..afb1e58
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.create}
+\alias{pipeline_templates.create}
+\title{pipeline_templates.create}
+\usage{
+arv$pipeline_templates.create(pipelinetemplate,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{pipelinetemplate}{PipelineTemplate object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.delete.Rd b/sdk/R/man/pipeline_templates.delete.Rd
new file mode 100644 (file)
index 0000000..c74d88b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.delete}
+\alias{pipeline_templates.delete}
+\title{pipeline_templates.delete}
+\usage{
+arv$pipeline_templates.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.get.Rd b/sdk/R/man/pipeline_templates.get.Rd
new file mode 100644 (file)
index 0000000..48ef739
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.get}
+\alias{pipeline_templates.get}
+\title{pipeline_templates.get}
+\usage{
+arv$pipeline_templates.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.list.Rd b/sdk/R/man/pipeline_templates.list.Rd
new file mode 100644 (file)
index 0000000..c6c7413
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.list}
+\alias{pipeline_templates.list}
+\title{pipeline_templates.list}
+\usage{
+arv$pipeline_templates.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+PipelineTemplateList object.
+}
+\description{
+pipeline_templates.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.update.Rd b/sdk/R/man/pipeline_templates.update.Rd
new file mode 100644 (file)
index 0000000..25e02bf
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.update}
+\alias{pipeline_templates.update}
+\title{pipeline_templates.update}
+\usage{
+arv$pipeline_templates.update(pipelinetemplate,
+       uuid)
+}
+\arguments{
+\item{pipelinetemplate}{PipelineTemplate object.}
+
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/print.Arvados.Rd b/sdk/R/man/print.Arvados.Rd
deleted file mode 100644 (file)
index 0833d15..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{print.Arvados}
-\alias{print.Arvados}
-\title{print.Arvados}
-\usage{
-\method{print}{Arvados}(x, ...)
-}
-\arguments{
-\item{x}{Instance of Arvados class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for Arvados class
-}
diff --git a/sdk/R/man/projects.create.Rd b/sdk/R/man/projects.create.Rd
new file mode 100644 (file)
index 0000000..66b1f2a
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.create}
+\alias{projects.create}
+\title{projects.create}
+\usage{
+arv$projects.create(group, ensure_unique_name = "false")
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Group object.
+}
+\description{
+projects.create wraps groups.create method by setting group_class attribute to "project".
+}
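+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client; the group fields are illustrative.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# group_class is set to "project" by the wrapper itself.
+project <- arv$projects.create(list(name        = "My analysis project",
+                                    description = "Scratch space"))
+}
+}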
diff --git a/sdk/R/man/projects.delete.Rd b/sdk/R/man/projects.delete.Rd
new file mode 100644 (file)
index 0000000..7170792
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.delete}
+\alias{projects.delete}
+\title{projects.delete}
+\usage{
+arv$projects.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.delete is equivalent to groups.delete method.
+}
diff --git a/sdk/R/man/projects.get.Rd b/sdk/R/man/projects.get.Rd
new file mode 100644 (file)
index 0000000..1939378
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.get}
+\alias{projects.get}
+\title{projects.get}
+\usage{
+arv$projects.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.get is equivalent to groups.get method.
+}
diff --git a/sdk/R/man/projects.list.Rd b/sdk/R/man/projects.list.Rd
new file mode 100644 (file)
index 0000000..ff4c1c9
--- /dev/null
@@ -0,0 +1,38 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.list}
+\alias{projects.list}
+\title{projects.list}
+\usage{
+arv$projects.list(filters = NULL,
+       where = NULL, order = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact",
+       include_trash = NULL, uuid = NULL, recursive = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+
+\item{uuid}{}
+
+\item{recursive}{Include contents from child groups recursively.}
+}
+\value{
+Group object.
+}
+\description{
+projects.list wraps groups.list method by setting group_class attribute to "project".
+}
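+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# List projects whose names match a pattern, including trashed ones.
+projects <- arv$projects.list(filters = list(list("name", "like", "analysis\%")),
+                              include_trash = TRUE)
+}
+}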
diff --git a/sdk/R/man/projects.update.Rd b/sdk/R/man/projects.update.Rd
new file mode 100644 (file)
index 0000000..824c5b5
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.update}
+\alias{projects.update}
+\title{projects.update}
+\usage{
+arv$projects.update(group, uuid)
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.update wraps groups.update method by setting group_class attribute to "project".
+}
diff --git a/sdk/R/man/repositories.create.Rd b/sdk/R/man/repositories.create.Rd
new file mode 100644 (file)
index 0000000..1603604
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.create}
+\alias{repositories.create}
+\title{repositories.create}
+\usage{
+arv$repositories.create(repository,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{repository}{Repository object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.delete.Rd b/sdk/R/man/repositories.delete.Rd
new file mode 100644 (file)
index 0000000..36fac73
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.delete}
+\alias{repositories.delete}
+\title{repositories.delete}
+\usage{
+arv$repositories.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.get.Rd b/sdk/R/man/repositories.get.Rd
new file mode 100644 (file)
index 0000000..b855b76
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.get}
+\alias{repositories.get}
+\title{repositories.get}
+\usage{
+arv$repositories.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.get_all_permissions.Rd b/sdk/R/man/repositories.get_all_permissions.Rd
new file mode 100644 (file)
index 0000000..f16dbd1
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.get_all_permissions}
+\alias{repositories.get_all_permissions}
+\title{repositories.get_all_permissions}
+\usage{
+arv$repositories.get_all_permissions(NULL)
+}
+\value{
+Repository object.
+}
+\description{
+repositories.get_all_permissions is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.list.Rd b/sdk/R/man/repositories.list.Rd
new file mode 100644 (file)
index 0000000..d1f4772
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.list}
+\alias{repositories.list}
+\title{repositories.list}
+\usage{
+arv$repositories.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+RepositoryList object.
+}
+\description{
+repositories.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.update.Rd b/sdk/R/man/repositories.update.Rd
new file mode 100644 (file)
index 0000000..1be4b61
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.update}
+\alias{repositories.update}
+\title{repositories.update}
+\usage{
+arv$repositories.update(repository,
+       uuid)
+}
+\arguments{
+\item{repository}{Repository object.}
+
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.create.Rd b/sdk/R/man/specimens.create.Rd
new file mode 100644 (file)
index 0000000..12344f2
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.create}
+\alias{specimens.create}
+\title{specimens.create}
+\usage{
+arv$specimens.create(specimen,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{specimen}{Specimen object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.delete.Rd b/sdk/R/man/specimens.delete.Rd
new file mode 100644 (file)
index 0000000..8ed2d39
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.delete}
+\alias{specimens.delete}
+\title{specimens.delete}
+\usage{
+arv$specimens.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.get.Rd b/sdk/R/man/specimens.get.Rd
new file mode 100644 (file)
index 0000000..e757056
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.get}
+\alias{specimens.get}
+\title{specimens.get}
+\usage{
+arv$specimens.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.list.Rd b/sdk/R/man/specimens.list.Rd
new file mode 100644 (file)
index 0000000..4e07f4a
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.list}
+\alias{specimens.list}
+\title{specimens.list}
+\usage{
+arv$specimens.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+SpecimenList object.
+}
+\description{
+specimens.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.update.Rd b/sdk/R/man/specimens.update.Rd
new file mode 100644 (file)
index 0000000..73a9010
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.update}
+\alias{specimens.update}
+\title{specimens.update}
+\usage{
+arv$specimens.update(specimen,
+       uuid)
+}
+\arguments{
+\item{specimen}{Specimen object.}
+
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.create.Rd b/sdk/R/man/traits.create.Rd
new file mode 100644 (file)
index 0000000..bf6e0c1
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.create}
+\alias{traits.create}
+\title{traits.create}
+\usage{
+arv$traits.create(trait, ensure_unique_name = "false")
+}
+\arguments{
+\item{trait}{Trait object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.delete.Rd b/sdk/R/man/traits.delete.Rd
new file mode 100644 (file)
index 0000000..9ab9570
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.delete}
+\alias{traits.delete}
+\title{traits.delete}
+\usage{
+arv$traits.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.get.Rd b/sdk/R/man/traits.get.Rd
new file mode 100644 (file)
index 0000000..7d2bac5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.get}
+\alias{traits.get}
+\title{traits.get}
+\usage{
+arv$traits.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.list.Rd b/sdk/R/man/traits.list.Rd
new file mode 100644 (file)
index 0000000..e91b929
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.list}
+\alias{traits.list}
+\title{traits.list}
+\usage{
+arv$traits.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+TraitList object.
+}
+\description{
+traits.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.update.Rd b/sdk/R/man/traits.update.Rd
new file mode 100644 (file)
index 0000000..f594434
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.update}
+\alias{traits.update}
+\title{traits.update}
+\usage{
+arv$traits.update(trait, uuid)
+}
+\arguments{
+\item{trait}{Trait object.}
+
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.create.Rd b/sdk/R/man/user_agreements.create.Rd
new file mode 100644 (file)
index 0000000..7991305
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.create}
+\alias{user_agreements.create}
+\title{user_agreements.create}
+\usage{
+arv$user_agreements.create(useragreement,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{useragreement}{UserAgreement object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.delete.Rd b/sdk/R/man/user_agreements.delete.Rd
new file mode 100644 (file)
index 0000000..30c9bf8
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.delete}
+\alias{user_agreements.delete}
+\title{user_agreements.delete}
+\usage{
+arv$user_agreements.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.get.Rd b/sdk/R/man/user_agreements.get.Rd
new file mode 100644 (file)
index 0000000..6311605
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.get}
+\alias{user_agreements.get}
+\title{user_agreements.get}
+\usage{
+arv$user_agreements.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.list.Rd b/sdk/R/man/user_agreements.list.Rd
new file mode 100644 (file)
index 0000000..5e69861
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.list}
+\alias{user_agreements.list}
+\title{user_agreements.list}
+\usage{
+arv$user_agreements.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+UserAgreementList object.
+}
+\description{
+user_agreements.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.new.Rd b/sdk/R/man/user_agreements.new.Rd
new file mode 100644 (file)
index 0000000..c213cb4
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.new}
+\alias{user_agreements.new}
+\title{user_agreements.new}
+\usage{
+arv$user_agreements.new(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.new is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.sign.Rd b/sdk/R/man/user_agreements.sign.Rd
new file mode 100644 (file)
index 0000000..9811610
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.sign}
+\alias{user_agreements.sign}
+\title{user_agreements.sign}
+\usage{
+arv$user_agreements.sign(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.sign is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.signatures.Rd b/sdk/R/man/user_agreements.signatures.Rd
new file mode 100644 (file)
index 0000000..d889579
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.signatures}
+\alias{user_agreements.signatures}
+\title{user_agreements.signatures}
+\usage{
+arv$user_agreements.signatures(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.signatures is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.update.Rd b/sdk/R/man/user_agreements.update.Rd
new file mode 100644 (file)
index 0000000..578e179
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.update}
+\alias{user_agreements.update}
+\title{user_agreements.update}
+\usage{
+arv$user_agreements.update(useragreement,
+       uuid)
+}
+\arguments{
+\item{useragreement}{UserAgreement object.}
+
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.activate.Rd b/sdk/R/man/users.activate.Rd
new file mode 100644 (file)
index 0000000..201caf4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.activate}
+\alias{users.activate}
+\title{users.activate}
+\usage{
+arv$users.activate(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.activate is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.create.Rd b/sdk/R/man/users.create.Rd
new file mode 100644 (file)
index 0000000..1805c66
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.create}
+\alias{users.create}
+\title{users.create}
+\usage{
+arv$users.create(user, ensure_unique_name = "false")
+}
+\arguments{
+\item{user}{User object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+User object.
+}
+\description{
+users.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.current.Rd b/sdk/R/man/users.current.Rd
new file mode 100644 (file)
index 0000000..4e8af94
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.current}
+\alias{users.current}
+\title{users.current}
+\usage{
+arv$users.current(NULL)
+}
+\value{
+User object.
+}
+\description{
+users.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.delete.Rd b/sdk/R/man/users.delete.Rd
new file mode 100644 (file)
index 0000000..df9e238
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.delete}
+\alias{users.delete}
+\title{users.delete}
+\usage{
+arv$users.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.get.Rd b/sdk/R/man/users.get.Rd
new file mode 100644 (file)
index 0000000..ec2b284
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.get}
+\alias{users.get}
+\title{users.get}
+\usage{
+arv$users.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.list.Rd b/sdk/R/man/users.list.Rd
new file mode 100644 (file)
index 0000000..7131943
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.list}
+\alias{users.list}
+\title{users.list}
+\usage{
+arv$users.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+UserList object.
+}
+\description{
+users.list is a method defined in Arvados class.
+}
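+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client; the select fields are illustrative.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Page through users fifty at a time, keeping only two fields.
+users <- arv$users.list(select = list("uuid", "full_name"),
+                        limit = "50", offset = "0")
+}
+}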
diff --git a/sdk/R/man/users.setup.Rd b/sdk/R/man/users.setup.Rd
new file mode 100644 (file)
index 0000000..869403d
--- /dev/null
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.setup}
+\alias{users.setup}
+\title{users.setup}
+\usage{
+arv$users.setup(user = NULL, openid_prefix = NULL,
+       repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+}
+\arguments{
+\item{user}{}
+
+\item{openid_prefix}{}
+
+\item{repo_name}{}
+
+\item{vm_uuid}{}
+
+\item{send_notification_email}{}
+}
+\value{
+User object.
+}
+\description{
+users.setup is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.system.Rd b/sdk/R/man/users.system.Rd
new file mode 100644 (file)
index 0000000..c321c23
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.system}
+\alias{users.system}
+\title{users.system}
+\usage{
+arv$users.system(NULL)
+}
+\value{
+User object.
+}
+\description{
+users.system is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.unsetup.Rd b/sdk/R/man/users.unsetup.Rd
new file mode 100644 (file)
index 0000000..85de6f9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.unsetup}
+\alias{users.unsetup}
+\title{users.unsetup}
+\usage{
+arv$users.unsetup(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.unsetup is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.update.Rd b/sdk/R/man/users.update.Rd
new file mode 100644 (file)
index 0000000..fcd9c71
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.update}
+\alias{users.update}
+\title{users.update}
+\usage{
+arv$users.update(user, uuid)
+}
+\arguments{
+\item{user}{User object.}
+
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.update_uuid.Rd b/sdk/R/man/users.update_uuid.Rd
new file mode 100644 (file)
index 0000000..af62c2c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.update_uuid}
+\alias{users.update_uuid}
+\title{users.update_uuid}
+\usage{
+arv$users.update_uuid(uuid, new_uuid)
+}
+\arguments{
+\item{uuid}{}
+
+\item{new_uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.update_uuid is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.create.Rd b/sdk/R/man/virtual_machines.create.Rd
new file mode 100644 (file)
index 0000000..689a0f9
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.create}
+\alias{virtual_machines.create}
+\title{virtual_machines.create}
+\usage{
+arv$virtual_machines.create(virtualmachine,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{virtualmachine}{VirtualMachine object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.delete.Rd b/sdk/R/man/virtual_machines.delete.Rd
new file mode 100644 (file)
index 0000000..c513833
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.delete}
+\alias{virtual_machines.delete}
+\title{virtual_machines.delete}
+\usage{
+arv$virtual_machines.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.get.Rd b/sdk/R/man/virtual_machines.get.Rd
new file mode 100644 (file)
index 0000000..3e56e17
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.get}
+\alias{virtual_machines.get}
+\title{virtual_machines.get}
+\usage{
+arv$virtual_machines.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.get_all_logins.Rd b/sdk/R/man/virtual_machines.get_all_logins.Rd
new file mode 100644 (file)
index 0000000..b2af1e4
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.get_all_logins}
+\alias{virtual_machines.get_all_logins}
+\title{virtual_machines.get_all_logins}
+\usage{
+arv$virtual_machines.get_all_logins(NULL)
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.get_all_logins is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.list.Rd b/sdk/R/man/virtual_machines.list.Rd
new file mode 100644 (file)
index 0000000..42ed58b
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.list}
+\alias{virtual_machines.list}
+\title{virtual_machines.list}
+\usage{
+arv$virtual_machines.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+VirtualMachineList object.
+}
+\description{
+virtual_machines.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.logins.Rd b/sdk/R/man/virtual_machines.logins.Rd
new file mode 100644 (file)
index 0000000..7e25110
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.logins}
+\alias{virtual_machines.logins}
+\title{virtual_machines.logins}
+\usage{
+arv$virtual_machines.logins(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.logins is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.update.Rd b/sdk/R/man/virtual_machines.update.Rd
new file mode 100644 (file)
index 0000000..d1a07eb
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.update}
+\alias{virtual_machines.update}
+\title{virtual_machines.update}
+\usage{
+arv$virtual_machines.update(virtualmachine,
+       uuid)
+}
+\arguments{
+\item{virtualmachine}{VirtualMachine object.}
+
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.create.Rd b/sdk/R/man/workflows.create.Rd
new file mode 100644 (file)
index 0000000..8a84e00
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.create}
+\alias{workflows.create}
+\title{workflows.create}
+\usage{
+arv$workflows.create(workflow,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{workflow}{Workflow object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.delete.Rd b/sdk/R/man/workflows.delete.Rd
new file mode 100644 (file)
index 0000000..96a561e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.delete}
+\alias{workflows.delete}
+\title{workflows.delete}
+\usage{
+arv$workflows.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.get.Rd b/sdk/R/man/workflows.get.Rd
new file mode 100644 (file)
index 0000000..8a8c3a8
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.get}
+\alias{workflows.get}
+\title{workflows.get}
+\usage{
+arv$workflows.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.list.Rd b/sdk/R/man/workflows.list.Rd
new file mode 100644 (file)
index 0000000..e24b74d
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.list}
+\alias{workflows.list}
+\title{workflows.list}
+\usage{
+arv$workflows.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+WorkflowList object.
+}
+\description{
+workflows.list is a method defined in Arvados class.
+}
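+% Editorial example (not roxygen output): a minimal sketch assuming a
+% configured `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Find workflows whose names match a pattern.
+wfs <- arv$workflows.list(filters = list(list("name", "like", "CWL\%")))
+}
+}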
diff --git a/sdk/R/man/workflows.update.Rd b/sdk/R/man/workflows.update.Rd
new file mode 100644 (file)
index 0000000..d3f6186
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.update}
+\alias{workflows.update}
+\title{workflows.update}
+\usage{
+arv$workflows.update(workflow,
+       uuid)
+}
+\arguments{
+\item{workflow}{Workflow object.}
+
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.update is a method defined in Arvados class.
+}
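+% Editorial example (not roxygen output): the UUID is hypothetical;
+% assumes a configured `arv` client.
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+# Rename an existing workflow.
+arv$workflows.update(list(name = "Aligner v2"),
+                     "aaaaa-7fd4e-xxxxxxxxxxxxxxx")
+}
+}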
index 1f8931d917969115a382b61b2ac378e47e665764..156dde1080c5040373d55633ff8a689a8867484a 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 results <- devtools::test()
 any_error <- any(as.data.frame(results)$error)
 if (any_error) {
index 18ef411fd644144ac34ba76203f8bc6d8f793f17..9ca4f86fb67d76b3a0abc0b16734788e6fff882b 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 library(testthat)
 library(ArvadosR)
 
index 5886ff761f6d0b3b586397ac36562f9aa385eb0d..4fcfd6c67e53f12c8bbd9908d750b1a52c756e07 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 FakeArvados <- R6::R6Class(
 
     "FakeArvados",
index 865234d83552db7965f1d4077085a9f83c65ec4e..c97572c193f1eadbd315928fb09d56aff5e2d7a2 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 FakeHttpParser <- R6::R6Class(
 
     "FakeHttrParser",
index 533602886ab09e0d34a49e2829acaf73a9051baa..2633abdf2c745bf0e4c9afcee1b73b7c5751fbeb 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 FakeHttpRequest <- R6::R6Class(
 
     "FakeHttpRequest",
@@ -56,8 +60,8 @@ FakeHttpRequest <- R6::R6Class(
             self$serverMaxElementsPerRequest <- 5
         },
 
-        execute = function(verb, url, headers = NULL, body = NULL, query = NULL,
-                           limit = NULL, offset = NULL, retryTimes = 0)
+        exec = function(verb, url, headers = NULL, body = NULL, query = NULL,
+                        limit = NULL, offset = NULL, retryTimes = 0)
         {
             private$validateURL(url)
             private$validateHeaders(headers)
index d370e87fbe7e3ca581e4a36ac3a2a149989f18e1..08e8717de5e4b97b5776c2c6cc8893c523f4c133 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 FakeRESTService <- R6::R6Class(
 
     "FakeRESTService",
diff --git a/sdk/R/tests/testthat/test-Arvados.R b/sdk/R/tests/testthat/test-Arvados.R
deleted file mode 100644 (file)
index 25cf88f..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-context("Arvados API")
-
-source("fakes/FakeRESTService.R")
-
-test_that("Constructor will use environment variables if no parameters are passed to it", {
-
-    Sys.setenv(ARVADOS_API_HOST  = "environment_api_host")
-    Sys.setenv(ARVADOS_API_TOKEN = "environment_api_token")
-
-    arv <- Arvados$new()
-
-    Sys.unsetenv("ARVADOS_API_HOST")
-    Sys.unsetenv("ARVADOS_API_TOKEN")
-
-    expect_that("https://environment_api_host/arvados/v1/",
-                equals(arv$getHostName())) 
-
-    expect_that("environment_api_token",
-                equals(arv$getToken())) 
-}) 
-
-test_that("Constructor preferes constructor fields over environment variables", {
-
-    Sys.setenv(ARVADOS_API_HOST  = "environment_api_host")
-    Sys.setenv(ARVADOS_API_TOKEN = "environment_api_token")
-
-    arv <- Arvados$new("constructor_api_token", "constructor_api_host")
-
-    Sys.unsetenv("ARVADOS_API_HOST")
-    Sys.unsetenv("ARVADOS_API_TOKEN")
-
-    expect_that("https://constructor_api_host/arvados/v1/",
-                equals(arv$getHostName())) 
-
-    expect_that("constructor_api_token",
-                equals(arv$getToken())) 
-}) 
-
-test_that("Constructor raises exception if fields and environment variables are not provided", {
-
-    expect_that(Arvados$new(),
-                throws_error(paste0("Please provide host name and authentification token",
-                                    " or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
-                                    " environment variables.")))
-}) 
-
-test_that("getCollection delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$getCollection(collectionUUID)
-
-    expect_that(fakeREST$getResourceCallCount, equals(1))
-}) 
-
-test_that("listCollections delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-
-    arv$listCollections()
-
-    expect_that(fakeREST$listResourcesCallCount, equals(1))
-}) 
-
-test_that("listCollections filter paramerter must be named 'collection'", {
-
-    filters <- list(list("name", "like", "MyCollection"))
-    names(filters) <- c("collection")
-    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$listCollections(list(list("name", "like", "MyCollection")))
-
-    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
-}) 
-
-test_that("listAllCollections delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-
-    arv$listAllCollections()
-
-    expect_that(fakeREST$fetchAllItemsCallCount, equals(1))
-}) 
-
-test_that("listAllCollections filter paramerter must be named 'collection'", {
-
-    filters <- list(list("name", "like", "MyCollection"))
-    names(filters) <- c("collection")
-    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$listAllCollections(list(list("name", "like", "MyCollection")))
-
-    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
-}) 
-
-test_that("deleteCollection delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$deleteCollection(collectionUUID)
-
-    expect_that(fakeREST$deleteResourceCallCount, equals(1))
-}) 
-
-test_that("updateCollection delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    newCollectionContent <- list(newName = "Brand new shiny name")
-    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$updateCollection(collectionUUID, newCollectionContent)
-
-    expect_that(fakeREST$updateResourceCallCount, equals(1))
-}) 
-
-test_that("updateCollection adds content to request parameter named 'collection'", {
-
-    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    body <- list(list())
-    names(body) <- c("collection")
-    body$collection <- list(name = "MyCollection", desc = "No description")
-    fakeREST <- FakeRESTService$new(returnContent = body)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$updateCollection(collectionUUID, 
-                         list(name = "MyCollection", desc = "No description"))
-
-    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
-}) 
-
-test_that("createCollection delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    collectionContent <- list(newName = "Brand new shiny name")
-
-    arv$createCollection(collectionContent)
-
-    expect_that(fakeREST$createResourceCallCount, equals(1))
-}) 
-
-test_that("createCollection adds content to request parameter named 'collection'", {
-
-    body <- list(list())
-    names(body) <- c("collection")
-    body$collection <- list(name = "MyCollection", desc = "No description")
-    fakeREST <- FakeRESTService$new(returnContent = body)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$createCollection(list(name = "MyCollection", desc = "No description"))
-
-    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
-}) 
-
-test_that("getProject delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$getCollection(projectUUID)
-
-    expect_that(fakeREST$getResourceCallCount, equals(1))
-}) 
-
-test_that("listProjects delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-
-    arv$listCollections()
-
-    expect_that(fakeREST$listResourcesCallCount, equals(1))
-}) 
-
-test_that("listProjects filter contains additional 'group_class' field by default", {
-
-    filters <- list(list("name", "like", "MyProject"))
-    names(filters) <- c("groups")
-    filters[[length(filters) + 1]] <- list("group_class", "=", "project")
-
-    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$listProjects(list(list("name", "like", "MyProject")))
-
-    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
-}) 
-
-test_that("listAllProjects delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-
-    arv$listAllProjects()
-
-    expect_that(fakeREST$fetchAllItemsCallCount, equals(1))
-}) 
-
-test_that("listAllProjects filter contains additional 'group_class' field by default", {
-
-    filters <- list(list("name", "like", "MyProject"))
-    names(filters) <- c("groups")
-    filters[[length(filters) + 1]] <- list("group_class", "=", "project")
-
-    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$listAllProjects(list(list("name", "like", "MyProject")))
-
-    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
-}) 
-
-test_that("deleteProject delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$deleteCollection(projectUUID)
-
-    expect_that(fakeREST$deleteResourceCallCount, equals(1))
-}) 
-
-test_that("updateProject delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    newProjectContent <- list(newName = "Brand new shiny name")
-    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-
-    arv$updateCollection(projectUUID, newProjectContent)
-
-    expect_that(fakeREST$updateResourceCallCount, equals(1))
-}) 
-
-test_that("updateProject adds content to request parameter named 'group'", {
-
-    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    body <- list(list())
-    names(body) <- c("group")
-    body$group <- list(name = "MyProject", desc = "No description")
-
-    fakeREST <- FakeRESTService$new(returnContent = body)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$updateProject(projectUUID,
-                      list(name = "MyProject", desc = "No description"))
-
-    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
-}) 
-
-test_that("createProject delegates operation to RESTService class", {
-
-    arv <- Arvados$new("token", "hostName")
-    fakeREST <- FakeRESTService$new()
-    arv$setRESTService(fakeREST)
-    projectContent <- list(newName = "Brand new shiny name")
-
-    arv$createCollection(projectContent)
-
-    expect_that(fakeREST$createResourceCallCount, equals(1))
-}) 
-
-test_that("createProject request body contains 'goup_class' filed", {
-
-    body <- list(list())
-    names(body) <- c("group")
-    body$group <- c("group_class" = "project",
-                    list(name = "MyProject", desc = "No description"))
-
-    fakeREST <- FakeRESTService$new(returnContent = body)
-    arv <- Arvados$new("token", "hostName")
-    arv$setRESTService(fakeREST)
-
-    arv$createProject(list(name = "MyProject", desc = "No description"))
-
-    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
-}) 
index 90cc1499d674df18f075876c4e2ba0bf028f06e7..fb14888aab91b982d88dbdddca0be9589f757fb8 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("fakes/FakeRESTService.R")
 
 context("ArvadosFile")
@@ -268,4 +272,4 @@ test_that("move moves arvados file inside collection tree", {
 
     expect_that(dogIsNullOnOldLocation, is_true())
     expect_that(dogExistsOnNewLocation, is_true())
-}) 
+})
index ec00ca3c66dbcc66d875abee3d810a9ba06b9cd9..c3c70910e4c63acea6d86f5df71cc9bab9f3e72f 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("fakes/FakeRESTService.R")
 
 context("Collection")
index 42a54bf69422a31235768488ff2839716011d25d..5c8a40526988bb562c45b5702fd921a743f0a77c 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 context("CollectionTree")
 
 test_that("constructor creates file tree from character array properly", {
index b2862128c261f9cf8b8634ebcc384fe3113a286d..a119d88bf82fa226e26d5127f3ae001d1b515a2e 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 context("Http Parser")
 
 
index 66ab9af19636a33f96e257eebbdf3fa0813add72..5ad8aa03115207035ee7f369ded5fbcd597e0ba7 100644 (file)
@@ -1,28 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 context("Http Request")
 
 
 test_that("execyte raises exception if http verb is not valid", {
 
     http <- HttpRequest$new()
-    expect_that(http$execute("FAKE VERB", "url"),
+    expect_that(http$exec("FAKE VERB", "url"),
                throws_error("Http verb is not valid."))
 }) 
 
-test_that(paste("createQuery generates and encodes query portion of http",
-                "request based on filters, limit and offset parameters"), {
+test_that("createQuery generates and encodes query portion of http", {
 
     http <- HttpRequest$new()
-    filters <- list(list("color", "=", "red"))
-    limit <- 20
-    offset <- 50
-    expect_that(http$createQuery(filters, limit, offset),
+    queryParams <- list()
+    queryParams$filters <- list(list("color", "=", "red"))
+    queryParams$limit <- 20
+    queryParams$offset <- 50
+    expect_that(http$createQuery(queryParams),
                 equals(paste0("/?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red",
                               "%22%5D%5D&limit=20&offset=50")))
 }) 
 
-test_that(paste("createQuery generates and empty string",
-                "when filters, limit and offset parameters are set to NULL"), {
+test_that("createQuery generates and empty string when queryParams is an empty list", {
 
     http <- HttpRequest$new()
-    expect_that(http$createQuery(NULL, NULL, NULL), equals(""))
+    expect_that(http$createQuery(list()), equals(""))
 }) 
index d4f3c2c1c0b809f48fa983cf55d504e6da42eadf..859b6180f3380c2d834b99e126aa0c7761155368 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("fakes/FakeArvados.R")
 source("fakes/FakeHttpRequest.R")
 source("fakes/FakeHttpParser.R")
@@ -31,264 +35,6 @@ test_that("getWebDavHostName returns webDAV host name properly", {
     expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName())) 
 }) 
 
-test_that("getResource calls REST service properly", {
-
-    serverResponse <- NULL
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
-
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, FakeHttpParser$new(),
-                            0, "webDavHost")
-
-    REST$getResource("collections", resourceUUID)
-
-    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
-    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
-    expect_that(httpRequest$numberOfGETRequests, equals(1))
-}) 
-
-test_that("getResource parses server response", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    httpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(), httpParser,
-                            0, "webDavHost")
-
-    REST$getResource("collections", resourceUUID)
-
-    expect_that(httpParser$parserCallCount, equals(1))
-}) 
-
-test_that("getResource raises exception if response contains errors field", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    serverResponse <- list(errors = 404)
-
-    REST <- RESTService$new("token", "host",
-                            FakeHttpRequest$new(NULL, serverResponse),
-                            FakeHttpParser$new(),
-                            0, "webDavHost")
-    
-    expect_that(REST$getResource("collections", resourceUUID), throws_error("404", fixed = TRUE))
-}) 
-
-test_that("listResources calls REST service properly", {
-
-    serverResponse <- NULL
-    expectedURL    <- paste0("https://host/arvados/v1/collections")
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, FakeHttpParser$new(),
-                            0, "webDavHost")
-
-    REST$listResources("collections")
-
-    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
-    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
-    expect_that(httpRequest$numberOfGETRequests, equals(1))
-}) 
-
-test_that("listResources parses server response", {
-
-    httpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(), httpParser,
-                            0, "webDavHost")
-
-    REST$listResources("collections")
-
-    expect_that(httpParser$parserCallCount, equals(1))
-}) 
-
-test_that("listResources raises exception if response contains errors field", {
-
-    serverResponse <- list(errors = 404)
-
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(NULL, serverResponse),
-                            FakeHttpParser$new(),
-                            0, "webDavHost")
-    
-    expect_that(REST$listResources("collections"), throws_error("404", fixed = TRUE))
-}) 
-
-test_that("fetchAllItems always returns all resource items from server", {
-
-    expectedURL <- NULL
-    serverResponse <- list(items_available = 8,
-                           items = list("collection1",
-                                        "collection2",
-                                        "collection3",
-                                        "collection4",
-                                        "collection5",
-                                        "collection6",
-                                        "collection7",
-                                        "collection8"))
-
-    httpParser <- FakeHttpParser$new()
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-    httpRequest$serverMaxElementsPerRequest <- 3
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, httpParser,
-                            0, "webDavHost")
-
-    result <- REST$fetchAllItems(NULL, NULL)
-
-    expect_that(length(result), equals(8))
-    expect_that(httpRequest$numberOfGETRequests, equals(3))
-    expect_that(httpParser$parserCallCount, equals(3))
-}) 
-
-test_that("deleteResource calls REST service properly", {
-
-    serverResponse <- NULL
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
-
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, FakeHttpParser$new(),
-                            0, "webDavHost")
-
-    REST$deleteResource("collections", resourceUUID)
-
-    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
-    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
-    expect_that(httpRequest$numberOfDELETERequests, equals(1))
-}) 
-
-test_that("deleteCollection parses server response", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    httpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(), httpParser,
-                            0, "webDavHost")
-
-    REST$deleteResource("collections", resourceUUID)
-
-    expect_that(httpParser$parserCallCount, equals(1))
-}) 
-
-test_that("deleteCollection raises exception if response contains errors field", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    serverResponse <- list(errors = 404)
-
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(NULL, serverResponse),
-                            FakeHttpParser$new(),
-                            0, "webDavHost")
-    
-    expect_that(REST$deleteResource("collections", resourceUUID), throws_error("404", fixed = TRUE))
-}) 
-
-test_that("updateResource calls REST service properly", {
-
-    serverResponse <- NULL
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
-    newResourceContent <- list(newName = "Brand new shiny name")
-
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, FakeHttpParser$new(),
-                            0, "webDavHost")
-
-    REST$updateResource("collections", resourceUUID, newResourceContent)
-
-    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
-    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
-    expect_that(httpRequest$JSONEncodedBodyIsProvided, is_true())
-    expect_that(httpRequest$numberOfPUTRequests, equals(1))
-}) 
-
-test_that("updateResource parses server response", {
-
-    newResourceContent <- list(newName = "Brand new shiny name")
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    httpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(), httpParser,
-                            0, "webDavHost")
-
-    REST$updateResource("collections", resourceUUID, newResourceContent)
-
-    expect_that(httpParser$parserCallCount, equals(1))
-}) 
-
-test_that("updateResource raises exception if response contains errors field", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    serverResponse <- list(errors = 404)
-    newResourceContent <- list(newName = "Brand new shiny name")
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(NULL, serverResponse),
-                            FakeHttpParser$new(),
-                            0, "webDavHost")
-    
-    expect_that(REST$updateResource("collections", resourceUUID, newResourceContent),
-                throws_error("404", fixed = TRUE))
-}) 
-
-test_that("createResource calls REST service properly", {
-
-    resourceContent <- list(name = "My favorite collection")
-    serverResponse <- NULL
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    expectedURL <- "https://host/arvados/v1/collections"
-    newResourceContent <- list(newName = "Brand new shiny name")
-
-    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
-
-    REST <- RESTService$new("token", "host", 
-                            httpRequest, FakeHttpParser$new(),
-                            0, "webDavHost")
-
-    REST$createResource("collections", resourceContent)
-
-    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
-    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
-    expect_that(httpRequest$JSONEncodedBodyIsProvided, is_true())
-    expect_that(httpRequest$numberOfPOSTRequests, equals(1))
-}) 
-
-test_that("createResource parses server response", {
-
-    resourceContent <- list(newName = "Brand new shiny name")
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    httpParser <- FakeHttpParser$new()
-    REST <- RESTService$new("token", "host",
-                            FakeHttpRequest$new(), httpParser,
-                            0, "webDavHost")
-
-    REST$createResource("collections", resourceContent)
-
-    expect_that(httpParser$parserCallCount, equals(1))
-}) 
-
-test_that("createResource raises exception if response contains errors field", {
-
-    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
-    serverResponse <- list(errors = 404)
-    resourceContent <- list(newName = "Brand new shiny name")
-    REST <- RESTService$new("token", "host", 
-                            FakeHttpRequest$new(NULL, serverResponse),
-                            FakeHttpParser$new(),
-                            0, "webDavHost")
-    
-    expect_that(REST$createResource("collections", resourceContent),
-                throws_error("404", fixed = TRUE))
-}) 
-
 test_that("create calls REST service properly", {
 
     uuid <- "aaaaa-j7d0g-ccccccccccccccc"
index 1b141e10514c91e3db63ad0368989141137726ff..e025586c58a968f6c0d61a47512087a69d601635 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 source("fakes/FakeRESTService.R")
 
 context("Subcollection")
@@ -353,4 +357,4 @@ test_that(paste("getSizeInBytes delegates size calculation",
     resourceSize <- animal$getSizeInBytes()
 
     expect_that(resourceSize, equals(100))
-}) 
+})
index 62065f8e88f106b3bf988c418f242d57176cf5fa..9f5e07c1767af6c089274a308dc3dc270fb25c2f 100644 (file)
@@ -1,5 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 context("Utility function")
 
+test_that("listAll always returns all resource items from server", {
+
+    serverResponseLimit <- 3
+    itemsAvailable <- 8
+    items <- list("collection1", "collection2", "collection3", "collection4",
+                  "collection5", "collection6", "collection7", "collection8")
+
+    testFunction <- function(offset, ...)
+    {
+        response <- list()
+        response$items_available <- itemsAvailable
+
+        maxIndex <- offset + serverResponseLimit
+        lastElementIndex <- if(maxIndex < itemsAvailable) maxIndex else itemsAvailable
+
+        response$items <- items[(offset + 1):lastElementIndex]
+        response
+    }
+
+    result <- listAll(testFunction)
+
+    expect_that(length(result), equals(8))
+}) 
+
 test_that("trimFromStart trims string correctly if string starts with trimCharacters", {
 
     sample <- "./something/random"
index 93968e2b94b7f9486f83ad0f9ca3b997e8294f3c..fd48b4852df4f1223eb7ce6fc125fc2234e78f6d 100644 (file)
@@ -7,12 +7,14 @@ if not File.exist?('/usr/bin/git') then
   exit
 end
 
+git_latest_tag = `git describe --abbrev=0`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
 git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
 git_timestamp = Time.at(git_timestamp.to_i).utc
 
 Gem::Specification.new do |s|
   s.name        = 'arvados-cli'
-  s.version     = "0.1.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
   s.date        = git_timestamp.strftime("%Y-%m-%d")
   s.summary     = "Arvados CLI tools"
   s.description = "Arvados command line tools, git commit #{git_hash}"
index a0b71723c13163011f9d153454de0f9fb0528bee..131795ee2c0173703a7385c8676d11536ff17398 100644 (file)
@@ -17,6 +17,10 @@ import json
 import re
 from functools import partial
 import pkg_resources  # part of setuptools
+import Queue
+import time
+import signal
+import thread
 
 from cwltool.errors import WorkflowException
 import cwltool.main
@@ -24,26 +28,31 @@ import cwltool.workflow
 import cwltool.process
 from schema_salad.sourceline import SourceLine
 import schema_salad.validate as validate
+import cwltool.argparser
 
 import arvados
 import arvados.config
 from arvados.keep import KeepClient
 from arvados.errors import ApiError
+import arvados.commands._util as arv_cmd
 
 from .arvcontainer import ArvadosContainer, RunnerContainer
 from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
-from. runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, upload_dependencies
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps
 from .arvtool import ArvadosCommandTool
 from .arvworkflow import ArvadosWorkflow, upload_workflow
 from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache
 from .perf import Perf
 from .pathmapper import NoFollowPathMapper
+from .task_queue import TaskQueue
+from .context import ArvLoadingContext, ArvRuntimeContext
 from ._version import __version__
 
 from cwltool.pack import pack
 from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, get_listing
 from cwltool.command_line_tool import compute_checksums
+
 from arvados.api import OrderedJsonModel
 
 logger = logging.getLogger('arvados.cwl-runner')
@@ -62,26 +71,39 @@ class ArvCwlRunner(object):
 
     """
 
-    def __init__(self, api_client, work_api=None, keep_client=None, output_name=None, output_tags=None, num_retries=4):
+    def __init__(self, api_client,
+                 arvargs=None,
+                 keep_client=None,
+                 num_retries=4,
+                 thread_count=4):
+
+        if arvargs is None:
+            arvargs = argparse.Namespace()
+            arvargs.work_api = None
+            arvargs.output_name = None
+            arvargs.output_tags = None
+            arvargs.thread_count = 1
+
         self.api = api_client
         self.processes = {}
-        self.lock = threading.Lock()
-        self.cond = threading.Condition(self.lock)
+        self.workflow_eval_lock = threading.Condition(threading.RLock())
         self.final_output = None
         self.final_status = None
-        self.uploaded = {}
         self.num_retries = num_retries
         self.uuid = None
         self.stop_polling = threading.Event()
         self.poll_api = None
         self.pipeline = None
         self.final_output_collection = None
-        self.output_name = output_name
-        self.output_tags = output_tags
+        self.output_name = arvargs.output_name
+        self.output_tags = arvargs.output_tags
         self.project_uuid = None
         self.intermediate_output_ttl = 0
         self.intermediate_output_collections = []
         self.trash_intermediate = False
+        self.thread_count = arvargs.thread_count
+        self.poll_interval = 12
+        self.loadingContext = None
 
         if keep_client is not None:
             self.keep_client = keep_client
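The constructor above now takes a single argparse Namespace instead of separate keyword arguments. A minimal calling sketch, assuming an API client from the arvados Python SDK; the Namespace field names come from this hunk, the values are illustrative:

    import argparse
    import arvados
    from arvados_cwl import ArvCwlRunner

    # Mirror the defaults the constructor fills in when arvargs is None.
    arvargs = argparse.Namespace(work_api="containers",
                                 output_name=None,
                                 output_tags=None,
                                 thread_count=4)
    runner = ArvCwlRunner(arvados.api('v1'), arvargs=arvargs)
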
@@ -90,73 +112,94 @@ class ArvCwlRunner(object):
 
         self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries)
 
+        self.fetcher_constructor = partial(CollectionFetcher,
+                                           api_client=self.api,
+                                           fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
+                                           num_retries=self.num_retries)
+
         self.work_api = None
         expected_api = ["jobs", "containers"]
         for api in expected_api:
             try:
                 methods = self.api._rootDesc.get('resources')[api]['methods']
                 if ('httpMethod' in methods['create'] and
-                    (work_api == api or work_api is None)):
+                    (arvargs.work_api == api or arvargs.work_api is None)):
                     self.work_api = api
                     break
             except KeyError:
                 pass
 
         if not self.work_api:
-            if work_api is None:
+            if arvargs.work_api is None:
                 raise Exception("No supported APIs")
             else:
                 raise Exception("Unsupported API '%s', expected one of %s" % (work_api, expected_api))
 
-    def arv_make_tool(self, toolpath_object, **kwargs):
-        kwargs["work_api"] = self.work_api
-        kwargs["fetcher_constructor"] = partial(CollectionFetcher,
-                                                api_client=self.api,
-                                                fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
-                                                num_retries=self.num_retries)
-        kwargs["resolver"] = partial(collectionResolver, self.api, num_retries=self.num_retries)
+        self.loadingContext = ArvLoadingContext(vars(arvargs))
+        self.loadingContext.fetcher_constructor = self.fetcher_constructor
+        self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
+        self.loadingContext.construct_tool_object = self.arv_make_tool
+
+
+    def arv_make_tool(self, toolpath_object, loadingContext):
         if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
-            return ArvadosCommandTool(self, toolpath_object, **kwargs)
+            return ArvadosCommandTool(self, toolpath_object, loadingContext)
         elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
-            return ArvadosWorkflow(self, toolpath_object, **kwargs)
+            return ArvadosWorkflow(self, toolpath_object, loadingContext)
         else:
-            return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
+            return cwltool.workflow.default_make_tool(toolpath_object, loadingContext)
 
     def output_callback(self, out, processStatus):
-        if processStatus == "success":
-            logger.info("Overall process status is %s", processStatus)
-            if self.pipeline:
-                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
-                                                     body={"state": "Complete"}).execute(num_retries=self.num_retries)
-        else:
-            logger.warn("Overall process status is %s", processStatus)
-            if self.pipeline:
-                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
-                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
-        self.final_status = processStatus
-        self.final_output = out
+        with self.workflow_eval_lock:
+            if processStatus == "success":
+                logger.info("Overall process status is %s", processStatus)
+                if self.pipeline:
+                    self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
+                                                         body={"state": "Complete"}).execute(num_retries=self.num_retries)
+            else:
+                logger.error("Overall process status is %s", processStatus)
+                if self.pipeline:
+                    self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
+                                                         body={"state": "Failed"}).execute(num_retries=self.num_retries)
+            self.final_status = processStatus
+            self.final_output = out
+            self.workflow_eval_lock.notifyAll()
+
+
+    def start_run(self, runnable, runtimeContext):
+        self.task_queue.add(partial(runnable.run, runtimeContext))
+
+    def process_submitted(self, container):
+        with self.workflow_eval_lock:
+            self.processes[container.uuid] = container
+
+    def process_done(self, uuid, record):
+        with self.workflow_eval_lock:
+            j = self.processes[uuid]
+            logger.info("%s %s is %s", self.label(j), uuid, record["state"])
+            self.task_queue.add(partial(j.done, record))
+            del self.processes[uuid]
+
+    def wrapped_callback(self, cb, obj, st):
+        with self.workflow_eval_lock:
+            cb(obj, st)
+            self.workflow_eval_lock.notifyAll()
+
+    def get_wrapped_callback(self, cb):
+        return partial(self.wrapped_callback, cb)
 
     def on_message(self, event):
-        if "object_uuid" in event:
-            if event["object_uuid"] in self.processes and event["event_type"] == "update":
-                if event["properties"]["new_attributes"]["state"] == "Running" and self.processes[event["object_uuid"]].running is False:
-                    uuid = event["object_uuid"]
-                    with self.lock:
-                        j = self.processes[uuid]
-                        logger.info("%s %s is Running", self.label(j), uuid)
+        if event.get("object_uuid") in self.processes and event["event_type"] == "update":
+            uuid = event["object_uuid"]
+            if event["properties"]["new_attributes"]["state"] == "Running":
+                with self.workflow_eval_lock:
+                    j = self.processes[uuid]
+                    if j.running is False:
                         j.running = True
                         j.update_pipeline_component(event["properties"]["new_attributes"])
-                elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
-                    uuid = event["object_uuid"]
-                    try:
-                        self.cond.acquire()
-                        j = self.processes[uuid]
-                        logger.info("%s %s is %s", self.label(j), uuid, event["properties"]["new_attributes"]["state"])
-                        with Perf(metrics, "done %s" % j.name):
-                            j.done(event["properties"]["new_attributes"])
-                        self.cond.notify()
-                    finally:
-                        self.cond.release()
+                        logger.info("%s %s is Running", self.label(j), uuid)
+            elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
+                self.process_done(uuid, event["properties"]["new_attributes"])
 
     def label(self, obj):
         return "[%s %s]" % (self.work_api[0:-1], obj.name)
@@ -168,15 +211,19 @@ class ArvCwlRunner(object):
         """
 
         try:
+            remain_wait = self.poll_interval
             while True:
-                self.stop_polling.wait(15)
+                if remain_wait > 0:
+                    self.stop_polling.wait(remain_wait)
                 if self.stop_polling.is_set():
                     break
-                with self.lock:
-                    keys = self.processes.keys()
+                with self.workflow_eval_lock:
+                    keys = list(self.processes.keys())
                 if not keys:
+                    remain_wait = self.poll_interval
                     continue
 
+                begin_poll = time.time()
                 if self.work_api == "containers":
                     table = self.poll_api.container_requests()
                 elif self.work_api == "jobs":
@@ -186,6 +233,7 @@ class ArvCwlRunner(object):
                     proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
                 except Exception as e:
                     logger.warn("Error checking states on API server: %s", e)
+                    remain_wait = self.poll_interval
                     continue
 
                 for p in proc_states["items"]:
@@ -196,21 +244,16 @@ class ArvCwlRunner(object):
                             "new_attributes": p
                         }
                     })
+                finish_poll = time.time()
+                remain_wait = self.poll_interval - (finish_poll - begin_poll)
         except:
-            logger.error("Fatal error in state polling thread.", exc_info=(sys.exc_info()[1] if self.debug else False))
-            self.cond.acquire()
-            self.processes.clear()
-            self.cond.notify()
-            self.cond.release()
+            logger.exception("Fatal error in state polling thread.")
+            with self.workflow_eval_lock:
+                self.processes.clear()
+                self.workflow_eval_lock.notifyAll()
         finally:
             self.stop_polling.set()
 
-    def get_uploaded(self):
-        return self.uploaded.copy()
-
-    def add_uploaded(self, src, pair):
-        self.uploaded[src] = pair
-
     def add_intermediate_output(self, uuid):
         if uuid:
             self.intermediate_output_collections.append(uuid)
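The remain_wait bookkeeping above keeps the poll loop on a steady cadence: the time a polling round trip takes is subtracted from the next sleep, so state updates arrive roughly every poll_interval seconds regardless of API latency. A runnable sketch of the idea (poll_once is a hypothetical stand-in for the container_requests list call):

    import time

    poll_interval = 12

    def poll_once():
        time.sleep(0.5)  # hypothetical: one list() round trip to the API server

    remain_wait = poll_interval
    for _ in range(3):  # the real loop runs until stop_polling is set
        if remain_wait > 0:
            time.sleep(remain_wait)  # stop_polling.wait(remain_wait) in the real code
        begin_poll = time.time()
        poll_once()
        remain_wait = poll_interval - (time.time() - begin_poll)
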
@@ -222,7 +265,7 @@ class ArvCwlRunner(object):
                 self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)
             except:
                 logger.warn("Failed to delete intermediate output: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
-            if sys.exc_info()[0] is KeyboardInterrupt:
+            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
                 break
 
     def check_features(self, obj):
@@ -246,7 +289,7 @@ class ArvCwlRunner(object):
                 with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
                     self.check_features(v)
 
-    def make_output_collection(self, name, tagsString, outputObj):
+    def make_output_collection(self, name, storage_classes, tagsString, outputObj):
         outputObj = copy.deepcopy(outputObj)
 
         files = []
@@ -297,7 +340,7 @@ class ArvCwlRunner(object):
         with final.open("cwl.output.json", "w") as f:
             json.dump(outputObj, f, sort_keys=True, indent=4, separators=(',',': '))
 
-        final.save_new(name=name, owner_uuid=self.project_uuid, ensure_unique_name=True)
+        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes, ensure_unique_name=True)
 
         logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                     final.api_response()["name"],
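make_output_collection now passes the storage classes through to save_new, as shown above. A small sketch of that save path with the arvados Python SDK, assuming a reachable API server configured via ARVADOS_API_HOST and ARVADOS_API_TOKEN; the collection name and contents are placeholders:

    import arvados
    import arvados.collection

    final = arvados.collection.Collection(api_client=arvados.api('v1'))
    with final.open("cwl.output.json", "w") as f:
        f.write("{}")  # placeholder output object
    final.save_new(name="Output of my-workflow",
                   storage_classes=["default"],
                   ensure_unique_name=True)
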
@@ -346,30 +389,31 @@ class ArvCwlRunner(object):
                                        'progress':1.0
                                    }).execute(num_retries=self.num_retries)
 
-    def arv_executor(self, tool, job_order, **kwargs):
-        self.debug = kwargs.get("debug")
+    def arv_executor(self, tool, job_order, runtimeContext, logger=None):
+        self.debug = runtimeContext.debug
 
         tool.visit(self.check_features)
 
-        self.project_uuid = kwargs.get("project_uuid")
+        self.project_uuid = runtimeContext.project_uuid
         self.pipeline = None
-        make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
-                                                                 collection_cache=self.collection_cache)
-        self.fs_access = make_fs_access(kwargs["basedir"])
-        self.secret_store = kwargs.get("secret_store")
+        self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
+        self.secret_store = runtimeContext.secret_store
 
-        self.trash_intermediate = kwargs["trash_intermediate"]
+        self.trash_intermediate = runtimeContext.trash_intermediate
         if self.trash_intermediate and self.work_api != "containers":
             raise Exception("--trash-intermediate is only supported with --api=containers.")
 
-        self.intermediate_output_ttl = kwargs["intermediate_output_ttl"]
+        self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl
         if self.intermediate_output_ttl and self.work_api != "containers":
             raise Exception("--intermediate-output-ttl is only supported with --api=containers.")
         if self.intermediate_output_ttl < 0:
             raise Exception("Invalid value %d for --intermediate-output-ttl, cannot be less than zero" % self.intermediate_output_ttl)
 
-        if not kwargs.get("name"):
-            kwargs["name"] = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+        if runtimeContext.submit_request_uuid and self.work_api != "containers":
+            raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
+
+        if not runtimeContext.name:
+            runtimeContext.name = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
 
         # Upload direct dependencies of workflow steps, get back mapping of files to keep references.
         # Also uploads docker images.
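The hunks that follow migrate arv_executor from a **kwargs bag to a runtime-context object. A minimal sketch of that pattern, assuming ArvRuntimeContext behaves like cwltool's RuntimeContext (named attributes with defaults, plus copy()); the attribute list is illustrative, not exhaustive:

    import copy

    class RuntimeContextSketch(object):
        def __init__(self, kwargs=None):
            # defaults live in one place instead of scattered kwargs.get() calls
            self.debug = False
            self.project_uuid = None
            self.trash_intermediate = False
            self.intermediate_output_ttl = 0
            for k, v in (kwargs or {}).items():
                setattr(self, k, v)

        def copy(self):
            return copy.copy(self)
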
@@ -378,26 +422,28 @@ class ArvCwlRunner(object):
         # Reload tool object which may have been updated by
         # upload_workflow_deps
         # Don't validate this time because it will just print redundant errors.
+        loadingContext = self.loadingContext.copy()
+        loadingContext.loader = tool.doc_loader
+        loadingContext.avsc_names = tool.doc_schema
+        loadingContext.metadata = tool.metadata
+        loadingContext.do_validate = False
+
         tool = self.arv_make_tool(tool.doc_loader.idx[tool.tool["id"]],
-                                  makeTool=self.arv_make_tool,
-                                  loader=tool.doc_loader,
-                                  avsc_names=tool.doc_schema,
-                                  metadata=tool.metadata,
-                                  do_validate=False)
+                                  loadingContext)
 
         # Upload local file references in the job order.
-        job_order = upload_job_order(self, "%s input" % kwargs["name"],
+        job_order = upload_job_order(self, "%s input" % runtimeContext.name,
                                      tool, job_order)
 
-        existing_uuid = kwargs.get("update_workflow")
-        if existing_uuid or kwargs.get("create_workflow"):
+        existing_uuid = runtimeContext.update_workflow
+        if existing_uuid or runtimeContext.create_workflow:
             # Create a pipeline template or workflow record and exit.
             if self.work_api == "jobs":
                 tmpl = RunnerTemplate(self, tool, job_order,
-                                      kwargs.get("enable_reuse"),
+                                      runtimeContext.enable_reuse,
                                       uuid=existing_uuid,
-                                      submit_runner_ram=kwargs.get("submit_runner_ram"),
-                                      name=kwargs["name"],
+                                      submit_runner_ram=runtimeContext.submit_runner_ram,
+                                      name=runtimeContext.name,
                                       merged_map=merged_map)
                 tmpl.save()
                 # cwltool.main will write our return value to stdout.
@@ -406,99 +452,103 @@ class ArvCwlRunner(object):
                 return (upload_workflow(self, tool, job_order,
                                         self.project_uuid,
                                         uuid=existing_uuid,
-                                        submit_runner_ram=kwargs.get("submit_runner_ram"),
-                                        name=kwargs["name"],
+                                        submit_runner_ram=runtimeContext.submit_runner_ram,
+                                        name=runtimeContext.name,
                                         merged_map=merged_map),
                         "success")
 
-        self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
-        self.eval_timeout = kwargs.get("eval_timeout")
+        self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
+        self.eval_timeout = runtimeContext.eval_timeout
 
-        kwargs["make_fs_access"] = make_fs_access
-        kwargs["enable_reuse"] = kwargs.get("enable_reuse")
-        kwargs["use_container"] = True
-        kwargs["tmpdir_prefix"] = "tmp"
-        kwargs["compute_checksum"] = kwargs.get("compute_checksum")
+        runtimeContext = runtimeContext.copy()
+        runtimeContext.use_container = True
+        runtimeContext.tmpdir_prefix = "tmp"
+        runtimeContext.work_api = self.work_api
 
         if self.work_api == "containers":
             if self.ignore_docker_for_reuse:
                 raise Exception("--ignore-docker-for-reuse not supported with containers API.")
-            kwargs["outdir"] = "/var/spool/cwl"
-            kwargs["docker_outdir"] = "/var/spool/cwl"
-            kwargs["tmpdir"] = "/tmp"
-            kwargs["docker_tmpdir"] = "/tmp"
+            runtimeContext.outdir = "/var/spool/cwl"
+            runtimeContext.docker_outdir = "/var/spool/cwl"
+            runtimeContext.tmpdir = "/tmp"
+            runtimeContext.docker_tmpdir = "/tmp"
         elif self.work_api == "jobs":
-            if kwargs["priority"] != DEFAULT_PRIORITY:
+            if runtimeContext.priority != DEFAULT_PRIORITY:
                 raise Exception("--priority not implemented for jobs API.")
-            kwargs["outdir"] = "$(task.outdir)"
-            kwargs["docker_outdir"] = "$(task.outdir)"
-            kwargs["tmpdir"] = "$(task.tmpdir)"
+            runtimeContext.outdir = "$(task.outdir)"
+            runtimeContext.docker_outdir = "$(task.outdir)"
+            runtimeContext.tmpdir = "$(task.tmpdir)"
 
-        if kwargs["priority"] < 1 or kwargs["priority"] > 1000:
+        if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
             raise Exception("--priority must be in the range 1..1000.")
 
         runnerjob = None
-        if kwargs.get("submit"):
+        if runtimeContext.submit:
             # Submit a runner job to run the workflow for us.
             if self.work_api == "containers":
-                if tool.tool["class"] == "CommandLineTool" and kwargs.get("wait"):
-                    kwargs["runnerjob"] = tool.tool["id"]
+                if tool.tool["class"] == "CommandLineTool" and runtimeContext.wait:
+                    runtimeContext.runnerjob = tool.tool["id"]
                     runnerjob = tool.job(job_order,
                                          self.output_callback,
-                                         **kwargs).next()
+                                         runtimeContext).next()
                 else:
-                    runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"),
+                    runnerjob = RunnerContainer(self, tool, job_order, runtimeContext.enable_reuse,
                                                 self.output_name,
                                                 self.output_tags,
-                                                submit_runner_ram=kwargs.get("submit_runner_ram"),
-                                                name=kwargs.get("name"),
-                                                on_error=kwargs.get("on_error"),
-                                                submit_runner_image=kwargs.get("submit_runner_image"),
-                                                intermediate_output_ttl=kwargs.get("intermediate_output_ttl"),
+                                                submit_runner_ram=runtimeContext.submit_runner_ram,
+                                                name=runtimeContext.name,
+                                                on_error=runtimeContext.on_error,
+                                                submit_runner_image=runtimeContext.submit_runner_image,
+                                                intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
                                                 merged_map=merged_map,
-                                                priority=kwargs.get("priority"),
+                                                priority=runtimeContext.priority,
                                                 secret_store=self.secret_store)
             elif self.work_api == "jobs":
-                runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
+                runnerjob = RunnerJob(self, tool, job_order, runtimeContext.enable_reuse,
                                       self.output_name,
                                       self.output_tags,
-                                      submit_runner_ram=kwargs.get("submit_runner_ram"),
-                                      name=kwargs.get("name"),
-                                      on_error=kwargs.get("on_error"),
-                                      submit_runner_image=kwargs.get("submit_runner_image"),
+                                      submit_runner_ram=runtimeContext.submit_runner_ram,
+                                      name=runtimeContext.name,
+                                      on_error=runtimeContext.on_error,
+                                      submit_runner_image=runtimeContext.submit_runner_image,
                                       merged_map=merged_map)
-        elif "cwl_runner_job" not in kwargs and self.work_api == "jobs":
+        elif runtimeContext.cwl_runner_job is None and self.work_api == "jobs":
             # Create pipeline for local run
             self.pipeline = self.api.pipeline_instances().create(
                 body={
                     "owner_uuid": self.project_uuid,
-                    "name": kwargs["name"] if kwargs.get("name") else shortname(tool.tool["id"]),
+                    "name": runtimeContext.name if runtimeContext.name else shortname(tool.tool["id"]),
                     "components": {},
                     "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
             logger.info("Pipeline instance %s", self.pipeline["uuid"])
 
-        if runnerjob and not kwargs.get("wait"):
-            runnerjob.run(wait=kwargs.get("wait"))
+        if runnerjob and not runtimeContext.wait:
+            submitargs = runtimeContext.copy()
+            submitargs.submit = False
+            runnerjob.run(submitargs)
             return (runnerjob.uuid, "success")
 
         self.poll_api = arvados.api('v1')
         self.polling_thread = threading.Thread(target=self.poll_states)
         self.polling_thread.start()
 
+        self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)
+
         if runnerjob:
             jobiter = iter((runnerjob,))
         else:
-            if "cwl_runner_job" in kwargs:
-                self.uuid = kwargs.get("cwl_runner_job").get('uuid')
+            if runtimeContext.cwl_runner_job is not None:
+                self.uuid = runtimeContext.cwl_runner_job.get('uuid')
             jobiter = tool.job(job_order,
                                self.output_callback,
-                               **kwargs)
+                               runtimeContext)
 
         try:
-            self.cond.acquire()
-            # Will continue to hold the lock for the duration of this code
-            # except when in cond.wait(), at which point on_message can update
-            # job state and process output callbacks.
+            self.workflow_eval_lock.acquire()
+            # Hold the lock while this code runs; it is released only
+            # inside self.workflow_eval_lock.wait(), at which point
+            # on_message can update job state and process output
+            # callbacks.
 
             loopperf = Perf(metrics, "jobiter")
             loopperf.__enter__()
@@ -508,26 +558,31 @@ class ArvCwlRunner(object):
                 if self.stop_polling.is_set():
                     break
 
+                if self.task_queue.error is not None:
+                    raise self.task_queue.error
+
                 if runnable:
                     with Perf(metrics, "run"):
-                        runnable.run(**kwargs)
+                        self.start_run(runnable, runtimeContext)
                 else:
-                    if self.processes:
-                        self.cond.wait(1)
+                    if (self.task_queue.in_flight + len(self.processes)) > 0:
+                        self.workflow_eval_lock.wait(3)
                     else:
-                        logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
+                        logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
                         break
                 loopperf.__enter__()
             loopperf.__exit__()
 
-            while self.processes:
-                self.cond.wait(1)
+            while (self.task_queue.in_flight + len(self.processes)) > 0:
+                if self.task_queue.error is not None:
+                    raise self.task_queue.error
+                self.workflow_eval_lock.wait(3)
 
         except UnsupportedRequirement:
             raise
         except:
-            if sys.exc_info()[0] is KeyboardInterrupt:
-                logger.error("Interrupted, marking pipeline as failed")
+            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
+                logger.error("Interrupted, workflow will be cancelled")
             else:
                 logger.error("Execution failed: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
             if self.pipeline:
@@ -537,9 +592,11 @@ class ArvCwlRunner(object):
                 self.api.container_requests().update(uuid=runnerjob.uuid,
                                                      body={"priority": "0"}).execute(num_retries=self.num_retries)
         finally:
-            self.cond.release()
+            self.workflow_eval_lock.release()
+            self.task_queue.drain()
             self.stop_polling.set()
             self.polling_thread.join()
+            self.task_queue.join()
 
         if self.final_status == "UnsupportedRequirement":
             raise UnsupportedRequirement("Check log for details.")
@@ -547,17 +604,19 @@ class ArvCwlRunner(object):
         if self.final_output is None:
             raise WorkflowException("Workflow did not return a result.")
 
-        if kwargs.get("submit") and isinstance(runnerjob, Runner):
+        if runtimeContext.submit and isinstance(runnerjob, Runner):
             logger.info("Final output collection %s", runnerjob.final_output)
         else:
             if self.output_name is None:
                 self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
             if self.output_tags is None:
                 self.output_tags = ""
-            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, self.output_tags, self.final_output)
+
+            storage_classes = runtimeContext.storage_classes.strip().split(",")
+            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
             self.set_crunch_output()
 
-        if kwargs.get("compute_checksum"):
+        if runtimeContext.compute_checksum:
             adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))
             adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
 
@@ -655,12 +714,16 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     parser.add_argument("--submit-runner-ram", type=int,
                         help="RAM (in MiB) required for the workflow runner job (default 1024)",
-                        default=1024)
+                        default=None)
 
     parser.add_argument("--submit-runner-image", type=str,
                         help="Docker image for workflow runner job, default arvados/jobs:%s" % __version__,
                         default=None)
 
+    parser.add_argument("--submit-request-uuid", type=str,
+                        default=None,
+                        help="Update and commit supplied container request instead of creating a new one (containers API only).")
+
     parser.add_argument("--name", type=str,
                         help="Name to use for workflow execution instance.",
                         default=None)
@@ -672,6 +735,8 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     parser.add_argument("--enable-dev", action="store_true",
                         help="Enable loading and running development versions "
                              "of CWL spec.", default=False)
+    parser.add_argument('--storage-classes', default="default", type=str,
+                        help="Specify comma-separated list of storage classes to be used when saving workflow output to Keep.")
 
     parser.add_argument("--intermediate-output-ttl", type=int, metavar="N",
                         help="If N > 0, intermediate output collections will be trashed N seconds after creation.  Default is 0 (don't trash).",
@@ -689,6 +754,9 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         action="store_true", default=False,
                         help=argparse.SUPPRESS)
 
+    parser.add_argument("--thread-count", type=int,
+                        default=4, help="Number of threads to use for job submission and output collection.")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--trash-intermediate", action="store_true",
                         default=False, dest="trash_intermediate",
@@ -719,12 +787,28 @@ def add_arv_hints():
         "http://arvados.org/cwl#ReuseRequirement"
     ])
 
-def main(args, stdout, stderr, api_client=None, keep_client=None):
+def exit_signal_handler(sigcode, frame):
+    logger.error("Caught signal {}, exiting.".format(sigcode))
+    sys.exit(-sigcode)
+
+def main(args, stdout, stderr, api_client=None, keep_client=None,
+         install_sig_handlers=True):
     parser = arg_parser()
 
     job_order_object = None
     arvargs = parser.parse_args(args)
 
+    if len(arvargs.storage_classes.strip().split(',')) > 1:
+        logger.error("Multiple storage classes are not supported currently.")
+        return 1
+
+    arvargs.use_container = True
+    arvargs.relax_path_checks = True
+    arvargs.print_supported_versions = False
+
+    if install_sig_handlers:
+        arv_cmd.install_signal_handlers()
+
     if arvargs.update_workflow:
         if arvargs.update_workflow.find('-7fd4e-') == 5:
             want_api = 'containers'
@@ -745,12 +829,13 @@ def main(args, stdout, stderr, api_client=None, keep_client=None):
 
     try:
         if api_client is None:
-            api_client=arvados.api('v1', model=OrderedJsonModel())
+            api_client = arvados.safeapi.ThreadSafeApiCache(api_params={"model": OrderedJsonModel()}, keep_params={"num_retries": 4})
+            keep_client = api_client.keep
+            # Make an API call now so errors are reported early.
+            api_client.users().current().execute()
         if keep_client is None:
             keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
-        runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client,
-                              num_retries=4, output_name=arvargs.output_name,
-                              output_tags=arvargs.output_tags)
+        runner = ArvCwlRunner(api_client, arvargs, keep_client=keep_client, num_retries=4)
     except Exception as e:
         logger.error(e)
         return 1
@@ -775,26 +860,21 @@ def main(args, stdout, stderr, api_client=None, keep_client=None):
     else:
         arvados.log_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
 
-    arvargs.conformance_test = None
-    arvargs.use_container = True
-    arvargs.relax_path_checks = True
-    arvargs.print_supported_versions = False
+    for key, val in cwltool.argparser.get_default_args().items():
+        if not hasattr(arvargs, key):
+            setattr(arvargs, key, val)
 
-    make_fs_access = partial(CollectionFsAccess,
-                           collection_cache=runner.collection_cache)
+    runtimeContext = ArvRuntimeContext(vars(arvargs))
+    runtimeContext.make_fs_access = partial(CollectionFsAccess,
+                                            collection_cache=runner.collection_cache)
 
     return cwltool.main.main(args=arvargs,
                              stdout=stdout,
                              stderr=stderr,
                              executor=runner.arv_executor,
-                             makeTool=runner.arv_make_tool,
                              versionfunc=versionstring,
                              job_order_object=job_order_object,
-                             make_fs_access=make_fs_access,
-                             fetcher_constructor=partial(CollectionFetcher,
-                                                         api_client=api_client,
-                                                         fs_access=make_fs_access(""),
-                                                         num_retries=runner.num_retries),
-                             resolver=partial(collectionResolver, api_client, num_retries=runner.num_retries),
                              logger_handler=arvados.log_handler,
-                             custom_schema_callback=add_arv_hints)
+                             custom_schema_callback=add_arv_hints,
+                             loadingContext=runner.loadingContext,
+                             runtimeContext=runtimeContext)
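
The executor loop above coordinates submissions through the shared workflow_eval_lock condition variable and a TaskQueue whose implementation is not part of this excerpt. A minimal sketch of the queue contract the loop relies on (in_flight accounting, error capture, drain and join); the real class may differ in detail:

    import threading
    import Queue  # Python 2; "queue" on Python 3

    class TaskQueue(object):
        def __init__(self, lock, thread_count):
            self.lock = lock                  # the shared workflow_eval_lock
            self.in_flight = 0                # submitted but unfinished tasks
            self.error = None                 # first exception from a worker
            self.task_queue = Queue.Queue()
            self.task_queue_threads = []
            for _ in xrange(thread_count):
                t = threading.Thread(target=self.task_queue_func)
                t.daemon = True
                t.start()
                self.task_queue_threads.append(t)

        def task_queue_func(self):
            while True:
                task = self.task_queue.get()
                if task is None:
                    return
                try:
                    task()
                except Exception as e:
                    with self.lock:
                        self.error = e
                with self.lock:
                    self.in_flight -= 1
                    self.lock.notify()        # wake the executor's wait(3)

        def add(self, task):
            with self.lock:
                self.in_flight += 1
            self.task_queue.put(task)

        def drain(self):
            # Discard tasks that were queued but never started.
            try:
                while True:
                    self.task_queue.get_nowait()
            except Queue.Empty:
                pass

        def join(self):
            for t in self.task_queue_threads:
                self.task_queue.put(None)     # one poison pill per worker
            for t in self.task_queue_threads:
                t.join()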
index 2ab96c94f0b3e54b42ae51b9b9f42eca6c7071fc..4f762192a2a386f3c08c0d17e5704eccbf8f65e3 100644 (file)
@@ -47,6 +47,28 @@ $graph:
         "_type": "@id"
         refScope: 0
 
+- name: cwltool:TimeLimit
+  type: record
+  inVocab: false
+  extends: cwl:ProcessRequirement
+  doc: |
+    Set an upper limit on the execution time of a CommandLineTool or
+    ExpressionTool.  A tool execution which exceeds the time limit may
+    be preemptively terminated and considered failed.  May also be
+    used by batch systems to make scheduling decisions.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'TimeLimit'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: timelimit
+      type: [long, string]
+      doc: |
+        The time limit, in seconds.  A time limit of zero means no
+        time limit.  Negative time limits are an error.
+
 - name: RunInSingleContainer
   type: record
   extends: cwl:ProcessRequirement
@@ -189,3 +211,25 @@ $graph:
         _type: "@vocab"
     - name: enableReuse
       type: boolean
+
+- name: WorkflowRunnerResources
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Specify memory and/or cores resource requests for the CWL runner process itself.
+  fields:
+    class:
+      type: string
+      doc: "Always 'arv:WorkflowRunnerResources'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    ramMin:
+      type: int?
+      doc: Minimum RAM, in mebibytes (2**20)
+      jsonldPredicate: "https://w3id.org/cwl/cwl#ResourceRequirement/ramMin"
+    coresMin:
+      type: int?
+      doc: Minimum cores allocated to cwl-runner
+      jsonldPredicate: "https://w3id.org/cwl/cwl#ResourceRequirement/coresMin"
\ No newline at end of file
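
For reference, these extensions are attached as hints on a workflow or tool. Sketched here in the packed-document Python dict form that upload_workflow() manipulates later in this commit; the URIs assume the usual arv: (http://arvados.org/cwl#) and cwltool: (http://commonwl.org/cwltool#) namespace expansions:

    # Illustrative hints only; the prefixes used in a .cwl file depend
    # on its $namespaces block.
    hints = [
        {"class": "http://arvados.org/cwl#WorkflowRunnerResources",
         "ramMin": 2048,       # give the runner container 2048 MiB of RAM
         "coresMin": 2},       # and two cores
        {"class": "http://commonwl.org/cwltool#TimeLimit",
         "timelimit": 3600},   # cancel a step that runs over an hour
    ]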
index 5c11babfc62375037e648a5615f48e3a590a37d2..948a9a46feab30bf3f8759fee94d81d14205e42d 100644 (file)
@@ -11,12 +11,14 @@ import datetime
 import ciso8601
 import uuid
 
+from arvados_cwl.util import get_current_container, get_intermediate_collection_info
 import ruamel.yaml as yaml
 
 from cwltool.errors import WorkflowException
-from cwltool.process import get_feature, UnsupportedRequirement, shortname
+from cwltool.process import UnsupportedRequirement, shortname
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
 from cwltool.utils import aslist
+from cwltool.job import JobBase
 
 import arvados.collection
 
@@ -30,10 +32,18 @@ from .perf import Perf
 logger = logging.getLogger('arvados.cwl-runner')
 metrics = logging.getLogger('arvados.cwl-runner.metrics')
 
-class ArvadosContainer(object):
+class ArvadosContainer(JobBase):
     """Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
 
-    def __init__(self, runner):
+    def __init__(self, runner,
+                 builder,   # type: Builder
+                 joborder,  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+                 make_path_mapper,  # type: Callable[..., PathMapper]
+                 requirements,      # type: List[Dict[Text, Text]]
+                 hints,     # type: List[Dict[Text, Text]]
+                 name       # type: Text
+    ):
+        super(ArvadosContainer, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)
         self.arvrunner = runner
         self.running = False
         self.uuid = None
@@ -41,7 +51,7 @@ class ArvadosContainer(object):
     def update_pipeline_component(self, r):
         pass
 
-    def run(self, dry_run=False, pull_image=True, **kwargs):
+    def run(self, runtimeContext):
         # ArvadosCommandTool subclasses from cwltool.CommandLineTool,
         # which calls make_job_runner() to get a new ArvadosContainer
         # object.  The fields that define execution such as
@@ -51,16 +61,18 @@ class ArvadosContainer(object):
 
         container_request = {
             "command": self.command_line,
-            "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
             "output_path": self.outdir,
             "cwd": self.outdir,
-            "priority": kwargs.get("priority"),
+            "priority": runtimeContext.priority,
             "state": "Committed",
             "properties": {},
         }
         runtime_constraints = {}
 
+        if self.arvrunner.project_uuid:
+            container_request["owner_uuid"] = self.arvrunner.project_uuid
+
         if self.arvrunner.secret_store.has_secret(self.command_line):
             raise WorkflowException("Secret material leaked on command line, only file literals may contain secrets")
 
@@ -154,8 +166,14 @@ class ArvadosContainer(object):
 
                 keepemptydirs(vwd)
 
-                with Perf(metrics, "generatefiles.save_new %s" % self.name):
-                    vwd.save_new()
+                if not runtimeContext.current_container:
+                    runtimeContext.current_container = get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = get_intermediate_collection_info(self.name, runtimeContext.current_container, runtimeContext.intermediate_output_ttl)
+                vwd.save_new(name=info["name"],
+                             owner_uuid=self.arvrunner.project_uuid,
+                             ensure_unique_name=True,
+                             trash_at=info["trash_at"],
+                             properties=info["properties"])
 
                 prev = None
                 for f, p in sorteditems:
@@ -188,20 +206,20 @@ class ArvadosContainer(object):
             mounts["stdout"] = {"kind": "file",
                                 "path": "%s/%s" % (self.outdir, self.stdout)}
 
-        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
+        (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
         if not docker_req:
             docker_req = {"dockerImageId": "arvados/jobs"}
 
         container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
                                                                      docker_req,
-                                                                     pull_image,
+                                                                     runtimeContext.pull_image,
                                                                      self.arvrunner.project_uuid)
 
-        api_req, _ = get_feature(self, "http://arvados.org/cwl#APIRequirement")
+        api_req, _ = self.get_requirement("http://arvados.org/cwl#APIRequirement")
         if api_req:
             runtime_constraints["API"] = True
 
-        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+        runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
         if runtime_req:
             if "keep_cache" in runtime_req:
                 runtime_constraints["keep_cache_ram"] = runtime_req["keep_cache"] * 2**20
@@ -215,11 +233,11 @@ class ArvadosContainer(object):
                         "writable": True
                     }
 
-        partition_req, _ = get_feature(self, "http://arvados.org/cwl#PartitionRequirement")
+        partition_req, _ = self.get_requirement("http://arvados.org/cwl#PartitionRequirement")
         if partition_req:
             scheduling_parameters["partitions"] = aslist(partition_req["partition"])
 
-        intermediate_output_req, _ = get_feature(self, "http://arvados.org/cwl#IntermediateOutput")
+        intermediate_output_req, _ = self.get_requirement("http://arvados.org/cwl#IntermediateOutput")
         if intermediate_output_req:
             self.output_ttl = intermediate_output_req["outputTTL"]
         else:
@@ -228,37 +246,48 @@ class ArvadosContainer(object):
         if self.output_ttl < 0:
             raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
 
+        if self.timelimit is not None:
+            scheduling_parameters["max_run_time"] = self.timelimit
+
+        container_request["output_name"] = "Output for step %s" % (self.name)
         container_request["output_ttl"] = self.output_ttl
         container_request["mounts"] = mounts
         container_request["secret_mounts"] = secret_mounts
         container_request["runtime_constraints"] = runtime_constraints
         container_request["scheduling_parameters"] = scheduling_parameters
 
-        enable_reuse = kwargs.get("enable_reuse", True)
+        enable_reuse = runtimeContext.enable_reuse
         if enable_reuse:
-            reuse_req, _ = get_feature(self, "http://arvados.org/cwl#ReuseRequirement")
+            reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
             if reuse_req:
                 enable_reuse = reuse_req["enableReuse"]
         container_request["use_existing"] = enable_reuse
 
-        if kwargs.get("runnerjob", "").startswith("arvwf:"):
-            wfuuid = kwargs["runnerjob"][6:kwargs["runnerjob"].index("#")]
+        if runtimeContext.runnerjob.startswith("arvwf:"):
+            wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
             wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
             if container_request["name"] == "main":
                 container_request["name"] = wfrecord["name"]
             container_request["properties"]["template_uuid"] = wfuuid
 
+        self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+
         try:
-            response = self.arvrunner.api.container_requests().create(
-                body=container_request
-            ).execute(num_retries=self.arvrunner.num_retries)
+            if runtimeContext.submit_request_uuid:
+                response = self.arvrunner.api.container_requests().update(
+                    uuid=runtimeContext.submit_request_uuid,
+                    body=container_request
+                ).execute(num_retries=self.arvrunner.num_retries)
+            else:
+                response = self.arvrunner.api.container_requests().create(
+                    body=container_request
+                ).execute(num_retries=self.arvrunner.num_retries)
 
             self.uuid = response["uuid"]
-            self.arvrunner.processes[self.uuid] = self
+            self.arvrunner.process_submitted(self)
 
             if response["state"] == "Final":
                 logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
-                self.done(response)
             else:
                 logger.info("%s %s state is %s", self.arvrunner.label(self), response["uuid"], response["state"])
         except Exception as e:
@@ -291,7 +320,7 @@ class ArvadosContainer(object):
                                                            api_client=self.arvrunner.api,
                                                            keep_client=self.arvrunner.keep_client,
                                                            num_retries=self.arvrunner.num_retries)
-                done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self))
+                done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
 
             if record["output_uuid"]:
                 if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:
@@ -315,14 +344,12 @@ class ArvadosContainer(object):
             processStatus = "permanentFail"
         finally:
             self.output_callback(outputs, processStatus)
-            if record["uuid"] in self.arvrunner.processes:
-                del self.arvrunner.processes[record["uuid"]]
 
 
 class RunnerContainer(Runner):
     """Submit and manage a container that runs arvados-cwl-runner."""
 
-    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
+    def arvados_job_spec(self, runtimeContext):
         """Create an Arvados container request for this workflow.
 
         The returned dict can be used to create a container passed as
@@ -344,7 +371,6 @@ class RunnerContainer(Runner):
                 self.job_order[param] = {"$include": mnt}
 
         container_req = {
-            "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
             "output_path": "/var/spool/cwl",
             "cwd": "/var/spool/cwl",
@@ -367,7 +393,7 @@ class RunnerContainer(Runner):
             },
             "secret_mounts": secret_mounts,
             "runtime_constraints": {
-                "vcpus": 1,
+                "vcpus": self.submit_runner_cores,
                 "ram": 1024*1024 * self.submit_runner_ram,
                 "API": True
             },
@@ -399,7 +425,18 @@ class RunnerContainer(Runner):
         # --api=containers means use the containers API
         # --no-log-timestamps means don't add timestamps (the logging infrastructure does this)
         # --disable-validate because we already validated so don't need to do it again
-        command = ["arvados-cwl-runner", "--local", "--api=containers", "--no-log-timestamps", "--disable-validate"]
+        # --eval-timeout is the timeout for javascript invocation
+        # --thread-count is the number of threads to use for job submission and output collection
+        # --enable/disable-reuse sets desired job reuse
+        command = ["arvados-cwl-runner",
+                   "--local",
+                   "--api=containers",
+                   "--no-log-timestamps",
+                   "--disable-validate",
+                   "--eval-timeout=%s" % self.arvrunner.eval_timeout,
+                   "--thread-count=%s" % self.arvrunner.thread_count,
+                   "--enable-reuse" if self.enable_reuse else "--disable-reuse"]
+
         if self.output_name:
             command.append("--output-name=" + self.output_name)
             container_req["output_name"] = self.output_name
@@ -407,13 +444,11 @@ class RunnerContainer(Runner):
         if self.output_tags:
             command.append("--output-tags=" + self.output_tags)
 
-        if kwargs.get("debug"):
+        if runtimeContext.debug:
             command.append("--debug")
 
-        if self.enable_reuse:
-            command.append("--enable-reuse")
-        else:
-            command.append("--disable-reuse")
+        if runtimeContext.storage_classes != "default":
+            command.append("--storage-classes=" + runtimeContext.storage_classes)
 
         if self.on_error:
             command.append("--on-error=" + self.on_error)
@@ -427,8 +462,6 @@ class RunnerContainer(Runner):
         if self.arvrunner.project_uuid:
             command.append("--project-uuid="+self.arvrunner.project_uuid)
 
-        command.append("--eval-timeout=%s" % self.arvrunner.eval_timeout)
-
         command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
 
         container_req["command"] = command
@@ -436,23 +469,27 @@ class RunnerContainer(Runner):
         return container_req
 
 
-    def run(self, *args, **kwargs):
-        kwargs["keepprefix"] = "keep:"
-        job_spec = self.arvados_job_spec(*args, **kwargs)
-        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+    def run(self, runtimeContext):
+        runtimeContext.keepprefix = "keep:"
+        job_spec = self.arvados_job_spec(runtimeContext)
+        if self.arvrunner.project_uuid:
+            job_spec["owner_uuid"] = self.arvrunner.project_uuid
 
-        response = self.arvrunner.api.container_requests().create(
-            body=job_spec
-        ).execute(num_retries=self.arvrunner.num_retries)
+        if runtimeContext.submit_request_uuid:
+            response = self.arvrunner.api.container_requests().update(
+                uuid=runtimeContext.submit_request_uuid,
+                body=job_spec
+            ).execute(num_retries=self.arvrunner.num_retries)
+        else:
+            response = self.arvrunner.api.container_requests().create(
+                body=job_spec
+            ).execute(num_retries=self.arvrunner.num_retries)
 
         self.uuid = response["uuid"]
-        self.arvrunner.processes[self.uuid] = self
+        self.arvrunner.process_submitted(self)
 
         logger.info("%s submitted container %s", self.arvrunner.label(self), response["uuid"])
 
-        if response["state"] == "Final":
-            self.done(response)
-
     def done(self, record):
         try:
             container = self.arvrunner.api.containers().get(
@@ -463,6 +500,3 @@ class RunnerContainer(Runner):
             self.arvrunner.output_callback({}, "permanentFail")
         else:
             super(RunnerContainer, self).done(container)
-        finally:
-            if record["uuid"] in self.arvrunner.processes:
-                del self.arvrunner.processes[record["uuid"]]
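
The get_intermediate_collection_info() calls above come from the new arvados_cwl.util module, which this excerpt does not include. A plausible sketch of its contract, inferred only from the keyword arguments the save_new() calls consume:

    # Sketch: the real helper lives in arvados_cwl/util.py and may name
    # or tag things differently.
    from datetime import datetime, timedelta

    def get_intermediate_collection_info(name, current_container, intermediate_output_ttl):
        info = {
            "name": "Intermediate collection for step %s" % name,
            "trash_at": None,
            "properties": {"type": "intermediate"},
        }
        if intermediate_output_ttl > 0:
            # Trash N seconds after creation (--intermediate-output-ttl).
            info["trash_at"] = datetime.utcnow() + timedelta(seconds=intermediate_output_ttl)
        if current_container:
            # Record the requesting container so cleanup can trace it.
            info["properties"]["container"] = current_container["uuid"]
        return info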
index 5483ccbf52b59f2d2133bacd76ad7eaaa775a91f..7508febb08cc8bd704d251cc0490ea045a75053b 100644 (file)
@@ -57,7 +57,8 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid
             args.append(image_tag)
             logger.info("Uploading Docker image %s:%s", image_name, image_tag)
             try:
-                arvados.commands.keepdocker.main(args, stdout=sys.stderr)
+                arvados.commands.put.api_client = api_client
+                arvados.commands.keepdocker.main(args, stdout=sys.stderr, install_sig_handlers=False, api=api_client)
             except SystemExit as e:
                 if e.code:
                     raise WorkflowException("keepdocker exited with code %s" % e.code)
index 88155b5b958df8e0b7b04a602becccd772718bc3..1287fbb6eaf7b8387ca3fe700c7c97cf0678b867 100644 (file)
@@ -8,15 +8,17 @@ import copy
 import json
 import time
 
-from cwltool.process import get_feature, shortname, UnsupportedRequirement
+from cwltool.process import shortname, UnsupportedRequirement
 from cwltool.errors import WorkflowException
 from cwltool.command_line_tool import revmap_file, CommandLineTool
 from cwltool.load_tool import fetch_document
 from cwltool.builder import Builder
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.job import JobBase
 
 from schema_salad.sourceline import SourceLine
 
+from arvados_cwl.util import get_current_container, get_intermediate_collection_info
 import ruamel.yaml as yaml
 
 import arvados.collection
@@ -36,15 +38,23 @@ crunchrunner_re = re.compile(r"^.*crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)
 
 crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'
 
-class ArvadosJob(object):
+class ArvadosJob(JobBase):
     """Submit and manage a Crunch job for executing a CWL CommandLineTool."""
 
-    def __init__(self, runner):
+    def __init__(self, runner,
+                 builder,   # type: Builder
+                 joborder,  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+                 make_path_mapper,  # type: Callable[..., PathMapper]
+                 requirements,      # type: List[Dict[Text, Text]]
+                 hints,     # type: List[Dict[Text, Text]]
+                 name       # type: Text
+    ):
+        super(ArvadosJob, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)
         self.arvrunner = runner
         self.running = False
         self.uuid = None
 
-    def run(self, dry_run=False, pull_image=True, **kwargs):
+    def run(self, runtimeContext):
         script_parameters = {
             "command": self.command_line
         }
@@ -67,7 +77,14 @@ class ArvadosJob(object):
 
                 if vwd:
                     with Perf(metrics, "generatefiles.save_new %s" % self.name):
-                        vwd.save_new()
+                        if not runtimeContext.current_container:
+                            runtimeContext.current_container = get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                        info = get_intermediate_collection_info(self.name, runtimeContext.current_container, runtimeContext.intermediate_output_ttl)
+                        vwd.save_new(name=info["name"],
+                                     owner_uuid=self.arvrunner.project_uuid,
+                                     ensure_unique_name=True,
+                                     trash_at=info["trash_at"],
+                                     properties=info["properties"])
 
                 for f, p in generatemapper.items():
                     if p.type == "File":
@@ -96,12 +113,15 @@ class ArvadosJob(object):
             script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
 
         with Perf(metrics, "arv_docker_get_image %s" % self.name):
-            (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
-            if docker_req and kwargs.get("use_container") is not False:
+            (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
+            if docker_req and runtimeContext.use_container is not False:
                 if docker_req.get("dockerOutputDirectory"):
                     raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
                         "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
-                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
+                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api,
+                                                                           docker_req,
+                                                                           runtimeContext.pull_image,
+                                                                           self.arvrunner.project_uuid)
             else:
                 runtime_constraints["docker_image"] = "arvados/jobs"
 
@@ -111,7 +131,7 @@ class ArvadosJob(object):
             runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
             runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
 
-        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+        runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
         if runtime_req:
             if "keep_cache" in runtime_req:
                 runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
@@ -128,12 +148,14 @@ class ArvadosJob(object):
         if not self.arvrunner.ignore_docker_for_reuse:
             filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
 
-        enable_reuse = kwargs.get("enable_reuse", True)
+        enable_reuse = runtimeContext.enable_reuse
         if enable_reuse:
-            reuse_req, _ = get_feature(self, "http://arvados.org/cwl#ReuseRequirement")
+            reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
             if reuse_req:
                 enable_reuse = reuse_req["enableReuse"]
 
+        self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+
         try:
             with Perf(metrics, "create %s" % self.name):
                 response = self.arvrunner.api.jobs().create(
@@ -150,7 +172,8 @@ class ArvadosJob(object):
                     find_or_create=enable_reuse
                 ).execute(num_retries=self.arvrunner.num_retries)
 
-            self.arvrunner.processes[response["uuid"]] = self
+            self.uuid = response["uuid"]
+            self.arvrunner.process_submitted(self)
 
             self.update_pipeline_component(response)
 
@@ -171,9 +194,6 @@ class ArvadosJob(object):
                         logger.info("Creating read permission on job %s: %s",
                                     response["uuid"],
                                     e)
-
-                with Perf(metrics, "done %s" % self.name):
-                    self.done(response)
             else:
                 logger.info("%s %s is %s", self.arvrunner.label(self), response["uuid"], response["state"])
         except Exception as e:
@@ -181,27 +201,28 @@ class ArvadosJob(object):
             self.output_callback({}, "permanentFail")
 
     def update_pipeline_component(self, record):
-        if self.arvrunner.pipeline:
-            self.arvrunner.pipeline["components"][self.name] = {"job": record}
-            with Perf(metrics, "update_pipeline_component %s" % self.name):
-                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
-                    uuid=self.arvrunner.pipeline["uuid"],
-                    body={
-                        "components": self.arvrunner.pipeline["components"]
-                    }).execute(num_retries=self.arvrunner.num_retries)
-        if self.arvrunner.uuid:
-            try:
-                job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
-                if job:
-                    components = job["components"]
-                    components[self.name] = record["uuid"]
-                    self.arvrunner.api.jobs().update(
-                        uuid=self.arvrunner.uuid,
+        with self.arvrunner.workflow_eval_lock:
+            if self.arvrunner.pipeline:
+                self.arvrunner.pipeline["components"][self.name] = {"job": record}
+                with Perf(metrics, "update_pipeline_component %s" % self.name):
+                    self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
+                        uuid=self.arvrunner.pipeline["uuid"],
                         body={
-                            "components": components
+                            "components": self.arvrunner.pipeline["components"]
                         }).execute(num_retries=self.arvrunner.num_retries)
-            except Exception as e:
-                logger.info("Error adding to components: %s", e)
+            if self.arvrunner.uuid:
+                try:
+                    job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
+                    if job:
+                        components = job["components"]
+                        components[self.name] = record["uuid"]
+                        self.arvrunner.api.jobs().update(
+                            uuid=self.arvrunner.uuid,
+                            body={
+                                "components": components
+                            }).execute(num_retries=self.arvrunner.num_retries)
+                except Exception as e:
+                    logger.info("Error adding to components: %s", e)
 
     def done(self, record):
         try:
@@ -242,7 +263,7 @@ class ArvadosJob(object):
                                 dirs[g.group(1)] = g.group(2)
 
                     if processStatus == "permanentFail":
-                        done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self))
+                        done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
 
                     with Perf(metrics, "output collection %s" % self.name):
                         outputs = done.done(self, record, dirs["tmpdir"],
@@ -263,13 +284,12 @@ class ArvadosJob(object):
                 processStatus = "permanentFail"
         finally:
             self.output_callback(outputs, processStatus)
-            if record["uuid"] in self.arvrunner.processes:
-                del self.arvrunner.processes[record["uuid"]]
+
 
 class RunnerJob(Runner):
     """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""
 
-    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
+    def arvados_job_spec(self, debug=False):
         """Create an Arvados job specification for this workflow.
 
         The returned dict can be used to create a job (i.e., passed as
@@ -299,7 +319,7 @@ class RunnerJob(Runner):
         if self.on_error:
             self.job_order["arv:on_error"] = self.on_error
 
-        if kwargs.get("debug"):
+        if debug:
             self.job_order["arv:debug"] = True
 
         return {
@@ -314,8 +334,8 @@ class RunnerJob(Runner):
             }
         }
 
-    def run(self, *args, **kwargs):
-        job_spec = self.arvados_job_spec(*args, **kwargs)
+    def run(self, runtimeContext):
+        job_spec = self.arvados_job_spec(runtimeContext.debug)
 
         job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
 
@@ -346,15 +366,12 @@ class RunnerJob(Runner):
             body=instance_spec).execute(num_retries=self.arvrunner.num_retries)
         logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])
 
-        if kwargs.get("wait") is False:
+        if runtimeContext.wait is False:
             self.uuid = self.arvrunner.pipeline["uuid"]
             return
 
         self.uuid = job["uuid"]
-        self.arvrunner.processes[self.uuid] = self
-
-        if job["state"] in ("Complete", "Failed", "Cancelled"):
-            self.done(job)
+        self.arvrunner.process_submitted(self)
 
 
 class RunnerTemplate(object):
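
Both runners now hand new submissions to arvrunner.process_submitted() instead of writing to arvrunner.processes directly. A minimal sketch of what that method is assumed to do, given the workflow_eval_lock conventions elsewhere in this commit:

    # Sketch: registration happens under the shared lock so poll_states()
    # and the done() callbacks see a consistent process table.
    def process_submitted(self, container):
        with self.workflow_eval_lock:
            self.processes[container.uuid] = container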
index de329796e42384a18d4f4f669103c3fcb8a982a5..119acc30392ceb9f124a6d0101c0868beeb6c1ae 100644 (file)
@@ -6,46 +6,55 @@ from cwltool.command_line_tool import CommandLineTool
 from .arvjob import ArvadosJob
 from .arvcontainer import ArvadosContainer
 from .pathmapper import ArvPathMapper
+from functools import partial
 
 class ArvadosCommandTool(CommandLineTool):
     """Wrap cwltool CommandLineTool to override selected methods."""
 
-    def __init__(self, arvrunner, toolpath_object, **kwargs):
-        super(ArvadosCommandTool, self).__init__(toolpath_object, **kwargs)
+    def __init__(self, arvrunner, toolpath_object, loadingContext):
+        super(ArvadosCommandTool, self).__init__(toolpath_object, loadingContext)
         self.arvrunner = arvrunner
-        self.work_api = kwargs["work_api"]
-
-    def makeJobRunner(self, **kwargs):
-        if self.work_api == "containers":
-            return ArvadosContainer(self.arvrunner)
-        elif self.work_api == "jobs":
-            return ArvadosJob(self.arvrunner)
-
-    def makePathMapper(self, reffiles, stagedir, **kwargs):
-        # type: (List[Any], unicode, **Any) -> PathMapper
-        if self.work_api == "containers":
-            return ArvPathMapper(self.arvrunner, reffiles, kwargs["basedir"],
+
+    def make_job_runner(self, runtimeContext):
+        if runtimeContext.work_api == "containers":
+            return partial(ArvadosContainer, self.arvrunner)
+        elif runtimeContext.work_api == "jobs":
+            return partial(ArvadosJob, self.arvrunner)
+        else:
+            raise Exception("Unsupported work_api %s", runtimeContext.work_api)
+
+    def make_path_mapper(self, reffiles, stagedir, runtimeContext, separateDirs):
+        if runtimeContext.work_api == "containers":
+            return ArvPathMapper(self.arvrunner, reffiles+runtimeContext.extra_reffiles, runtimeContext.basedir,
                                  "/keep/%s",
-                                 "/keep/%s/%s",
-                                 **kwargs)
-        elif self.work_api == "jobs":
-            return ArvPathMapper(self.arvrunner, reffiles, kwargs["basedir"],
+                                 "/keep/%s/%s")
+        elif runtimeContext.work_api == "jobs":
+            return ArvPathMapper(self.arvrunner, reffiles, runtimeContext.basedir,
                                  "$(task.keep)/%s",
-                                 "$(task.keep)/%s/%s",
-                                 **kwargs)
+                                 "$(task.keep)/%s/%s")
+
+    def job(self, joborder, output_callback, runtimeContext):
+
+        # Workaround for #13365
+        builderargs = runtimeContext.copy()
+        builderargs.toplevel = True
+        builderargs.tmp_outdir_prefix = ""
+        builder = self._init_job(joborder, builderargs)
+        joborder = builder.job
+
+        runtimeContext = runtimeContext.copy()
 
-    def job(self, joborder, output_callback, **kwargs):
-        if self.work_api == "containers":
+        if runtimeContext.work_api == "containers":
             dockerReq, is_req = self.get_requirement("DockerRequirement")
             if dockerReq and dockerReq.get("dockerOutputDirectory"):
-                kwargs["outdir"] = dockerReq.get("dockerOutputDirectory")
-                kwargs["docker_outdir"] = dockerReq.get("dockerOutputDirectory")
+                runtimeContext.outdir = dockerReq.get("dockerOutputDirectory")
+                runtimeContext.docker_outdir = dockerReq.get("dockerOutputDirectory")
             else:
-                kwargs["outdir"] = "/var/spool/cwl"
-                kwargs["docker_outdir"] = "/var/spool/cwl"
-        elif self.work_api == "jobs":
-            kwargs["outdir"] = "$(task.outdir)"
-            kwargs["docker_outdir"] = "$(task.outdir)"
-            kwargs["tmpdir"] = "$(task.tmpdir)"
-            kwargs["docker_tmpdir"] = "$(task.tmpdir)"
-        return super(ArvadosCommandTool, self).job(joborder, output_callback, **kwargs)
+                runtimeContext.outdir = "/var/spool/cwl"
+                runtimeContext.docker_outdir = "/var/spool/cwl"
+        elif runtimeContext.work_api == "jobs":
+            runtimeContext.outdir = "$(task.outdir)"
+            runtimeContext.docker_outdir = "$(task.outdir)"
+            runtimeContext.tmpdir = "$(task.tmpdir)"
+            runtimeContext.docker_tmpdir = "$(task.tmpdir)"
+        return super(ArvadosCommandTool, self).job(joborder, output_callback, runtimeContext)
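
make_job_runner() now returns a factory (a functools.partial with the runner already bound) rather than a finished job object; cwltool supplies the remaining constructor arguments. A self-contained sketch of that contract using stand-in classes:

    from functools import partial

    class FakeRunner(object):
        """Stand-in for ArvCwlRunner; illustration only."""

    class FakeJob(object):
        # Same positional contract as ArvadosContainer/ArvadosJob above.
        def __init__(self, runner, builder, joborder, make_path_mapper,
                     requirements, hints, name):
            self.runner = runner
            self.name = name

    make_runner = partial(FakeJob, FakeRunner())        # what make_job_runner returns
    job = make_runner(None, {}, None, [], [], "step1")  # cwltool fills these in
    assert job.name == "step1"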
index bdc2e274b0daa695e9b0f2cdcfe698f53f502730..ae90625102ff155cd67daa44d4ab4384aa996866 100644 (file)
@@ -15,6 +15,7 @@ from cwltool.process import shortname
 from cwltool.workflow import Workflow, WorkflowException
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
 from cwltool.builder import Builder
+from cwltool.context import LoadingContext
 
 import ruamel.yaml as yaml
 
@@ -51,13 +52,24 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
     upload_dependencies(arvRunner, name, tool.doc_loader,
                         packed, tool.tool["id"], False)
 
-    # TODO nowhere for submit_runner_ram to go.
+    if submit_runner_ram:
+        hints = main.get("hints", [])
+        found = False
+        for h in hints:
+            if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
+                h["ramMin"] = submit_runner_ram
+                found = True
+                break
+        if not found:
+            hints.append({"class": "http://arvados.org/cwl#WorkflowRunnerResources",
+                          "ramMin": submit_runner_ram})
+        main["hints"] = hints
 
     body = {
         "workflow": {
             "name": name,
             "description": tool.tool.get("doc", ""),
-            "definition":yaml.round_trip_dump(packed)
+            "definition":json.dumps(packed, sort_keys=True, indent=4, separators=(',',': '))
         }}
     if project_uuid:
         body["workflow"]["owner_uuid"] = project_uuid
@@ -109,16 +121,16 @@ def get_overall_res_req(res_reqs):
 class ArvadosWorkflow(Workflow):
     """Wrap cwltool Workflow to override selected methods."""
 
-    def __init__(self, arvrunner, toolpath_object, **kwargs):
-        super(ArvadosWorkflow, self).__init__(toolpath_object, **kwargs)
+    def __init__(self, arvrunner, toolpath_object, loadingContext):
+        super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
         self.arvrunner = arvrunner
-        self.work_api = kwargs["work_api"]
         self.wf_pdh = None
         self.dynamic_resource_req = []
         self.static_resource_req = []
+        self.wf_reffiles = []
+        self.loadingContext = loadingContext
 
-    def job(self, joborder, output_callback, **kwargs):
-        kwargs["work_api"] = self.work_api
+    def job(self, joborder, output_callback, runtimeContext):
         req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
         if req:
             with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
@@ -142,11 +154,10 @@ class ArvadosWorkflow(Workflow):
 
                     packed = pack(document_loader, workflowobj, uri, self.metadata)
 
-                    builder = Builder()
-                    builder.job = joborder
-                    builder.requirements = workflowobj["requirements"]
-                    builder.hints = workflowobj["hints"]
-                    builder.resources = {}
+                    builder = Builder(joborder,
+                                      requirements=workflowobj["requirements"],
+                                      hints=workflowobj["hints"],
+                                      resources={})
 
                     def visit(item):
                         for t in ("hints", "requirements"):
@@ -175,18 +186,22 @@ class ArvadosWorkflow(Workflow):
                         self.static_resource_req = [get_overall_res_req(self.static_resource_req)]
 
                     upload_dependencies(self.arvrunner,
-                                        kwargs.get("name", ""),
+                                        runtimeContext.name,
                                         document_loader,
                                         packed,
                                         uri,
                                         False)
 
+                    # Discover files/directories referenced by the
+                    # workflow (mainly "default" values)
+                    visit_class(packed, ("File", "Directory"), self.wf_reffiles.append)
+
+
             if self.dynamic_resource_req:
-                builder = Builder()
-                builder.job = joborder
-                builder.requirements = self.requirements
-                builder.hints = self.hints
-                builder.resources = {}
+                builder = Builder(joborder,
+                                  requirements=self.requirements,
+                                  hints=self.hints,
+                                  resources={})
 
                 # Evaluate dynamic resource requirements using current builder
                 rs = copy.copy(self.static_resource_req)
@@ -205,12 +220,18 @@ class ArvadosWorkflow(Workflow):
                 joborder_keepmount = copy.deepcopy(joborder)
 
                 reffiles = []
-                visit_class(joborder_keepmount, ("File", "Directory"), lambda x: reffiles.append(x))
+                visit_class(joborder_keepmount, ("File", "Directory"), reffiles.append)
+
+                mapper = ArvPathMapper(self.arvrunner, reffiles+self.wf_reffiles, runtimeContext.basedir,
+                                       "/keep/%s",
+                                       "/keep/%s/%s")
 
-                mapper = ArvPathMapper(self.arvrunner, reffiles, kwargs["basedir"],
-                                 "/keep/%s",
-                                 "/keep/%s/%s",
-                                 **kwargs)
+                # For containers API, we need to make sure any extra
+                # referenced files (i.e. referenced by the workflow but
+                # not in the inputs) are included in the mounts.
+                if self.wf_reffiles:
+                    runtimeContext = runtimeContext.copy()
+                    runtimeContext.extra_reffiles = copy.deepcopy(self.wf_reffiles)
 
                 def keepmount(obj):
                     remove_redundant_fields(obj)
@@ -247,14 +268,12 @@ class ArvadosWorkflow(Workflow):
                 "outputs": self.tool["outputs"],
                 "stdout": "cwl.output.json",
                 "requirements": self.requirements+job_res_reqs+[
+                    {"class": "InlineJavascriptRequirement"},
                     {
                     "class": "InitialWorkDirRequirement",
                     "listing": [{
                             "entryname": "workflow.cwl",
-                            "entry": {
-                                "class": "File",
-                                "location": "keep:%s/workflow.cwl" % self.wf_pdh
-                            }
+                            "entry": '$({"class": "File", "location": "keep:%s/workflow.cwl"})' % self.wf_pdh
                         }, {
                             "entryname": "cwl.input.yml",
                             "entry": json.dumps(joborder_keepmount, indent=2, sort_keys=True, separators=(',',': ')).replace("\\", "\\\\").replace('$(', '\$(').replace('${', '\${')
@@ -264,8 +283,6 @@ class ArvadosWorkflow(Workflow):
                 "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"],
                 "id": "#"
             })
-            kwargs["loader"] = self.doc_loader
-            kwargs["avsc_names"] = self.doc_schema
-            return ArvadosCommandTool(self.arvrunner, wf_runner, **kwargs).job(joborder_resolved, output_callback, **kwargs)
+            return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)
         else:
-            return super(ArvadosWorkflow, self).job(joborder, output_callback, **kwargs)
+            return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
diff --git a/sdk/cwl/arvados_cwl/context.py b/sdk/cwl/arvados_cwl/context.py
new file mode 100644 (file)
index 0000000..81e256e
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from cwltool.context import LoadingContext, RuntimeContext
+
+class ArvLoadingContext(LoadingContext):
+    def __init__(self, kwargs=None):
+        super(ArvLoadingContext, self).__init__(kwargs)
+
+class ArvRuntimeContext(RuntimeContext):
+    def __init__(self, kwargs=None):
+        self.work_api = None
+        self.extra_reffiles = []
+        self.priority = 500
+        self.enable_reuse = True
+        self.runnerjob = ""
+        self.submit_request_uuid = None
+        self.project_uuid = None
+        self.trash_intermediate = False
+        self.intermediate_output_ttl = 0
+        self.update_workflow = ""
+        self.create_workflow = False
+        self.submit_runner_ram = 0
+        self.ignore_docker_for_reuse = False
+        self.submit = True
+        self.submit_runner_image = None
+        self.wait = True
+        self.cwl_runner_job = None
+        self.storage_classes = "default"
+        self.current_container = None
+
+        super(ArvRuntimeContext, self).__init__(kwargs)
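
Usage note: context fields are plain attributes, and copy() (inherited from cwltool's RuntimeContext) returns an independent copy; that is how the submit path flips submit off before re-running the runner job. A short sketch, assuming cwltool's copy() semantics:

    from arvados_cwl.context import ArvRuntimeContext

    ctx = ArvRuntimeContext({"work_api": "containers"})
    submitargs = ctx.copy()          # independent copy; ctx is untouched
    submitargs.submit = False
    assert ctx.submit and not submitargs.submit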
index aaeffea24b22ef0a3dc4a2de3b35e78f4d5d8e46..9f0c91f111b0f547c2bb60f3f9c48faf0bbe0404 100644 (file)
@@ -27,6 +27,7 @@ from cwltool.process import shortname
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, normalizeFilesDirs
 from cwltool.load_tool import load_tool
 from cwltool.errors import WorkflowException
+from arvados_cwl.context import ArvRuntimeContext
 
 from .fsaccess import CollectionFetcher, CollectionFsAccess
 
@@ -97,24 +98,27 @@ def run():
             debug = job_order_object["arv:debug"]
             del job_order_object["arv:debug"]
 
-        runner = arvados_cwl.ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()),
-                                          output_name=output_name, output_tags=output_tags)
+        arvargs = argparse.Namespace()
+        arvargs.work_api = "jobs"
+        arvargs.output_name = output_name
+        arvargs.output_tags = output_tags
+        arvargs.thread_count = 1
+
+        runner = arvados_cwl.ArvCwlRunner(api_client=arvados.safeapi.ThreadSafeApiCache(
+            api_params={"model": OrderedJsonModel()}, keep_params={"num_retries": 4}),
+                                          arvargs=arvargs)
 
         make_fs_access = functools.partial(CollectionFsAccess,
                                  collection_cache=runner.collection_cache)
 
-        t = load_tool(toolpath, runner.arv_make_tool,
-                      fetcher_constructor=functools.partial(CollectionFetcher,
-                                                  api_client=runner.api,
-                                                  fs_access=make_fs_access(""),
-                                                  num_retries=runner.num_retries))
+        t = load_tool(toolpath, runner.loadingContext)
 
         if debug:
             logger.setLevel(logging.DEBUG)
             logging.getLogger('arvados').setLevel(logging.DEBUG)
             logging.getLogger("cwltool").setLevel(logging.DEBUG)
 
-        args = argparse.Namespace()
+        args = ArvRuntimeContext(vars(arvargs))
         args.project_uuid = arvados.current_job()["owner_uuid"]
         args.enable_reuse = enable_reuse
         args.on_error = on_error
@@ -131,8 +135,9 @@ def run():
         args.priority = arvados_cwl.DEFAULT_PRIORITY
         args.do_validate = True
         args.disable_js_validation = False
+        args.tmp_outdir_prefix = "tmp"
 
-        runner.arv_executor(t, job_order_object, **vars(args))
+        runner.arv_executor(t, job_order_object, args, logger=logger)
     except Exception as e:
         if isinstance(e, WorkflowException):
             logging.info("Workflow error %s", e)
index e9254c013845292b68b03be4fc867c0d0db41558..25efade2ab21900dd49dc16978fc9daac536efed 100644 (file)
@@ -55,10 +55,10 @@ def done_outputs(self, record, tmpdir, outdir, keepdir):
 crunchstat_re = re.compile(r"^\d{4}-\d\d-\d\d_\d\d:\d\d:\d\d [a-z0-9]{5}-8i9sb-[a-z0-9]{15} \d+ \d+ stderr crunchstat:")
 timestamp_re = re.compile(r"^(\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d+Z) (.*)")
 
-def logtail(logcollection, logger, header, maxlen=25):
+def logtail(logcollection, logfunc, header, maxlen=25):
     if len(logcollection) == 0:
-        logger.info(header)
-        logger.info("  ** log is empty **")
+        logfunc(header)
+        logfunc("  ** log is empty **")
         return
 
     containersapi = ("crunch-run.txt" in logcollection)
@@ -95,5 +95,5 @@ def logtail(logcollection, logger, header, maxlen=25):
         loglines = mergelogs.values()[0]
 
     logtxt = "\n  ".join(l.strip() for l in loglines)
-    logger.info(header)
-    logger.info("\n  %s", logtxt)
+    logfunc(header)
+    logfunc("\n  %s", logtxt)
index 0b577b06a2e324dbea743244da955f2661a52bea..15689a9010934cf2b8847ec08825cf30bd3e13eb 100644 (file)
@@ -22,6 +22,8 @@ import arvados.collection
 import arvados.arvfile
 import arvados.errors
 
+from googleapiclient.errors import HttpError
+
 from schema_salad.ref_resolver import DefaultFetcher
 
 logger = logging.getLogger('arvados.cwl-runner')
@@ -122,7 +124,13 @@ class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
             return super(CollectionFsAccess, self).open(self._abs(fn), mode)
 
     def exists(self, fn):
-        collection, rest = self.get_collection(fn)
+        try:
+            collection, rest = self.get_collection(fn)
+        except HttpError as err:
+            if err.resp.status == 404:
+                return False
+            else:
+                raise
         if collection is not None:
             if rest:
                 return collection.exists(rest)
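
The new guard makes exists() treat an API 404 as "no such collection" rather than an error. For reference, a sketch of how googleapiclient surfaces the status code the except clause inspects (constructing an HttpError by hand, as a test might):

    import httplib2
    from googleapiclient.errors import HttpError

    resp = httplib2.Response({"status": "404"})
    err = HttpError(resp, "collection not found")
    assert err.resp.status == 404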
diff --git a/sdk/cwl/arvados_cwl/http.py b/sdk/cwl/arvados_cwl/http.py
new file mode 100644 (file)
index 0000000..4516de0
--- /dev/null
@@ -0,0 +1,151 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import requests
+import email.utils
+import time
+import datetime
+import re
+import arvados
+import arvados.collection
+import urlparse
+import logging
+import calendar
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+def my_formatdate(dt):
+    return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
+                                  localtime=False, usegmt=True)
+
+def my_parsedate(text):
+    parsed = email.utils.parsedate_tz(text)
+    if parsed:
+        if parsed[9]:
+            # Adjust to UTC
+            return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
+        else:
+            # TZ is zero or missing, assume UTC.
+            return datetime.datetime(*parsed[:6])
+    else:
+        return datetime.datetime(1970, 1, 1)
+
+def fresh_cache(url, properties, now):
+    pr = properties[url]
+    expires = None
+
+    logger.debug("Checking cache freshness for %s using %s", url, pr)
+
+    if "Cache-Control" in pr:
+        if re.match(r"immutable", pr["Cache-Control"]):
+            return True
+
+        g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
+        if g:
+            expires = my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
+
+    if expires is None and "Expires" in pr:
+        expires = my_parsedate(pr["Expires"])
+
+    if expires is None:
+        # Use a default cache time of 24 hours if upstream didn't set
+        # any cache headers, to reduce redundant downloads.
+        expires = my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
+
+    if not expires:
+        return False
+
+    return (now < expires)
+
+def remember_headers(url, properties, headers, now):
+    properties.setdefault(url, {})
+    for h in ("Cache-Control", "ETag", "Expires", "Date", "Content-Length"):
+        if h in headers:
+            properties[url][h] = headers[h]
+    if "Date" not in headers:
+        properties[url]["Date"] = my_formatdate(now)
+
+
+def changed(url, properties, now):
+    req = requests.head(url, allow_redirects=True)
+    remember_headers(url, properties, req.headers, now)
+
+    if req.status_code != 200:
+        raise Exception("Got status %s" % req.status_code)
+
+    pr = properties[url]
+    if "ETag" in pr and "ETag" in req.headers:
+        if pr["ETag"] == req.headers["ETag"]:
+            return False
+
+    return True
+
+def http_to_keep(api, project_uuid, url, utcnow=datetime.datetime.utcnow):
+    r = api.collections().list(filters=[["properties", "exists", url]]).execute()
+
+    now = utcnow()
+
+    for item in r["items"]:
+        properties = item["properties"]
+        if fresh_cache(url, properties, now):
+            # Cached copy is still fresh; reuse the existing collection.
+            cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+            return "keep:%s/%s" % (item["portable_data_hash"], cr.keys()[0])
+
+        if not changed(url, properties, now):
+            # ETag didn't change, same content, just update headers
+            api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
+            cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+            return "keep:%s/%s" % (item["portable_data_hash"], cr.keys()[0])
+
+    properties = {}
+    req = requests.get(url, stream=True, allow_redirects=True)
+
+    if req.status_code != 200:
+        raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
+
+    remember_headers(url, properties, req.headers, now)
+
+    if "Content-Length" in properties[url]:
+        cl = int(properties[url]["Content-Length"])
+        logger.info("Downloading %s (%s bytes)", url, cl)
+    else:
+        cl = None
+        logger.info("Downloading %s (unknown size)", url)
+
+    c = arvados.collection.Collection()
+
+    if req.headers.get("Content-Disposition"):
+        grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))', req.headers["Content-Disposition"])
+        if grp.group(2):
+            name = grp.group(2)
+        else:
+            name = grp.group(4)
+    else:
+        name = urlparse.urlparse(url).path.split("/")[-1]
+
+    count = 0
+    start = time.time()
+    checkpoint = start
+    with c.open(name, "w") as f:
+        for chunk in req.iter_content(chunk_size=1024):
+            count += len(chunk)
+            f.write(chunk)
+            loopnow = time.time()
+            if (loopnow - checkpoint) > 20:
+                bps = (float(count)/float(loopnow - start))
+                if cl is not None:
+                    logger.info("%2.1f%% complete, %3.2f MiB/s, %1.0f seconds left",
+                                float(count * 100) / float(cl),
+                                bps/(1024*1024),
+                                (cl-count)/bps)
+                else:
+                    logger.info("%d downloaded, %3.2f MiB/s", count, bps/(1024*1024))
+                checkpoint = loopnow
+
+    c.save_new(name="Downloaded from %s" % url, owner_uuid=project_uuid, ensure_unique_name=True)
+
+    api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
+
+    return "keep:%s/%s" % (c.portable_data_hash(), name)
index 998890a31c50acac0513479d0fad9675fd790647..d083b78f5a061906164a5978530af9230e767473 100644 (file)
@@ -8,14 +8,18 @@ import uuid
 import os
 import urllib
 
+from arvados_cwl.util import get_current_container, get_intermediate_collection_info
 import arvados.commands.run
 import arvados.collection
 
 from schema_salad.sourceline import SourceLine
 
+from arvados.errors import ApiError
 from cwltool.pathmapper import PathMapper, MapperEnt, abspath, adjustFileObjs, adjustDirObjs
 from cwltool.workflow import WorkflowException
 
+from .http import http_to_keep
+
 logger = logging.getLogger('arvados.cwl-runner')
 
 def trim_listing(obj):
@@ -40,7 +44,7 @@ class ArvPathMapper(PathMapper):
     pdh_dirpath = re.compile(r'^keep:[0-9a-f]{32}\+\d+(/.*)?$')
 
     def __init__(self, arvrunner, referenced_files, input_basedir,
-                 collection_pattern, file_pattern, name=None, single_collection=False, **kwargs):
+                 collection_pattern, file_pattern, name=None, single_collection=False):
         self.arvrunner = arvrunner
         self.input_basedir = input_basedir
         self.collection_pattern = collection_pattern
@@ -81,6 +85,10 @@ class ArvPathMapper(PathMapper):
                     raise WorkflowException("File literal '%s' is missing `contents`" % src)
                 if srcobj["class"] == "Directory" and "listing" not in srcobj:
                     raise WorkflowException("Directory literal '%s' is missing `listing`" % src)
+            elif src.startswith("http:") or src.startswith("https:"):
+                keepref = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src)
+                logger.info("%s is %s", src, keepref)
+                self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
             else:
                 self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
 
@@ -107,6 +115,7 @@ class ArvPathMapper(PathMapper):
         elif obj["location"].startswith("_:") and "contents" in obj:
             with c.open(path + "/" + obj["basename"], "w") as f:
                 f.write(obj["contents"].encode("utf-8"))
+            remap.append((obj["location"], path + "/" + obj["basename"]))
         else:
             raise SourceLine(obj, "location", WorkflowException).makeError("Don't know what to do with '%s'" % obj["location"])
 
@@ -120,19 +129,6 @@ class ArvPathMapper(PathMapper):
                                                        keep_client=self.arvrunner.keep_client,
                                                        num_retries=self.arvrunner.num_retries)
 
-        already_uploaded = self.arvrunner.get_uploaded()
-        copied_files = set()
-        for k in referenced_files:
-            loc = k["location"]
-            if loc in already_uploaded:
-                v = already_uploaded[loc]
-                self._pathmap[loc] = MapperEnt(v.resolved, self.collection_pattern % urllib.unquote(v.resolved[5:]), v.type, True)
-                if self.single_collection:
-                    basename = k["basename"]
-                    if basename not in collection:
-                        self.addentry({"location": loc, "class": v.type, "basename": basename}, collection, ".", [])
-                        copied_files.add((loc, basename, v.type))
-
         for srcobj in referenced_files:
             self.visit(srcobj, uploadfiles)
 
@@ -143,16 +139,12 @@ class ArvPathMapper(PathMapper):
                                          fnPattern="keep:%s/%s",
                                          name=self.name,
                                          project=self.arvrunner.project_uuid,
-                                         collection=collection)
+                                         collection=collection,
+                                         packed=False)
 
         for src, ab, st in uploadfiles:
             self._pathmap[src] = MapperEnt(urllib.quote(st.fn, "/:+@"), self.collection_pattern % st.fn[5:],
                                            "Directory" if os.path.isdir(ab) else "File", True)
-            self.arvrunner.add_uploaded(src, self._pathmap[src])
-
-        for loc, basename, cls in copied_files:
-            fn = "keep:%s/%s" % (collection.portable_data_hash(), basename)
-            self._pathmap[loc] = MapperEnt(urllib.quote(fn, "/:+@"), self.collection_pattern % fn[5:], cls, True)
 
         for srcobj in referenced_files:
             remap = []
@@ -163,9 +155,14 @@ class ArvPathMapper(PathMapper):
                 for l in srcobj.get("listing", []):
                     self.addentry(l, c, ".", remap)
 
-                check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
-                if not check["items"]:
-                    c.save_new(owner_uuid=self.arvrunner.project_uuid)
+                container = get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)
+
+                c.save_new(name=info["name"],
+                           owner_uuid=self.arvrunner.project_uuid,
+                           ensure_unique_name=True,
+                           trash_at=info["trash_at"],
+                           properties=info["properties"])
 
                 ab = self.collection_pattern % c.portable_data_hash()
                 self._pathmap[srcobj["location"]] = MapperEnt("keep:"+c.portable_data_hash(), ab, "Directory", True)
@@ -177,9 +174,14 @@ class ArvPathMapper(PathMapper):
                                                   num_retries=self.arvrunner.num_retries                                                  )
                 self.addentry(srcobj, c, ".", remap)
 
-                check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
-                if not check["items"]:
-                    c.save_new(owner_uuid=self.arvrunner.project_uuid)
+                container = get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)
+
+                c.save_new(name=info["name"],
+                           owner_uuid=self.arvrunner.project_uuid,
+                           ensure_unique_name=True,
+                           trash_at=info["trash_at"],
+                           properties=info["properties"])
 
                 ab = self.file_pattern % (c.portable_data_hash(), srcobj["basename"])
                 self._pathmap[srcobj["location"]] = MapperEnt("keep:%s/%s" % (c.portable_data_hash(), srcobj["basename"]),
@@ -212,6 +214,7 @@ class ArvPathMapper(PathMapper):
         else:
             return None
 
+
 class StagingPathMapper(PathMapper):
     _follow_dirs = True
 
index 053c99502bf06e0c5829b67a4563bfd7544b8c1a..3ad1aa6a704632a945b2ed059c10f40a87cdb578 100644 (file)
@@ -7,15 +7,16 @@ import urlparse
 from functools import partial
 import logging
 import json
-import subprocess
+import subprocess32 as subprocess
+from collections import namedtuple
 
 from StringIO import StringIO
 
-from schema_salad.sourceline import SourceLine
+from schema_salad.sourceline import SourceLine, cmap
 
 from cwltool.command_line_tool import CommandLineTool
 import cwltool.workflow
-from cwltool.process import get_feature, scandeps, UnsupportedRequirement, normalizeFilesDirs, shortname
+from cwltool.process import scandeps, UnsupportedRequirement, normalizeFilesDirs, shortname
 from cwltool.load_tool import fetch_document
 from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
 from cwltool.utils import aslist
@@ -45,11 +46,13 @@ def trim_anonymous_location(obj):
     if obj.get("location", "").startswith("_:"):
         del obj["location"]
 
+
 def remove_redundant_fields(obj):
     for field in ("path", "nameext", "nameroot", "dirname"):
         if field in obj:
             del obj[field]
 
+
 def find_defaults(d, op):
     if isinstance(d, list):
         for i in d:
@@ -61,8 +64,25 @@ def find_defaults(d, op):
             for i in d.itervalues():
                 find_defaults(i, op)
 
+def setSecondary(t, fileobj, discovered):
+    if isinstance(fileobj, dict) and fileobj.get("class") == "File":
+        if "secondaryFiles" not in fileobj:
+            fileobj["secondaryFiles"] = cmap([{"location": substitute(fileobj["location"], sf), "class": "File"} for sf in t["secondaryFiles"]])
+            if discovered is not None:
+                discovered[fileobj["location"]] = fileobj["secondaryFiles"]
+    elif isinstance(fileobj, list):
+        for e in fileobj:
+            setSecondary(t, e, discovered)
+
+def discover_secondary_files(inputs, job_order, discovered=None):
+    for t in inputs:
+        if shortname(t["id"]) in job_order and t.get("secondaryFiles"):
+            setSecondary(t, job_order[shortname(t["id"])], discovered)
+
+
 def upload_dependencies(arvrunner, name, document_loader,
-                        workflowobj, uri, loadref_run, include_primary=True):
+                        workflowobj, uri, loadref_run,
+                        include_primary=True, discovered_secondaryfiles=None):
     """Upload the dependencies of the workflowobj document to Keep.
 
     Returns a pathmapper object mapping local paths to keep references.  Also
@@ -102,11 +122,18 @@ def upload_dependencies(arvrunner, name, document_loader,
         # that external references in $include and $mixin are captured.
         scanobj = loadref("", workflowobj["id"])
 
-    sc = scandeps(uri, scanobj,
+    sc_result = scandeps(uri, scanobj,
                   loadref_fields,
                   set(("$include", "$schemas", "location")),
                   loadref, urljoin=document_loader.fetcher.urljoin)
 
+    sc = []
+    def only_real(obj):
+        if obj.get("location", "").startswith("file:"):
+            sc.append(obj)
+
+    visit_class(sc_result, ("File", "Directory"), only_real)
+
     normalizeFilesDirs(sc)
 
     if include_primary and "id" in workflowobj:
@@ -116,22 +143,33 @@ def upload_dependencies(arvrunner, name, document_loader,
         for s in workflowobj["$schemas"]:
             sc.append({"class": "File", "location": s})
 
-    def capture_default(obj):
+    def visit_default(obj):
         remove = [False]
-        def add_default(f):
+        def ensure_default_location(f):
             if "location" not in f and "path" in f:
                 f["location"] = f["path"]
                 del f["path"]
             if "location" in f and not arvrunner.fs_access.exists(f["location"]):
-                # Remove from sc
+                # Doesn't exist, remove from list of dependencies to upload
                 sc[:] = [x for x in sc if x["location"] != f["location"]]
                 # Delete "default" from workflowobj
                 remove[0] = True
-        visit_class(obj["default"], ("File", "Directory"), add_default)
+        visit_class(obj["default"], ("File", "Directory"), ensure_default_location)
         if remove[0]:
             del obj["default"]
 
-    find_defaults(workflowobj, capture_default)
+    find_defaults(workflowobj, visit_default)
+
+    discovered = {}
+    def discover_default_secondary_files(obj):
+        discover_secondary_files(obj["inputs"],
+                                 {shortname(t["id"]): t["default"] for t in obj["inputs"] if "default" in t},
+                                 discovered)
+
+    visit_class(workflowobj, ("CommandLineTool", "Workflow"), discover_default_secondary_files)
+
+    for d in discovered:
+        sc.extend(discovered[d])
 
     mapper = ArvPathMapper(arvrunner, sc, "",
                            "keep:%s",
@@ -142,8 +180,13 @@ def upload_dependencies(arvrunner, name, document_loader,
     def setloc(p):
         if "location" in p and (not p["location"].startswith("_:")) and (not p["location"].startswith("keep:")):
             p["location"] = mapper.mapper(p["location"]).resolved
-    adjustFileObjs(workflowobj, setloc)
-    adjustDirObjs(workflowobj, setloc)
+
+    visit_class(workflowobj, ("File", "Directory"), setloc)
+    visit_class(discovered, ("File", "Directory"), setloc)
+
+    if discovered_secondaryfiles is not None:
+        for d in discovered:
+            discovered_secondaryfiles[mapper.mapper(d).resolved] = discovered[d]
 
     if "$schemas" in workflowobj:
         sch = []
@@ -158,7 +201,7 @@ def upload_docker(arvrunner, tool):
     """Uploads Docker images used in CommandLineTool objects."""
 
     if isinstance(tool, CommandLineTool):
-        (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
+        (docker_req, docker_is_req) = tool.get_requirement("DockerRequirement")
         if docker_req:
             if docker_req.get("dockerOutputDirectory") and arvrunner.work_api != "containers":
                 # TODO: can be supported by containers API, but not jobs API.
@@ -171,6 +214,7 @@ def upload_docker(arvrunner, tool):
         for s in tool.steps:
             upload_docker(arvrunner, s.embedded_tool)
 
+
 def packed_workflow(arvrunner, tool, merged_map):
     """Create a packed workflow.
 
@@ -180,16 +224,18 @@ def packed_workflow(arvrunner, tool, merged_map):
     packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
                   tool.tool["id"], tool.metadata, rewrite_out=rewrites)
 
-    rewrite_to_orig = {}
-    for k,v in rewrites.items():
-        rewrite_to_orig[v] = k
+    rewrite_to_orig = {v: k for k,v in rewrites.items()}
 
     def visit(v, cur_id):
         if isinstance(v, dict):
             if v.get("class") in ("CommandLineTool", "Workflow"):
+                if "id" not in v:
+                    raise SourceLine(v, None, Exception).makeError("Embedded process object is missing required 'id' field")
                 cur_id = rewrite_to_orig.get(v["id"], v["id"])
             if "location" in v and not v["location"].startswith("keep:"):
-                v["location"] = merged_map[cur_id][v["location"]]
+                v["location"] = merged_map[cur_id].resolved[v["location"]]
+            if "location" in v and v["location"] in merged_map[cur_id].secondaryFiles:
+                v["secondaryFiles"] = merged_map[cur_id].secondaryFiles[v["location"]]
             for l in v:
                 visit(v[l], cur_id)
         if isinstance(v, list):
@@ -198,6 +244,7 @@ def packed_workflow(arvrunner, tool, merged_map):
     visit(packed, None)
     return packed
 
+
 def tag_git_version(packed):
     if tool.tool["id"].startswith("file://"):
         path = os.path.dirname(tool.tool["id"][7:])
@@ -209,20 +256,6 @@ def tag_git_version(packed):
             packed["http://schema.org/version"] = githash
 
 
-def discover_secondary_files(inputs, job_order):
-    for t in inputs:
-        def setSecondary(fileobj):
-            if isinstance(fileobj, dict) and fileobj.get("class") == "File":
-                if "secondaryFiles" not in fileobj:
-                    fileobj["secondaryFiles"] = [{"location": substitute(fileobj["location"], sf), "class": "File"} for sf in t["secondaryFiles"]]
-
-            if isinstance(fileobj, list):
-                for e in fileobj:
-                    setSecondary(e)
-
-        if shortname(t["id"]) in job_order and t.get("secondaryFiles"):
-            setSecondary(job_order[shortname(t["id"])])
-
 def upload_job_order(arvrunner, name, tool, job_order):
     """Upload local files referenced in the input object and return updated input
     object with 'location' updated to the proper keep references.
@@ -247,6 +280,8 @@ def upload_job_order(arvrunner, name, tool, job_order):
 
     return job_order
 
+FileUpdates = namedtuple("FileUpdates", ["resolved", "secondaryFiles"])
+
 def upload_workflow_deps(arvrunner, tool):
     # Ensure that Docker images needed by this workflow are available
 
@@ -258,18 +293,20 @@ def upload_workflow_deps(arvrunner, tool):
 
     def upload_tool_deps(deptool):
         if "id" in deptool:
+            discovered_secondaryfiles = {}
             pm = upload_dependencies(arvrunner,
-                                "%s dependencies" % (shortname(deptool["id"])),
-                                document_loader,
-                                deptool,
-                                deptool["id"],
-                                False,
-                                include_primary=False)
+                                     "%s dependencies" % (shortname(deptool["id"])),
+                                     document_loader,
+                                     deptool,
+                                     deptool["id"],
+                                     False,
+                                     include_primary=False,
+                                     discovered_secondaryfiles=discovered_secondaryfiles)
             document_loader.idx[deptool["id"]] = deptool
             toolmap = {}
             for k,v in pm.items():
                 toolmap[k] = v.resolved
-            merged_map[deptool["id"]] = toolmap
+            merged_map[deptool["id"]] = FileUpdates(toolmap, discovered_secondaryfiles)
 
     tool.visit(upload_tool_deps)
 
@@ -316,8 +353,8 @@ class Runner(object):
     def __init__(self, runner, tool, job_order, enable_reuse,
                  output_name, output_tags, submit_runner_ram=0,
                  name=None, on_error=None, submit_runner_image=None,
-                 intermediate_output_ttl=0, merged_map=None, priority=None,
-                 secret_store=None):
+                 intermediate_output_ttl=0, merged_map=None,
+                 priority=None, secret_store=None):
         self.arvrunner = runner
         self.tool = tool
         self.job_order = job_order
@@ -325,7 +362,7 @@ class Runner(object):
         if enable_reuse:
             # If reuse is permitted by command line arguments but
             # disabled by the workflow itself, disable it.
-            reuse_req, _ = get_feature(self.tool, "http://arvados.org/cwl#ReuseRequirement")
+            reuse_req, _ = self.tool.get_requirement("http://arvados.org/cwl#ReuseRequirement")
             if reuse_req:
                 enable_reuse = reuse_req["enableReuse"]
         self.enable_reuse = enable_reuse
@@ -340,13 +377,25 @@ class Runner(object):
         self.priority = priority
         self.secret_store = secret_store
 
+        self.submit_runner_cores = 1
+        self.submit_runner_ram = 1024  # default 1 GiB
+
+        runner_resource_req, _ = self.tool.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+        if runner_resource_req:
+            if runner_resource_req.get("coresMin"):
+                self.submit_runner_cores = runner_resource_req["coresMin"]
+            if runner_resource_req.get("ramMin"):
+                self.submit_runner_ram = runner_resource_req["ramMin"]
+
         if submit_runner_ram:
+            # Command line / initializer overrides default and/or spec from workflow
             self.submit_runner_ram = submit_runner_ram
-        else:
-            self.submit_runner_ram = 3000
 
         if self.submit_runner_ram <= 0:
-            raise Exception("Value of --submit-runner-ram must be greater than zero")
+            raise Exception("Value of submit-runner-ram must be greater than zero")
+
+        if self.submit_runner_cores <= 0:
+            raise Exception("Value of submit-runner-cores must be greater than zero")
 
         self.merged_map = merged_map or {}
 
@@ -377,7 +426,7 @@ class Runner(object):
                                                            api_client=self.arvrunner.api,
                                                            keep_client=self.arvrunner.keep_client,
                                                            num_retries=self.arvrunner.num_retries)
-                done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self), maxlen=40)
+                done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
 
             self.final_output = record["output"]
             outc = arvados.collection.CollectionReader(self.final_output,
@@ -399,6 +448,3 @@ class Runner(object):
             self.arvrunner.output_callback({}, "permanentFail")
         else:
             self.arvrunner.output_callback(outputs, processStatus)
-        finally:
-            if record["uuid"] in self.arvrunner.processes:
-                del self.arvrunner.processes[record["uuid"]]
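Runner.__init__ now seeds submit_runner_cores/submit_runner_ram from built-in defaults, lets a workflow-level arv:WorkflowRunnerResources hint raise them, and still lets an explicit command-line value win. The precedence in isolation, as a small sketch:

    # Built-in defaults, overridden by the workflow hint, overridden in
    # turn by an explicit command-line value.
    def resolve_runner_resources(resource_req, submit_runner_ram_arg=0):
        cores, ram = 1, 1024  # defaults: 1 core, 1 GiB
        if resource_req:
            cores = resource_req.get("coresMin", cores)
            ram = resource_req.get("ramMin", ram)
        if submit_runner_ram_arg:
            ram = submit_runner_ram_arg
        return cores, ram

    print(resolve_runner_resources({"ramMin": 2048}))        # (1, 2048)
    print(resolve_runner_resources({"ramMin": 2048}, 4096))  # (1, 4096)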
diff --git a/sdk/cwl/arvados_cwl/task_queue.py b/sdk/cwl/arvados_cwl/task_queue.py
new file mode 100644 (file)
index 0000000..b9fd098
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import Queue
+import threading
+import logging
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+class TaskQueue(object):
+    def __init__(self, lock, thread_count):
+        self.thread_count = thread_count
+        self.task_queue = Queue.Queue()
+        self.task_queue_threads = []
+        self.lock = lock
+        self.in_flight = 0
+        self.error = None
+
+        for r in xrange(0, self.thread_count):
+            t = threading.Thread(target=self.task_queue_func)
+            self.task_queue_threads.append(t)
+            t.start()
+
+    def task_queue_func(self):
+        while True:
+            task = self.task_queue.get()
+            if task is None:
+                return
+            try:
+                task()
+            except Exception as e:
+                logger.exception("Unhandled exception running task")
+                self.error = e
+
+            with self.lock:
+                self.in_flight -= 1
+
+    def add(self, task):
+        with self.lock:
+            if self.thread_count > 1:
+                self.in_flight += 1
+                self.task_queue.put(task)
+            else:
+                task()
+
+    def drain(self):
+        try:
+            # Drain queue
+            while not self.task_queue.empty():
+                self.task_queue.get(True, .1)
+        except Queue.Empty:
+            pass
+
+    def join(self):
+        for t in self.task_queue_threads:
+            self.task_queue.put(None)
+        for t in self.task_queue_threads:
+            t.join()
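TaskQueue runs tasks inline when thread_count is 1 and fans them out to worker threads otherwise; the caller shares its own lock so the in_flight count stays consistent with the caller's state, and checks .error after the work is done. A minimal usage sketch (Python 2, matching the module's Queue/xrange usage):

    import threading
    from arvados_cwl.task_queue import TaskQueue

    results = []
    lock = threading.Lock()
    tq = TaskQueue(lock, thread_count=2)
    for i in range(5):
        tq.add(lambda i=i: results.append(i * i))
    tq.join()       # send one None sentinel per worker, then wait
    if tq.error:
        raise tq.error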
diff --git a/sdk/cwl/arvados_cwl/util.py b/sdk/cwl/arvados_cwl/util.py
new file mode 100644 (file)
index 0000000..98a2a89
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import datetime
+from arvados.errors import ApiError
+
+def get_intermediate_collection_info(workflow_step_name, current_container, intermediate_output_ttl):
+    if workflow_step_name:
+        name = "Intermediate collection for step %s" % (workflow_step_name)
+    else:
+        name = "Intermediate collection"
+    trash_time = None
+    if intermediate_output_ttl > 0:
+        trash_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=intermediate_output_ttl)
+    container_uuid = None
+    if current_container:
+        container_uuid = current_container['uuid']
+    props = {"type": "intermediate", "container": container_uuid}
+
+    return {"name" : name, "trash_at" : trash_time, "properties" : props}
+
+def get_current_container(api, num_retries=0, logger=None):
+    current_container = None
+    try:
+        current_container = api.containers().current().execute(num_retries=num_retries)
+    except ApiError as e:
+        # Status code 404 just means we're not running in a container.
+        if e.resp.status != 404 and logger:
+            logger.info("Getting current container: %s", e)
+    return current_container
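get_current_container returns None when the process is not running inside a container (the 404 case) and logs, rather than raises, other API errors. Usage sketch:

    import arvados
    from arvados_cwl.util import get_current_container

    api = arvados.api("v1")
    container = get_current_container(api, num_retries=3)
    if container:
        print("running inside container %s" % container["uuid"])
    else:
        print("not running inside an Arvados container")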
index a24d53dad6a629f9d08692bb19dd62e144655a7b..88cf1ed7caa1da04fd5a1794c616cd5a0f2039b3 100644 (file)
@@ -7,15 +7,30 @@ import time
 import os
 import re
 
+SETUP_DIR = os.path.dirname(__file__) or '.'
+
 def git_latest_tag():
     gitinfo = subprocess.check_output(
         ['git', 'describe', '--abbrev=0']).strip()
     return str(gitinfo.decode('utf-8'))
 
+def choose_version_from():
+    sdk_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
+    cwl_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', SETUP_DIR]).strip()
+    if int(sdk_ts) > int(cwl_ts):
+        getver = os.path.join(SETUP_DIR, "../python")
+    else:
+        getver = SETUP_DIR
+    return getver
+
 def git_timestamp_tag():
     gitinfo = subprocess.check_output(
         ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', '.']).strip()
+         '--format=format:%ct', choose_version_from()]).strip()
     return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
 
 def save_version(setup_dir, module, v):
@@ -34,7 +49,7 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
-        except subprocess.CalledProcessError:
+        except (subprocess.CalledProcessError, OSError):
             pass
 
     return read_version(setup_dir, module)
index 7893aa97fa2261aece6a14f49230834122c674fe..0cab074d9a8a9755f941c6a59e226d4bd9d1e5f3 100644 (file)
@@ -33,18 +33,22 @@ setup(name='arvados-cwl-runner',
       # Note that arvados/build/run-build-packages.sh looks at this
       # file to determine what version of cwltool and schema-salad to build.
       install_requires=[
-          'cwltool==1.0.20180403145700',
-          'schema-salad==2.6.20171201034858',
-          'typing==3.5.3.0',
-          'ruamel.yaml==0.13.7',
-          'arvados-python-client>=0.1.20170526013812',
+          'cwltool==1.0.20180615183820',
+          'schema-salad==2.7.20180501211602',
+          'typing >= 3.5.3',
+          'ruamel.yaml >=0.13.11, <0.15',
+          'arvados-python-client>=1.1.4.20180607143841',
           'setuptools',
-          'ciso8601 >=1.0.0, <=1.0.4',
+          'ciso8601 >=1.0.6, <2.0.0',
+          'subprocess32>=3.5.1',
       ],
       data_files=[
           ('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
       ],
       test_suite='tests',
-      tests_require=['mock>=1.0'],
+      tests_require=[
+          'mock>=1.0',
+          'subprocess32>=3.5.1',
+      ],
       zip_safe=True
       )
index ddc7ff9588c67d396b28226d344a493a7a281200..a7445449af6030e7afee4bdb524ac55afc90b8ec 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: ExpressionTool
 requirements:
index 5c5571ab264097ad6ca1cee8196b00ccf92da22e..60c765788c3fad8233b5be760335407656ca13b4 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 dir:
   class: Directory
   location: samples
\ No newline at end of file
index 8c28cc2215b5d3eac21b94ad31292595dae25f43..e4730cfc76982e142cae4a9b52369e65be135af8 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: CommandLineTool
 requirements:
index 3f1e8902cc5a00488315d13cdb81a40f90000bb1..343df0bbda5c40f945f203a37d032ac154cde75a 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 requirements:
index 6c9e7f760c05c2fbeb67ae98573128709b3b0dd0..f5e5e702285e1ee545048a8b0f2ea54a61f645cf 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 {
    "cwlVersion": "v1.0",
       "arguments": [
index d3c1e90637d5419320b0115b386780d6321d9975..4869e3e524153af30feb6a654e65e2cac6c57f3f 100755 (executable)
@@ -9,4 +9,7 @@ fi
 if ! arv-get f225e6259bdd63bc7240599648dde9f1+97 > /dev/null ; then
     arv-put --portable-data-hash hg19/*
 fi
+if ! arv-get 4d8a70b1e63b2aad6984e40e338e2373+69 > /dev/null ; then
+    arv-put --portable-data-hash secondaryFiles/hello.txt*
+fi
 exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
index 87db44b094f9c234280c7c7e37bc5be5e9d5d313..8eac71886cbf643ca97db1e033b9ba2808b40137 100644 (file)
   }
   tool: wf/secret_wf.cwl
   doc: "Test secret input parameters"
+  tags: [ secrets ]
 
 - job: null
   output:
     out: null
   tool: wf/runin-reqs-wf4.cwl
   doc: "RunInSingleContainer discovers static resource request in subworkflow steps"
+
+- job: secondaryFiles/inp3.yml
+  output: {}
+  tool: secondaryFiles/example1.cwl
+  doc: Discover secondaryFiles at runtime if they are in keep
+
+- job: null
+  output: {}
+  tool: secondaryFiles/example3.cwl
+  doc: Discover secondaryFiles on default values
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf1.cwl
+  doc: "Can have separate default parameters including directory and file inside same directory"
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf2.cwl
+  doc: "Can have a parameter default value that is a directory literal with a file literal"
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf3.cwl
+  doc: "Do not accept a directory literal without a basename"
+  should_fail: true
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf4.cwl
+  doc: default in embedded subworkflow missing 'id' field
+  should_fail: true
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf5.cwl
+  doc: default in embedded subworkflow
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf6.cwl
+  doc: default in RunInSingleContainer step
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf7.cwl
+  doc: workflow level default in RunInSingleContainer
index 55944de21bb0e593e2acce192f36b725f3994216..9bf1c20aabc6591a4b1d00282e9c871456fca219 100644 (file)
@@ -2,48 +2,84 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
-$graph:
-- class: Workflow
-  inputs: []
-  outputs: []
-  steps:
-  - in: []
-    out: []
-    run: '#step1.cwl'
-    id: '#main/step1'
-  - in: []
-    out: []
-    run: '#step2.cwl'
-    id: '#main/step2'
-  id: '#main'
-- class: CommandLineTool
-  inputs:
-  - type: File
-    default:
-      class: File
-      location: keep:b9fca8bf06b170b8507b80b2564ee72b+57/a.txt
-    id: '#step1.cwl/a'
-  - type: File
-    default:
-      class: File
-      location: keep:b9fca8bf06b170b8507b80b2564ee72b+57/b.txt
-    id: '#step1.cwl/b'
-  outputs: []
-  arguments: [echo, $(inputs.a), $(inputs.b)]
-  id: '#step1.cwl'
-- class: CommandLineTool
-  inputs:
-  - type: File
-    default:
-      class: File
-      location: keep:8e2d09a066d96cdffdd2be41579e4e2e+57/b.txt
-    id: '#step2.cwl/b'
-  - type: File
-    default:
-      class: File
-      location: keep:8e2d09a066d96cdffdd2be41579e4e2e+57/c.txt
-    id: '#step2.cwl/c'
-  outputs: []
-  arguments: [echo, $(inputs.c), $(inputs.b)]
-  id: '#step2.cwl'
+{
+    "$graph": [
+        {
+            "class": "Workflow",
+            "id": "#main",
+            "inputs": [],
+            "outputs": [],
+            "steps": [
+                {
+                    "id": "#main/step1",
+                    "in": [],
+                    "out": [],
+                    "run": "#step1.cwl"
+                },
+                {
+                    "id": "#main/step2",
+                    "in": [],
+                    "out": [],
+                    "run": "#step2.cwl"
+                }
+            ]
+        },
+        {
+            "arguments": [
+                "echo",
+                "$(inputs.a)",
+                "$(inputs.b)"
+            ],
+            "class": "CommandLineTool",
+            "id": "#step1.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:b9fca8bf06b170b8507b80b2564ee72b+57/a.txt"
+                    },
+                    "id": "#step1.cwl/a",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:b9fca8bf06b170b8507b80b2564ee72b+57/b.txt"
+                    },
+                    "id": "#step1.cwl/b",
+                    "type": "File"
+                }
+            ],
+            "outputs": []
+        },
+        {
+            "arguments": [
+                "echo",
+                "$(inputs.c)",
+                "$(inputs.b)"
+            ],
+            "class": "CommandLineTool",
+            "id": "#step2.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:8e2d09a066d96cdffdd2be41579e4e2e+57/b.txt"
+                    },
+                    "id": "#step2.cwl/b",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:8e2d09a066d96cdffdd2be41579e4e2e+57/c.txt"
+                    },
+                    "id": "#step2.cwl/c",
+                    "type": "File"
+                }
+            ],
+            "outputs": []
+        }
+    ],
+    "cwlVersion": "v1.0"
+}
\ No newline at end of file
diff --git a/sdk/cwl/tests/makes_intermediates/echo.cwl b/sdk/cwl/tests/makes_intermediates/echo.cwl
new file mode 100644 (file)
index 0000000..5449bc3
--- /dev/null
@@ -0,0 +1,14 @@
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - $(inputs.inp1)
+      - $(inputs.inp2)
+      - $(inputs.inp3)
+inputs:
+  inp1: File
+  inp2: [File, Directory]
+  inp3: Directory
+outputs: []
+arguments: [echo, $(inputs.inp1), $(inputs.inp2), $(inputs.inp3)]
diff --git a/sdk/cwl/tests/makes_intermediates/hello1.txt b/sdk/cwl/tests/makes_intermediates/hello1.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/makes_intermediates/run_in_single.cwl b/sdk/cwl/tests/makes_intermediates/run_in_single.cwl
new file mode 100644 (file)
index 0000000..bb596b2
--- /dev/null
@@ -0,0 +1,38 @@
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+inputs:
+  inp1:
+    type: File
+    default:
+      class: File
+      location: hello1.txt
+  inp2:
+    type: [File, Directory]
+    default:
+      class: File
+      basename: "hello2.txt"
+      contents: "Hello world"
+  inp3:
+    type: [File, Directory]
+    default:
+      class: Directory
+      basename: inp3
+      listing:
+        - class: File
+          basename: "hello3.txt"
+          contents: "hello world"
+outputs: []
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in:
+      inp1: inp1
+      inp2: inp2
+      inp3: inp3
+    out: []
+    run: subwf.cwl
diff --git a/sdk/cwl/tests/makes_intermediates/subwf.cwl b/sdk/cwl/tests/makes_intermediates/subwf.cwl
new file mode 100644 (file)
index 0000000..1852ab4
--- /dev/null
@@ -0,0 +1,15 @@
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp1: File
+  inp2: File
+  inp3: Directory
+outputs: []
+steps:
+  step1:
+    in:
+      inp1: inp1
+      inp2: inp2
+      inp3: inp3
+    out: []
+    run: echo.cwl
index b37990aa9d78fb5f2efbbf93f9996dc013aa9cb0..6c49757bbe630625dcc84e01589bd917338d83e7 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: CommandLineTool
 inputs:
index 5d2c699022cfc4262b6f55e55551e726a7bc8590..19e4077e8a1e0965d331db2b958d6690e6c6e2e1 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 $namespaces:
index 8b9dd83031aa530340026b8be623db52442bf421..7eb6bcee6de4e01615be0699901dd539f8ccafe2 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 i:
   class: File
   location: keep:f225e6259bdd63bc7240599648dde9f1+97/hg19.fa
index 248aefd2c6ef9ad482f22be3f5c771a73b1e45b7..5539562070ff2c226b8188f66df8a452af7f59a8 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 $namespaces:
diff --git a/sdk/cwl/tests/secondaryFiles/example1.cwl b/sdk/cwl/tests/secondaryFiles/example1.cwl
new file mode 100644 (file)
index 0000000..20847d4
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  toplevel_input: File
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
diff --git a/sdk/cwl/tests/secondaryFiles/example3.cwl b/sdk/cwl/tests/secondaryFiles/example3.cwl
new file mode 100644 (file)
index 0000000..29f58f0
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    default:
+      class: File
+      location: hello.txt
+outputs: []
+baseCommand: echo
diff --git a/sdk/cwl/tests/secondaryFiles/hello.txt b/sdk/cwl/tests/secondaryFiles/hello.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondaryFiles/hello.txt.idx b/sdk/cwl/tests/secondaryFiles/hello.txt.idx
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondaryFiles/inp3.yml b/sdk/cwl/tests/secondaryFiles/inp3.yml
new file mode 100644 (file)
index 0000000..2e61ee3
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+toplevel_input:
+  class: File
+  location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
\ No newline at end of file
index 883d24e857342d98a04ff59ba9d3fe80978d78f2..254ed91b81abaf5de60d6b90de4613f3c10d35ed 100644 (file)
@@ -1 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 pw: blorp
index 522946a4f49ee2acd68588c6100d45bcb097cbe8..ae234414a3df90888cfbe9028c06aa5efbba9f55 100644 (file)
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import arvados_cwl
+import arvados_cwl.context
 from arvados_cwl.arvdocker import arv_docker_clear_cache
 import logging
 import mock
@@ -20,9 +21,30 @@ if not os.getenv('ARVADOS_DEBUG'):
     logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
     logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
 
-
 class TestContainer(unittest.TestCase):
 
+    def helper(self, runner, enable_reuse=True):
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": Loader({}),
+             "metadata": {"cwlVersion": "v1.0"}})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "containers",
+             "basedir": "",
+             "name": "test_run_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "tmpdir": "/tmp",
+             "enable_reuse": enable_reuse,
+             "priority": 500})
+
+        return loadingContext, runtimeContext
+
     # The test passes no builder.resources
     # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
     @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
@@ -40,8 +62,6 @@ class TestContainer(unittest.TestCase):
             runner.api.collections().get().execute.return_value = {
                 "portable_data_hash": "99999999999999999999999999999993+99"}
 
-            document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
-
             tool = cmap({
                 "inputs": [],
                 "outputs": [],
@@ -50,14 +70,14 @@ class TestContainer(unittest.TestCase):
                 "id": "#",
                 "class": "CommandLineTool"
             })
-            make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
-                                                     basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+
+            loadingContext, runtimeContext = self.helper(runner, enable_reuse)
+
+            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
             arvtool.formatgraph = None
-            for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_"+str(enable_reuse),
-                                 make_fs_access=make_fs_access, tmpdir="/tmp"):
-                j.run(enable_reuse=enable_reuse, priority=500)
+
+            for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+                j.run(runtimeContext)
                 runner.api.container_requests().create.assert_called_with(
                     body=JsonDiffMatcher({
                         'environment': {
@@ -79,6 +99,7 @@ class TestContainer(unittest.TestCase):
                                                "capacity": 1073741824 }
                         },
                         'state': 'Committed',
+                        'output_name': 'Output for step test_run_'+str(enable_reuse),
                         'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
                         'output_path': '/var/spool/cwl',
                         'output_ttl': 0,
@@ -101,8 +122,6 @@ class TestContainer(unittest.TestCase):
         runner.intermediate_output_ttl = 3600
         runner.secret_store = cwltool.secrets.SecretStore()
 
-        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
-
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
             "portable_data_hash": "99999999999999999999999999999993+99"}
@@ -135,15 +154,14 @@ class TestContainer(unittest.TestCase):
             "id": "#",
             "class": "CommandLineTool"
         })
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers",
-                                                 avsc_names=avsc_names, make_fs_access=make_fs_access,
-                                                 loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_resource_requirements"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_resource_requirements",
-                             make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(enable_reuse=True, priority=500)
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
 
         call_args, call_kwargs = runner.api.container_requests().create.call_args
 
@@ -168,6 +186,7 @@ class TestContainer(unittest.TestCase):
                                    "capacity": 5242880000 }
             },
             'state': 'Committed',
+            'output_name': 'Output for step test_resource_requirements',
             'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
             'output_path': '/var/spool/cwl',
             'output_ttl': 7200,
@@ -199,8 +218,6 @@ class TestContainer(unittest.TestCase):
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
 
-        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
-
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
             "portable_data_hash": "99999999999999999999999999999993+99"}
@@ -247,15 +264,14 @@ class TestContainer(unittest.TestCase):
             "id": "#",
             "class": "CommandLineTool"
         })
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers",
-                                                 avsc_names=avsc_names, make_fs_access=make_fs_access,
-                                                 loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_initial_work_dir"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_initial_work_dir",
-                             make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(priority=500)
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
 
         call_args, call_kwargs = runner.api.container_requests().create.call_args
 
@@ -303,6 +319,7 @@ class TestContainer(unittest.TestCase):
                 }
             },
             'state': 'Committed',
+            'output_name': 'Output for step test_initial_work_dir',
             'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
             'output_path': '/var/spool/cwl',
             'output_ttl': 0,
@@ -349,14 +366,14 @@ class TestContainer(unittest.TestCase):
             "id": "#",
             "class": "CommandLineTool"
         })
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
-                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_run_redirect"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_redirect",
-                             make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(priority=500)
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
             runner.api.container_requests().create.assert_called_with(
                 body=JsonDiffMatcher({
                     'environment': {
@@ -390,6 +407,7 @@ class TestContainer(unittest.TestCase):
                         },
                     },
                     'state': 'Committed',
+                    "output_name": "Output for step test_run_redirect",
                     'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
                     'output_path': '/var/spool/cwl',
                     'output_ttl': 0,
@@ -419,9 +437,13 @@ class TestContainer(unittest.TestCase):
 
         col().open.return_value = []
 
-        arvjob = arvados_cwl.ArvadosContainer(runner)
-        arvjob.name = "testjob"
-        arvjob.builder = mock.MagicMock()
+        arvjob = arvados_cwl.ArvadosContainer(runner,
+                                              mock.MagicMock(),
+                                              {},
+                                              None,
+                                              [],
+                                              [],
+                                              "testjob")
         arvjob.output_callback = mock.MagicMock()
         arvjob.collect_outputs = mock.MagicMock()
         arvjob.successCodes = [0]
@@ -474,10 +496,11 @@ class TestContainer(unittest.TestCase):
             "id": "#",
             "class": "CommandLineTool"
         })
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                     collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
-                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_run_mounts"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
         job_order = {
             "p1": {
@@ -495,9 +518,8 @@ class TestContainer(unittest.TestCase):
                 ]
             }
         }
-        for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_run_mounts",
-                             make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(priority=500)
+        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
             runner.api.container_requests().create.assert_called_with(
                 body=JsonDiffMatcher({
                     'environment': {
@@ -522,6 +544,7 @@ class TestContainer(unittest.TestCase):
                                            "capacity": 1073741824 }
                     },
                     'state': 'Committed',
+                    'output_name': 'Output for step test_run_mounts',
                     'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
                     'output_path': '/var/spool/cwl',
                     'output_ttl': 0,
@@ -581,18 +604,18 @@ class TestContainer(unittest.TestCase):
                              ]
                          }
                      ]})
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                     collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
-                                                 basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_secrets"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
 
         job_order = {"pw": "blorp"}
         runner.secret_store.store(["pw"], job_order)
 
-        for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_secrets",
-                             make_fs_access=make_fs_access, tmpdir="/tmp"):
-            j.run(enable_reuse=True, priority=500)
+        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
             runner.api.container_requests().create.assert_called_with(
                 body=JsonDiffMatcher({
                     'environment': {
@@ -614,6 +637,7 @@ class TestContainer(unittest.TestCase):
                                            "capacity": 1073741824 }
                     },
                     'state': 'Committed',
+                    'output_name': 'Output for step test_secrets',
                     'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
                     'output_path': '/var/spool/cwl',
                     'output_ttl': 0,
@@ -629,3 +653,46 @@ class TestContainer(unittest.TestCase):
                         }
                     }
                 }))
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_timelimit(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+            "id": "#",
+            "class": "CommandLineTool",
+            "hints": [
+                {
+                    "class": "http://commonwl.org/cwltool#TimeLimit",
+                    "timelimit": 42
+                }
+            ]
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_timelimit"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+
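+        # The cwltool TimeLimit hint (timelimit: 42) is expected to surface as
+        # scheduling_parameters.max_run_time on the container request.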
+        _, kwargs = runner.api.container_requests().create.call_args
+        self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))
diff --git a/sdk/cwl/tests/test_http.py b/sdk/cwl/tests/test_http.py
new file mode 100644 (file)
index 0000000..0c66c39
--- /dev/null
@@ -0,0 +1,286 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import copy
+import cStringIO
+import functools
+import hashlib
+import json
+import logging
+import mock
+import sys
+import unittest
+import datetime
+
+import arvados
+import arvados.collection
+import arvados_cwl
+import arvados_cwl.runner
+import arvados.keep
+
+from .matcher import JsonDiffMatcher, StripYAMLComments
+from .mock_discovery import get_rootDesc
+
+import arvados_cwl.http
+
+import ruamel.yaml as yaml
+
+
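+# These tests cover arvados_cwl.http.http_to_keep which, judging from the
+# cases below, mirrors an HTTP URL into a Keep collection and records the
+# response headers (Date, Expires, Cache-Control, ETag) as collection
+# properties so later calls can decide whether the cached copy is still fresh.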
+class TestHttpToKeep(unittest.TestCase):
+
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_get(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": []
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "w")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+        ])
+
+
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_expires(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 17 May 2018 00:00:00 GMT'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+
+
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_cache_control(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Cache-Control': 'max-age=172800'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+
+
+    @mock.patch("requests.get")
+    @mock.patch("requests.head")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_expired(self, collectionmock, headmock, getmock):
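+        # The cached copy is past its Expires header and the HEAD response
+        # offers no validator to match, so a fresh download into a new
+        # collection is expected.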
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz4"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999997+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}
+        req.iter_content.return_value = ["def"]
+        getmock.return_value = req
+        headmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999997+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "w")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}}}})
+        ])
+
+
+    @mock.patch("requests.get")
+    @mock.patch("requests.head")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_etag(self, collectionmock, headmock, getmock):
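+        # The HEAD response carries the same ETag as the cached copy, so no
+        # GET should be issued; only the stored headers get refreshed.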
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
+                        'ETag': '123456'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {
+            'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+            'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+            'ETag': '123456'
+        }
+        headmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+        cm.open.assert_not_called()
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {
+                          'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+                          'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+                          'ETag': '123456'
+                      }}}})
+                      ])
+
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_content_disp(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": []
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {"Content-Disposition": "attachment; filename=file1.txt"}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/download?fn=/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "w")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/download?fn=/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {"http://example.com/download?fn=/file1.txt": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+        ])
index 1dfd86b8c0f7cd6a5c51a414cbf8bc2335236e72..c110bc5d53cd4634656d93fab2937954be973d07 100644 (file)
@@ -26,6 +26,28 @@ if not os.getenv('ARVADOS_DEBUG'):
 
 class TestJob(unittest.TestCase):
 
+    def helper(self, runner, enable_reuse=True):
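+        # Shared fixture: builds the ArvLoadingContext/ArvRuntimeContext pair
+        # that replaces the keyword-argument lists previously passed to
+        # ArvadosCommandTool and job() in these tests.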
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": Loader({}),
+             "metadata": {"cwlVersion": "v1.0"},
+             "makeTool": runner.arv_make_tool})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "jobs",
+             "basedir": "",
+             "name": "test_run_job_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "enable_reuse": enable_reuse,
+             "priority": 500})
+
+        return loadingContext, runtimeContext
+
     # The test passes no builder.resources
     # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
     @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
@@ -35,7 +57,6 @@ class TestJob(unittest.TestCase):
             runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
             runner.ignore_docker_for_reuse = False
             runner.num_retries = 0
-            document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
             list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
             runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
@@ -56,13 +77,13 @@ class TestJob(unittest.TestCase):
                 "id": "#",
                 "class": "CommandLineTool"
             })
-            make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names,
-                                                     basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+
+            loadingContext, runtimeContext = self.helper(runner, enable_reuse)
+
+            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
             arvtool.formatgraph = None
-            for j in arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access):
-                j.run(enable_reuse=enable_reuse)
+            for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+                j.run(runtimeContext)
                 runner.api.jobs().create.assert_called_with(
                     body=JsonDiffMatcher({
                         'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
@@ -104,8 +125,7 @@ class TestJob(unittest.TestCase):
                     runner.api.links().create.side_effect = ApiError(
                         mock.MagicMock(return_value={'status': 403}),
                         'Permission denied')
-                    j.run(enable_reuse=enable_reuse)
-                    j.output_callback.assert_called_with({}, 'success')
+                    j.run(runtimeContext)
                 else:
                     assert not runner.api.links().create.called
 
@@ -122,9 +142,6 @@ class TestJob(unittest.TestCase):
         list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
         runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
 
-        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
-
-
         tool = {
             "inputs": [],
             "outputs": [],
@@ -148,13 +165,13 @@ class TestJob(unittest.TestCase):
             "id": "#",
             "class": "CommandLineTool"
         }
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="jobs", avsc_names=avsc_names,
-                                                 make_fs_access=make_fs_access, loader=Loader({}))
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        for j in arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access):
-            j.run(enable_reuse=True)
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
         runner.api.jobs().create.assert_called_with(
             body=JsonDiffMatcher({
                 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
@@ -204,9 +221,13 @@ class TestJob(unittest.TestCase):
                                                         {"items": []},
                                                         {"items": [{"manifest_text": "ABC"}]})
 
-        arvjob = arvados_cwl.ArvadosJob(runner)
-        arvjob.name = "testjob"
-        arvjob.builder = mock.MagicMock()
+        arvjob = arvados_cwl.ArvadosJob(runner,
+                                        mock.MagicMock(),
+                                        {},
+                                        None,
+                                        [],
+                                        [],
+                                        "testjob")
         arvjob.output_callback = mock.MagicMock()
         arvjob.collect_outputs = mock.MagicMock()
         arvjob.collect_outputs.return_value = {"out": "stuff"}
@@ -274,9 +295,13 @@ class TestJob(unittest.TestCase):
             {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2"}]},
         )
 
-        arvjob = arvados_cwl.ArvadosJob(runner)
-        arvjob.name = "testjob"
-        arvjob.builder = mock.MagicMock()
+        arvjob = arvados_cwl.ArvadosJob(runner,
+                                        mock.MagicMock(),
+                                        {},
+                                        None,
+                                        [],
+                                        [],
+                                        "testjob")
         arvjob.output_callback = mock.MagicMock()
         arvjob.collect_outputs = mock.MagicMock()
         arvjob.collect_outputs.return_value = {"out": "stuff"}
@@ -308,6 +333,34 @@ class TestJob(unittest.TestCase):
 
 
 class TestWorkflow(unittest.TestCase):
+    def helper(self, runner, enable_reuse=True):
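+        # Like TestJob.helper, but additionally wires the document loader to a
+        # CollectionFetcher so resolve_ref() works on the workflow files used
+        # below.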
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+
+        document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=make_fs_access(""))
+        document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)
+        document_loader.fetch_text = document_loader.fetcher.fetch_text
+        document_loader.check_exists = document_loader.fetcher.check_exists
+
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": document_loader,
+             "metadata": {"cwlVersion": "v1.0"},
+             "construct_tool_object": runner.arv_make_tool})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "jobs",
+             "basedir": "",
+             "name": "test_run_wf_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "enable_reuse": enable_reuse,
+             "priority": 500})
+
+        return loadingContext, runtimeContext
+
     # The test passes no builder.resources
     # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
     @mock.patch("arvados.collection.CollectionReader")
@@ -329,27 +382,20 @@ class TestWorkflow(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.num_retries = 0
-        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=api, fs_access=make_fs_access(""))
-        document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)
-        document_loader.fetch_text = document_loader.fetcher.fetch_text
-        document_loader.check_exists = document_loader.fetcher.check_exists
+        loadingContext, runtimeContext = self.helper(runner)
 
-        tool, metadata = document_loader.resolve_ref("tests/wf/scatter2.cwl")
+        tool, metadata = loadingContext.loader.resolve_ref("tests/wf/scatter2.cwl")
         metadata["cwlVersion"] = tool["cwlVersion"]
 
         mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
 
-        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, work_api="jobs", avsc_names=avsc_names,
-                                              basedir="", make_fs_access=make_fs_access, loader=document_loader,
-                                              makeTool=runner.arv_make_tool, metadata=metadata)
+        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        it = arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access)
-        it.next().run()
-        it.next().run()
+        it = arvtool.job({}, mock.MagicMock(), runtimeContext)
+
+        it.next().run(runtimeContext)
+        it.next().run(runtimeContext)
 
         with open("tests/wf/scatter2_subwf.cwl") as f:
             subwf = StripYAMLComments(f.read())
@@ -415,27 +461,19 @@ class TestWorkflow(unittest.TestCase):
         runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
         runner.ignore_docker_for_reuse = False
         runner.num_retries = 0
-        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
 
-        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
-                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
-        document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=api, fs_access=make_fs_access(""))
-        document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)
-        document_loader.fetch_text = document_loader.fetcher.fetch_text
-        document_loader.check_exists = document_loader.fetcher.check_exists
+        loadingContext, runtimeContext = self.helper(runner)
 
-        tool, metadata = document_loader.resolve_ref("tests/wf/echo-wf.cwl")
+        tool, metadata = loadingContext.loader.resolve_ref("tests/wf/echo-wf.cwl")
         metadata["cwlVersion"] = tool["cwlVersion"]
 
         mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
 
-        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, work_api="jobs", avsc_names=avsc_names,
-                                              basedir="", make_fs_access=make_fs_access, loader=document_loader,
-                                              makeTool=runner.arv_make_tool, metadata=metadata)
+        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
         arvtool.formatgraph = None
-        it = arvtool.job({}, mock.MagicMock(), basedir="", make_fs_access=make_fs_access)
-        it.next().run()
-        it.next().run()
+        it = arvtool.job({}, mock.MagicMock(), runtimeContext)
+        it.next().run(runtimeContext)
+        it.next().run(runtimeContext)
 
         with open("tests/wf/echo-subwf.cwl") as f:
             subwf = StripYAMLComments(f.read())
index 806d63ab85f3f1a9a08c73f9ea76f3dc7b3ecc09..590c82d207d590784c677a5831721ce577c99554 100644 (file)
@@ -39,7 +39,7 @@ class TestMakeOutput(unittest.TestCase):
         final.open.return_value = openmock
         openmock.__enter__.return_value = cwlout
 
-        _, runner.final_output_collection = runner.make_output_collection("Test output", "tag0,tag1,tag2", {
+        _, runner.final_output_collection = runner.make_output_collection("Test output", ["foo"], "tag0,tag1,tag2", {
             "foo": {
                 "class": "File",
                 "location": "keep:99999999999999999999999999999991+99/foo.txt",
@@ -56,7 +56,7 @@ class TestMakeOutput(unittest.TestCase):
 
         final.copy.assert_has_calls([mock.call('bar.txt', 'baz.txt', overwrite=False, source_collection=readermock)])
         final.copy.assert_has_calls([mock.call('foo.txt', 'foo.txt', overwrite=False, source_collection=readermock)])
-        final.save_new.assert_has_calls([mock.call(ensure_unique_name=True, name='Test output', owner_uuid='zzzzz-j7d0g-zzzzzzzzzzzzzzz')])
+        final.save_new.assert_has_calls([mock.call(ensure_unique_name=True, name='Test output', owner_uuid='zzzzz-j7d0g-zzzzzzzzzzzzzzz', storage_classes=['foo'])])
         self.assertEqual("""{
     "bar": {
         "basename": "baz.txt",
index 9649b838726d845ebe56418934852fe080f2dfc7..eaa57114222233d6bcbd02ff2674c89f5169b168 100644 (file)
@@ -20,7 +20,7 @@ from .mock_discovery import get_rootDesc
 
 from arvados_cwl.pathmapper import ArvPathMapper
 
-def upload_mock(files, api, dry_run=False, num_retries=0, project=None, fnPattern="$(file %s/%s)", name=None, collection=None):
+def upload_mock(files, api, dry_run=False, num_retries=0, project=None, fnPattern="$(file %s/%s)", name=None, collection=None, packed=None):
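+    # The new packed argument is accepted (and ignored) here, presumably so
+    # the mock keeps matching the updated uploadfiles() call signature.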
     pdh = "99999999999999999999999999999991+99"
     for c in files:
         c.keepref = "%s/%s" % (pdh, os.path.basename(c.fn))
@@ -66,23 +66,6 @@ class TestPathmap(unittest.TestCase):
         self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},
                          p._pathmap)
 
-    @mock.patch("arvados.commands.run.uploadfiles")
-    def test_prev_uploaded(self, upl):
-        """Test pathmapper handling previously uploaded files."""
-
-        arvrunner = arvados_cwl.ArvCwlRunner(self.api)
-        arvrunner.add_uploaded('file:tests/hw.py', MapperEnt(resolved='keep:99999999999999999999999999999992+99/hw.py', target='', type='File', staged=True))
-
-        upl.side_effect = upload_mock
-
-        p = ArvPathMapper(arvrunner, [{
-            "class": "File",
-            "location": "file:tests/hw.py"
-        }], "", "/test/%s", "/test/%s/%s")
-
-        self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999992+99/hw.py', target='/test/99999999999999999999999999999992+99/hw.py', type='File', staged=True)},
-                         p._pathmap)
-
     @mock.patch("arvados.commands.run.uploadfiles")
     @mock.patch("arvados.commands.run.statfile")
     def test_statfile(self, statfile, upl):
index a7f49208bf81ab0bce2c394a54bde6ee56edb5d0..cd46251300dfb95862cb7957f510e108dd78b281 100644 (file)
@@ -168,6 +168,7 @@ def stubs(func):
                                   }
                               ]}},
                         'cwl:tool': '3fffdeaa75e018172e1b583425f4ebff+60/workflow.cwl#main',
+                        'arv:debug': True,
                         'arv:enable_reuse': True,
                         'arv:on_error': 'continue'
                     },
@@ -233,10 +234,10 @@ def stubs(func):
             },
             'secret_mounts': {},
             'state': 'Committed',
-            'owner_uuid': None,
             'command': ['arvados-cwl-runner', '--local', '--api=containers',
                         '--no-log-timestamps', '--disable-validate',
-                        '--enable-reuse', '--on-error=continue', '--eval-timeout=20',
+                        '--eval-timeout=20', '--thread-count=4',
+                        '--enable-reuse', '--debug', '--on-error=continue',
                         '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
             'name': 'submit_wf.cwl',
             'container_image': 'arvados/jobs:'+arvados_cwl.__version__,
@@ -331,6 +332,15 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(capture_stdout.getvalue(),
                          stubs.expect_pipeline_uuid + '\n')
 
+    @stubs
+    def test_error_when_multiple_storage_classes_specified(self, stubs):
+        storage_classes = "foo,bar"
+        exited = arvados_cwl.main(
+                ["--debug", "--storage-classes", storage_classes,
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                sys.stdin, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 1)
+
     @mock.patch("time.sleep")
     @stubs
     def test_submit_on_error(self, stubs, tm):
@@ -444,7 +454,7 @@ class TestSubmit(unittest.TestCase):
         project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
 
         exited = arvados_cwl.main(
-            ["--submit", "--no-wait",
+            ["--submit", "--no-wait", "--debug",
              "--project-uuid", project_uuid,
              "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
             sys.stdout, sys.stderr, api_client=stubs.api)
@@ -503,7 +513,8 @@ class TestSubmit(unittest.TestCase):
         expect_container["command"] = [
             'arvados-cwl-runner', '--local', '--api=containers',
             '--no-log-timestamps', '--disable-validate',
-            '--disable-reuse', '--on-error=continue', '--eval-timeout=20',
+            '--eval-timeout=20', '--thread-count=4',
+            '--disable-reuse', '--debug', '--on-error=continue',
             '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
         expect_container["use_existing"] = False
 
@@ -527,7 +538,8 @@ class TestSubmit(unittest.TestCase):
         expect_container["command"] = [
             'arvados-cwl-runner', '--local', '--api=containers',
             '--no-log-timestamps', '--disable-validate',
-            '--disable-reuse', '--on-error=continue', '--eval-timeout=20',
+            '--eval-timeout=20', '--thread-count=4',
+            '--disable-reuse', '--debug', '--on-error=continue',
             '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
         expect_container["use_existing"] = False
         expect_container["name"] = "submit_wf_no_reuse.cwl"
@@ -563,7 +575,8 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       '--enable-reuse', '--on-error=stop', '--eval-timeout=20',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse', '--debug', '--on-error=stop',
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -588,7 +601,9 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       "--output-name="+output_name, '--enable-reuse', '--on-error=continue', '--eval-timeout=20',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse',
+                                       "--output-name="+output_name, '--debug', '--on-error=continue',
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
         expect_container["output_name"] = output_name
 
@@ -597,6 +612,72 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(capture_stdout.getvalue(),
                          stubs.expect_container_request_uuid + '\n')
 
+    @stubs
+    def test_submit_storage_classes(self, stubs):
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=foo",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse', "--debug",
+                                       "--storage-classes=foo", '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
+    @mock.patch("arvados_cwl.task_queue.TaskQueue")
+    @mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
+    @mock.patch("arvados_cwl.ArvCwlRunner.make_output_collection", return_value = (None, None))
+    @stubs
+    def test_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
+        def set_final_output(job_order, output_callback, runtimeContext):
+            output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
+            return []
+        job.side_effect = set_final_output
+
+        try:
+            exited = arvados_cwl.main(
+                ["--debug", "--local", "--storage-classes=foo",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                sys.stdin, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        make_output.assert_called_with(u'Output of submit_wf.cwl', ['foo'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
+
+    @mock.patch("arvados_cwl.task_queue.TaskQueue")
+    @mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
+    @mock.patch("arvados_cwl.ArvCwlRunner.make_output_collection", return_value = (None, None))
+    @stubs
+    def test_default_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
+        def set_final_output(job_order, output_callback, runtimeContext):
+            output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
+            return []
+        job.side_effect = set_final_output
+
+        try:
+            exited = arvados_cwl.main(
+                ["--debug", "--local",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                sys.stdin, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        make_output.assert_called_with(u'Output of submit_wf.cwl', ['default'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
 
     @stubs
     def test_submit_container_output_ttl(self, stubs):
@@ -613,8 +694,9 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       '--enable-reuse', '--on-error=continue',
-                                       "--intermediate-output-ttl=3600", '--eval-timeout=20',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse', '--debug', '--on-error=continue',
+                                       "--intermediate-output-ttl=3600",
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -637,8 +719,9 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       '--enable-reuse', '--on-error=continue',
-                                       "--trash-intermediate", '--eval-timeout=20',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse', '--debug', '--on-error=continue',
+                                       "--trash-intermediate",
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -663,7 +746,9 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       "--output-tags="+output_tags, '--enable-reuse', '--on-error=continue', '--eval-timeout=20',
+                                       '--eval-timeout=20', '--thread-count=4',
+                                       '--enable-reuse',
+                                       "--output-tags="+output_tags, '--debug', '--on-error=continue',
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -740,13 +825,13 @@ class TestSubmit(unittest.TestCase):
                     'kind': 'json'
                 }
             }, 'state': 'Committed',
-            'owner_uuid': None,
             'output_path': '/var/spool/cwl',
             'name': 'expect_arvworkflow.cwl#main',
             'container_image': 'arvados/jobs:'+arvados_cwl.__version__,
             'command': ['arvados-cwl-runner', '--local', '--api=containers',
                         '--no-log-timestamps', '--disable-validate',
-                        '--enable-reuse', '--on-error=continue', '--eval-timeout=20',
+                        '--eval-timeout=20', '--thread-count=4',
+                        '--enable-reuse', '--debug', '--on-error=continue',
                         '/var/lib/cwl/workflow/expect_arvworkflow.cwl#main', '/var/lib/cwl/cwl.input.json'],
             'cwd': '/var/spool/cwl',
             'runtime_constraints': {
@@ -858,13 +943,13 @@ class TestSubmit(unittest.TestCase):
                     'kind': 'json'
                 }
             }, 'state': 'Committed',
-            'owner_uuid': None,
             'output_path': '/var/spool/cwl',
             'name': 'a test workflow',
             'container_image': 'arvados/jobs:'+arvados_cwl.__version__,
             'command': ['arvados-cwl-runner', '--local', '--api=containers',
                         '--no-log-timestamps', '--disable-validate',
-                        '--enable-reuse', '--on-error=continue', '--eval-timeout=20',
+                        '--eval-timeout=20', '--thread-count=4',
+                        '--enable-reuse', '--debug', '--on-error=continue',
                         '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
             'cwd': '/var/spool/cwl',
             'runtime_constraints': {
@@ -923,7 +1008,9 @@ class TestSubmit(unittest.TestCase):
         expect_container["owner_uuid"] = project_uuid
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       '--enable-reuse', '--on-error=continue', '--project-uuid='+project_uuid, '--eval-timeout=20',
+                                       "--eval-timeout=20", "--thread-count=4",
+                                       '--enable-reuse', '--debug', '--on-error=continue',
+                                       '--project-uuid='+project_uuid,
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -947,7 +1034,34 @@ class TestSubmit(unittest.TestCase):
         expect_container = copy.deepcopy(stubs.expect_container_spec)
         expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
                                        '--no-log-timestamps', '--disable-validate',
-                                       '--enable-reuse', '--on-error=continue', '--eval-timeout=60.0',
+                                       '--eval-timeout=60.0', '--thread-count=4',
+                                       '--enable-reuse', '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
+
+    @stubs
+    def test_submit_container_thread_count(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--submit", "--no-wait", "--api=containers", "--debug", "--thread-count=20",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=20',
+                                       '--enable-reuse', '--debug', '--on-error=continue',
                                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
 
         stubs.api.container_requests().create.assert_called_with(
@@ -1017,6 +1131,42 @@ class TestSubmit(unittest.TestCase):
                          stubs.expect_container_request_uuid + '\n')
 
 
+    @stubs
+    def test_submit_wf_runner_resources(self, stubs):
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--submit", "--no-wait", "--api=containers", "--debug",
+                 "tests/wf/submit_wf_runner_resources.cwl", "tests/submit_test_job.json"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["runtime_constraints"] = {
+            "API": True,
+            "vcpus": 2,
+            "ram": 2000 * 2**20
+        }
+        expect_container["name"] = "submit_wf_runner_resources.cwl"
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+            {
+                "class": "http://arvados.org/cwl#WorkflowRunnerResources",
+                "coresMin": 2,
+                "ramMin": 2000
+            }
+        ]
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+            "arv": "http://arvados.org/cwl#",
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
+
     @mock.patch("arvados.commands.keepdocker.find_one_image_hash")
     @mock.patch("cwltool.docker.DockerCommandLineJob.get_image")
     @mock.patch("arvados.api")
@@ -1069,9 +1219,11 @@ class TestSubmit(unittest.TestCase):
                 "--api=containers",
                 "--no-log-timestamps",
                 "--disable-validate",
+                "--eval-timeout=20",
+                '--thread-count=4',
                 "--enable-reuse",
+                '--debug',
                 "--on-error=continue",
-                "--eval-timeout=20",
                 "/var/lib/cwl/workflow.json#main",
                 "/var/lib/cwl/cwl.input.json"
             ],
@@ -1192,7 +1344,6 @@ class TestSubmit(unittest.TestCase):
             },
             "name": "secret_wf.cwl",
             "output_path": "/var/spool/cwl",
-            "owner_uuid": None,
             "priority": 500,
             "properties": {},
             "runtime_constraints": {
@@ -1215,6 +1366,31 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(capture_stdout.getvalue(),
                          stubs.expect_container_request_uuid + '\n')
 
+    @stubs
+    def test_submit_request_uuid(self, stubs):
+        stubs.expect_container_request_uuid = "zzzzz-xvhdp-yyyyyyyyyyyyyyy"
+
+        stubs.api.container_requests().update().execute.return_value = {
+            "uuid": stubs.expect_container_request_uuid,
+            "container_uuid": "zzzzz-dz642-zzzzzzzzzzzzzzz",
+            "state": "Queued"
+        }
+
+        capture_stdout = cStringIO.StringIO()
+        try:
+            exited = arvados_cwl.main(
+                ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-request-uuid=zzzzz-xvhdp-yyyyyyyyyyyyyyy",
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+            self.assertEqual(exited, 0)
+        except:
+            logging.exception("")
+
+        stubs.api.container_requests().update.assert_called_with(
+            uuid="zzzzz-xvhdp-yyyyyyyyyyyyyyy", body=JsonDiffMatcher(stubs.expect_container_spec))
+        self.assertEqual(capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
 
 class TestCreateTemplate(unittest.TestCase):
     existing_template_uuid = "zzzzz-d1hrv-validworkfloyml"
diff --git a/sdk/cwl/tests/test_tq.py b/sdk/cwl/tests/test_tq.py
new file mode 100644 (file)
index 0000000..2afbe0c
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+import mock
+import sys
+import unittest
+import json
+import logging
+import os
+import threading
+
+from arvados_cwl.task_queue import TaskQueue
+
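+# Behavior exercised below: TaskQueue(lock, num_threads) runs queued callables
+# on worker threads, join() waits for them to drain, and the first exception
+# raised by a task is recorded on tq.error rather than propagating.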
+def success_task():
+    pass
+
+def fail_task():
+    raise Exception("Testing error handling")
+
+class TestTaskQueue(unittest.TestCase):
+    def test_tq(self):
+        tq = TaskQueue(threading.Lock(), 2)
+
+        self.assertIsNone(tq.error)
+
+        tq.add(success_task)
+        tq.add(success_task)
+        tq.add(success_task)
+        tq.add(success_task)
+
+        tq.join()
+
+        self.assertIsNone(tq.error)
+
+
+    def test_tq_error(self):
+        tq = TaskQueue(threading.Lock(), 2)
+
+        self.assertIsNone(tq.error)
+
+        tq.add(success_task)
+        tq.add(success_task)
+        tq.add(fail_task)
+        tq.add(success_task)
+
+        tq.join()
+
+        self.assertIsNotNone(tq.error)
diff --git a/sdk/cwl/tests/test_util.py b/sdk/cwl/tests/test_util.py
new file mode 100644 (file)
index 0000000..2532bd5
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import unittest
+import mock
+import datetime
+import httplib2
+
+from arvados_cwl.util import *
+from arvados.errors import ApiError
+
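+# Freeze datetime.datetime.utcnow() so the trash_at value computed by
+# get_intermediate_collection_info is deterministic in the tests below.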
+class MockDateTime(datetime.datetime):
+    @classmethod
+    def utcnow(cls):
+        return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)
+
+datetime.datetime = MockDateTime
+
+class TestUtil(unittest.TestCase):
+    def test_get_intermediate_collection_info(self):
+        name = "one"
+        current_container = {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
+        intermediate_output_ttl = 120
+
+        info = get_intermediate_collection_info(name, current_container, intermediate_output_ttl)
+
+        self.assertEqual(info["name"], "Intermediate collection for step one")
+        self.assertEqual(info["trash_at"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))
+        self.assertEqual(info["properties"], {"type" : "intermediate", "container" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
+
+    def test_get_current_container_success(self):
+        api = mock.MagicMock()
+        api.containers().current().execute.return_value = {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
+
+        current_container = get_current_container(api)
+
+        self.assertEqual(current_container, {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
+
+    def test_get_current_container_error(self):
+        api = mock.MagicMock()
+        api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 300}), "")
+        logger = mock.MagicMock()
+
+        self.assertRaises(ApiError, get_current_container, api, num_retries=0, logger=logger)
diff --git a/sdk/cwl/tests/wf-defaults/default-dir1.cwl b/sdk/cwl/tests/wf-defaults/default-dir1.cwl
new file mode 100644 (file)
index 0000000..fdd56be
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+  inp1:
+    type: File
+    default:
+      class: File
+      location: inp1/hello.txt
+outputs: []
+arguments: [echo, $(inputs.inp1), $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir2.cwl b/sdk/cwl/tests/wf-defaults/default-dir2.cwl
new file mode 100644 (file)
index 0000000..98931ab
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      basename: inp2
+      listing:
+        - class: File
+          basename: "hello.txt"
+          contents: "hello world"
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir3.cwl b/sdk/cwl/tests/wf-defaults/default-dir3.cwl
new file mode 100644 (file)
index 0000000..3d0fe22
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      listing:
+        - class: File
+          location: "inp1/hello.txt"
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir4.cwl b/sdk/cwl/tests/wf-defaults/default-dir4.cwl
new file mode 100644 (file)
index 0000000..8bfc5d6
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run:
+      class: CommandLineTool
+      inputs:
+        inp2:
+          type: Directory
+          default:
+            class: Directory
+            location: inp1
+      outputs: []
+      arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir5.cwl b/sdk/cwl/tests/wf-defaults/default-dir5.cwl
new file mode 100644 (file)
index 0000000..2e66b10
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run:
+      id: stepid
+      class: CommandLineTool
+      inputs:
+        inp2:
+          type: Directory
+          default:
+            class: Directory
+            location: inp1
+      outputs: []
+      arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir6.cwl b/sdk/cwl/tests/wf-defaults/default-dir6.cwl
new file mode 100644 (file)
index 0000000..f779aef
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir6a.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir6a.cwl b/sdk/cwl/tests/wf-defaults/default-dir6a.cwl
new file mode 100644 (file)
index 0000000..ccc0ceb
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir7.cwl b/sdk/cwl/tests/wf-defaults/default-dir7.cwl
new file mode 100644 (file)
index 0000000..5c74ef0
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in:
+      inp2: inp2
+    out: []
+    run: default-dir7a.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir7a.cwl b/sdk/cwl/tests/wf-defaults/default-dir7a.cwl
new file mode 100644 (file)
index 0000000..4b71c13
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/inp1/hello.txt b/sdk/cwl/tests/wf-defaults/inp1/hello.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/wf-defaults/wf1.cwl b/sdk/cwl/tests/wf-defaults/wf1.cwl
new file mode 100644 (file)
index 0000000..0133c7a
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir1.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf2.cwl b/sdk/cwl/tests/wf-defaults/wf2.cwl
new file mode 100644 (file)
index 0000000..ffe8731
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir2.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf3.cwl b/sdk/cwl/tests/wf-defaults/wf3.cwl
new file mode 100644 (file)
index 0000000..0292d13
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir3.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf4.cwl b/sdk/cwl/tests/wf-defaults/wf4.cwl
new file mode 100644 (file)
index 0000000..6e562e4
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir4.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf5.cwl b/sdk/cwl/tests/wf-defaults/wf5.cwl
new file mode 100644 (file)
index 0000000..de2748c
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir5.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf6.cwl b/sdk/cwl/tests/wf-defaults/wf6.cwl
new file mode 100644 (file)
index 0000000..6bcf69e
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in: []
+    out: []
+    run: default-dir6.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf7.cwl b/sdk/cwl/tests/wf-defaults/wf7.cwl
new file mode 100644 (file)
index 0000000..715f1ef
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in: []
+    out: []
+    run: default-dir7.cwl
\ No newline at end of file
index 55b7b19430dc6a05a6ffde432ffb33a8d7aaa8c1..355872232bc7f430a8b61f7e8f8dffbe09cc5530 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 import arvados
 import sys
 import os
index 29dc3d6aea63701cdcedf8d4226127d6a2c24d99..d7c8037588c2ff64e43da8f2e59f991e974141ab 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 requirements:
index 63a543881c230941db6ee45b177ab706e057e384..5cdd80dbdbc7a145d9e712d596b1165c91520dcc 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 $namespaces:
index b7893e221104308771c334c984ea0ad303646eb6..0a734b326355361f6b8ccc4bc296384e58a57d2e 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: CommandLineTool
 requirements:
index 4db11ccdf2eb9ff26dcd5305ee528b418cb065ce..7a052f86cf36168343eab48b3c9738f5da55497e 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: CommandLineTool
 requirements:
index f45077197fef194662c206a72045b2a26ddaae24..7b3b4503efc239661f5b03b2afb0cfac3ca8cc4d 100644 (file)
@@ -2,43 +2,91 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
-$graph:
-- class: CommandLineTool
-  requirements:
-  - class: DockerRequirement
-    dockerPull: debian:8
-  inputs:
-  - id: '#submit_tool.cwl/x'
-    type: File
-    default:
-      class: File
-      location: keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt
-    inputBinding:
-      position: 1
-  outputs: []
-  baseCommand: cat
-  id: '#submit_tool.cwl'
-- class: Workflow
-  inputs:
-  - id: '#main/x'
-    type: File
-    default: {class: File, location: 'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
-      size: 16, basename: blorp.txt, nameroot: blorp, nameext: .txt}
-  - id: '#main/y'
-    type: Directory
-    default: {class: Directory, location: 'keep:99999999999999999999999999999998+99',
-      basename: 99999999999999999999999999999998+99}
-  - id: '#main/z'
-    type: Directory
-    default: {class: Directory, basename: anonymous, listing: [{basename: renamed.txt,
-          class: File, location: 'keep:99999999999999999999999999999998+99/file1.txt',
-          nameroot: renamed, nameext: .txt}]}
-  outputs: []
-  steps:
-  - id: '#main/step1'
-    in:
-    - {id: '#main/step1/x', source: '#main/x'}
-    out: []
-    run: '#submit_tool.cwl'
-  id: '#main'
+{
+    "$graph": [
+        {
+            "baseCommand": "cat",
+            "class": "CommandLineTool",
+            "id": "#submit_tool.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt"
+                    },
+                    "id": "#submit_tool.cwl/x",
+                    "inputBinding": {
+                        "position": 1
+                    },
+                    "type": "File"
+                }
+            ],
+            "outputs": [],
+            "requirements": [
+                {
+                    "class": "DockerRequirement",
+                    "dockerPull": "debian:8"
+                }
+            ]
+        },
+        {
+            "class": "Workflow",
+            "id": "#main",
+            "inputs": [
+                {
+                    "default": {
+                        "basename": "blorp.txt",
+                        "class": "File",
+                        "location": "keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt",
+                        "nameext": ".txt",
+                        "nameroot": "blorp",
+                        "size": 16
+                    },
+                    "id": "#main/x",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "basename": "99999999999999999999999999999998+99",
+                        "class": "Directory",
+                        "location": "keep:99999999999999999999999999999998+99"
+                    },
+                    "id": "#main/y",
+                    "type": "Directory"
+                },
+                {
+                    "default": {
+                        "basename": "anonymous",
+                        "class": "Directory",
+                        "listing": [
+                            {
+                                "basename": "renamed.txt",
+                                "class": "File",
+                                "location": "keep:99999999999999999999999999999998+99/file1.txt",
+                                "nameext": ".txt",
+                                "nameroot": "renamed"
+                            }
+                        ]
+                    },
+                    "id": "#main/z",
+                    "type": "Directory"
+                }
+            ],
+            "outputs": [],
+            "steps": [
+                {
+                    "id": "#main/step1",
+                    "in": [
+                        {
+                            "id": "#main/step1/x",
+                            "source": "#main/x"
+                        }
+                    ],
+                    "out": [],
+                    "run": "#submit_tool.cwl"
+                }
+            ]
+        }
+    ],
+    "cwlVersion": "v1.0"
+}
\ No newline at end of file
index 9032e26dd6787598370e1a57fa107038159e5c32..acaebb5d58b491a390b48e3064701b8a52331c31 100644 (file)
@@ -31,7 +31,7 @@ steps:
     hints:
       - class: arv:RunInSingleContainer
       - class: ResourceRequirement
-        ramMin: $(inputs.count*4)
+        ramMin: $(inputs.count*128)
       - class: arv:APIRequirement
     scatter: count
     run:
@@ -55,4 +55,4 @@ steps:
                 type: int
               script: File
             outputs: []
-            arguments: [python, $(inputs.script), $(inputs.count * 4)]
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
index cc1321aec1a68c274f8f6e6cdb0d5e530df6da36..5795759d9fcf30a33bc86f6f222d5363d34df71f 100644 (file)
@@ -41,7 +41,7 @@ steps:
       outputs: []
       hints:
         - class: ResourceRequirement
-          ramMin: $(inputs.count*4)
+          ramMin: $(inputs.count*128)
       steps:
         sleep1:
           in:
@@ -56,4 +56,4 @@ steps:
                 type: int
               script: File
             outputs: []
-            arguments: [python, $(inputs.script), $(inputs.count * 4)]
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
index 92bf482a80a37e9d351a8db960697d582229b1cb..3accb324fb339fa3a1f8993f32719fc3526203a8 100644 (file)
@@ -50,10 +50,10 @@ steps:
             id: subtool
             hints:
               - class: ResourceRequirement
-                ramMin: $(inputs.count*4)
+                ramMin: $(inputs.count*128)
             inputs:
               count:
                 type: int
               script: File
             outputs: []
-            arguments: [python, $(inputs.script), $(inputs.count * 4)]
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
index b7a977998fe2f1a9ae66db2c470a843133c1648c..fc06fb30a6157a5a393764029a7d9d9d22c3c8e3 100644 (file)
@@ -50,10 +50,10 @@ steps:
             id: subtool
             hints:
               - class: ResourceRequirement
-                ramMin: 8
+                ramMin: 128
             inputs:
               count:
                 type: int
               script: File
             outputs: []
-            arguments: [python, $(inputs.script), "8"]
+            arguments: [python, $(inputs.script), "128"]
index 0ddeb645022374effe499c1f164b7c61834effa0..2be74b2658b1b67ebdb231e22c6204e53aabc96b 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: CommandLineTool
 $namespaces:
index 17c92d678e3edbe9db442ab74e6c4c23fbee8333..05d950d18c08be14ea72ea297585412136f8f198 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
 cwlVersion: v1.0
 class: Workflow
 $namespaces:
diff --git a/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl b/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl
new file mode 100644 (file)
index 0000000..9e27121
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test that dependencies (e.g. submit_tool.cwl) are uploaded to
+# Keep and that the arv:WorkflowRunnerResources hint is applied as intended.
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+hints:
+  arv:WorkflowRunnerResources:
+    ramMin: 2000
+    coresMin: 2
+inputs:
+  - id: x
+    type: File
+  - id: y
+    type: Directory
+  - id: z
+    type: Directory
+outputs: []
+steps:
+  - id: step1
+    in:
+      - { id: x, source: "#x" }
+    out: []
+    run: ../tool/submit_tool.cwl
diff --git a/sdk/go/arvados/byte_size.go b/sdk/go/arvados/byte_size.go
new file mode 100644 (file)
index 0000000..08cc83e
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "fmt"
+       "math"
+       "strings"
+)
+
+type ByteSize int64
+
+var prefixValue = map[string]int64{
+       "":   1,
+       "K":  1000,
+       "Ki": 1 << 10,
+       "M":  1000000,
+       "Mi": 1 << 20,
+       "G":  1000000000,
+       "Gi": 1 << 30,
+       "T":  1000000000000,
+       "Ti": 1 << 40,
+       "P":  1000000000000000,
+       "Pi": 1 << 50,
+       "E":  1000000000000000000,
+       "Ei": 1 << 60,
+}
+
+func (n *ByteSize) UnmarshalJSON(data []byte) error {
+       if len(data) == 0 || data[0] != '"' {
+               var i int64
+               err := json.Unmarshal(data, &i)
+               if err != nil {
+                       return err
+               }
+               *n = ByteSize(i)
+               return nil
+       }
+       var s string
+       err := json.Unmarshal(data, &s)
+       if err != nil {
+               return err
+       }
+       split := strings.LastIndexAny(s, "0123456789.+-eE") + 1
+       if split == 0 {
+               return fmt.Errorf("invalid byte size %q", s)
+       }
+       if s[split-1] == 'E' {
+               // We accepted an E as if it started the exponent part
+               // of a json number, but if the next char isn't +, -,
+               // or digit, then the E must have meant Exa. Instead
+               // of "4.5E"+"iB" we want "4.5"+"EiB".
+               split--
+       }
+       var val json.Number
+       dec := json.NewDecoder(strings.NewReader(s[:split]))
+       dec.UseNumber()
+       err = dec.Decode(&val)
+       if err != nil {
+               return err
+       }
+       if split == len(s) {
+               return nil
+       }
+       prefix := strings.Trim(s[split:], " ")
+       if strings.HasSuffix(prefix, "B") {
+               prefix = prefix[:len(prefix)-1]
+       }
+       pval, ok := prefixValue[prefix]
+       if !ok {
+               return fmt.Errorf("invalid unit %q", strings.Trim(s[split:], " "))
+       }
+       if intval, err := val.Int64(); err == nil {
+               if pval > 1 && (intval*pval)/pval != intval {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(intval * pval)
+               return nil
+       } else if floatval, err := val.Float64(); err == nil {
+               if floatval*float64(pval) > math.MaxInt64 {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(int64(floatval * float64(pval)))
+               return nil
+       } else {
+               return fmt.Errorf("bug: json.Number for %q is not int64 or float64: %s", s, err)
+       }
+}
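
For reference, a minimal sketch (not part of this commit; the caller and values are hypothetical) of how the new ByteSize type behaves as a JSON target — both a suffixed string and a bare integer decode to a byte count:

    // Hypothetical caller; assumes the arvados package as patched above.
    var cfg struct {
            RAM     arvados.ByteSize
            Scratch arvados.ByteSize
    }
    // "4GiB" uses a binary prefix (1<<30); 4000000000 is a plain byte count.
    err := json.Unmarshal([]byte(`{"RAM":"4GiB","Scratch":4000000000}`), &cfg)
    fmt.Println(cfg.RAM, cfg.Scratch, err) // 4294967296 4000000000 <nil>

The decimal/binary distinction follows the prefixValue table above: "K" is 1000 while "Ki" is 1<<10, and likewise for the larger prefixes.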
diff --git a/sdk/go/arvados/byte_size_test.go b/sdk/go/arvados/byte_size_test.go
new file mode 100644 (file)
index 0000000..7c4aff2
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ByteSizeSuite{})
+
+type ByteSizeSuite struct{}
+
+func (s *ByteSizeSuite) TestUnmarshal(c *check.C) {
+       for _, testcase := range []struct {
+               in  string
+               out int64
+       }{
+               {"0", 0},
+               {"5", 5},
+               {"5B", 5},
+               {"5 B", 5},
+               {" 4 KiB ", 4096},
+               {"0K", 0},
+               {"0Ki", 0},
+               {"0 KiB", 0},
+               {"4K", 4000},
+               {"4KB", 4000},
+               {"4Ki", 4096},
+               {"4KiB", 4096},
+               {"4MB", 4000000},
+               {"4MiB", 4194304},
+               {"4GB", 4000000000},
+               {"4 GiB", 4294967296},
+               {"4TB", 4000000000000},
+               {"4TiB", 4398046511104},
+               {"4PB", 4000000000000000},
+               {"4PiB", 4503599627370496},
+               {"4EB", 4000000000000000000},
+               {"4EiB", 4611686018427387904},
+               {"4.5EiB", 5188146770730811392},
+               {"1.5 GB", 1500000000},
+               {"1.5 GiB", 1610612736},
+               {"1.234 GiB", 1324997410}, // rounds down from 1324997410.816
+               {"1e2 KB", 100000},
+               {"20E-1 KiB", 2048},
+               {"1E0EB", 1000000000000000000},
+               {"1E-1EB", 100000000000000000},
+               {"1E-1EiB", 115292150460684704},
+               {"4.5E15 K", 4500000000000000000},
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase.in+"\n"), &n)
+               c.Logf("%v => %v: %v", testcase.in, testcase.out, n)
+               c.Check(err, check.IsNil)
+               c.Check(int64(n), check.Equals, testcase.out)
+       }
+       for _, testcase := range []string{
+               "B", "K", "KB", "KiB", "4BK", "4iB", "4A", "b", "4b", "4mB", "4m", "4mib", "4KIB", "4K iB", "4Ki B", "BB", "4BB",
+               "400000 EB", // overflows int64
+               "4.11e4 EB", // ok as float64, but overflows int64
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase+"\n"), &n)
+               c.Logf("%v => error: %v", n, err)
+               c.Check(err, check.NotNil)
+       }
+}
index 24f3faac16053fd6b40457a6111a7ac4d954f994..cca9f9bf1be8e946b7b9594f1ed839e92aa73485 100644 (file)
@@ -6,6 +6,7 @@ package arvados
 
 import (
        "bytes"
+       "context"
        "crypto/tls"
        "encoding/json"
        "fmt"
@@ -19,6 +20,8 @@ import (
        "regexp"
        "strings"
        "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
 )
 
 // A Client is an HTTP client with an API endpoint and a set of
@@ -50,6 +53,8 @@ type Client struct {
        KeepServiceURIs []string `json:",omitempty"`
 
        dd *DiscoveryDocument
+
+       ctx context.Context
 }
 
 // The default http.Client used by a Client with Insecure==true and
@@ -92,11 +97,26 @@ func NewClientFromEnv() *Client {
        }
 }
 
-// Do adds authentication headers and then calls (*http.Client)Do().
+var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
+
+// Do adds Authorization and X-Request-Id headers and then calls
+// (*http.Client)Do().
 func (c *Client) Do(req *http.Request) (*http.Response, error) {
        if c.AuthToken != "" {
                req.Header.Add("Authorization", "OAuth2 "+c.AuthToken)
        }
+
+       if req.Header.Get("X-Request-Id") == "" {
+               reqid, _ := c.context().Value(contextKeyRequestID).(string)
+               if reqid == "" {
+                       reqid = reqIDGen.Next()
+               }
+               if req.Header == nil {
+                       req.Header = http.Header{"X-Request-Id": {reqid}}
+               } else {
+                       req.Header.Set("X-Request-Id", reqid)
+               }
+       }
        return c.httpClient().Do(req)
 }
 
@@ -225,6 +245,23 @@ func (c *Client) UpdateBody(rsc resource) io.Reader {
        return bytes.NewBufferString(v.Encode())
 }
 
+type contextKey string
+
+var contextKeyRequestID contextKey = "X-Request-Id"
+
+func (c *Client) WithRequestID(reqid string) *Client {
+       cc := *c
+       cc.ctx = context.WithValue(cc.context(), contextKeyRequestID, reqid)
+       return &cc
+}
+
+func (c *Client) context() context.Context {
+       if c.ctx == nil {
+               return context.Background()
+       }
+       return c.ctx
+}
+
 func (c *Client) httpClient() *http.Client {
        switch {
        case c.Client != nil:
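
A sketch of the intended calling pattern (identifiers and the ID value are illustrative, not part of this commit): WithRequestID pins one X-Request-Id on every request made through the derived client, while requests through a client with no pinned ID each get a fresh reqIDGen value:

    c := arvados.NewClientFromEnv()
    // rc shares c's settings but carries a fixed request ID; Do() adds
    // the header unless the caller has already set one on the request.
    rc := c.WithRequestID("req-examplefixedid0001") // hypothetical ID
    var user arvados.User
    err := rc.RequestAndDecode(&user, "GET", "arvados/v1/users/current", nil, nil)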
index b0627fd27a665bf26250892c3fabd3319ff4e489..df938008d49756b850ca6e5ce5abee8a0510e2a3 100644 (file)
@@ -12,6 +12,7 @@ import (
        "net/url"
        "sync"
        "testing"
+       "testing/iotest"
 )
 
 type stubTransport struct {
@@ -51,6 +52,22 @@ func (stub *errorTransport) RoundTrip(req *http.Request) (*http.Response, error)
        return nil, fmt.Errorf("something awful happened")
 }
 
+type timeoutTransport struct {
+       response []byte
+}
+
+func (stub *timeoutTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+       return &http.Response{
+               Status:     "200 OK",
+               StatusCode: 200,
+               Proto:      "HTTP/1.1",
+               ProtoMajor: 1,
+               ProtoMinor: 1,
+               Request:    req,
+               Body:       ioutil.NopCloser(iotest.TimeoutReader(bytes.NewReader(stub.response))),
+       }, nil
+}
+
 func TestCurrentUser(t *testing.T) {
        t.Parallel()
        stub := &stubTransport{
index 999b4e9d483454ace177cad829e90f85ddccc44c..79be2f3f1d27d515f03b166573fd3c3c5fb0eb9b 100644 (file)
@@ -15,19 +15,23 @@ import (
 
 // Collection is an arvados#collection resource.
 type Collection struct {
-       UUID                   string     `json:"uuid,omitempty"`
-       TrashAt                *time.Time `json:"trash_at,omitempty"`
-       ManifestText           string     `json:"manifest_text,omitempty"`
-       UnsignedManifestText   string     `json:"unsigned_manifest_text,omitempty"`
-       Name                   string     `json:"name,omitempty"`
-       CreatedAt              *time.Time `json:"created_at,omitempty"`
-       ModifiedAt             *time.Time `json:"modified_at,omitempty"`
-       PortableDataHash       string     `json:"portable_data_hash,omitempty"`
-       ReplicationConfirmed   *int       `json:"replication_confirmed,omitempty"`
-       ReplicationConfirmedAt *time.Time `json:"replication_confirmed_at,omitempty"`
-       ReplicationDesired     *int       `json:"replication_desired,omitempty"`
-       DeleteAt               *time.Time `json:"delete_at,omitempty"`
-       IsTrashed              bool       `json:"is_trashed,omitempty"`
+       UUID                      string     `json:"uuid,omitempty"`
+       OwnerUUID                 string     `json:"owner_uuid,omitempty"`
+       TrashAt                   *time.Time `json:"trash_at,omitempty"`
+       ManifestText              string     `json:"manifest_text,omitempty"`
+       UnsignedManifestText      string     `json:"unsigned_manifest_text,omitempty"`
+       Name                      string     `json:"name,omitempty"`
+       CreatedAt                 *time.Time `json:"created_at,omitempty"`
+       ModifiedAt                *time.Time `json:"modified_at,omitempty"`
+       PortableDataHash          string     `json:"portable_data_hash,omitempty"`
+       ReplicationConfirmed      *int       `json:"replication_confirmed,omitempty"`
+       ReplicationConfirmedAt    *time.Time `json:"replication_confirmed_at,omitempty"`
+       ReplicationDesired        *int       `json:"replication_desired,omitempty"`
+       StorageClassesDesired     []string   `json:"storage_classes_desired,omitempty"`
+       StorageClassesConfirmed   []string   `json:"storage_classes_confirmed,omitempty"`
+       StorageClassesConfirmedAt *time.Time `json:"storage_classes_confirmed_at,omitempty"`
+       DeleteAt                  *time.Time `json:"delete_at,omitempty"`
+       IsTrashed                 bool       `json:"is_trashed,omitempty"`
 }
 
 func (c Collection) resourceName() string {
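
A sketch of requesting a storage class through the new fields (the client variable and the class name "archive" are illustrative; valid class names are defined by site configuration):

    var coll arvados.Collection
    coll.Name = "archival copy"
    coll.StorageClassesDesired = []string{"archive"}
    // client is assumed to be a configured *arvados.Client.
    err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections",
            client.UpdateBody(coll), nil)
    // Once the cluster confirms placement, the server fills in
    // StorageClassesConfirmed and StorageClassesConfirmedAt.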
index 9ed0eacf23e6d753c1b6c2a0f781282c96dde8cc..353901855683f296811a42e64b008568071dbdad 100644 (file)
@@ -5,6 +5,8 @@
 package arvados
 
 import (
+       "encoding/json"
+       "errors"
        "fmt"
        "os"
 
@@ -49,47 +51,88 @@ func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
 }
 
 type Cluster struct {
-       ClusterID       string `json:"-"`
-       ManagementToken string
-       SystemNodes     map[string]SystemNode
-       InstanceTypes   []InstanceType
+       ClusterID          string `json:"-"`
+       ManagementToken    string
+       NodeProfiles       map[string]NodeProfile
+       InstanceTypes      InstanceTypeMap
+       HTTPRequestTimeout Duration
 }
 
 type InstanceType struct {
        Name         string
        ProviderType string
        VCPUs        int
-       RAM          int64
-       Scratch      int64
+       RAM          ByteSize
+       Scratch      ByteSize
        Price        float64
+       Preemptible  bool
 }
 
-// GetThisSystemNode returns a SystemNode for the node we're running
-// on right now.
-func (cc *Cluster) GetThisSystemNode() (*SystemNode, error) {
-       hostname, err := os.Hostname()
+type InstanceTypeMap map[string]InstanceType
+
+var errDuplicateInstanceTypeName = errors.New("duplicate instance type name")
+
+// UnmarshalJSON handles old config files that provide an array of
+// instance types instead of a hash.
+func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
+       if len(data) > 0 && data[0] == '[' {
+               var arr []InstanceType
+               err := json.Unmarshal(data, &arr)
+               if err != nil {
+                       return err
+               }
+               if len(arr) == 0 {
+                       *it = nil
+                       return nil
+               }
+               *it = make(map[string]InstanceType, len(arr))
+               for _, t := range arr {
+                       if _, ok := (*it)[t.Name]; ok {
+                               return errDuplicateInstanceTypeName
+                       }
+                       (*it)[t.Name] = t
+               }
+               return nil
+       }
+       var hash map[string]InstanceType
+       err := json.Unmarshal(data, &hash)
        if err != nil {
-               return nil, err
+               return err
        }
-       return cc.GetSystemNode(hostname)
+       // Fill in Name field using hash key.
+       *it = InstanceTypeMap(hash)
+       for name, t := range *it {
+               t.Name = name
+               (*it)[name] = t
+       }
+       return nil
 }
 
-// GetSystemNode returns a SystemNode for the given hostname. An error
-// is returned if the appropriate configuration can't be determined
-// (e.g., this does not appear to be a system node).
-func (cc *Cluster) GetSystemNode(node string) (*SystemNode, error) {
-       if cfg, ok := cc.SystemNodes[node]; ok {
+// GetNodeProfile returns a NodeProfile for the given hostname. An
+// error is returned if the appropriate configuration can't be
+// determined (e.g., this does not appear to be a system node). If
+// node is empty, use the OS-reported hostname.
+func (cc *Cluster) GetNodeProfile(node string) (*NodeProfile, error) {
+       if node == "" {
+               hostname, err := os.Hostname()
+               if err != nil {
+                       return nil, err
+               }
+               node = hostname
+       }
+       if cfg, ok := cc.NodeProfiles[node]; ok {
                return &cfg, nil
        }
        // If node is not listed, but "*" gives a default system node
        // config, use the default config.
-       if cfg, ok := cc.SystemNodes["*"]; ok {
+       if cfg, ok := cc.NodeProfiles["*"]; ok {
                return &cfg, nil
        }
        return nil, fmt.Errorf("config does not provision host %q as a system node", node)
 }
 
-type SystemNode struct {
+type NodeProfile struct {
+       Controller  SystemServiceInstance `json:"arvados-controller"`
        Health      SystemServiceInstance `json:"arvados-health"`
        Keepproxy   SystemServiceInstance `json:"keepproxy"`
        Keepstore   SystemServiceInstance `json:"keepstore"`
@@ -100,20 +143,35 @@ type SystemNode struct {
        Workbench   SystemServiceInstance `json:"arvados-workbench"`
 }
 
+type ServiceName string
+
+const (
+       ServiceNameRailsAPI    ServiceName = "arvados-api-server"
+       ServiceNameController  ServiceName = "arvados-controller"
+       ServiceNameNodemanager ServiceName = "arvados-node-manager"
+       ServiceNameWorkbench   ServiceName = "arvados-workbench"
+       ServiceNameWebsocket   ServiceName = "arvados-ws"
+       ServiceNameKeepweb     ServiceName = "keep-web"
+       ServiceNameKeepproxy   ServiceName = "keepproxy"
+       ServiceNameKeepstore   ServiceName = "keepstore"
+)
+
 // ServicePorts returns the configured listening address (or "" if
 // disabled) for each service on the node.
-func (sn *SystemNode) ServicePorts() map[string]string {
-       return map[string]string{
-               "arvados-api-server":   sn.RailsAPI.Listen,
-               "arvados-node-manager": sn.Nodemanager.Listen,
-               "arvados-workbench":    sn.Workbench.Listen,
-               "arvados-ws":           sn.Websocket.Listen,
-               "keep-web":             sn.Keepweb.Listen,
-               "keepproxy":            sn.Keepproxy.Listen,
-               "keepstore":            sn.Keepstore.Listen,
+func (np *NodeProfile) ServicePorts() map[ServiceName]string {
+       return map[ServiceName]string{
+               ServiceNameRailsAPI:    np.RailsAPI.Listen,
+               ServiceNameController:  np.Controller.Listen,
+               ServiceNameNodemanager: np.Nodemanager.Listen,
+               ServiceNameWorkbench:   np.Workbench.Listen,
+               ServiceNameWebsocket:   np.Websocket.Listen,
+               ServiceNameKeepweb:     np.Keepweb.Listen,
+               ServiceNameKeepproxy:   np.Keepproxy.Listen,
+               ServiceNameKeepstore:   np.Keepstore.Listen,
        }
 }
 
 type SystemServiceInstance struct {
        Listen string
+       TLS    bool
 }
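
A sketch of a config fragment exercising the renamed NodeProfiles section and the new map-form InstanceTypes together (hostnames, ports, and type names are illustrative):

    // Hypothetical config fragment; parsed with github.com/ghodss/yaml
    // (as in the tests below) so the JSON unmarshalers above apply.
    var cluster arvados.Cluster
    err := yaml.Unmarshal([]byte(`
    NodeProfiles:
      "*":
        arvados-controller: {Listen: ":9004", TLS: true}
    InstanceTypes:
      m4.large:
        VCPUs: 2
        RAM: 8GiB
        Scratch: 31GB
        Price: 0.1
        Preemptible: true
    `), &cluster)
    // GetNodeProfile("") uses the local hostname, falling back to "*".
    np, _ := cluster.GetNodeProfile("")
    fmt.Println(np.Controller.Listen, int64(cluster.InstanceTypes["m4.large"].RAM), err)
    // Output: :9004 8589934592 <nil>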
diff --git a/sdk/go/arvados/config_test.go b/sdk/go/arvados/config_test.go
new file mode 100644 (file)
index 0000000..59c7432
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ConfigSuite{})
+
+type ConfigSuite struct{}
+
+func (s *ConfigSuite) TestInstanceTypesAsArray(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n- Name: foo\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+}
+
+func (s *ConfigSuite) TestInstanceTypesAsHash(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n  foo:\n    ProviderType: bar\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+       c.Check(cluster.InstanceTypes["foo"].ProviderType, check.Equals, "bar")
+}
+
+func (s *ConfigSuite) TestInstanceTypeSize(c *check.C) {
+       var it InstanceType
+       err := yaml.Unmarshal([]byte("Name: foo\nScratch: 4GB\nRAM: 4GiB\n"), &it)
+       c.Check(err, check.IsNil)
+       c.Check(int64(it.Scratch), check.Equals, int64(4000000000))
+       c.Check(int64(it.RAM), check.Equals, int64(4294967296))
+}
index daafc4995448524f7fe3794b9facd13e01480823..210ed9981c07292ec3c1508da978eaac351acae7 100644 (file)
@@ -52,7 +52,9 @@ type RuntimeConstraints struct {
 // SchedulingParameters specify a container's scheduling parameters
 // such as Partitions
 type SchedulingParameters struct {
-       Partitions []string `json:"partitions"`
+       Partitions  []string `json:"partitions"`
+       Preemptible bool     `json:"preemptible"`
+       MaxRunTime  int      `json:"max_run_time"`
 }
 
 // ContainerList is an arvados#containerList resource.
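
A sketch of the container-request JSON the new fields accept (values illustrative): preemptible asks the dispatcher for a spot/preemptible instance, and max_run_time bounds the container's runtime in seconds.

    // Hypothetical scheduling_parameters payload.
    var sp arvados.SchedulingParameters
    err := json.Unmarshal([]byte(
            `{"partitions":["gpu"],"preemptible":true,"max_run_time":3600}`), &sp)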
diff --git a/sdk/go/arvados/fs_backend.go b/sdk/go/arvados/fs_backend.go
new file mode 100644 (file)
index 0000000..301f0b4
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "io"
+
+type fsBackend interface {
+       keepClient
+       apiClient
+}
+
+// Ideally *Client would do everything; meanwhile keepBackend
+// implements fsBackend by merging the two kinds of arvados client.
+type keepBackend struct {
+       keepClient
+       apiClient
+}
+
+type keepClient interface {
+       ReadAt(locator string, p []byte, off int) (int, error)
+       PutB(p []byte) (string, int, error)
+}
+
+type apiClient interface {
+       RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error
+       UpdateBody(rsc resource) io.Reader
+}
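
The embedding above is load-bearing: both interfaces' method sets are promoted, so keepBackend satisfies fsBackend without any forwarding methods. A minimal in-package sketch, assuming client and kc are any values implementing apiClient and keepClient respectively:

    // Compose a backend from two independent clients.
    var be fsBackend = keepBackend{apiClient: client, keepClient: kc}
    _ = be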
diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go
new file mode 100644 (file)
index 0000000..3058a76
--- /dev/null
@@ -0,0 +1,595 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "net/http"
+       "os"
+       "path"
+       "strings"
+       "sync"
+       "time"
+)
+
+var (
+       ErrReadOnlyFile      = errors.New("read-only file")
+       ErrNegativeOffset    = errors.New("cannot seek to negative offset")
+       ErrFileExists        = errors.New("file exists")
+       ErrInvalidOperation  = errors.New("invalid operation")
+       ErrInvalidArgument   = errors.New("invalid argument")
+       ErrDirectoryNotEmpty = errors.New("directory not empty")
+       ErrWriteOnlyMode     = errors.New("file is O_WRONLY")
+       ErrSyncNotSupported  = errors.New("O_SYNC flag is not supported")
+       ErrIsDirectory       = errors.New("cannot rename file to overwrite existing directory")
+       ErrNotADirectory     = errors.New("not a directory")
+       ErrPermission        = os.ErrPermission
+)
+
+// A File is an *os.File-like interface for reading and writing files
+// in a FileSystem.
+type File interface {
+       io.Reader
+       io.Writer
+       io.Closer
+       io.Seeker
+       Size() int64
+       Readdir(int) ([]os.FileInfo, error)
+       Stat() (os.FileInfo, error)
+       Truncate(int64) error
+       Sync() error
+}
+
+// A FileSystem is an http.Filesystem plus Stat() and support for
+// opening writable files. All methods are safe to call from multiple
+// goroutines.
+type FileSystem interface {
+       http.FileSystem
+       fsBackend
+
+       rootnode() inode
+
+       // filesystem-wide lock: used by Rename() to prevent deadlock
+       // while locking multiple inodes.
+       locker() sync.Locker
+
+       // create a new node with nil parent.
+       newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error)
+
+       // analogous to os.Stat()
+       Stat(name string) (os.FileInfo, error)
+
+       // analogous to os.Create(): create/truncate a file and open it O_RDWR.
+       Create(name string) (File, error)
+
+       // Like os.OpenFile(): create or open a file or directory.
+       //
+       // If flag&os.O_EXCL==0, it opens an existing file or
+       // directory if one exists. If flag&os.O_CREATE!=0, it creates
+       // a new empty file or directory if one does not already
+       // exist.
+       //
+       // When creating a new item, perm&os.ModeDir determines
+       // whether it is a file or a directory.
+       //
+       // A file can be opened multiple times and used concurrently
+       // from multiple goroutines. However, each File object should
+       // be used by only one goroutine at a time.
+       OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+       Mkdir(name string, perm os.FileMode) error
+       Remove(name string) error
+       RemoveAll(name string) error
+       Rename(oldname, newname string) error
+       Sync() error
+}
+
+type inode interface {
+       SetParent(parent inode, name string)
+       Parent() inode
+       FS() FileSystem
+       Read([]byte, filenodePtr) (int, filenodePtr, error)
+       Write([]byte, filenodePtr) (int, filenodePtr, error)
+       Truncate(int64) error
+       IsDir() bool
+       Readdir() ([]os.FileInfo, error)
+       Size() int64
+       FileInfo() os.FileInfo
+
+       // Child() performs lookups and updates of named child nodes.
+       //
+       // (The term "child" here is used strictly. This means name is
+       // not "." or "..", and name does not contain "/".)
+       //
+       // If replace is non-nil, Child calls replace(x) where x is
+       // the current child inode with the given name. If possible,
+       // the child inode is replaced with the one returned by
+       // replace().
+       //
+       // If replace(x) returns an inode (besides x or nil) that is
+       // subsequently returned by Child(), then Child()'s caller
+       // must ensure the new child's name and parent are set/updated
+       // to Child()'s name argument and its receiver respectively.
+       // This is not necessarily done before replace(x) returns, but
+       // it must be done before Child()'s caller releases the
+       // parent's lock.
+       //
+       // Nil represents "no child". replace(nil) signifies that no
+       // child with this name exists yet. If replace() returns nil,
+       // the existing child should be deleted if possible.
+       //
+       // An implementation of Child() is permitted to ignore
+       // replace() or its return value. For example, a regular file
+       // inode does not have children, so Child() always returns
+       // nil.
+       //
+       // Child() returns the child, if any, with the given name: if
+       // a child was added or changed, the new child is returned.
+       //
+       // Caller must have lock (or rlock if replace is nil).
+       Child(name string, replace func(inode) (inode, error)) (inode, error)
+
+       sync.Locker
+       RLock()
+       RUnlock()
+}
+
+type fileinfo struct {
+       name    string
+       mode    os.FileMode
+       size    int64
+       modTime time.Time
+}
+
+// Name implements os.FileInfo.
+func (fi fileinfo) Name() string {
+       return fi.name
+}
+
+// ModTime implements os.FileInfo.
+func (fi fileinfo) ModTime() time.Time {
+       return fi.modTime
+}
+
+// Mode implements os.FileInfo.
+func (fi fileinfo) Mode() os.FileMode {
+       return fi.mode
+}
+
+// IsDir implements os.FileInfo.
+func (fi fileinfo) IsDir() bool {
+       return fi.mode&os.ModeDir != 0
+}
+
+// Size implements os.FileInfo.
+func (fi fileinfo) Size() int64 {
+       return fi.size
+}
+
+// Sys implements os.FileInfo.
+func (fi fileinfo) Sys() interface{} {
+       return nil
+}
+
+type nullnode struct{}
+
+func (*nullnode) Mkdir(string, os.FileMode) error {
+       return ErrInvalidOperation
+}
+
+func (*nullnode) Read([]byte, filenodePtr) (int, filenodePtr, error) {
+       return 0, filenodePtr{}, ErrInvalidOperation
+}
+
+func (*nullnode) Write([]byte, filenodePtr) (int, filenodePtr, error) {
+       return 0, filenodePtr{}, ErrInvalidOperation
+}
+
+func (*nullnode) Truncate(int64) error {
+       return ErrInvalidOperation
+}
+
+func (*nullnode) FileInfo() os.FileInfo {
+       return fileinfo{}
+}
+
+func (*nullnode) IsDir() bool {
+       return false
+}
+
+func (*nullnode) Readdir() ([]os.FileInfo, error) {
+       return nil, ErrInvalidOperation
+}
+
+func (*nullnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return nil, ErrNotADirectory
+}
+
+type treenode struct {
+       fs       FileSystem
+       parent   inode
+       inodes   map[string]inode
+       fileinfo fileinfo
+       sync.RWMutex
+       nullnode
+}
+
+func (n *treenode) FS() FileSystem {
+       return n.fs
+}
+
+func (n *treenode) SetParent(p inode, name string) {
+       n.Lock()
+       defer n.Unlock()
+       n.parent = p
+       n.fileinfo.name = name
+}
+
+func (n *treenode) Parent() inode {
+       n.RLock()
+       defer n.RUnlock()
+       return n.parent
+}
+
+func (n *treenode) IsDir() bool {
+       return true
+}
+
+func (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {
+       child = n.inodes[name]
+       if name == "" || name == "." || name == ".." {
+               err = ErrInvalidArgument
+               return
+       }
+       if replace == nil {
+               return
+       }
+       newchild, err := replace(child)
+       if err != nil {
+               return
+       }
+       if newchild == nil {
+               delete(n.inodes, name)
+       } else if newchild != child {
+               n.inodes[name] = newchild
+               n.fileinfo.modTime = time.Now()
+               child = newchild
+       }
+       return
+}
+
+func (n *treenode) Size() int64 {
+       return n.FileInfo().Size()
+}
+
+func (n *treenode) FileInfo() os.FileInfo {
+       n.Lock()
+       defer n.Unlock()
+       n.fileinfo.size = int64(len(n.inodes))
+       return n.fileinfo
+}
+
+func (n *treenode) Readdir() (fi []os.FileInfo, err error) {
+       n.RLock()
+       defer n.RUnlock()
+       fi = make([]os.FileInfo, 0, len(n.inodes))
+       for _, inode := range n.inodes {
+               fi = append(fi, inode.FileInfo())
+       }
+       return
+}
+
+type fileSystem struct {
+       root inode
+       fsBackend
+       mutex sync.Mutex
+}
+
+func (fs *fileSystem) rootnode() inode {
+       return fs.root
+}
+
+func (fs *fileSystem) locker() sync.Locker {
+       return &fs.mutex
+}
+
+// OpenFile is analogous to os.OpenFile().
+func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+       return fs.openFile(name, flag, perm)
+}
+
+func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*filehandle, error) {
+       if flag&os.O_SYNC != 0 {
+               return nil, ErrSyncNotSupported
+       }
+       dirname, name := path.Split(name)
+       parent, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return nil, err
+       }
+       var readable, writable bool
+       switch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) {
+       case os.O_RDWR:
+               readable = true
+               writable = true
+       case os.O_RDONLY:
+               readable = true
+       case os.O_WRONLY:
+               writable = true
+       default:
+               return nil, fmt.Errorf("invalid flags 0x%x", flag)
+       }
+       if !writable && parent.IsDir() {
+               // A directory can be opened via "foo/", "foo/.", or
+               // "foo/..".
+               switch name {
+               case ".", "":
+                       return &filehandle{inode: parent}, nil
+               case "..":
+                       return &filehandle{inode: parent.Parent()}, nil
+               }
+       }
+       createMode := flag&os.O_CREATE != 0
+       if createMode {
+               parent.Lock()
+               defer parent.Unlock()
+       } else {
+               parent.RLock()
+               defer parent.RUnlock()
+       }
+       n, err := parent.Child(name, nil)
+       if err != nil {
+               return nil, err
+       } else if n == nil {
+               if !createMode {
+                       return nil, os.ErrNotExist
+               }
+               n, err = parent.Child(name, func(inode) (repl inode, err error) {
+                       repl, err = parent.FS().newNode(name, perm|0755, time.Now())
+                       if err != nil {
+                               return
+                       }
+                       repl.SetParent(parent, name)
+                       return
+               })
+               if err != nil {
+                       return nil, err
+               } else if n == nil {
+                       // Parent rejected new child, but returned no error
+                       return nil, ErrInvalidArgument
+               }
+       } else if flag&os.O_EXCL != 0 {
+               return nil, ErrFileExists
+       } else if flag&os.O_TRUNC != 0 {
+               if !writable {
+                       return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode")
+               } else if n.IsDir() {
+                       return nil, fmt.Errorf("invalid flag O_TRUNC when opening directory")
+               } else if err := n.Truncate(0); err != nil {
+                       return nil, err
+               }
+       }
+       return &filehandle{
+               inode:    n,
+               append:   flag&os.O_APPEND != 0,
+               readable: readable,
+               writable: writable,
+       }, nil
+}
+
+func (fs *fileSystem) Open(name string) (http.File, error) {
+       return fs.OpenFile(name, os.O_RDONLY, 0)
+}
+
+func (fs *fileSystem) Create(name string) (File, error) {
+       return fs.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
+}
+
+func (fs *fileSystem) Mkdir(name string, perm os.FileMode) error {
+       dirname, name := path.Split(name)
+       n, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return err
+       }
+       n.Lock()
+       defer n.Unlock()
+       if child, err := n.Child(name, nil); err != nil {
+               return err
+       } else if child != nil {
+               return os.ErrExist
+       }
+
+       _, err = n.Child(name, func(inode) (repl inode, err error) {
+               repl, err = n.FS().newNode(name, perm|os.ModeDir, time.Now())
+               if err != nil {
+                       return
+               }
+               repl.SetParent(n, name)
+               return
+       })
+       return err
+}
+
+func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
+       node, err := rlookup(fs.root, name)
+       if err != nil {
+               return nil, err
+       }
+       return node.FileInfo(), nil
+}
+
+func (fs *fileSystem) Rename(oldname, newname string) error {
+       olddir, oldname := path.Split(oldname)
+       if oldname == "" || oldname == "." || oldname == ".." {
+               return ErrInvalidArgument
+       }
+       olddirf, err := fs.openFile(olddir+".", os.O_RDONLY, 0)
+       if err != nil {
+               return fmt.Errorf("%q: %s", olddir, err)
+       }
+       defer olddirf.Close()
+
+       newdir, newname := path.Split(newname)
+       if newname == "." || newname == ".." {
+               return ErrInvalidArgument
+       } else if newname == "" {
+               // Rename("a/b", "c/") means Rename("a/b", "c/b")
+               newname = oldname
+       }
+       newdirf, err := fs.openFile(newdir+".", os.O_RDONLY, 0)
+       if err != nil {
+               return fmt.Errorf("%q: %s", newdir, err)
+       }
+       defer newdirf.Close()
+
+       // TODO: If the nearest common ancestor ("nca") of olddirf and
+       // newdirf is on a different filesystem than fs, we should
+       // call nca.FS().Rename() instead of proceeding. Until then
+       // it's awkward for filesystems to implement their own Rename
+       // methods effectively: the only one that runs is the one on
+       // the root FileSystem exposed to the caller (webdav, fuse,
+       // etc).
+
+       // When acquiring locks on multiple inodes, avoid deadlock by
+       // locking the entire containing filesystem first.
+       cfs := olddirf.inode.FS()
+       cfs.locker().Lock()
+       defer cfs.locker().Unlock()
+
+       if cfs != newdirf.inode.FS() {
+               // Moving inodes across filesystems is not (yet)
+               // supported. Locking inodes from different
+               // filesystems could deadlock, so we must error out
+               // now.
+               return ErrInvalidArgument
+       }
+
+       // To ensure we can test reliably whether we're about to move
+       // a directory into itself, lock all potential common
+       // ancestors of olddir and newdir.
+       needLock := []sync.Locker{}
+       for _, node := range []inode{olddirf.inode, newdirf.inode} {
+               needLock = append(needLock, node)
+               for node.Parent() != node && node.Parent().FS() == node.FS() {
+                       node = node.Parent()
+                       needLock = append(needLock, node)
+               }
+       }
+       locked := map[sync.Locker]bool{}
+       for i := len(needLock) - 1; i >= 0; i-- {
+               if n := needLock[i]; !locked[n] {
+                       n.Lock()
+                       defer n.Unlock()
+                       locked[n] = true
+               }
+       }
+
+       _, err = olddirf.inode.Child(oldname, func(oldinode inode) (inode, error) {
+               if oldinode == nil {
+                       return oldinode, os.ErrNotExist
+               }
+               if locked[oldinode] {
+                       // oldinode cannot become a descendant of itself.
+                       return oldinode, ErrInvalidArgument
+               }
+               if oldinode.FS() != cfs && newdirf.inode != olddirf.inode {
+                       // moving a mount point to a different parent
+                       // is not (yet) supported.
+                       return oldinode, ErrInvalidArgument
+               }
+               accepted, err := newdirf.inode.Child(newname, func(existing inode) (inode, error) {
+                       if existing != nil && existing.IsDir() {
+                               return existing, ErrIsDirectory
+                       }
+                       return oldinode, nil
+               })
+               if err != nil {
+                       // Leave oldinode in olddir.
+                       return oldinode, err
+               }
+               accepted.SetParent(newdirf.inode, newname)
+               return nil, nil
+       })
+       return err
+}
+
+func (fs *fileSystem) Remove(name string) error {
+       return fs.remove(strings.TrimRight(name, "/"), false)
+}
+
+func (fs *fileSystem) RemoveAll(name string) error {
+       err := fs.remove(strings.TrimRight(name, "/"), true)
+       if os.IsNotExist(err) {
+               // "If the path does not exist, RemoveAll returns
+               // nil." (see "os" pkg)
+               err = nil
+       }
+       return err
+}
+
+func (fs *fileSystem) remove(name string, recursive bool) error {
+       dirname, name := path.Split(name)
+       if name == "" || name == "." || name == ".." {
+               return ErrInvalidArgument
+       }
+       dir, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return err
+       }
+       dir.Lock()
+       defer dir.Unlock()
+       _, err = dir.Child(name, func(node inode) (inode, error) {
+               if node == nil {
+                       return nil, os.ErrNotExist
+               }
+               if !recursive && node.IsDir() && node.Size() > 0 {
+                       return node, ErrDirectoryNotEmpty
+               }
+               return nil, nil
+       })
+       return err
+}
+
+func (fs *fileSystem) Sync() error {
+       log.Printf("TODO: sync fileSystem")
+       return ErrInvalidOperation
+}
+
+// rlookup (recursive lookup) returns the inode for the file/directory
+// with the given name (which may contain "/" separators). If no such
+// file/directory exists, the returned node is nil.
+func rlookup(start inode, path string) (node inode, err error) {
+       node = start
+       for _, name := range strings.Split(path, "/") {
+               if node.IsDir() {
+                       if name == "." || name == "" {
+                               continue
+                       }
+                       if name == ".." {
+                               node = node.Parent()
+                               continue
+                       }
+               }
+               node, err = func() (inode, error) {
+                       node.RLock()
+                       defer node.RUnlock()
+                       return node.Child(name, nil)
+               }()
+               if node == nil || err != nil {
+                       break
+               }
+       }
+       if node == nil && err == nil {
+               err = os.ErrNotExist
+       }
+       return
+}
+
+func permittedName(name string) bool {
+       return name != "" && name != "." && name != ".." && !strings.Contains(name, "/")
+}
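
A sketch of the caller-facing surface this base layer provides once a concrete implementation (such as the collectionFileSystem below) supplies the nodes; coll, client, and kc stand in for a collection and configured API/Keep clients:

    fs, _ := coll.FileSystem(client, kc) // constructor appears below
    fs.Mkdir("dir", 0755)
    f, _ := fs.OpenFile("dir/file.txt", os.O_CREATE|os.O_WRONLY, 0644)
    f.Write([]byte("hello"))
    f.Close()
    // Rename takes the filesystem-wide locker() before touching multiple
    // inodes, per the deadlock note in Rename above.
    fs.Rename("dir/file.txt", "renamed.txt")
    mtxt, _ := fs.MarshalManifest(".") // flush to Keep, get manifest text
    _ = mtxt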
similarity index 60%
rename from sdk/go/arvados/collection_fs.go
rename to sdk/go/arvados/fs_collection.go
index d8ee2a2b1c5175697bf39369274ff6c0a42e7310..7ce37aa24e7b35bfbabec9508af3b2e308d4cc76 100644 (file)
@@ -5,10 +5,10 @@
 package arvados
 
 import (
-       "errors"
+       "encoding/json"
        "fmt"
        "io"
-       "net/http"
+       "log"
        "os"
        "path"
        "regexp"
@@ -19,107 +19,12 @@ import (
        "time"
 )
 
-var (
-       ErrReadOnlyFile      = errors.New("read-only file")
-       ErrNegativeOffset    = errors.New("cannot seek to negative offset")
-       ErrFileExists        = errors.New("file exists")
-       ErrInvalidOperation  = errors.New("invalid operation")
-       ErrInvalidArgument   = errors.New("invalid argument")
-       ErrDirectoryNotEmpty = errors.New("directory not empty")
-       ErrWriteOnlyMode     = errors.New("file is O_WRONLY")
-       ErrSyncNotSupported  = errors.New("O_SYNC flag is not supported")
-       ErrIsDirectory       = errors.New("cannot rename file to overwrite existing directory")
-       ErrPermission        = os.ErrPermission
+var maxBlockSize = 1 << 26
 
-       maxBlockSize = 1 << 26
-)
-
-// A File is an *os.File-like interface for reading and writing files
-// in a CollectionFileSystem.
-type File interface {
-       io.Reader
-       io.Writer
-       io.Closer
-       io.Seeker
-       Size() int64
-       Readdir(int) ([]os.FileInfo, error)
-       Stat() (os.FileInfo, error)
-       Truncate(int64) error
-}
-
-type keepClient interface {
-       ReadAt(locator string, p []byte, off int) (int, error)
-       PutB(p []byte) (string, int, error)
-}
-
-type fileinfo struct {
-       name    string
-       mode    os.FileMode
-       size    int64
-       modTime time.Time
-}
-
-// Name implements os.FileInfo.
-func (fi fileinfo) Name() string {
-       return fi.name
-}
-
-// ModTime implements os.FileInfo.
-func (fi fileinfo) ModTime() time.Time {
-       return fi.modTime
-}
-
-// Mode implements os.FileInfo.
-func (fi fileinfo) Mode() os.FileMode {
-       return fi.mode
-}
-
-// IsDir implements os.FileInfo.
-func (fi fileinfo) IsDir() bool {
-       return fi.mode&os.ModeDir != 0
-}
-
-// Size implements os.FileInfo.
-func (fi fileinfo) Size() int64 {
-       return fi.size
-}
-
-// Sys implements os.FileInfo.
-func (fi fileinfo) Sys() interface{} {
-       return nil
-}
-
-// A CollectionFileSystem is an http.Filesystem plus Stat() and
-// support for opening writable files. All methods are safe to call
-// from multiple goroutines.
+// A CollectionFileSystem is a FileSystem that can be serialized as a
+// manifest and stored as a collection.
 type CollectionFileSystem interface {
-       http.FileSystem
-
-       // analogous to os.Stat()
-       Stat(name string) (os.FileInfo, error)
-
-       // analogous to os.Create(): create/truncate a file and open it O_RDWR.
-       Create(name string) (File, error)
-
-       // Like os.OpenFile(): create or open a file or directory.
-       //
-       // If flag&os.O_EXCL==0, it opens an existing file or
-       // directory if one exists. If flag&os.O_CREATE!=0, it creates
-       // a new empty file or directory if one does not already
-       // exist.
-       //
-       // When creating a new item, perm&os.ModeDir determines
-       // whether it is a file or a directory.
-       //
-       // A file can be opened multiple times and used concurrently
-       // from multiple goroutines. However, each File object should
-       // be used by only one goroutine at a time.
-       OpenFile(name string, flag int, perm os.FileMode) (File, error)
-
-       Mkdir(name string, perm os.FileMode) error
-       Remove(name string) error
-       RemoveAll(name string) error
-       Rename(oldname, newname string) error
+       FileSystem
 
        // Flush all file data to Keep and return a snapshot of the
        // filesystem suitable for saving as (Collection)ManifestText.
@@ -128,55 +33,110 @@ type CollectionFileSystem interface {
        MarshalManifest(prefix string) (string, error)
 }
 
-type fileSystem struct {
-       dirnode
-}
-
-func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
-       return fs.dirnode.OpenFile(name, flag, perm)
+type collectionFileSystem struct {
+       fileSystem
+       uuid string
 }
 
-func (fs *fileSystem) Open(name string) (http.File, error) {
-       return fs.dirnode.OpenFile(name, os.O_RDONLY, 0)
+// FileSystem returns a CollectionFileSystem for the collection.
+func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFileSystem, error) {
+       var modTime time.Time
+       if c.ModifiedAt == nil {
+               modTime = time.Now()
+       } else {
+               modTime = *c.ModifiedAt
+       }
+       fs := &collectionFileSystem{
+               uuid: c.UUID,
+               fileSystem: fileSystem{
+                       fsBackend: keepBackend{apiClient: client, keepClient: kc},
+               },
+       }
+       root := &dirnode{
+               fs: fs,
+               treenode: treenode{
+                       fileinfo: fileinfo{
+                               name:    ".",
+                               mode:    os.ModeDir | 0755,
+                               modTime: modTime,
+                       },
+                       inodes: make(map[string]inode),
+               },
+       }
+       root.SetParent(root, ".")
+       if err := root.loadManifest(c.ManifestText); err != nil {
+               return nil, err
+       }
+       backdateTree(root, modTime)
+       fs.root = root
+       return fs, nil
 }
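
For orientation, a minimal sketch of the round trip this constructor
enables (package-internal, since apiClient and keepClient are
unexported; error handling abbreviated; sketch only, not part of the
diff):

    func exampleRoundTrip(coll *Collection, client apiClient, kc keepClient) (string, error) {
        fs, err := coll.FileSystem(client, kc)
        if err != nil {
            return "", err
        }
        f, err := fs.OpenFile("hello.txt", os.O_CREATE|os.O_WRONLY, 0644)
        if err != nil {
            return "", err
        }
        if _, err := f.Write([]byte("hello\n")); err != nil {
            return "", err
        }
        if err := f.Close(); err != nil {
            return "", err
        }
        // Flush buffered data to Keep and return a manifest suitable
        // for saving as the collection's ManifestText.
        return fs.MarshalManifest(".")
    }
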
 
-func (fs *fileSystem) Create(name string) (File, error) {
-       return fs.dirnode.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
+func backdateTree(n inode, modTime time.Time) {
+       switch n := n.(type) {
+       case *filenode:
+               n.fileinfo.modTime = modTime
+       case *dirnode:
+               n.fileinfo.modTime = modTime
+               for _, n := range n.inodes {
+                       backdateTree(n, modTime)
+               }
+       }
 }
 
-func (fs *fileSystem) Stat(name string) (fi os.FileInfo, err error) {
-       node := fs.dirnode.lookupPath(name)
-       if node == nil {
-               err = os.ErrNotExist
+func (fs *collectionFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {
+       if name == "" || name == "." || name == ".." {
+               return nil, ErrInvalidArgument
+       }
+       if perm.IsDir() {
+               return &dirnode{
+                       fs: fs,
+                       treenode: treenode{
+                               fileinfo: fileinfo{
+                                       name:    name,
+                                       mode:    perm | os.ModeDir,
+                                       modTime: modTime,
+                               },
+                               inodes: make(map[string]inode),
+                       },
+               }, nil
        } else {
-               fi = node.Stat()
+               return &filenode{
+                       fs: fs,
+                       fileinfo: fileinfo{
+                               name:    name,
+                               mode:    perm & ^os.ModeDir,
+                               modTime: modTime,
+                       },
+               }, nil
        }
-       return
 }
 
-type inode interface {
-       Parent() inode
-       Read([]byte, filenodePtr) (int, filenodePtr, error)
-       Write([]byte, filenodePtr) (int, filenodePtr, error)
-       Truncate(int64) error
-       Readdir() []os.FileInfo
-       Size() int64
-       Stat() os.FileInfo
-       sync.Locker
-       RLock()
-       RUnlock()
+func (fs *collectionFileSystem) Sync() error {
+       if fs.uuid == "" {
+               return nil
+       }
+       txt, err := fs.MarshalManifest(".")
+       if err != nil {
+               log.Printf("WARNING: (collectionFileSystem)Sync() failed: %s", err)
+               return err
+       }
+       coll := &Collection{
+               UUID:         fs.uuid,
+               ManifestText: txt,
+       }
+       err = fs.RequestAndDecode(nil, "PUT", "arvados/v1/collections/"+fs.uuid, fs.UpdateBody(coll), map[string]interface{}{"select": []string{"uuid"}})
+       if err != nil {
+               log.Printf("WARNING: (collectionFileSystem)Sync() failed: %s", err)
+       }
+       return err
 }
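
Sync saves the in-memory state back to the collection record: it is a
no-op for a filesystem built without a UUID, and otherwise PUTs the
freshly marshaled manifest. The intended call pattern, as a sketch (fs
is a CollectionFileSystem; illustrative only):

    // ...write through fs...
    if err := fs.Sync(); err != nil {
        // The API update failed; in-memory state is unchanged, so a
        // later Sync can retry.
        return err
    }
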
 
-// filenode implements inode.
-type filenode struct {
-       fileinfo fileinfo
-       parent   *dirnode
-       segments []segment
-       // number of times `segments` has changed in a
-       // way that might invalidate a filenodePtr
-       repacked int64
-       memsize  int64 // bytes in memSegments
-       sync.RWMutex
+func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
+       fs.fileSystem.root.Lock()
+       defer fs.fileSystem.root.Unlock()
+       return fs.fileSystem.root.(*dirnode).marshalManifest(prefix)
 }
 
 // filenodePtr is an offset into a file that is (usually) efficient to
@@ -252,20 +212,41 @@ func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
        return
 }
 
+// filenode implements inode.
+type filenode struct {
+       parent   inode
+       fs       FileSystem
+       fileinfo fileinfo
+       segments []segment
+       // number of times `segments` has changed in a
+       // way that might invalidate a filenodePtr
+       repacked int64
+       memsize  int64 // bytes in memSegments
+       sync.RWMutex
+       nullnode
+}
+
 // caller must have lock
 func (fn *filenode) appendSegment(e segment) {
        fn.segments = append(fn.segments, e)
        fn.fileinfo.size += int64(e.Len())
 }
 
+func (fn *filenode) SetParent(p inode, name string) {
+       fn.Lock()
+       defer fn.Unlock()
+       fn.parent = p
+       fn.fileinfo.name = name
+}
+
 func (fn *filenode) Parent() inode {
        fn.RLock()
        defer fn.RUnlock()
        return fn.parent
 }
 
-func (fn *filenode) Readdir() []os.FileInfo {
-       return nil
+func (fn *filenode) FS() FileSystem {
+       return fn.fs
 }
 
 // Read reads file data from a single segment, starting at startPtr,
@@ -302,7 +283,7 @@ func (fn *filenode) Size() int64 {
        return fn.fileinfo.Size()
 }
 
-func (fn *filenode) Stat() os.FileInfo {
+func (fn *filenode) FileInfo() os.FileInfo {
        fn.RLock()
        defer fn.RUnlock()
        return fn.fileinfo
@@ -513,7 +494,7 @@ func (fn *filenode) pruneMemSegments() {
                if !ok || seg.Len() < maxBlockSize {
                        continue
                }
-               locator, _, err := fn.parent.kc.PutB(seg.buf)
+               locator, _, err := fn.FS().PutB(seg.buf)
                if err != nil {
                        // TODO: stall (or return errors from)
                        // subsequent writes until flushing
@@ -522,7 +503,7 @@ func (fn *filenode) pruneMemSegments() {
                }
                fn.memsize -= int64(seg.Len())
                fn.segments[idx] = storedSegment{
-                       kc:      fn.parent.kc,
+                       kc:      fn.FS(),
                        locator: locator,
                        size:    seg.Len(),
                        offset:  0,
@@ -531,132 +512,34 @@ func (fn *filenode) pruneMemSegments() {
        }
 }
 
-// FileSystem returns a CollectionFileSystem for the collection.
-func (c *Collection) FileSystem(client *Client, kc keepClient) (CollectionFileSystem, error) {
-       var modTime time.Time
-       if c.ModifiedAt == nil {
-               modTime = time.Now()
-       } else {
-               modTime = *c.ModifiedAt
-       }
-       fs := &fileSystem{dirnode: dirnode{
-               client: client,
-               kc:     kc,
-               fileinfo: fileinfo{
-                       name:    ".",
-                       mode:    os.ModeDir | 0755,
-                       modTime: modTime,
-               },
-               parent: nil,
-               inodes: make(map[string]inode),
-       }}
-       fs.dirnode.parent = &fs.dirnode
-       if err := fs.dirnode.loadManifest(c.ManifestText); err != nil {
-               return nil, err
-       }
-       return fs, nil
-}
-
-type filehandle struct {
-       inode
-       ptr        filenodePtr
-       append     bool
-       readable   bool
-       writable   bool
-       unreaddirs []os.FileInfo
-}
-
-func (f *filehandle) Read(p []byte) (n int, err error) {
-       if !f.readable {
-               return 0, ErrWriteOnlyMode
-       }
-       f.inode.RLock()
-       defer f.inode.RUnlock()
-       n, f.ptr, err = f.inode.Read(p, f.ptr)
-       return
-}
-
-func (f *filehandle) Seek(off int64, whence int) (pos int64, err error) {
-       size := f.inode.Size()
-       ptr := f.ptr
-       switch whence {
-       case io.SeekStart:
-               ptr.off = off
-       case io.SeekCurrent:
-               ptr.off += off
-       case io.SeekEnd:
-               ptr.off = size + off
-       }
-       if ptr.off < 0 {
-               return f.ptr.off, ErrNegativeOffset
-       }
-       if ptr.off != f.ptr.off {
-               f.ptr = ptr
-               // force filenode to recompute f.ptr fields on next
-               // use
-               f.ptr.repacked = -1
-       }
-       return f.ptr.off, nil
-}
-
-func (f *filehandle) Truncate(size int64) error {
-       return f.inode.Truncate(size)
+type dirnode struct {
+       fs *collectionFileSystem
+       treenode
 }
 
-func (f *filehandle) Write(p []byte) (n int, err error) {
-       if !f.writable {
-               return 0, ErrReadOnlyFile
-       }
-       f.inode.Lock()
-       defer f.inode.Unlock()
-       if fn, ok := f.inode.(*filenode); ok && f.append {
-               f.ptr = filenodePtr{
-                       off:        fn.fileinfo.size,
-                       segmentIdx: len(fn.segments),
-                       segmentOff: 0,
-                       repacked:   fn.repacked,
-               }
-       }
-       n, f.ptr, err = f.inode.Write(p, f.ptr)
-       return
+func (dn *dirnode) FS() FileSystem {
+       return dn.fs
 }
 
-func (f *filehandle) Readdir(count int) ([]os.FileInfo, error) {
-       if !f.inode.Stat().IsDir() {
-               return nil, ErrInvalidOperation
-       }
-       if count <= 0 {
-               return f.inode.Readdir(), nil
-       }
-       if f.unreaddirs == nil {
-               f.unreaddirs = f.inode.Readdir()
-       }
-       if len(f.unreaddirs) == 0 {
-               return nil, io.EOF
-       }
-       if count > len(f.unreaddirs) {
-               count = len(f.unreaddirs)
+func (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       if dn == dn.fs.rootnode() && name == ".arvados#collection" {
+               gn := &getternode{Getter: func() ([]byte, error) {
+                       var coll Collection
+                       var err error
+                       coll.ManifestText, err = dn.fs.MarshalManifest(".")
+                       if err != nil {
+                               return nil, err
+                       }
+                       data, err := json.Marshal(&coll)
+                       if err == nil {
+                               data = append(data, '\n')
+                       }
+                       return data, err
+               }}
+               gn.SetParent(dn, name)
+               return gn, nil
        }
-       ret := f.unreaddirs[:count]
-       f.unreaddirs = f.unreaddirs[count:]
-       return ret, nil
-}
-
-func (f *filehandle) Stat() (os.FileInfo, error) {
-       return f.inode.Stat(), nil
-}
-
-func (f *filehandle) Close() error {
-       return nil
-}
-
-type dirnode struct {
-       fileinfo fileinfo
-       parent   *dirnode
-       client   *Client
-       kc       keepClient
-       inodes   map[string]inode
-       sync.RWMutex
+       return dn.treenode.Child(name, replace)
 }
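
The special ".arvados#collection" name, recognized only at the
collection root, exposes the collection record itself as a generated
JSON file whose ManifestText reflects the current in-memory state. A
sketch of reading it (fs is a CollectionFileSystem; illustrative only):

    f, err := fs.Open(".arvados#collection")
    if err != nil {
        return err
    }
    defer f.Close()
    var coll Collection
    if err := json.NewDecoder(f).Decode(&coll); err != nil {
        return err
    }
    fmt.Printf("manifest:\n%s", coll.ManifestText)
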
 
 // sync flushes in-memory data (for all files in the tree rooted at
@@ -677,7 +560,7 @@ func (dn *dirnode) sync() error {
                for _, sb := range sbs {
                        block = append(block, sb.fn.segments[sb.idx].(*memSegment).buf...)
                }
-               locator, _, err := dn.kc.PutB(block)
+               locator, _, err := dn.fs.PutB(block)
                if err != nil {
                        return err
                }
@@ -685,7 +568,7 @@ func (dn *dirnode) sync() error {
                for _, sb := range sbs {
                        data := sb.fn.segments[sb.idx].(*memSegment).buf
                        sb.fn.segments[sb.idx] = storedSegment{
-                               kc:      dn.kc,
+                               kc:      dn.fs,
                                locator: locator,
                                size:    len(block),
                                offset:  off,
@@ -735,12 +618,6 @@ func (dn *dirnode) sync() error {
        return flush(pending)
 }
 
-func (dn *dirnode) MarshalManifest(prefix string) (string, error) {
-       dn.Lock()
-       defer dn.Unlock()
-       return dn.marshalManifest(prefix)
-}
-
 // caller must have read lock.
 func (dn *dirnode) marshalManifest(prefix string) (string, error) {
        var streamLen int64
@@ -912,7 +789,7 @@ func (dn *dirnode) loadManifest(txt string) error {
                                        blkLen = int(offset + length - pos - int64(blkOff))
                                }
                                fnode.appendSegment(storedSegment{
-                                       kc:      dn.kc,
+                                       kc:      dn.fs,
                                        locator: seg.locator,
                                        size:    seg.size,
                                        offset:  blkOff,
@@ -941,348 +818,65 @@ func (dn *dirnode) loadManifest(txt string) error {
 
 // only safe to call from loadManifest -- no locking
 func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
+       var node inode = dn
        names := strings.Split(path, "/")
        basename := names[len(names)-1]
-       if basename == "" || basename == "." || basename == ".." {
-               err = fmt.Errorf("invalid filename")
+       if !permittedName(basename) {
+               err = fmt.Errorf("invalid file part %q in path %q", basename, path)
                return
        }
        for _, name := range names[:len(names)-1] {
                switch name {
                case "", ".":
+                       continue
                case "..":
-                       dn = dn.parent
-               default:
-                       switch node := dn.inodes[name].(type) {
-                       case nil:
-                               dn = dn.newDirnode(name, 0755, dn.fileinfo.modTime)
-                       case *dirnode:
-                               dn = node
-                       case *filenode:
-                               err = ErrFileExists
-                               return
+                       if node == dn {
+                               // can't be sure parent will be a *dirnode
+                               return nil, ErrInvalidArgument
                        }
-               }
-       }
-       switch node := dn.inodes[basename].(type) {
-       case nil:
-               fn = dn.newFilenode(basename, 0755, dn.fileinfo.modTime)
-       case *filenode:
-               fn = node
-       case *dirnode:
-               err = ErrIsDirectory
-       }
-       return
-}
-
-func (dn *dirnode) mkdir(name string) (*filehandle, error) {
-       return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755)
-}
-
-func (dn *dirnode) Mkdir(name string, perm os.FileMode) error {
-       f, err := dn.mkdir(name)
-       if err == nil {
-               err = f.Close()
-       }
-       return err
-}
-
-func (dn *dirnode) Remove(name string) error {
-       return dn.remove(strings.TrimRight(name, "/"), false)
-}
-
-func (dn *dirnode) RemoveAll(name string) error {
-       err := dn.remove(strings.TrimRight(name, "/"), true)
-       if os.IsNotExist(err) {
-               // "If the path does not exist, RemoveAll returns
-               // nil." (see "os" pkg)
-               err = nil
-       }
-       return err
-}
-
-func (dn *dirnode) remove(name string, recursive bool) error {
-       dirname, name := path.Split(name)
-       if name == "" || name == "." || name == ".." {
-               return ErrInvalidArgument
-       }
-       dn, ok := dn.lookupPath(dirname).(*dirnode)
-       if !ok {
-               return os.ErrNotExist
-       }
-       dn.Lock()
-       defer dn.Unlock()
-       switch node := dn.inodes[name].(type) {
-       case nil:
-               return os.ErrNotExist
-       case *dirnode:
-               node.RLock()
-               defer node.RUnlock()
-               if !recursive && len(node.inodes) > 0 {
-                       return ErrDirectoryNotEmpty
-               }
-       }
-       delete(dn.inodes, name)
-       return nil
-}
-
-func (dn *dirnode) Rename(oldname, newname string) error {
-       olddir, oldname := path.Split(oldname)
-       if oldname == "" || oldname == "." || oldname == ".." {
-               return ErrInvalidArgument
-       }
-       olddirf, err := dn.OpenFile(olddir+".", os.O_RDONLY, 0)
-       if err != nil {
-               return fmt.Errorf("%q: %s", olddir, err)
-       }
-       defer olddirf.Close()
-       newdir, newname := path.Split(newname)
-       if newname == "." || newname == ".." {
-               return ErrInvalidArgument
-       } else if newname == "" {
-               // Rename("a/b", "c/") means Rename("a/b", "c/b")
-               newname = oldname
-       }
-       newdirf, err := dn.OpenFile(newdir+".", os.O_RDONLY, 0)
-       if err != nil {
-               return fmt.Errorf("%q: %s", newdir, err)
-       }
-       defer newdirf.Close()
-
-       // When acquiring locks on multiple nodes, all common
-       // ancestors must be locked first in order to avoid
-       // deadlock. This is assured by locking the path from root to
-       // newdir, then locking the path from root to olddir, skipping
-       // any already-locked nodes.
-       needLock := []sync.Locker{}
-       for _, f := range []*filehandle{olddirf, newdirf} {
-               node := f.inode
-               needLock = append(needLock, node)
-               for node.Parent() != node {
                        node = node.Parent()
-                       needLock = append(needLock, node)
-               }
-       }
-       locked := map[sync.Locker]bool{}
-       for i := len(needLock) - 1; i >= 0; i-- {
-               if n := needLock[i]; !locked[n] {
-                       n.Lock()
-                       defer n.Unlock()
-                       locked[n] = true
+                       continue
                }
-       }
-
-       olddn := olddirf.inode.(*dirnode)
-       newdn := newdirf.inode.(*dirnode)
-       oldinode, ok := olddn.inodes[oldname]
-       if !ok {
-               return os.ErrNotExist
-       }
-       if locked[oldinode] {
-               // oldinode cannot become a descendant of itself.
-               return ErrInvalidArgument
-       }
-       if existing, ok := newdn.inodes[newname]; ok {
-               // overwriting an existing file or dir
-               if dn, ok := existing.(*dirnode); ok {
-                       if !oldinode.Stat().IsDir() {
-                               return ErrIsDirectory
-                       }
-                       dn.RLock()
-                       defer dn.RUnlock()
-                       if len(dn.inodes) > 0 {
-                               return ErrDirectoryNotEmpty
+               node, err = node.Child(name, func(child inode) (inode, error) {
+                       if child == nil {
+                               child, err := node.FS().newNode(name, 0755|os.ModeDir, node.Parent().FileInfo().ModTime())
+                               if err != nil {
+                                       return nil, err
+                               }
+                               child.SetParent(node, name)
+                               return child, nil
+                       } else if !child.IsDir() {
+                               return child, ErrFileExists
+                       } else {
+                               return child, nil
                        }
+               })
+               if err != nil {
+                       return
                }
-       } else {
-               if newdn.inodes == nil {
-                       newdn.inodes = make(map[string]inode)
-               }
-               newdn.fileinfo.size++
        }
-       newdn.inodes[newname] = oldinode
-       switch n := oldinode.(type) {
-       case *dirnode:
-               n.parent = newdn
-       case *filenode:
-               n.parent = newdn
-       default:
-               panic(fmt.Sprintf("bad inode type %T", n))
-       }
-       delete(olddn.inodes, oldname)
-       olddn.fileinfo.size--
-       return nil
-}
-
-func (dn *dirnode) Parent() inode {
-       dn.RLock()
-       defer dn.RUnlock()
-       return dn.parent
-}
-
-func (dn *dirnode) Readdir() (fi []os.FileInfo) {
-       dn.RLock()
-       defer dn.RUnlock()
-       fi = make([]os.FileInfo, 0, len(dn.inodes))
-       for _, inode := range dn.inodes {
-               fi = append(fi, inode.Stat())
-       }
-       return
-}
-
-func (dn *dirnode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
-       return 0, ptr, ErrInvalidOperation
-}
-
-func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
-       return 0, ptr, ErrInvalidOperation
-}
-
-func (dn *dirnode) Size() int64 {
-       dn.RLock()
-       defer dn.RUnlock()
-       return dn.fileinfo.Size()
-}
-
-func (dn *dirnode) Stat() os.FileInfo {
-       dn.RLock()
-       defer dn.RUnlock()
-       return dn.fileinfo
-}
-
-func (dn *dirnode) Truncate(int64) error {
-       return ErrInvalidOperation
-}
-
-// lookupPath returns the inode for the file/directory with the given
-// name (which may contain "/" separators), along with its parent
-// node. If no such file/directory exists, the returned node is nil.
-func (dn *dirnode) lookupPath(path string) (node inode) {
-       node = dn
-       for _, name := range strings.Split(path, "/") {
-               dn, ok := node.(*dirnode)
-               if !ok {
-                       return nil
-               }
-               if name == "." || name == "" {
-                       continue
-               }
-               if name == ".." {
-                       node = node.Parent()
-                       continue
+       _, err = node.Child(basename, func(child inode) (inode, error) {
+               switch child := child.(type) {
+               case nil:
+                       child, err = node.FS().newNode(basename, 0755, node.FileInfo().ModTime())
+                       if err != nil {
+                               return nil, err
+                       }
+                       child.SetParent(node, basename)
+                       fn = child.(*filenode)
+                       return child, nil
+               case *filenode:
+                       fn = child
+                       return child, nil
+               case *dirnode:
+                       return child, ErrIsDirectory
+               default:
+                       return child, ErrInvalidArgument
                }
-               dn.RLock()
-               node = dn.inodes[name]
-               dn.RUnlock()
-       }
+       })
        return
 }
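
createFileAndParents is driven by manifest stream names, so a single
manifest line can implicitly create a whole directory chain. For
illustration (hypothetical values, locator shortened; a manifest line
is: stream name, block locators, then position:length:filename
segments):

    ./dir1/dir2 aaaa...+6 0:3:a.txt 3:3:b.txt

produces

    dir1/
      dir2/
        a.txt   (bytes 0-2 of the block)
        b.txt   (bytes 3-5 of the block)
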
 
-func (dn *dirnode) newDirnode(name string, perm os.FileMode, modTime time.Time) *dirnode {
-       child := &dirnode{
-               parent: dn,
-               client: dn.client,
-               kc:     dn.kc,
-               fileinfo: fileinfo{
-                       name:    name,
-                       mode:    os.ModeDir | perm,
-                       modTime: modTime,
-               },
-       }
-       if dn.inodes == nil {
-               dn.inodes = make(map[string]inode)
-       }
-       dn.inodes[name] = child
-       dn.fileinfo.size++
-       return child
-}
-
-func (dn *dirnode) newFilenode(name string, perm os.FileMode, modTime time.Time) *filenode {
-       child := &filenode{
-               parent: dn,
-               fileinfo: fileinfo{
-                       name:    name,
-                       mode:    perm,
-                       modTime: modTime,
-               },
-       }
-       if dn.inodes == nil {
-               dn.inodes = make(map[string]inode)
-       }
-       dn.inodes[name] = child
-       dn.fileinfo.size++
-       return child
-}
-
-// OpenFile is analogous to os.OpenFile().
-func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*filehandle, error) {
-       if flag&os.O_SYNC != 0 {
-               return nil, ErrSyncNotSupported
-       }
-       dirname, name := path.Split(name)
-       dn, ok := dn.lookupPath(dirname).(*dirnode)
-       if !ok {
-               return nil, os.ErrNotExist
-       }
-       var readable, writable bool
-       switch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) {
-       case os.O_RDWR:
-               readable = true
-               writable = true
-       case os.O_RDONLY:
-               readable = true
-       case os.O_WRONLY:
-               writable = true
-       default:
-               return nil, fmt.Errorf("invalid flags 0x%x", flag)
-       }
-       if !writable {
-               // A directory can be opened via "foo/", "foo/.", or
-               // "foo/..".
-               switch name {
-               case ".", "":
-                       return &filehandle{inode: dn}, nil
-               case "..":
-                       return &filehandle{inode: dn.Parent()}, nil
-               }
-       }
-       createMode := flag&os.O_CREATE != 0
-       if createMode {
-               dn.Lock()
-               defer dn.Unlock()
-       } else {
-               dn.RLock()
-               defer dn.RUnlock()
-       }
-       n, ok := dn.inodes[name]
-       if !ok {
-               if !createMode {
-                       return nil, os.ErrNotExist
-               }
-               if perm.IsDir() {
-                       n = dn.newDirnode(name, 0755, time.Now())
-               } else {
-                       n = dn.newFilenode(name, 0755, time.Now())
-               }
-       } else if flag&os.O_EXCL != 0 {
-               return nil, ErrFileExists
-       } else if flag&os.O_TRUNC != 0 {
-               if !writable {
-                       return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode")
-               } else if fn, ok := n.(*filenode); !ok {
-                       return nil, fmt.Errorf("invalid flag O_TRUNC when opening directory")
-               } else {
-                       fn.Truncate(0)
-               }
-       }
-       return &filehandle{
-               inode:    n,
-               append:   flag&os.O_APPEND != 0,
-               readable: readable,
-               writable: writable,
-       }, nil
-}
-
 type segment interface {
        io.ReaderAt
        Len() int
@@ -1347,7 +941,7 @@ func (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) {
 }
 
 type storedSegment struct {
-       kc      keepClient
+       kc      fsBackend
        locator string
        size    int // size of stored block (also encoded in locator)
        offset  int // position of segment within the stored block
similarity index 99%
rename from sdk/go/arvados/collection_fs_test.go
rename to sdk/go/arvados/fs_collection_test.go
index 2604cefc4e55241a81f3d1a13f228f208c368609..d2f55d0e37d80502919f6385c2a0d457374a26c2 100644 (file)
@@ -474,6 +474,10 @@ func (s *CollectionFSSuite) TestMkdir(c *check.C) {
 }
 
 func (s *CollectionFSSuite) TestConcurrentWriters(c *check.C) {
+       if testing.Short() {
+               c.Skip("slow")
+       }
+
        maxBlockSize = 8
 	defer func() { maxBlockSize = 1 << 26 }()
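
These guards use Go's standard short-mode switch, so the slow cases can
be skipped during quick local iterations:

    go test -short ./sdk/go/arvados
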
 
@@ -693,13 +697,13 @@ func (s *CollectionFSSuite) TestRename(c *check.C) {
                                err = fs.Rename(
                                        fmt.Sprintf("dir%d/file%d/patherror", i, j),
                                        fmt.Sprintf("dir%d/irrelevant", i))
-                               c.Check(err, check.ErrorMatches, `.*does not exist`)
+                               c.Check(err, check.ErrorMatches, `.*not a directory`)
 
                                // newname parent dir is a file
                                err = fs.Rename(
                                        fmt.Sprintf("dir%d/dir%d/file%d", i, j, j),
                                        fmt.Sprintf("dir%d/file%d/patherror", i, inner-j-1))
-                               c.Check(err, check.ErrorMatches, `.*does not exist`)
+                               c.Check(err, check.ErrorMatches, `.*not a directory`)
                        }(i, j)
                }
        }
@@ -1026,6 +1030,10 @@ var _ = check.Suite(&CollectionFSUnitSuite{})
 
 // expect ~2 seconds to load a manifest with 256K files
 func (s *CollectionFSUnitSuite) TestLargeManifest(c *check.C) {
+       if testing.Short() {
+               c.Skip("slow")
+       }
+
        const (
                dirCount  = 512
                fileCount = 512
diff --git a/sdk/go/arvados/fs_deferred.go b/sdk/go/arvados/fs_deferred.go
new file mode 100644 (file)
index 0000000..a84f64f
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "log"
+       "os"
+       "sync"
+       "time"
+)
+
+func deferredCollectionFS(fs FileSystem, parent inode, coll Collection) inode {
+       var modTime time.Time
+       if coll.ModifiedAt != nil {
+               modTime = *coll.ModifiedAt
+       } else {
+               modTime = time.Now()
+       }
+       placeholder := &treenode{
+               fs:     fs,
+               parent: parent,
+               inodes: nil,
+               fileinfo: fileinfo{
+                       name:    coll.Name,
+                       modTime: modTime,
+                       mode:    0755 | os.ModeDir,
+               },
+       }
+       return &deferrednode{wrapped: placeholder, create: func() inode {
+               err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
+               if err != nil {
+                       log.Printf("BUG: unhandled error: %s", err)
+                       return placeholder
+               }
+               cfs, err := coll.FileSystem(fs, fs)
+               if err != nil {
+                       log.Printf("BUG: unhandled error: %s", err)
+                       return placeholder
+               }
+               root := cfs.rootnode()
+               root.SetParent(parent, coll.Name)
+               return root
+       }}
+}
+
+// A deferrednode wraps an inode that's expensive to build. Initially,
+// it responds to basic directory functions by proxying to the given
+// placeholder. If a caller uses a read/write/lock operation,
+// deferrednode calls the create() func to create the real inode, and
+// proxies to the real inode from then on.
+//
+// In practice, this means a deferrednode's parent's directory listing
+// can be generated using only the placeholder, instead of waiting for
+// create().
+type deferrednode struct {
+       wrapped inode
+       create  func() inode
+       mtx     sync.Mutex
+       created bool
+}
+
+func (dn *deferrednode) realinode() inode {
+       dn.mtx.Lock()
+       defer dn.mtx.Unlock()
+       if !dn.created {
+               dn.wrapped = dn.create()
+               dn.created = true
+       }
+       return dn.wrapped
+}
+
+func (dn *deferrednode) currentinode() inode {
+       dn.mtx.Lock()
+       defer dn.mtx.Unlock()
+       return dn.wrapped
+}
+
+func (dn *deferrednode) Read(p []byte, pos filenodePtr) (int, filenodePtr, error) {
+       return dn.realinode().Read(p, pos)
+}
+
+func (dn *deferrednode) Write(p []byte, pos filenodePtr) (int, filenodePtr, error) {
+       return dn.realinode().Write(p, pos)
+}
+
+func (dn *deferrednode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return dn.realinode().Child(name, replace)
+}
+
+func (dn *deferrednode) Truncate(size int64) error       { return dn.realinode().Truncate(size) }
+func (dn *deferrednode) SetParent(p inode, name string)  { dn.realinode().SetParent(p, name) }
+func (dn *deferrednode) IsDir() bool                     { return dn.currentinode().IsDir() }
+func (dn *deferrednode) Readdir() ([]os.FileInfo, error) { return dn.realinode().Readdir() }
+func (dn *deferrednode) Size() int64                     { return dn.currentinode().Size() }
+func (dn *deferrednode) FileInfo() os.FileInfo           { return dn.currentinode().FileInfo() }
+func (dn *deferrednode) Lock()                           { dn.realinode().Lock() }
+func (dn *deferrednode) Unlock()                         { dn.realinode().Unlock() }
+func (dn *deferrednode) RLock()                          { dn.realinode().RLock() }
+func (dn *deferrednode) RUnlock()                        { dn.realinode().RUnlock() }
+func (dn *deferrednode) FS() FileSystem                  { return dn.currentinode().FS() }
+func (dn *deferrednode) Parent() inode                   { return dn.currentinode().Parent() }
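
A package-internal sketch of the pattern (placeholder and
buildRealInode are hypothetical; sketch only, not part of the diff).
The placeholder serves cheap metadata calls; create() runs at most
once, on the first call that needs real contents:

    node := &deferrednode{
        wrapped: placeholder, // answers FileInfo/Size/IsDir cheaply
        create: func() inode {
            return buildRealInode() // expensive, e.g. an API fetch
        },
    }
    _ = node.FileInfo()  // served by the placeholder; create() not called
    node.Child("x", nil) // first expensive call: create() runs, and the
                         // real inode serves this and all later calls
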
diff --git a/sdk/go/arvados/fs_filehandle.go b/sdk/go/arvados/fs_filehandle.go
new file mode 100644 (file)
index 0000000..9af8d0a
--- /dev/null
@@ -0,0 +1,112 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "io"
+       "os"
+)
+
+type filehandle struct {
+       inode
+       ptr        filenodePtr
+       append     bool
+       readable   bool
+       writable   bool
+       unreaddirs []os.FileInfo
+}
+
+func (f *filehandle) Read(p []byte) (n int, err error) {
+       if !f.readable {
+               return 0, ErrWriteOnlyMode
+       }
+       f.inode.RLock()
+       defer f.inode.RUnlock()
+       n, f.ptr, err = f.inode.Read(p, f.ptr)
+       return
+}
+
+func (f *filehandle) Seek(off int64, whence int) (pos int64, err error) {
+       size := f.inode.Size()
+       ptr := f.ptr
+       switch whence {
+       case io.SeekStart:
+               ptr.off = off
+       case io.SeekCurrent:
+               ptr.off += off
+       case io.SeekEnd:
+               ptr.off = size + off
+       }
+       if ptr.off < 0 {
+               return f.ptr.off, ErrNegativeOffset
+       }
+       if ptr.off != f.ptr.off {
+               f.ptr = ptr
+               // force filenode to recompute f.ptr fields on next
+               // use
+               f.ptr.repacked = -1
+       }
+       return f.ptr.off, nil
+}
+
+func (f *filehandle) Truncate(size int64) error {
+       return f.inode.Truncate(size)
+}
+
+func (f *filehandle) Write(p []byte) (n int, err error) {
+       if !f.writable {
+               return 0, ErrReadOnlyFile
+       }
+       f.inode.Lock()
+       defer f.inode.Unlock()
+       if fn, ok := f.inode.(*filenode); ok && f.append {
+               f.ptr = filenodePtr{
+                       off:        fn.fileinfo.size,
+                       segmentIdx: len(fn.segments),
+                       segmentOff: 0,
+                       repacked:   fn.repacked,
+               }
+       }
+       n, f.ptr, err = f.inode.Write(p, f.ptr)
+       return
+}
+
+func (f *filehandle) Readdir(count int) ([]os.FileInfo, error) {
+       if !f.inode.IsDir() {
+               return nil, ErrInvalidOperation
+       }
+       if count <= 0 {
+               return f.inode.Readdir()
+       }
+       if f.unreaddirs == nil {
+               var err error
+               f.unreaddirs, err = f.inode.Readdir()
+               if err != nil {
+                       return nil, err
+               }
+       }
+       if len(f.unreaddirs) == 0 {
+               return nil, io.EOF
+       }
+       if count > len(f.unreaddirs) {
+               count = len(f.unreaddirs)
+       }
+       ret := f.unreaddirs[:count]
+       f.unreaddirs = f.unreaddirs[count:]
+       return ret, nil
+}
+
+func (f *filehandle) Stat() (os.FileInfo, error) {
+       return f.inode.FileInfo(), nil
+}
+
+func (f *filehandle) Close() error {
+       return nil
+}
+
+func (f *filehandle) Sync() error {
+       // Sync the containing filesystem.
+       return f.FS().Sync()
+}
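
Readdir follows the os.File contract: a count <= 0 returns the whole
listing at once, while a positive count pages through it and finishes
with io.EOF. A sketch (fs is any FileSystem from this package;
illustrative only):

    d, err := fs.Open("some/dir")
    if err != nil {
        return err
    }
    defer d.Close()
    for {
        fis, err := d.Readdir(100) // at most 100 entries per call
        for _, fi := range fis {
            fmt.Println(fi.Name())
        }
        if err == io.EOF {
            break // listing exhausted
        } else if err != nil {
            return err
        }
    }
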
diff --git a/sdk/go/arvados/fs_getternode.go b/sdk/go/arvados/fs_getternode.go
new file mode 100644 (file)
index 0000000..966fe9d
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "os"
+       "time"
+)
+
+// A getternode is a read-only virtual file that returns whatever
+// data is returned by the supplied function.
+type getternode struct {
+       Getter func() ([]byte, error)
+
+       treenode
+       data *bytes.Reader
+}
+
+func (*getternode) IsDir() bool {
+       return false
+}
+
+func (*getternode) Child(string, func(inode) (inode, error)) (inode, error) {
+       return nil, ErrInvalidArgument
+}
+
+func (gn *getternode) get() error {
+       if gn.data != nil {
+               return nil
+       }
+       data, err := gn.Getter()
+       if err != nil {
+               return err
+       }
+       gn.data = bytes.NewReader(data)
+       return nil
+}
+
+func (gn *getternode) Size() int64 {
+       return gn.FileInfo().Size()
+}
+
+func (gn *getternode) FileInfo() os.FileInfo {
+       gn.Lock()
+       defer gn.Unlock()
+       var size int64
+       if gn.get() == nil {
+               size = gn.data.Size()
+       }
+       return fileinfo{
+               modTime: time.Now(),
+               mode:    0444,
+               size:    size,
+       }
+}
+
+func (gn *getternode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
+       if err := gn.get(); err != nil {
+               return 0, ptr, err
+       }
+       n, err := gn.data.ReadAt(p, ptr.off)
+       return n, filenodePtr{off: ptr.off + int64(n)}, err
+}
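
A package-internal sketch of supplying a getternode, in the spirit of
the ".arvados#collection" entry in fs_collection.go (parentDir is
hypothetical; sketch only). The Getter runs on first read or stat, and
its output is cached for the node's lifetime:

    gn := &getternode{Getter: func() ([]byte, error) {
        return []byte("generated on demand\n"), nil
    }}
    gn.SetParent(parentDir, "status.txt")
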
diff --git a/sdk/go/arvados/fs_lookup.go b/sdk/go/arvados/fs_lookup.go
new file mode 100644 (file)
index 0000000..42322a1
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+       "sync"
+       "time"
+)
+
+// lookupnode is a caching tree node that is initially empty and calls
+// loadOne and loadAll to load/update child nodes as needed.
+//
+// See (*customFileSystem)MountUsers for example usage.
+type lookupnode struct {
+       inode
+       loadOne func(parent inode, name string) (inode, error)
+       loadAll func(parent inode) ([]inode, error)
+       stale   func(time.Time) bool
+
+       // internal fields
+       staleLock sync.Mutex
+       staleAll  time.Time
+       staleOne  map[string]time.Time
+}
+
+func (ln *lookupnode) Readdir() ([]os.FileInfo, error) {
+       ln.staleLock.Lock()
+       defer ln.staleLock.Unlock()
+       checkTime := time.Now()
+       if ln.stale(ln.staleAll) {
+               all, err := ln.loadAll(ln)
+               if err != nil {
+                       return nil, err
+               }
+               for _, child := range all {
+                       _, err = ln.inode.Child(child.FileInfo().Name(), func(inode) (inode, error) {
+                               return child, nil
+                       })
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+               ln.staleAll = checkTime
+               // No value in ln.staleOne can make a difference to an
+               // "entry is stale?" test now, because no value is
+               // newer than ln.staleAll. Reclaim memory.
+               ln.staleOne = nil
+       }
+       return ln.inode.Readdir()
+}
+
+func (ln *lookupnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       ln.staleLock.Lock()
+       defer ln.staleLock.Unlock()
+       checkTime := time.Now()
+       if ln.stale(ln.staleAll) && ln.stale(ln.staleOne[name]) {
+               _, err := ln.inode.Child(name, func(inode) (inode, error) {
+                       return ln.loadOne(ln, name)
+               })
+               if err != nil {
+                       return nil, err
+               }
+               if ln.staleOne == nil {
+                       ln.staleOne = map[string]time.Time{name: checkTime}
+               } else {
+                       ln.staleOne[name] = checkTime
+               }
+       }
+       return ln.inode.Child(name, replace)
+}
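
A sketch of wiring a lookupnode, mirroring MountUsers in fs_site.go
below (fetchChildByName, fetchAllChildren, and emptyCacheDir are
hypothetical; not part of the diff):

    ln := &lookupnode{
        stale:   func(t time.Time) bool { return time.Since(t) > time.Minute },
        loadOne: func(parent inode, name string) (inode, error) {
            return fetchChildByName(parent, name)
        },
        loadAll: func(parent inode) ([]inode, error) {
            return fetchAllChildren(parent)
        },
        inode: emptyCacheDir, // a *treenode that caches loaded children
    }
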
diff --git a/sdk/go/arvados/fs_project.go b/sdk/go/arvados/fs_project.go
new file mode 100644 (file)
index 0000000..9299551
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "log"
+       "os"
+       "strings"
+)
+
+func (fs *customFileSystem) defaultUUID(uuid string) (string, error) {
+       if uuid != "" {
+               return uuid, nil
+       }
+       var resp User
+       err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users/current", nil, nil)
+       if err != nil {
+               return "", err
+       }
+       return resp.UUID, nil
+}
+
+// projectsLoadOne loads only the named child, if it exists.
+func (fs *customFileSystem) projectsLoadOne(parent inode, uuid, name string) (inode, error) {
+       uuid, err := fs.defaultUUID(uuid)
+       if err != nil {
+               return nil, err
+       }
+
+       var contents CollectionList
+       err = fs.RequestAndDecode(&contents, "GET", "arvados/v1/groups/"+uuid+"/contents", nil, ResourceListParams{
+               Count: "none",
+               Filters: []Filter{
+                       {"name", "=", name},
+                       {"uuid", "is_a", []string{"arvados#collection", "arvados#group"}},
+                       {"groups.group_class", "=", "project"},
+               },
+       })
+       if err != nil {
+               return nil, err
+       }
+       if len(contents.Items) == 0 {
+               return nil, os.ErrNotExist
+       }
+       coll := contents.Items[0]
+
+       if strings.Contains(coll.UUID, "-j7d0g-") {
+               // Group item was loaded into a Collection var -- but
+               // we only need the Name and UUID anyway, so it's OK.
+               return fs.newProjectNode(parent, coll.Name, coll.UUID), nil
+       } else if strings.Contains(coll.UUID, "-4zz18-") {
+               return deferredCollectionFS(fs, parent, coll), nil
+       } else {
+               log.Printf("projectnode: unrecognized UUID in response: %q", coll.UUID)
+               return nil, ErrInvalidArgument
+       }
+}
+
+func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode, error) {
+       uuid, err := fs.defaultUUID(uuid)
+       if err != nil {
+               return nil, err
+       }
+
+       var inodes []inode
+
+       // Note: the "filters" slice's backing array might be reused
+       // by append(filters,...) below. This isn't goroutine safe,
+       // but all accesses are in the same goroutine, so it's OK.
+       filters := []Filter{{"owner_uuid", "=", uuid}}
+       params := ResourceListParams{
+               Count:   "none",
+               Filters: filters,
+               Order:   "uuid",
+       }
+       for {
+               var resp CollectionList
+               err = fs.RequestAndDecode(&resp, "GET", "arvados/v1/collections", nil, params)
+               if err != nil {
+                       return nil, err
+               }
+               if len(resp.Items) == 0 {
+                       break
+               }
+               for _, i := range resp.Items {
+                       coll := i
+                       if !permittedName(coll.Name) {
+                               continue
+                       }
+                       inodes = append(inodes, deferredCollectionFS(fs, parent, coll))
+               }
+               params.Filters = append(filters, Filter{"uuid", ">", resp.Items[len(resp.Items)-1].UUID})
+       }
+
+       filters = append(filters, Filter{"group_class", "=", "project"})
+       params.Filters = filters
+       for {
+               var resp GroupList
+               err = fs.RequestAndDecode(&resp, "GET", "arvados/v1/groups", nil, params)
+               if err != nil {
+                       return nil, err
+               }
+               if len(resp.Items) == 0 {
+                       break
+               }
+               for _, group := range resp.Items {
+                       if !permittedName(group.Name) {
+                               continue
+                       }
+                       inodes = append(inodes, fs.newProjectNode(parent, group.Name, group.UUID))
+               }
+               params.Filters = append(filters, Filter{"uuid", ">", resp.Items[len(resp.Items)-1].UUID})
+       }
+       return inodes, nil
+}
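
Both loops above page with Count: "none" plus a "uuid > last-seen"
filter (keyset pagination) rather than offsets, so listings stay
consistent even while rows are added or removed. Reduced to its core,
the pattern is:

    filters := []Filter{{"owner_uuid", "=", uuid}}
    params := ResourceListParams{Count: "none", Filters: filters, Order: "uuid"}
    for {
        var page CollectionList
        if err := fs.RequestAndDecode(&page, "GET", "arvados/v1/collections", nil, params); err != nil {
            return nil, err
        }
        if len(page.Items) == 0 {
            break
        }
        // ...consume page.Items...
        params.Filters = append(filters, Filter{"uuid", ">", page.Items[len(page.Items)-1].UUID})
    }
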
diff --git a/sdk/go/arvados/fs_project_test.go b/sdk/go/arvados/fs_project_test.go
new file mode 100644 (file)
index 0000000..1a06ce1
--- /dev/null
@@ -0,0 +1,201 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "encoding/json"
+       "io"
+       "os"
+       "path/filepath"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+type spiedRequest struct {
+       method string
+       path   string
+       params map[string]interface{}
+}
+
+type spyingClient struct {
+       *Client
+       calls []spiedRequest
+}
+
+func (sc *spyingClient) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {
+       var paramsCopy map[string]interface{}
+       var buf bytes.Buffer
+       json.NewEncoder(&buf).Encode(params)
+       json.NewDecoder(&buf).Decode(&paramsCopy)
+       sc.calls = append(sc.calls, spiedRequest{
+               method: method,
+               path:   path,
+               params: paramsCopy,
+       })
+       return sc.Client.RequestAndDecode(dst, method, path, body, params)
+}
+
+func (s *SiteFSSuite) TestCurrentUserHome(c *check.C) {
+       s.fs.MountProject("home", "")
+       s.testHomeProject(c, "/home")
+}
+
+func (s *SiteFSSuite) TestUsersDir(c *check.C) {
+       s.testHomeProject(c, "/users/active")
+}
+
+func (s *SiteFSSuite) testHomeProject(c *check.C, path string) {
+       f, err := s.fs.Open(path)
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Check(len(fis), check.Not(check.Equals), 0)
+
+       ok := false
+       for _, fi := range fis {
+               c.Check(fi.Name(), check.Not(check.Equals), "")
+               if fi.Name() == "A Project" {
+                       ok = true
+               }
+       }
+       c.Check(ok, check.Equals, true)
+
+       f, err = s.fs.Open(path + "/A Project/..")
+       c.Assert(err, check.IsNil)
+       fi, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+       _, basename := filepath.Split(path)
+       c.Check(fi.Name(), check.Equals, basename)
+
+       f, err = s.fs.Open(path + "/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       fi, err = f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+
+       for _, nx := range []string{
+               path + "/Unrestricted public data",
+               path + "/Unrestricted public data/does not exist",
+               path + "/A Project/does not exist",
+       } {
+               c.Log(nx)
+               f, err = s.fs.Open(nx)
+               c.Check(err, check.NotNil)
+               c.Check(os.IsNotExist(err), check.Equals, true)
+       }
+}
+
+func (s *SiteFSSuite) TestProjectReaddirAfterLoadOne(c *check.C) {
+       f, err := s.fs.Open("/users/active/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       f, err = s.fs.Open("/users/active/A Project/Project does not exist")
+       c.Assert(err, check.NotNil)
+       f, err = s.fs.Open("/users/active/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       f, err = s.fs.Open("/users/active/A Project")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       fis, err := f.Readdir(-1)
+       c.Assert(err, check.IsNil)
+       c.Logf("%#v", fis)
+       var foundSubproject, foundCollection bool
+       for _, fi := range fis {
+               switch fi.Name() {
+               case "A Subproject":
+                       foundSubproject = true
+               case "collection_to_move_around":
+                       foundCollection = true
+               }
+       }
+       c.Check(foundSubproject, check.Equals, true)
+       c.Check(foundCollection, check.Equals, true)
+}
+
+func (s *SiteFSSuite) TestSlashInName(c *check.C) {
+       badCollection := Collection{
+               Name:      "bad/collection",
+               OwnerUUID: arvadostest.AProjectUUID,
+       }
+       err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", s.client.UpdateBody(&badCollection), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+badCollection.UUID, nil, nil)
+
+       badProject := Group{
+               Name:       "bad/project",
+               GroupClass: "project",
+               OwnerUUID:  arvadostest.AProjectUUID,
+       }
+       err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", s.client.UpdateBody(&badProject), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/groups/"+badProject.UUID, nil, nil)
+
+       dir, err := s.fs.Open("/users/active/A Project")
+       c.Assert(err, check.IsNil)
+       fis, err := dir.Readdir(-1)
+       c.Check(err, check.IsNil)
+       for _, fi := range fis {
+               c.Logf("fi.Name() == %q", fi.Name())
+               c.Check(strings.Contains(fi.Name(), "/"), check.Equals, false)
+       }
+}
+
+func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
+       s.fs.MountProject("home", "")
+
+       project, err := s.fs.OpenFile("/home/A Project", 0, 0)
+       c.Assert(err, check.IsNil)
+
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.NotNil)
+
+       oob := Collection{
+               Name:      "oob",
+               OwnerUUID: arvadostest.AProjectUUID,
+       }
+       err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", s.client.UpdateBody(&oob), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       f, err := s.fs.Open("/home/A Project/oob")
+       c.Assert(err, check.IsNil)
+       fi, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+       f.Close()
+
+       wf, err := s.fs.OpenFile("/home/A Project/oob/test.txt", os.O_CREATE|os.O_RDWR, 0700)
+       c.Assert(err, check.IsNil)
+       _, err = wf.Write([]byte("hello oob\n"))
+       c.Check(err, check.IsNil)
+       err = wf.Close()
+       c.Check(err, check.IsNil)
+
+       // Delete test.txt behind s.fs's back by updating the
+       // collection record with the old (empty) ManifestText.
+       err = s.client.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+oob.UUID, s.client.UpdateBody(&oob), nil)
+       c.Assert(err, check.IsNil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       _, err = s.fs.Open("/home/A Project/oob/test.txt")
+       c.Check(err, check.NotNil)
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.IsNil)
+
+       err = s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
+       c.Assert(err, check.IsNil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.NotNil)
+}
diff --git a/sdk/go/arvados/fs_site.go b/sdk/go/arvados/fs_site.go
new file mode 100644 (file)
index 0000000..82114e2
--- /dev/null
@@ -0,0 +1,200 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+       "strings"
+       "sync"
+       "time"
+)
+
+type CustomFileSystem interface {
+       FileSystem
+       MountByID(mount string)
+       MountProject(mount, uuid string)
+       MountUsers(mount string)
+}
+
+type customFileSystem struct {
+       fileSystem
+       root *vdirnode
+
+       staleThreshold time.Time
+       staleLock      sync.Mutex
+}
+
+func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
+       root := &vdirnode{}
+       fs := &customFileSystem{
+               root: root,
+               fileSystem: fileSystem{
+                       fsBackend: keepBackend{apiClient: c, keepClient: kc},
+                       root:      root,
+               },
+       }
+       root.inode = &treenode{
+               fs:     fs,
+               parent: root,
+               fileinfo: fileinfo{
+                       name:    "/",
+                       mode:    os.ModeDir | 0755,
+                       modTime: time.Now(),
+               },
+               inodes: make(map[string]inode),
+       }
+       return fs
+}
+
+func (fs *customFileSystem) MountByID(mount string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return &vdirnode{
+                       inode: &treenode{
+                               fs:     fs,
+                               parent: fs.root,
+                               inodes: make(map[string]inode),
+                               fileinfo: fileinfo{
+                                       name:    mount,
+                                       modTime: time.Now(),
+                                       mode:    0755 | os.ModeDir,
+                               },
+                       },
+                       create: fs.mountByID,
+               }, nil
+       })
+}
+
+func (fs *customFileSystem) MountProject(mount, uuid string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return fs.newProjectNode(fs.root, mount, uuid), nil
+       })
+}
+
+func (fs *customFileSystem) MountUsers(mount string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return &lookupnode{
+                       stale:   fs.Stale,
+                       loadOne: fs.usersLoadOne,
+                       loadAll: fs.usersLoadAll,
+                       inode: &treenode{
+                               fs:     fs,
+                               parent: fs.root,
+                               inodes: make(map[string]inode),
+                               fileinfo: fileinfo{
+                                       name:    mount,
+                                       modTime: time.Now(),
+                                       mode:    0755 | os.ModeDir,
+                               },
+                       },
+               }, nil
+       })
+}
+
+// SiteFileSystem returns a FileSystem that maps collections and other
+// Arvados objects onto a filesystem layout.
+//
+// This is experimental: the filesystem layout is not stable, and
+// there are significant known bugs and shortcomings. For example,
+// writes are not persisted until Sync() is called.
+func (c *Client) SiteFileSystem(kc keepClient) CustomFileSystem {
+       fs := c.CustomFileSystem(kc)
+       fs.MountByID("by_id")
+       fs.MountUsers("users")
+       return fs
+}
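+
+// A minimal usage sketch (hypothetical path; assumes a configured
+// *Client c and keepClient kc; error handling elided):
+//
+//     fs := c.SiteFileSystem(kc)
+//     f, _ := fs.Open("/users/alice/A Project/data.txt")
+//     defer f.Close()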
+
+func (fs *customFileSystem) Sync() error {
+       fs.staleLock.Lock()
+       defer fs.staleLock.Unlock()
+       fs.staleThreshold = time.Now()
+       return nil
+}
+
+// Stale returns true if information obtained at time t should be
+// considered stale.
+func (fs *customFileSystem) Stale(t time.Time) bool {
+       fs.staleLock.Lock()
+       defer fs.staleLock.Unlock()
+       return !fs.staleThreshold.Before(t)
+}
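+
+// Thus Sync() marks everything loaded so far as stale: after a call
+// to Sync() at time T, data loaded at any time t <= T reports
+// Stale(t) == true and is reloaded on its next access.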
+
+func (fs *customFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {
+       return nil, ErrInvalidOperation
+}
+
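+// mountByID treats an id containing "-4zz18-", or a bare portable
+// data hash, as a collection, and an id containing "-j7d0g-" as a
+// project (group); anything else yields nil (ENOENT).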
+func (fs *customFileSystem) mountByID(parent inode, id string) inode {
+       if strings.Contains(id, "-4zz18-") || pdhRegexp.MatchString(id) {
+               return fs.mountCollection(parent, id)
+       } else if strings.Contains(id, "-j7d0g-") {
+               return fs.newProjectNode(fs.root, id, id)
+       } else {
+               return nil
+       }
+}
+
+func (fs *customFileSystem) mountCollection(parent inode, id string) inode {
+       var coll Collection
+       err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, nil)
+       if err != nil {
+               return nil
+       }
+       cfs, err := coll.FileSystem(fs, fs)
+       if err != nil {
+               return nil
+       }
+       root := cfs.rootnode()
+       root.SetParent(parent, id)
+       return root
+}
+
+func (fs *customFileSystem) newProjectNode(root inode, name, uuid string) inode {
+       return &lookupnode{
+               stale:   fs.Stale,
+               loadOne: func(parent inode, name string) (inode, error) { return fs.projectsLoadOne(parent, uuid, name) },
+               loadAll: func(parent inode) ([]inode, error) { return fs.projectsLoadAll(parent, uuid) },
+               inode: &treenode{
+                       fs:     fs,
+                       parent: root,
+                       inodes: make(map[string]inode),
+                       fileinfo: fileinfo{
+                               name:    name,
+                               modTime: time.Now(),
+                               mode:    0755 | os.ModeDir,
+                       },
+               },
+       }
+}
+
+// vdirnode wraps an inode, rejecting any requests to add or replace
+// children, and calls a create() func when a nonexistent child is
+// looked up.
+//
+// create() can return either a new node, which is added to the
+// treenode, or nil for ENOENT.
+type vdirnode struct {
+       inode
+       create func(parent inode, name string) inode
+}
+
+func (vn *vdirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return vn.inode.Child(name, func(existing inode) (inode, error) {
+               if existing == nil && vn.create != nil {
+                       existing = vn.create(vn, name)
+                       if existing != nil {
+                               existing.SetParent(vn, name)
+                               vn.inode.(*treenode).fileinfo.modTime = time.Now()
+                       }
+               }
+               if replace == nil {
+                       return existing, nil
+               } else if tryRepl, err := replace(existing); err != nil {
+                       return existing, err
+               } else if tryRepl != existing {
+                       return existing, ErrInvalidArgument
+               } else {
+                       return existing, nil
+               }
+       })
+}
diff --git a/sdk/go/arvados/fs_site_test.go b/sdk/go/arvados/fs_site_test.go
new file mode 100644 (file)
index 0000000..80028dc
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "net/http"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&SiteFSSuite{})
+
+type SiteFSSuite struct {
+       client *Client
+       fs     CustomFileSystem
+       kc     keepClient
+}
+
+func (s *SiteFSSuite) SetUpTest(c *check.C) {
+       s.client = &Client{
+               APIHost:   os.Getenv("ARVADOS_API_HOST"),
+               AuthToken: arvadostest.ActiveToken,
+               Insecure:  true,
+       }
+       s.kc = &keepClientStub{
+               blocks: map[string][]byte{
+                       "3858f62230ac3c915f300c664312c63f": []byte("foobar"),
+               }}
+       s.fs = s.client.SiteFileSystem(s.kc)
+}
+
+func (s *SiteFSSuite) TestHttpFileSystemInterface(c *check.C) {
+       _, ok := s.fs.(http.FileSystem)
+       c.Check(ok, check.Equals, true)
+}
+
+func (s *SiteFSSuite) TestByIDEmpty(c *check.C) {
+       f, err := s.fs.Open("/by_id")
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 0)
+}
+
+func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
+       f, err := s.fs.Open("/by_id")
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 0)
+
+       err = s.fs.Mkdir("/by_id/"+arvadostest.FooCollection, 0755)
+       c.Check(err, check.Equals, os.ErrExist)
+
+       f, err = s.fs.Open("/by_id/" + arvadostest.NonexistentCollection)
+       c.Assert(err, check.Equals, os.ErrNotExist)
+
+       for _, path := range []string{
+               arvadostest.FooCollection,
+               arvadostest.FooPdh,
+               arvadostest.AProjectUUID + "/" + arvadostest.FooCollectionName,
+       } {
+               f, err = s.fs.Open("/by_id/" + path)
+               c.Assert(err, check.IsNil)
+               fis, err = f.Readdir(-1)
+               var names []string
+               for _, fi := range fis {
+                       names = append(names, fi.Name())
+               }
+               c.Check(names, check.DeepEquals, []string{"foo"})
+       }
+
+       f, err = s.fs.Open("/by_id/" + arvadostest.AProjectUUID + "/A Subproject/baz_file")
+       c.Assert(err, check.IsNil)
+       fis, err = f.Readdir(-1)
+       var names []string
+       for _, fi := range fis {
+               names = append(names, fi.Name())
+       }
+       c.Check(names, check.DeepEquals, []string{"baz"})
+
+       _, err = s.fs.OpenFile("/by_id/"+arvadostest.NonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
+       c.Check(err, check.Equals, ErrInvalidOperation)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection, "/by_id/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       _, err = s.fs.Stat("/by_id/beep")
+       c.Check(err, check.Equals, os.ErrNotExist)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/"+arvadostest.FooCollection+"/bar")
+       c.Check(err, check.IsNil)
+
+       err = s.fs.Rename("/by_id", "/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+}
diff --git a/sdk/go/arvados/fs_users.go b/sdk/go/arvados/fs_users.go
new file mode 100644 (file)
index 0000000..00f7036
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+)
+
+func (fs *customFileSystem) usersLoadOne(parent inode, name string) (inode, error) {
+       var resp UserList
+       err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users", nil, ResourceListParams{
+               Count:   "none",
+               Filters: []Filter{{"username", "=", name}},
+       })
+       if err != nil {
+               return nil, err
+       } else if len(resp.Items) == 0 {
+               return nil, os.ErrNotExist
+       }
+       user := resp.Items[0]
+       return fs.newProjectNode(parent, user.Username, user.UUID), nil
+}
+
+func (fs *customFileSystem) usersLoadAll(parent inode) ([]inode, error) {
+       params := ResourceListParams{
+               Count: "none",
+               Order: "uuid",
+       }
+       var inodes []inode
+       for {
+               var resp UserList
+               err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users", nil, params)
+               if err != nil {
+                       return nil, err
+               } else if len(resp.Items) == 0 {
+                       return inodes, nil
+               }
+               for _, user := range resp.Items {
+                       if user.Username == "" {
+                               continue
+                       }
+                       inodes = append(inodes, fs.newProjectNode(parent, user.Username, user.UUID))
+               }
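+               // Keyset pagination: request the next page by
+               // filtering on uuid > the last uuid seen, rather than
+               // using an offset.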
+               params.Filters = []Filter{{"uuid", ">", resp.Items[len(resp.Items)-1].UUID}}
+       }
+}
index b00809f9193a89ee8c5e91db3eeabd1a5a54ee28..6b5718a6c740e69b0fd5c4fc8f19106c7dddef11 100644 (file)
@@ -6,9 +6,10 @@ package arvados
 
 // Group is an arvados#group record
 type Group struct {
-       UUID      string `json:"uuid,omitempty"`
-       Name      string `json:"name,omitempty"`
-       OwnerUUID string `json:"owner_uuid,omitempty"`
+       UUID       string `json:"uuid,omitempty"`
+       Name       string `json:"name,omitempty"`
+       OwnerUUID  string `json:"owner_uuid,omitempty"`
+       GroupClass string `json:"group_class"`
 }
 
 // GroupList is an arvados#groupList resource.
@@ -18,3 +19,7 @@ type GroupList struct {
        Offset         int     `json:"offset"`
        Limit          int     `json:"limit"`
 }
+
+func (g Group) resourceName() string {
+       return "group"
+}
index 9797440205cf3d8396d14ec389e380b2260486b2..0c866354aa9b1e3a34833f15018b66613d40bdb4 100644 (file)
@@ -127,6 +127,13 @@ func (s *KeepService) index(c *Client, url string) ([]KeepServiceIndexEntry, err
        scanner := bufio.NewScanner(resp.Body)
        sawEOF := false
        for scanner.Scan() {
+               if scanner.Err() != nil {
+                       // If we encounter a read error (timeout,
+                       // connection failure), stop now and return it
+                       // below, so it doesn't get masked by the
+                       // ensuing "badly formatted response" error.
+                       break
+               }
                if sawEOF {
                        return nil, fmt.Errorf("Index response contained non-terminal blank line")
                }
diff --git a/sdk/go/arvados/keep_service_test.go b/sdk/go/arvados/keep_service_test.go
new file mode 100644 (file)
index 0000000..8715f74
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "net/http"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&KeepServiceSuite{})
+
+type KeepServiceSuite struct{}
+
+func (*KeepServiceSuite) TestIndexTimeout(c *check.C) {
+       client := &Client{
+               Client: &http.Client{
+                       Transport: &timeoutTransport{response: []byte("\n")},
+               },
+               APIHost:   "zzzzz.arvadosapi.com",
+               AuthToken: "xyzzy",
+       }
+       _, err := (&KeepService{}).IndexMount(client, "fake", "")
+       c.Check(err, check.ErrorMatches, `.*timeout.*`)
+}
index 9247bc4a33fd38ca4406119bb44981edd574ab36..91da5a3fd62ce6eb099e4ce0c0e206a1220268ae 100644 (file)
@@ -122,6 +122,9 @@ type ArvadosClient struct {
 
        // Number of retries
        Retries int
+
+       // X-Request-Id for outgoing requests
+       RequestID string
 }
 
 var CertFiles = []string{
@@ -266,6 +269,9 @@ func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string,
 
                // Add api token header
                req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", c.ApiToken))
+               if c.RequestID != "" {
+                       req.Header.Add("X-Request-Id", c.RequestID)
+               }
                if c.External {
                        req.Header.Add("X-External-Client", "1")
                }
index d057c09b227e9f375d2b3d04e95d9327044c4f33..a434690775089c38a092499ae79f7fa0fcdec0e0 100644 (file)
@@ -16,6 +16,7 @@ const (
        FederatedActiveUserUUID = "zbbbb-tpzed-xurymjxw79nv3jz"
        SpectatorUserUUID       = "zzzzz-tpzed-l1s2piq4t4mps8r"
        UserAgreementCollection = "zzzzz-4zz18-uukreo9rbgwsujr" // user_agreement_in_anonymously_accessible_project
+       FooCollectionName       = "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
        FooCollection           = "zzzzz-4zz18-fy296fx3hot09f7"
        FooCollectionPDH        = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
        NonexistentCollection   = "zzzzz-4zz18-totallynotexist"
@@ -25,6 +26,9 @@ const (
        FooPdh                  = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
        HelloWorldPdh           = "55713e6a34081eb03609e7ad5fcad129+62"
 
+       AProjectUUID    = "zzzzz-j7d0g-v955i6s2oi1cbso"
+       ASubprojectUUID = "zzzzz-j7d0g-axqo7eu9pwvna1x"
+
        FooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
        FooAndBarFilesInDirPDH  = "6bbac24198d09a93975f60098caf0bdf+62"
 
index dcc2fb084ee9645aca498b820f6a2df6bf20204e..490a7f3e03b470296edfb671e82c864cb23076b6 100644 (file)
@@ -104,7 +104,10 @@ func StopAPI() {
        defer os.Chdir(cwd)
        chdirToPythonTests()
 
-       bgRun(exec.Command("python", "run_test_server.py", "stop"))
+       cmd := exec.Command("python", "run_test_server.py", "stop")
+       bgRun(cmd)
+       // Without Wait, "go test" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050
+       cmd.Wait()
 }
 
 // StartKeep starts the given number of keep servers,
@@ -132,12 +135,9 @@ func StopKeep(numKeepServers int) {
        chdirToPythonTests()
 
        cmd := exec.Command("python", "run_test_server.py", "stop_keep", "--num-keep-servers", strconv.Itoa(numKeepServers))
-       cmd.Stdin = nil
-       cmd.Stderr = os.Stderr
-       cmd.Stdout = os.Stderr
-       if err := cmd.Run(); err != nil {
-               log.Fatalf("%+v: %s", cmd.Args, err)
-       }
+       bgRun(cmd)
+       // Without Wait, "go test" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050
+       cmd.Wait()
 }
 
 // Start cmd, with stderr and stdout redirected to our own
index 297a8617084b3247c7b15c18a8ae9c2176224518..a6cb8798aa328a468c1db98c3c3e5bf38773f15c 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
 package health
 
 import (
@@ -83,7 +87,7 @@ type ClusterHealthResponse struct {
        // exposes problems that can't be expressed in Checks, like
        // "service S is needed, but isn't configured to run
        // anywhere."
-       Services map[string]ServiceHealth `json:"services"`
+       Services map[arvados.ServiceName]ServiceHealth `json:"services"`
 }
 
 type CheckResult struct {
@@ -104,13 +108,13 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
        resp := ClusterHealthResponse{
                Health:   "OK",
                Checks:   make(map[string]CheckResult),
-               Services: make(map[string]ServiceHealth),
+               Services: make(map[arvados.ServiceName]ServiceHealth),
        }
 
        mtx := sync.Mutex{}
        wg := sync.WaitGroup{}
-       for node, nodeConfig := range cluster.SystemNodes {
-               for svc, addr := range nodeConfig.ServicePorts() {
+       for profileName, profile := range cluster.NodeProfiles {
+               for svc, addr := range profile.ServicePorts() {
                        // Ensure svc is listed in resp.Services.
                        mtx.Lock()
                        if _, ok := resp.Services[svc]; !ok {
@@ -124,10 +128,10 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
                        }
 
                        wg.Add(1)
-                       go func(node, svc, addr string) {
+                       go func(profileName string, svc arvados.ServiceName, addr string) {
                                defer wg.Done()
                                var result CheckResult
-                               url, err := agg.pingURL(node, addr)
+                               url, err := agg.pingURL(profileName, addr)
                                if err != nil {
                                        result = CheckResult{
                                                Health: "ERROR",
@@ -139,7 +143,7 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
 
                                mtx.Lock()
                                defer mtx.Unlock()
-                               resp.Checks[svc+"+"+url] = result
+                               resp.Checks[fmt.Sprintf("%s+%s", svc, url)] = result
                                if result.Health == "OK" {
                                        h := resp.Services[svc]
                                        h.N++
@@ -148,7 +152,7 @@ func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResp
                                } else {
                                        resp.Health = "ERROR"
                                }
-                       }(node, svc, addr)
+                       }(profileName, svc, addr)
                }
        }
        wg.Wait()
index 7e601f2e70211e30b1786edae098380ba89556e6..a96ed136cbd1539d986a1332a4914c61af335d6a 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
 package health
 
 import (
@@ -30,7 +34,7 @@ func (s *AggregatorSuite) SetUpTest(c *check.C) {
                Clusters: map[string]arvados.Cluster{
                        "zzzzz": {
                                ManagementToken: arvadostest.ManagementToken,
-                               SystemNodes:     map[string]arvados.SystemNode{},
+                               NodeProfiles:    map[string]arvados.NodeProfile{},
                        },
                },
        }}
@@ -82,7 +86,7 @@ func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request)
 func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
        srv, listen := s.stubServer(&unhealthyHandler{})
        defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
                Keepstore: arvados.SystemServiceInstance{Listen: listen},
        }
        s.handler.ServeHTTP(s.resp, s.req)
@@ -102,7 +106,8 @@ func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 func (s *AggregatorSuite) TestHealthy(c *check.C) {
        srv, listen := s.stubServer(&healthyHandler{})
        defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Controller:  arvados.SystemServiceInstance{Listen: listen},
                Keepproxy:   arvados.SystemServiceInstance{Listen: listen},
                Keepstore:   arvados.SystemServiceInstance{Listen: listen},
                Keepweb:     arvados.SystemServiceInstance{Listen: listen},
@@ -125,7 +130,8 @@ func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
        defer srvH.Close()
        srvU, listenU := s.stubServer(&unhealthyHandler{})
        defer srvU.Close()
-       s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Controller:  arvados.SystemServiceInstance{Listen: listenH},
                Keepproxy:   arvados.SystemServiceInstance{Listen: listenH},
                Keepstore:   arvados.SystemServiceInstance{Listen: listenH},
                Keepweb:     arvados.SystemServiceInstance{Listen: listenH},
@@ -134,7 +140,7 @@ func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
                Websocket:   arvados.SystemServiceInstance{Listen: listenH},
                Workbench:   arvados.SystemServiceInstance{Listen: listenH},
        }
-       s.handler.Config.Clusters["zzzzz"].SystemNodes["127.0.0.1"] = arvados.SystemNode{
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["127.0.0.1"] = arvados.NodeProfile{
                Keepstore: arvados.SystemServiceInstance{Listen: listenU},
        }
        s.handler.ServeHTTP(s.resp, s.req)
@@ -188,7 +194,7 @@ func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
        s.handler.timeout = arvados.Duration(100 * time.Millisecond)
        srv, listen := s.stubServer(&slowHandler{})
        defer srv.Close()
-       s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
                Keepstore: arvados.SystemServiceInstance{Listen: listen},
        }
        s.handler.ServeHTTP(s.resp, s.req)
diff --git a/sdk/go/httpserver/error.go b/sdk/go/httpserver/error.go
new file mode 100644 (file)
index 0000000..398e61f
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "encoding/json"
+       "net/http"
+)
+
+type ErrorResponse struct {
+       Errors []string `json:"errors"`
+}
+
+func Error(w http.ResponseWriter, error string, code int) {
+       w.Header().Set("Content-Type", "application/json")
+       w.Header().Set("X-Content-Type-Options", "nosniff")
+       w.WriteHeader(code)
+       json.NewEncoder(w).Encode(ErrorResponse{Errors: []string{error}})
+}
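+
+// A minimal usage sketch (hypothetical handler):
+//
+//     func serveThing(w http.ResponseWriter, req *http.Request) {
+//             Error(w, "no such thing", http.StatusNotFound)
+//     }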
index d2c3a41f2108e2bc852f56119b747a7ec9423e7a..6452136d85eede6896f1dca1648e00b4ba6ae8e7 100644 (file)
@@ -45,6 +45,9 @@ func AddRequestIDs(h http.Handler) http.Handler {
        gen := &IDGenerator{Prefix: "req-"}
        return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
                if req.Header.Get("X-Request-Id") == "" {
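+                       // Get on a nil Header is safe, but Set would
+                       // panic, so allocate a Header first if needed.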
+                       if req.Header == nil {
+                               req.Header = http.Header{}
+                       }
                        req.Header.Set("X-Request-Id", gen.Next())
                }
                h.ServeHTTP(w, req)
index 569931a3edd732b4fb3d48a09db318622bd08075..9577718c76e45c1757297d5272c6174f5a454571 100644 (file)
@@ -17,30 +17,48 @@ type contextKey struct {
        name string
 }
 
-var requestTimeContextKey = contextKey{"requestTime"}
-
-var Logger logrus.FieldLogger = logrus.StandardLogger()
+var (
+       requestTimeContextKey = contextKey{"requestTime"}
+       loggerContextKey      = contextKey{"logger"}
+)
 
 // LogRequests wraps an http.Handler, logging each request and
-// response via logrus.
-func LogRequests(h http.Handler) http.Handler {
+// response via logger.
+func LogRequests(logger logrus.FieldLogger, h http.Handler) http.Handler {
+       if logger == nil {
+               logger = logrus.StandardLogger()
+       }
        return http.HandlerFunc(func(wrapped http.ResponseWriter, req *http.Request) {
                w := &responseTimer{ResponseWriter: WrapResponseWriter(wrapped)}
-               req = req.WithContext(context.WithValue(req.Context(), &requestTimeContextKey, time.Now()))
-               lgr := Logger.WithFields(logrus.Fields{
+               lgr := logger.WithFields(logrus.Fields{
                        "RequestID":       req.Header.Get("X-Request-Id"),
                        "remoteAddr":      req.RemoteAddr,
                        "reqForwardedFor": req.Header.Get("X-Forwarded-For"),
                        "reqMethod":       req.Method,
+                       "reqHost":         req.Host,
                        "reqPath":         req.URL.Path[1:],
+                       "reqQuery":        req.URL.RawQuery,
                        "reqBytes":        req.ContentLength,
                })
+               ctx := req.Context()
+               ctx = context.WithValue(ctx, &requestTimeContextKey, time.Now())
+               ctx = context.WithValue(ctx, &loggerContextKey, lgr)
+               req = req.WithContext(ctx)
+
                logRequest(w, req, lgr)
                defer logResponse(w, req, lgr)
                h.ServeHTTP(w, req)
        })
 }
 
+func Logger(req *http.Request) logrus.FieldLogger {
+       if lgr, ok := req.Context().Value(&loggerContextKey).(logrus.FieldLogger); ok {
+               return lgr
+       } else {
+               return logrus.StandardLogger()
+       }
+}
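+
+// For example, a handler running under LogRequests can attach its own
+// fields to the request-scoped logger (a sketch; uuid is hypothetical):
+//
+//     httpserver.Logger(req).WithField("collectionUUID", uuid).Info("found")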
+
 func logRequest(w *responseTimer, req *http.Request, lgr *logrus.Entry) {
        lgr.Info("request")
 }
index bbcafa143957ae0a165840e58336f52336b8d919..bdde3303e2f97c35b45e73c1dd207f30e521e13d 100644 (file)
@@ -9,11 +9,10 @@ import (
        "encoding/json"
        "net/http"
        "net/http/httptest"
-       "os"
        "testing"
        "time"
 
-       log "github.com/Sirupsen/logrus"
+       "github.com/Sirupsen/logrus"
        check "gopkg.in/check.v1"
 )
 
@@ -26,12 +25,13 @@ var _ = check.Suite(&Suite{})
 type Suite struct{}
 
 func (s *Suite) TestLogRequests(c *check.C) {
-       defer log.SetOutput(os.Stdout)
        captured := &bytes.Buffer{}
-       log.SetOutput(captured)
-       log.SetFormatter(&log.JSONFormatter{
+       log := logrus.New()
+       log.Out = captured
+       log.Formatter = &logrus.JSONFormatter{
                TimestampFormat: time.RFC3339Nano,
-       })
+       }
+
        h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
                w.Write([]byte("hello world"))
        })
@@ -39,7 +39,7 @@ func (s *Suite) TestLogRequests(c *check.C) {
        req.Header.Set("X-Forwarded-For", "1.2.3.4:12345")
        c.Assert(err, check.IsNil)
        resp := httptest.NewRecorder()
-       AddRequestIDs(LogRequests(h)).ServeHTTP(resp, req)
+       AddRequestIDs(LogRequests(log, h)).ServeHTTP(resp, req)
 
        dec := json.NewDecoder(captured)
 
index d37822ffe3e5cd0f582a59a3ee45b1d322fed4ac..8dea759ccb9b1772b816ad565a279975ab751c8a 100644 (file)
@@ -41,6 +41,9 @@ func (w *responseWriter) WriteHeader(s int) {
 }
 
 func (w *responseWriter) Write(data []byte) (n int, err error) {
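+       // Mirror net/http's implicit WriteHeader(200) on first Write,
+       // so wroteStatus records the status actually sent.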
+       if w.wroteStatus == 0 {
+               w.WriteHeader(http.StatusOK)
+       }
        n, err = w.ResponseWriter.Write(data)
        w.wroteBodyBytes += n
        w.err = err
index dbdda604bb98fbff53bfdb14490e66cbbe95ab4d..95a84c063ba852812e5c7408080ff702a8bf6104 100644 (file)
@@ -19,13 +19,14 @@ import (
 func (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {
        defer os.Setenv("ARVADOS_KEEP_SERVICES", "")
 
-       hash := fmt.Sprintf("%x+3", md5.Sum([]byte("TestOverrideDiscovery")))
+       data := []byte("TestOverrideDiscovery")
+       hash := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
        st := StubGetHandler{
                c,
                hash,
                arvadostest.ActiveToken,
                http.StatusOK,
-               []byte("TestOverrideDiscovery")}
+               data}
        ks := RunSomeFakeKeepServers(st, 2)
 
        os.Setenv("ARVADOS_KEEP_SERVICES", "")
index 54a4a374b991b44c5a5e51878be980a1b78f9609..169f1457e2e06e6e3424856809c92fc5dc74d4f9 100644 (file)
@@ -22,6 +22,7 @@ import (
 
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/asyncbuf"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
 )
 
 // A Keep "block" is 64MB.
@@ -99,6 +100,8 @@ type KeepClient struct {
        HTTPClient         HTTPClient
        Retries            int
        BlockCache         *BlockCache
+       RequestID          string
+       StorageClasses     []string
 
        // set to 1 if all writable services are of disk type, otherwise 0
        replicasPerService int
@@ -200,6 +203,17 @@ func (kc *KeepClient) getOrHead(method string, locator string) (io.ReadCloser, i
                return ioutil.NopCloser(bytes.NewReader(nil)), 0, "", nil
        }
 
+       reqid := kc.getRequestID()
+
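+       // Parse the size hint from the locator ("hash+size+hints"), if
+       // present, so it can be checked against the response's
+       // Content-Length below.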
+       var expectLength int64
+       if parts := strings.SplitN(locator, "+", 3); len(parts) < 2 {
+               expectLength = -1
+       } else if n, err := strconv.ParseInt(parts[1], 10, 64); err != nil {
+               expectLength = -1
+       } else {
+               expectLength = n
+       }
+
        var errs []string
 
        tries_remaining := 1 + kc.Retries
@@ -223,14 +237,17 @@ func (kc *KeepClient) getOrHead(method string, locator string) (io.ReadCloser, i
                                errs = append(errs, fmt.Sprintf("%s: %v", url, err))
                                continue
                        }
-                       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", kc.Arvados.ApiToken))
+                       req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+                       req.Header.Add("X-Request-Id", reqid)
                        resp, err := kc.httpClient().Do(req)
                        if err != nil {
                                // Probably a network error, may be transient,
                                // can try again.
                                errs = append(errs, fmt.Sprintf("%s: %v", url, err))
                                retryList = append(retryList, host)
-                       } else if resp.StatusCode != http.StatusOK {
+                               continue
+                       }
+                       if resp.StatusCode != http.StatusOK {
                                var respbody []byte
                                respbody, _ = ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
                                resp.Body.Close()
@@ -247,24 +264,29 @@ func (kc *KeepClient) getOrHead(method string, locator string) (io.ReadCloser, i
                                } else if resp.StatusCode == 404 {
                                        count404++
                                }
-                       } else if resp.ContentLength < 0 {
-                               // Missing Content-Length
-                               resp.Body.Close()
-                               return nil, 0, "", fmt.Errorf("Missing Content-Length of block")
-                       } else {
-                               // Success.
-                               if method == "GET" {
-                                       return HashCheckingReader{
-                                               Reader: resp.Body,
-                                               Hash:   md5.New(),
-                                               Check:  locator[0:32],
-                                       }, resp.ContentLength, url, nil
-                               } else {
+                               continue
+                       }
+                       if expectLength < 0 {
+                               if resp.ContentLength < 0 {
                                        resp.Body.Close()
-                                       return nil, resp.ContentLength, url, nil
+                                       return nil, 0, "", fmt.Errorf("error reading %q: no size hint, no Content-Length header in response", locator)
                                }
+                               expectLength = resp.ContentLength
+                       } else if resp.ContentLength >= 0 && expectLength != resp.ContentLength {
+                               resp.Body.Close()
+                               return nil, 0, "", fmt.Errorf("error reading %q: size hint %d != Content-Length %d", locator, expectLength, resp.ContentLength)
+                       }
+                       // Success
+                       if method == "GET" {
+                               return HashCheckingReader{
+                                       Reader: resp.Body,
+                                       Hash:   md5.New(),
+                                       Check:  locator[0:32],
+                               }, expectLength, url, nil
+                       } else {
+                               resp.Body.Close()
+                               return nil, expectLength, url, nil
                        }
-
                }
                serversToTry = retryList
        }
@@ -334,7 +356,8 @@ func (kc *KeepClient) GetIndex(keepServiceUUID, prefix string) (io.Reader, error
                return nil, err
        }
 
-       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", kc.Arvados.ApiToken))
+       req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+       req.Header.Set("X-Request-Id", kc.getRequestID())
        resp, err := kc.httpClient().Do(req)
        if err != nil {
                return nil, err
@@ -523,6 +546,16 @@ func (kc *KeepClient) httpClient() HTTPClient {
        return c
 }
 
+var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
+
+func (kc *KeepClient) getRequestID() string {
+       if kc.RequestID != "" {
+               return kc.RequestID
+       } else {
+               return reqIDGen.Next()
+       }
+}
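+
+// Callers that already have a request ID (for example, one assigned
+// upstream by an AddRequestIDs-wrapped server) can set kc.RequestID
+// so every outgoing request reuses it; otherwise a fresh "req-..."
+// ID is generated for each transaction.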
+
 type Locator struct {
        Hash  string
        Size  int      // -1 if data size is not known
index 392270909f344ef5ee47f5a4b8ff225394d1d295..dc80ad7e1d6378ad09da968db62cf038002d0b9c 100644 (file)
@@ -93,16 +93,18 @@ func (s *ServerRequiredSuite) TestDefaultReplications(c *C) {
 }
 
 type StubPutHandler struct {
-       c              *C
-       expectPath     string
-       expectApiToken string
-       expectBody     string
-       handled        chan string
+       c                  *C
+       expectPath         string
+       expectApiToken     string
+       expectBody         string
+       expectStorageClass string
+       handled            chan string
 }
 
 func (sph StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        sph.c.Check(req.URL.Path, Equals, "/"+sph.expectPath)
        sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sph.expectApiToken))
+       sph.c.Check(req.Header.Get("X-Keep-Storage-Classes"), Equals, sph.expectStorageClass)
        body, err := ioutil.ReadAll(req.Body)
        sph.c.Check(err, Equals, nil)
        sph.c.Check(body, DeepEquals, []byte(sph.expectBody))
@@ -148,12 +150,13 @@ func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
                "acbd18db4cc2f85cedef654fccc4a4d8",
                "abc123",
                "foo",
+               "hot",
                make(chan string)}
 
        UploadToStubHelper(c, st,
                func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, upload_status chan uploadStatus) {
-
-                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), 0)
+                       kc.StorageClasses = []string{"hot"}
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), kc.getRequestID())
 
                        writer.Write([]byte("foo"))
                        writer.Close()
@@ -170,11 +173,12 @@ func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
                "acbd18db4cc2f85cedef654fccc4a4d8",
                "abc123",
                "foo",
+               "",
                make(chan string)}
 
        UploadToStubHelper(c, st,
                func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, upload_status chan uploadStatus) {
-                       go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), upload_status, 3, 0)
+                       go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), upload_status, 3, kc.getRequestID())
 
                        <-st.handled
 
@@ -195,10 +199,12 @@ func (fh FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 type FailThenSucceedHandler struct {
        handled        chan string
        count          int
-       successhandler StubGetHandler
+       successhandler http.Handler
+       reqIDs         []string
 }
 
 func (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
        if fh.count == 0 {
                resp.WriteHeader(500)
                fh.count += 1
@@ -227,7 +233,7 @@ func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
                func(kc *KeepClient, url string, reader io.ReadCloser,
                        writer io.WriteCloser, upload_status chan uploadStatus) {
 
-                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, 0)
+                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, kc.getRequestID())
 
                        writer.Write([]byte("foo"))
                        writer.Close()
@@ -263,6 +269,7 @@ func (s *StandaloneSuite) TestPutB(c *C) {
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
@@ -304,6 +311,7 @@ func (s *StandaloneSuite) TestPutHR(c *C) {
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
@@ -352,6 +360,7 @@ func (s *StandaloneSuite) TestPutWithFail(c *C) {
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 4)}
 
        fh := FailHandler{
@@ -410,6 +419,7 @@ func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 1)}
 
        fh := FailHandler{
@@ -560,8 +570,9 @@ func (s *StandaloneSuite) TestGetFail(c *C) {
 func (s *StandaloneSuite) TestGetFailRetry(c *C) {
        hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
 
-       st := &FailThenSucceedHandler{make(chan string, 1), 0,
-               StubGetHandler{
+       st := &FailThenSucceedHandler{
+               handled: make(chan string, 1),
+               successhandler: StubGetHandler{
                        c,
                        hash,
                        "abc123",
@@ -585,6 +596,13 @@ func (s *StandaloneSuite) TestGetFailRetry(c *C) {
        content, err2 := ioutil.ReadAll(r)
        c.Check(err2, Equals, nil)
        c.Check(content, DeepEquals, []byte("foo"))
+
+       c.Logf("%q", st.reqIDs)
+       c.Assert(len(st.reqIDs) > 1, Equals, true)
+       for _, reqid := range st.reqIDs {
+               c.Check(reqid, Not(Equals), "")
+               c.Check(reqid, Equals, st.reqIDs[0])
+       }
 }
 
 func (s *StandaloneSuite) TestGetNetError(c *C) {
@@ -979,6 +997,7 @@ func (s *StandaloneSuite) TestPutBWant2ReplicasWithOnlyOneWritableLocalRoot(c *C
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
@@ -1017,6 +1036,7 @@ func (s *StandaloneSuite) TestPutBWithNoWritableLocalRoots(c *C) {
                hash,
                "abc123",
                "foo",
+               "",
                make(chan string, 5)}
 
        arv, _ := arvadosclient.MakeArvadosClient()
@@ -1180,29 +1200,15 @@ func (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {
        c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
 }
 
-type FailThenSucceedPutHandler struct {
-       handled        chan string
-       count          int
-       successhandler StubPutHandler
-}
-
-func (h *FailThenSucceedPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-       if h.count == 0 {
-               resp.WriteHeader(500)
-               h.count += 1
-               h.handled <- fmt.Sprintf("http://%s", req.Host)
-       } else {
-               h.successhandler.ServeHTTP(resp, req)
-       }
-}
-
 func (s *StandaloneSuite) TestPutBRetry(c *C) {
-       st := &FailThenSucceedPutHandler{make(chan string, 1), 0,
-               StubPutHandler{
+       st := &FailThenSucceedHandler{
+               handled: make(chan string, 1),
+               successhandler: StubPutHandler{
                        c,
                        Md5String("foo"),
                        "abc123",
                        "foo",
+                       "",
                        make(chan string, 5)}}
 
        arv, _ := arvadosclient.MakeArvadosClient()
index 37912506a2cb6ab7c014a0edac13e922c20526d6..542827f5e0d83c5d074942ef4546955e59b46ba5 100644 (file)
@@ -11,7 +11,6 @@ import (
        "io"
        "io/ioutil"
        "log"
-       "math/rand"
        "net/http"
        "os"
        "strings"
@@ -57,13 +56,13 @@ type uploadStatus struct {
 }
 
 func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
-       upload_status chan<- uploadStatus, expectedLength int64, requestID int32) {
+       upload_status chan<- uploadStatus, expectedLength int64, reqid string) {
 
        var req *http.Request
        var err error
        var url = fmt.Sprintf("%s/%s", host, hash)
        if req, err = http.NewRequest("PUT", url, nil); err != nil {
-               DebugPrintf("DEBUG: [%08x] Error creating request PUT %v error: %v", requestID, url, err.Error())
+               DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
                upload_status <- uploadStatus{err, url, 0, 0, ""}
                return
        }
@@ -77,13 +76,17 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
                // to be empty, so don't set req.Body.
        }
 
-       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
+       req.Header.Add("X-Request-Id", reqid)
+       req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
        req.Header.Add("Content-Type", "application/octet-stream")
        req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
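+       // If the caller requested specific storage classes (e.g.
+       // kc.StorageClasses = []string{"hot"}), pass them along so the
+       // server can honor them.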
+       if len(this.StorageClasses) > 0 {
+               req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
+       }
 
        var resp *http.Response
        if resp, err = this.httpClient().Do(req); err != nil {
-               DebugPrintf("DEBUG: [%08x] Upload failed %v error: %v", requestID, url, err.Error())
+               DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
                upload_status <- uploadStatus{err, url, 0, 0, ""}
                return
        }
@@ -99,16 +102,16 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
        response := strings.TrimSpace(string(respbody))
        if err2 != nil && err2 != io.EOF {
-               DebugPrintf("DEBUG: [%08x] Upload %v error: %v response: %v", requestID, url, err2.Error(), response)
+               DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
                upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
        } else if resp.StatusCode == http.StatusOK {
-               DebugPrintf("DEBUG: [%08x] Upload %v success", requestID, url)
+               DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
                upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
        } else {
                if resp.StatusCode >= 300 && response == "" {
                        response = resp.Status
                }
-               DebugPrintf("DEBUG: [%08x] Upload %v error: %v response: %v", requestID, url, resp.StatusCode, response)
+               DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
                upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
        }
 }
@@ -118,9 +121,7 @@ func (this *KeepClient) putReplicas(
        getReader func() io.Reader,
        expectedLength int64) (locator string, replicas int, err error) {
 
-       // Generate an arbitrary ID to identify this specific
-       // transaction in debug logs.
-       requestID := rand.Int31()
+       reqid := this.getRequestID()
 
        // Calculate the ordering for uploading to servers
        sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
@@ -167,8 +168,8 @@ func (this *KeepClient) putReplicas(
                        for active*replicasPerThread < replicasTodo {
                                // Start some upload requests
                                if next_server < len(sv) {
-                                       DebugPrintf("DEBUG: [%08x] Begin upload %s to %s", requestID, hash, sv[next_server])
-                                       go this.uploadToKeepServer(sv[next_server], hash, getReader(), upload_status, expectedLength, requestID)
+                                       DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[next_server])
+                                       go this.uploadToKeepServer(sv[next_server], hash, getReader(), upload_status, expectedLength, reqid)
                                        next_server += 1
                                        active += 1
                                } else {
@@ -184,8 +185,8 @@ func (this *KeepClient) putReplicas(
                                        }
                                }
                        }
-                       DebugPrintf("DEBUG: [%08x] Replicas remaining to write: %v active uploads: %v",
-                               requestID, replicasTodo, active)
+                       DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v",
+                               reqid, replicasTodo, active)
 
                        // Now wait for something to happen.
                        if active > 0 {
index bb97f3c1d8186adb0da84f541997157f149c0c1a..c8c70298077092ea8c0b14707e6e6f8563ab2411 100644 (file)
@@ -18,9 +18,7 @@ import os
 import pprint
 import re
 import string
-import subprocess
 import sys
-import threading
 import time
 import types
 import zlib
index 4611a1aadf80043eb9afdeeaff727b27a09eecbc..b652db77d18a73214740672da6588f0fbaab3de3 100644 (file)
@@ -96,6 +96,10 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
                           delay, exc_info=True)
             for conn in self.connections.values():
                 conn.close()
+        except httplib2.SSLHandshakeError as e:
+            # Intercept and re-raise with a better error message.
+            raise httplib2.SSLHandshakeError("Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e))
+
         time.sleep(delay)
         delay = delay * self._retry_delay_backoff
 
@@ -254,9 +258,12 @@ def api_from_config(version=None, apiconfig=None, **kwargs):
     if apiconfig is None:
         apiconfig = config.settings()
 
+    errors = []
     for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
         if x not in apiconfig:
-            raise ValueError("%s is not set. Aborting." % x)
+            errors.append(x)
+    if errors:
+        raise ValueError(" and ".join(errors)+" not set.\nPlease set in %s or export environment variable." % config.default_config_file)
     host = apiconfig.get('ARVADOS_API_HOST')
     token = apiconfig.get('ARVADOS_API_TOKEN')
     insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)
index 8fb90c944396967e6863a38daee27ffe3cb8b9ec..e38a6bd475c7b8a4aee7787a537a40656fd93b36 100644 (file)
@@ -13,6 +13,8 @@ import os
 import re
 import errno
 import hashlib
+import datetime
+import ciso8601
 import time
 import threading
 
@@ -1269,6 +1271,18 @@ class Collection(RichCollectionBase):
     def root_collection(self):
         return self
 
+    def get_properties(self):
+        if self._api_response and self._api_response["properties"]:
+            return self._api_response["properties"]
+        else:
+            return {}
+
+    def get_trash_at(self):
+        if self._api_response and self._api_response["trash_at"]:
+            return ciso8601.parse_datetime(self._api_response["trash_at"])
+        else:
+            return None
+
     def stream_name(self):
         return "."
 
@@ -1436,17 +1450,34 @@ class Collection(RichCollectionBase):
     @must_be_writable
     @synchronized
     @retry_method
-    def save(self, merge=True, num_retries=None):
+    def save(self,
+             properties=None,
+             storage_classes=None,
+             trash_at=None,
+             merge=True,
+             num_retries=None):
         """Save collection to an existing collection record.
 
         Commit pending buffer blocks to Keep, merge with remote record (if
-        merge=True, the default), and update the collection record.  Returns
+        merge=True, the default), and update the collection record. Returns
         the current manifest text.
 
         Will raise AssertionError if not associated with a collection record on
         the API server.  If you want to save a manifest to Keep only, see
         `save_new()`.
 
+        :properties:
+          Additional properties of the collection. This value will replace any
+          existing properties of the collection.
+
+        :storage_classes:
+          The desired storage classes to use when writing data to Keep.
+
+        :trash_at:
+          A collection is *expiring* when it has a *trash_at* time in the future.
+          An expiring collection can be accessed as normal,
+          but is scheduled to be trashed automatically at the *trash_at* time.
+
         :merge:
           Update and merge remote changes before saving.  Otherwise, any
           remote changes will be ignored and overwritten.
@@ -1455,6 +1486,24 @@ class Collection(RichCollectionBase):
           Retry count on API calls (if None,  use the collection default)
 
         """
+        if properties and type(properties) is not dict:
+            raise errors.ArgumentError("properties must be dictionary type.")
+
+        if storage_classes and type(storage_classes) is not list:
+            raise errors.ArgumentError("storage_classes must be list type.")
+
+        if trash_at and type(trash_at) is not datetime.datetime:
+            raise errors.ArgumentError("trash_at must be datetime type.")
+
+        body={}
+        if properties:
+            body["properties"] = properties
+        if storage_classes:
+            body["storage_classes_desired"] = storage_classes
+        if trash_at:
+            t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+            body["trash_at"] = t
+
         if not self.committed():
             if not self._has_collection_uuid():
                 raise AssertionError("Collection manifest_locator is not a collection uuid.  Use save_new() for new collections.")
@@ -1465,14 +1514,20 @@ class Collection(RichCollectionBase):
                 self.update()
 
             text = self.manifest_text(strip=False)
+            body['manifest_text'] = text
+
             self._remember_api_response(self._my_api().collections().update(
                 uuid=self._manifest_locator,
-                body={'manifest_text': text}
-                ).execute(
-                    num_retries=num_retries))
+                body=body
+                ).execute(num_retries=num_retries))
             self._manifest_text = self._api_response["manifest_text"]
             self._portable_data_hash = self._api_response["portable_data_hash"]
             self.set_committed(True)
+        elif body:
+            self._remember_api_response(self._my_api().collections().update(
+                uuid=self._manifest_locator,
+                body=body
+                ).execute(num_retries=num_retries))
 
         return self._manifest_text
 
@@ -1483,6 +1538,9 @@ class Collection(RichCollectionBase):
     def save_new(self, name=None,
                  create_collection_record=True,
                  owner_uuid=None,
+                 properties=None,
+                 storage_classes=None,
+                 trash_at=None,
                  ensure_unique_name=False,
                  num_retries=None):
         """Save collection to a new collection record.
@@ -1490,7 +1548,7 @@ class Collection(RichCollectionBase):
         Commit pending buffer blocks to Keep and, when create_collection_record
         is True (default), create a new collection record.  After creating a
         new collection record, this Collection object will be associated with
-        the new record used by `save()`.  Returns the current manifest text.
+        the new record used by `save()`. Returns the current manifest text.
 
         :name:
           The collection name.
@@ -1503,6 +1561,18 @@ class Collection(RichCollectionBase):
           the user, or project uuid that will own this collection.
           If None, defaults to the current user.
 
+        :properties:
+          Additional properties of the collection. This value will replace any
+          existing properties of the collection.
+
+        :storage_classes:
+          The desired storage classes to use when writing data to Keep.
+
+        :trash_at:
+          A collection is *expiring* when it has a *trash_at* time in the future.
+          An expiring collection can be accessed as normal,
+          but is scheduled to be trashed automatically at the *trash_at* time.
+
         :ensure_unique_name:
           If True, ask the API server to rename the collection
           if it conflicts with a collection with the same name and owner.  If
@@ -1512,6 +1582,15 @@ class Collection(RichCollectionBase):
           Retry count on API calls (if None,  use the collection default)
 
         """
+        if properties and type(properties) is not dict:
+            raise errors.ArgumentError("properties must be dictionary type.")
+
+        if storage_classes and type(storage_classes) is not list:
+            raise errors.ArgumentError("storage_classes must be list type.")
+
+        if trash_at and type(trash_at) is not datetime.datetime:
+            raise errors.ArgumentError("trash_at must be datetime type.")
+
         self._my_block_manager().commit_all()
         text = self.manifest_text(strip=False)
 
@@ -1525,6 +1604,13 @@ class Collection(RichCollectionBase):
                     "replication_desired": self.replication_desired}
             if owner_uuid:
                 body["owner_uuid"] = owner_uuid
+            if properties:
+                body["properties"] = properties
+            if storage_classes:
+                body["storage_classes_desired"] = storage_classes
+            if trash_at:
+                t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+                body["trash_at"] = t
 
             self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
             text = self._api_response["manifest_text"]
index d4d9497d3e298b0c8312243d9684c3119d79a9c0..d10d38eb5bd1d4b08b3f2f2f33b00c234bc6b5eb 100644 (file)
@@ -5,6 +5,10 @@
 import argparse
 import errno
 import os
+import logging
+import signal
+from future.utils import listitems, listvalues
+import sys
 
 def _pos_int(s):
     num = int(s)
@@ -44,3 +48,18 @@ def make_home_conf_dir(path, mode=None, errors='ignore'):
         if mode is not None:
             os.chmod(abs_path, mode)
     return abs_path
+
+CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
+
+def exit_signal_handler(sigcode, frame):
+    logging.getLogger('arvados').error("Caught signal {}, exiting.".format(sigcode))
+    sys.exit(-sigcode)
+
+def install_signal_handlers():
+    global orig_signal_handlers
+    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
+                            for sigcode in CAUGHT_SIGNALS}
+
+def restore_signal_handlers():
+    for sigcode, orig_handler in listitems(orig_signal_handlers):
+        signal.signal(sigcode, orig_handler)
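
A sketch of the calling pattern these helpers enable: only the outermost command installs the handlers, and nested invocations (as when arv-keepdocker calls arv-put below) pass install_sig_handlers=False so installation happens exactly once. The command body is a placeholder:

    import arvados.commands._util as arv_cmd

    def main(install_sig_handlers=True):
        # Install handlers for SIGINT/SIGQUIT/SIGTERM at the outermost
        # entry point only; restore the originals on the way out.
        if install_sig_handlers:
            arv_cmd.install_signal_handlers()
        try:
            pass  # ... command body goes here ...
        finally:
            if install_sig_handlers:
                arv_cmd.restore_signal_handlers()
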
index ea85b35fc5cbd4d528f1c69507ffaae9fb2717b0..55fc6b626d5de9798b989d2b252d158ca89baab6 100644 (file)
@@ -10,12 +10,13 @@ import errno
 import json
 import os
 import re
-import subprocess
+import subprocess32 as subprocess
 import sys
 import tarfile
 import tempfile
 import shutil
 import _strptime
+import fcntl
 
 from operator import itemgetter
 from stat import *
@@ -185,12 +186,15 @@ def save_image(image_hash, image_file):
     except STAT_CACHE_ERRORS:
         pass  # We won't resume from this cache.  No big deal.
 
+def get_cache_dir():
+    return arv_cmd.make_home_conf_dir(
+        os.path.join('.cache', 'arvados', 'docker'), 0o700)
+
 def prep_image_file(filename):
     # Return a file object ready to save a Docker image,
     # and a boolean indicating whether or not we need to actually save the
     # image (False if a cached save is available).
-    cache_dir = arv_cmd.make_home_conf_dir(
-        os.path.join('.cache', 'arvados', 'docker'), 0o700)
+    cache_dir = get_cache_dir()
     if cache_dir is None:
         image_file = tempfile.NamedTemporaryFile(suffix='.tar')
         need_save = True
@@ -341,9 +345,10 @@ def _uuid2pdh(api, uuid):
         select=['portable_data_hash'],
     ).execute()['items'][0]['portable_data_hash']
 
-def main(arguments=None, stdout=sys.stdout):
+def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None):
     args = arg_parser.parse_args(arguments)
-    api = arvados.api('v1')
+    if api is None:
+        api = arvados.api('v1')
 
     if args.image is None or args.image == 'images':
         fmt = "{:30}  {:10}  {:12}  {:29}  {:20}\n"
@@ -399,115 +404,131 @@ def main(arguments=None, stdout=sys.stdout):
     else:
         collection_name = args.name
 
-    if not args.force:
-        # Check if this image is already in Arvados.
-
-        # Project where everything should be owned
-        if args.project_uuid:
-            parent_project_uuid = args.project_uuid
-        else:
-            parent_project_uuid = api.users().current().execute(
-                num_retries=args.retries)['uuid']
-
-        # Find image hash tags
-        existing_links = _get_docker_links(
-            api, args.retries,
-            filters=[['link_class', '=', 'docker_image_hash'],
-                     ['name', '=', image_hash]])
-        if existing_links:
-            # get readable collections
-            collections = api.collections().list(
-                filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
-                select=["uuid", "owner_uuid", "name", "manifest_text"]
-                ).execute(num_retries=args.retries)['items']
-
-            if collections:
-                # check for repo+tag links on these collections
-                if image_repo_tag:
-                    existing_repo_tag = _get_docker_links(
-                        api, args.retries,
-                        filters=[['link_class', '=', 'docker_image_repo+tag'],
-                                 ['name', '=', image_repo_tag],
-                                 ['head_uuid', 'in', [c["uuid"] for c in collections]]])
-                else:
-                    existing_repo_tag = []
-
-                try:
-                    coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
-                except StopIteration:
-                    # create new collection owned by the project
-                    coll_uuid = api.collections().create(
-                        body={"manifest_text": collections[0]['manifest_text'],
-                              "name": collection_name,
-                              "owner_uuid": parent_project_uuid},
-                        ensure_unique_name=True
-                        ).execute(num_retries=args.retries)['uuid']
-
-                link_base = {'owner_uuid': parent_project_uuid,
-                             'head_uuid':  coll_uuid,
-                             'properties': existing_links[0]['properties']}
-
-                if not any(items_owned_by(parent_project_uuid, existing_links)):
-                    # create image link owned by the project
-                    make_link(api, args.retries,
-                              'docker_image_hash', image_hash, **link_base)
-
-                if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
-                    # create repo+tag link owned by the project
-                    make_link(api, args.retries, 'docker_image_repo+tag',
-                              image_repo_tag, **link_base)
-
-                stdout.write(coll_uuid + "\n")
-
-                sys.exit(0)
-
-    # Open a file for the saved image, and write it if needed.
+    # Acquire a lock so that only one arv-keepdocker process will
+    # dump/upload a particular docker image at a time.  Do this before
+    # checking if the image already exists in Arvados so that if there
+    # is an upload already underway, when that upload completes and
+    # this process gets a turn, it will discover the Docker image is
+    # already available and exit quickly.
     outfile_name = '{}.tar'.format(image_hash)
-    image_file, need_save = prep_image_file(outfile_name)
-    if need_save:
-        save_image(image_hash, image_file)
+    lockfile_name = '{}.lock'.format(outfile_name)
+    lockfile = None
+    cache_dir = get_cache_dir()
+    if cache_dir:
+        lockfile = open(os.path.join(cache_dir, lockfile_name), 'w+')
+        fcntl.flock(lockfile, fcntl.LOCK_EX)
 
-    # Call arv-put with switches we inherited from it
-    # (a.k.a., switches that aren't our own).
-    put_args = keepdocker_parser.parse_known_args(arguments)[1]
-
-    if args.name is None:
-        put_args += ['--name', collection_name]
+    try:
+        if not args.force:
+            # Check if this image is already in Arvados.
 
-    coll_uuid = arv_put.main(
-        put_args + ['--filename', outfile_name, image_file.name], stdout=stdout).strip()
+            # Project where everything should be owned
+            parent_project_uuid = args.project_uuid or api.users().current().execute(
+                num_retries=args.retries)['uuid']
 
-    # Read the image metadata and make Arvados links from it.
-    image_file.seek(0)
-    image_tar = tarfile.open(fileobj=image_file)
-    image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
-    if image_hash_type:
-        json_filename = raw_image_hash + '.json'
-    else:
-        json_filename = raw_image_hash + '/json'
-    json_file = image_tar.extractfile(image_tar.getmember(json_filename))
-    image_metadata = json.load(json_file)
-    json_file.close()
-    image_tar.close()
-    link_base = {'head_uuid': coll_uuid, 'properties': {}}
-    if 'created' in image_metadata:
-        link_base['properties']['image_timestamp'] = image_metadata['created']
-    if args.project_uuid is not None:
-        link_base['owner_uuid'] = args.project_uuid
-
-    make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
-    if image_repo_tag:
-        make_link(api, args.retries,
-                  'docker_image_repo+tag', image_repo_tag, **link_base)
-
-    # Clean up.
-    image_file.close()
-    for filename in [stat_cache_name(image_file), image_file.name]:
-        try:
-            os.unlink(filename)
-        except OSError as error:
-            if error.errno != errno.ENOENT:
-                raise
+            # Find image hash tags
+            existing_links = _get_docker_links(
+                api, args.retries,
+                filters=[['link_class', '=', 'docker_image_hash'],
+                         ['name', '=', image_hash]])
+            if existing_links:
+                # get readable collections
+                collections = api.collections().list(
+                    filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
+                    select=["uuid", "owner_uuid", "name", "manifest_text"]
+                    ).execute(num_retries=args.retries)['items']
+
+                if collections:
+                    # check for repo+tag links on these collections
+                    if image_repo_tag:
+                        existing_repo_tag = _get_docker_links(
+                            api, args.retries,
+                            filters=[['link_class', '=', 'docker_image_repo+tag'],
+                                     ['name', '=', image_repo_tag],
+                                     ['head_uuid', 'in', [c["uuid"] for c in collections]]])
+                    else:
+                        existing_repo_tag = []
+
+                    try:
+                        coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+                    except StopIteration:
+                        # create new collection owned by the project
+                        coll_uuid = api.collections().create(
+                            body={"manifest_text": collections[0]['manifest_text'],
+                                  "name": collection_name,
+                                  "owner_uuid": parent_project_uuid},
+                            ensure_unique_name=True
+                            ).execute(num_retries=args.retries)['uuid']
+
+                    link_base = {'owner_uuid': parent_project_uuid,
+                                 'head_uuid':  coll_uuid,
+                                 'properties': existing_links[0]['properties']}
+
+                    if not any(items_owned_by(parent_project_uuid, existing_links)):
+                        # create image link owned by the project
+                        make_link(api, args.retries,
+                                  'docker_image_hash', image_hash, **link_base)
+
+                    if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
+                        # create repo+tag link owned by the project
+                        make_link(api, args.retries, 'docker_image_repo+tag',
+                                  image_repo_tag, **link_base)
+
+                    stdout.write(coll_uuid + "\n")
+
+                    sys.exit(0)
+
+        # Open a file for the saved image, and write it if needed.
+        image_file, need_save = prep_image_file(outfile_name)
+        if need_save:
+            save_image(image_hash, image_file)
+
+        # Call arv-put with switches we inherited from it
+        # (a.k.a., switches that aren't our own).
+        put_args = keepdocker_parser.parse_known_args(arguments)[1]
+
+        if args.name is None:
+            put_args += ['--name', collection_name]
+
+        coll_uuid = arv_put.main(
+            put_args + ['--filename', outfile_name, image_file.name], stdout=stdout,
+            install_sig_handlers=install_sig_handlers).strip()
+
+        # Read the image metadata and make Arvados links from it.
+        image_file.seek(0)
+        image_tar = tarfile.open(fileobj=image_file)
+        image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+        if image_hash_type:
+            json_filename = raw_image_hash + '.json'
+        else:
+            json_filename = raw_image_hash + '/json'
+        json_file = image_tar.extractfile(image_tar.getmember(json_filename))
+        image_metadata = json.load(json_file)
+        json_file.close()
+        image_tar.close()
+        link_base = {'head_uuid': coll_uuid, 'properties': {}}
+        if 'created' in image_metadata:
+            link_base['properties']['image_timestamp'] = image_metadata['created']
+        if args.project_uuid is not None:
+            link_base['owner_uuid'] = args.project_uuid
+
+        make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
+        if image_repo_tag:
+            make_link(api, args.retries,
+                      'docker_image_repo+tag', image_repo_tag, **link_base)
+
+        # Clean up.
+        image_file.close()
+        for filename in [stat_cache_name(image_file), image_file.name]:
+            try:
+                os.unlink(filename)
+            except OSError as error:
+                if error.errno != errno.ENOENT:
+                    raise
+    finally:
+        if lockfile is not None:
+            # Closing the lockfile unlocks it.
+            lockfile.close()
 
 if __name__ == '__main__':
     main()
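
The cache-directory lock above is the standard advisory flock pattern; a standalone sketch, with a hypothetical lock path:

    import fcntl
    import os
    import tempfile

    lock_path = os.path.join(tempfile.gettempdir(), 'sha256-example.tar.lock')
    lockfile = open(lock_path, 'w+')
    fcntl.flock(lockfile, fcntl.LOCK_EX)  # blocks until any other holder is done
    try:
        # First process in: dump and upload the image. Later processes
        # wake up here after that upload finishes, find the image already
        # in Arvados via the not-args.force branch above, and exit quickly.
        pass
    finally:
        lockfile.close()  # closing the file releases the lock
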
index 5dde8e53c933d05b2facbf8df284941635da3b42..cba00c3c8cf153039de990d27867558d0dbc699a 100644 (file)
@@ -34,7 +34,6 @@ from arvados._version import __version__
 
 import arvados.commands._util as arv_cmd
 
-CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
 api_client = None
 
 upload_opts = argparse.ArgumentParser(add_help=False)
@@ -141,6 +140,10 @@ physical storage devices (e.g., disks) should have a copy of each data
 block. Default is to use the server-provided default (if any) or 2.
 """)
 
+upload_opts.add_argument('--storage-classes', help="""
+Specify a comma-separated list of storage classes to be used when saving data to Keep.
+""")
+
 upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
                          help="""
 Set the number of upload threads to be used. Take into account that
@@ -419,8 +422,8 @@ class ArvPutUploadJob(object):
     def __init__(self, paths, resume=True, use_cache=True, reporter=None,
                  name=None, owner_uuid=None, api_client=None,
                  ensure_unique_name=False, num_retries=None,
-                 put_threads=None, replication_desired=None,
-                 filename=None, update_time=60.0, update_collection=None,
+                 put_threads=None, replication_desired=None, filename=None,
+                 update_time=60.0, update_collection=None, storage_classes=None,
                  logger=logging.getLogger('arvados.arv_put'), dry_run=False,
                  follow_links=True, exclude_paths=[], exclude_names=None):
         self.paths = paths
@@ -440,6 +443,7 @@ class ArvPutUploadJob(object):
         self.replication_desired = replication_desired
         self.put_threads = put_threads
         self.filename = filename
+        self.storage_classes = storage_classes
         self._api_client = api_client
         self._state_lock = threading.Lock()
         self._state = None # Previous run state (file list & manifest)
@@ -615,10 +619,14 @@ class ArvPutUploadJob(object):
                 else:
                     # The file already exist on remote collection, skip it.
                     pass
-            self._remote_collection.save(num_retries=self.num_retries)
+            self._remote_collection.save(storage_classes=self.storage_classes,
+                                         num_retries=self.num_retries)
         else:
+            if self.storage_classes is None:
+                self.storage_classes = ['default']
             self._local_collection.save_new(
                 name=self.name, owner_uuid=self.owner_uuid,
+                storage_classes=self.storage_classes,
                 ensure_unique_name=self.ensure_unique_name,
                 num_retries=self.num_retries)
 
@@ -978,10 +986,6 @@ def progress_writer(progress_func, outfile=sys.stderr):
         outfile.write(progress_func(bytes_written, bytes_expected))
     return write_progress
 
-def exit_signal_handler(sigcode, frame):
-    logging.getLogger('arvados.arv_put').error("Caught signal {}, exiting.".format(sigcode))
-    sys.exit(-sigcode)
-
 def desired_project_uuid(api_client, project_uuid, num_retries):
     if not project_uuid:
         query = api_client.users().current()
@@ -993,7 +997,8 @@ def desired_project_uuid(api_client, project_uuid, num_retries):
         raise ValueError("Not a valid project UUID: {}".format(project_uuid))
     return query.execute(num_retries=num_retries)['uuid']
 
-def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
+         install_sig_handlers=True):
     global api_client
 
     args = parse_arguments(arguments)
@@ -1012,10 +1017,8 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     if api_client is None:
         api_client = arvados.api('v1', request_id=request_id)
 
-    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
-    # the originals.
-    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
-                            for sigcode in CAUGHT_SIGNALS}
+    if install_sig_handlers:
+        arv_cmd.install_signal_handlers()
 
     # Determine the name to use
     if args.name:
@@ -1051,6 +1054,15 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     else:
         reporter = None
 
+    # Split the --storage-classes argument
+    storage_classes = None
+    if args.storage_classes:
+        storage_classes = args.storage_classes.strip().split(',')
+        if len(storage_classes) > 1:
+            logger.error("Multiple storage classes are not yet supported.")
+            sys.exit(1)
+
+
     # Setup exclude regex from all the --exclude arguments provided
     name_patterns = []
     exclude_paths = []
@@ -1108,6 +1120,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
                                  owner_uuid = project_uuid,
                                  ensure_unique_name = True,
                                  update_collection = args.update_collection,
+                                 storage_classes=storage_classes,
                                  logger=logger,
                                  dry_run=args.dry_run,
                                  follow_links=args.follow_links,
@@ -1182,8 +1195,8 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         if not output.endswith('\n'):
             stdout.write('\n')
 
-    for sigcode, orig_handler in listitems(orig_signal_handlers):
-        signal.signal(sigcode, orig_handler)
+    if install_sig_handlers:
+        arv_cmd.restore_signal_handlers()
 
     if status != 0:
         sys.exit(status)
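
With the option wired through, arv-put can be driven programmatically as well as from the shell. A hedged sketch; the local path is hypothetical, and per the check above only a single class is accepted for now:

    import arvados.commands.put as arv_put

    # Equivalent to: arv-put --storage-classes default /tmp/example.txt
    arv_put.main(['--storage-classes', 'default', '/tmp/example.txt'])
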
index 831e496a29786d58d5272e4cffda0e4d24838bdd..c4748fa995759ef0cc934b699a14523f8a3181f8 100644 (file)
@@ -136,20 +136,21 @@ def statfile(prefix, fn, fnPattern="$(file %s/%s)", dirPattern="$(dir %s/%s/)",
 
     return prefix+fn
 
-def write_file(collection, pathprefix, fn):
+def write_file(collection, pathprefix, fn, flush=False):
     with open(os.path.join(pathprefix, fn)) as src:
         dst = collection.open(fn, "w")
         r = src.read(1024*128)
         while r:
             dst.write(r)
             r = src.read(1024*128)
-        dst.close(flush=False)
+        dst.close(flush=flush)
 
 def uploadfiles(files, api, dry_run=False, num_retries=0,
                 project=None,
                 fnPattern="$(file %s/%s)",
                 name=None,
-                collection=None):
+                collection=None,
+                packed=True):
     # Find the smallest path prefix that includes all the files that need to be uploaded.
     # This starts at the root and iteratively removes common parent directory prefixes
     # until all file paths no longer have a common parent.
@@ -199,12 +200,12 @@ def uploadfiles(files, api, dry_run=False, num_retries=0,
                 continue
             prev = localpath
             if os.path.isfile(localpath):
-                write_file(collection, pathprefix, f.fn)
+                write_file(collection, pathprefix, f.fn, not packed)
             elif os.path.isdir(localpath):
                 for root, dirs, iterfiles in os.walk(localpath):
                     root = root[len(pathprefix):]
                     for src in iterfiles:
-                        write_file(collection, pathprefix, os.path.join(root, src))
+                        write_file(collection, pathprefix, os.path.join(root, src), not packed)
 
         filters=[["portable_data_hash", "=", collection.portable_data_hash()]]
         if name:
index e8e95afc7013650c67e753a3f2de4e7ec227fc44..71e101cf4c5073d40e78f73c0bf46a9ff231f937 100644 (file)
@@ -292,7 +292,8 @@ class KeepClient(object):
         def __init__(self, root, user_agent_pool=queue.LifoQueue(),
                      upload_counter=None,
                      download_counter=None,
-                     headers={}):
+                     headers={},
+                     insecure=False):
             self.root = root
             self._user_agent_pool = user_agent_pool
             self._result = {'error': None}
@@ -304,6 +305,7 @@ class KeepClient(object):
             self.put_headers = headers
             self.upload_counter = upload_counter
             self.download_counter = download_counter
+            self.insecure = insecure
 
         def usable(self):
             """Is it worth attempting a request?"""
@@ -371,6 +373,8 @@ class KeepClient(object):
                         '{}: {}'.format(k,v) for k,v in self.get_headers.items()])
                     curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
                     curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
                     if method == "HEAD":
                         curl.setopt(pycurl.NOBODY, True)
                     self._setcurltimeouts(curl, timeout)
@@ -463,6 +467,8 @@ class KeepClient(object):
                         '{}: {}'.format(k,v) for k,v in self.put_headers.items()])
                     curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
                     curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
                     self._setcurltimeouts(curl, timeout)
                     try:
                         curl.perform()
@@ -762,6 +768,11 @@ class KeepClient(object):
         if local_store is None:
             local_store = os.environ.get('KEEP_LOCAL_STORE')
 
+        if api_client is None:
+            self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+        else:
+            self.insecure = api_client.insecure
+
         self.block_cache = block_cache if block_cache else KeepBlockCache()
         self.timeout = timeout
         self.proxy_timeout = proxy_timeout
@@ -934,7 +945,8 @@ class KeepClient(object):
                     root, self._user_agent_pool,
                     upload_counter=self.upload_counter,
                     download_counter=self.download_counter,
-                    headers=headers)
+                    headers=headers,
+                    insecure=self.insecure)
         return local_roots
 
     @staticmethod
@@ -1035,7 +1047,8 @@ class KeepClient(object):
                 root: self.KeepService(root, self._user_agent_pool,
                                        upload_counter=self.upload_counter,
                                        download_counter=self.download_counter,
-                                       headers=headers)
+                                       headers=headers,
+                                       insecure=self.insecure)
                 for root in hint_roots
             }
 
index 5c8a8369394707418ccff484bb6c38f8a5677b7d..c6e17cae0b71a4ca0b580bbb6f8c056da8cb8988 100644 (file)
@@ -20,16 +20,24 @@ class ThreadSafeApiCache(object):
 
     """
 
-    def __init__(self, apiconfig=None, keep_params={}):
+    def __init__(self, apiconfig=None, keep_params={}, api_params={}):
         if apiconfig is None:
             apiconfig = config.settings()
         self.apiconfig = copy.copy(apiconfig)
+        self.api_params = api_params
         self.local = threading.local()
+
+        # Initialize an API object for this thread before creating
+        # KeepClient, this will report if ARVADOS_API_HOST or
+        # ARVADOS_API_TOKEN are missing.
+        self.localapi()
+
         self.keep = keep.KeepClient(api_client=self, **keep_params)
 
     def localapi(self):
         if 'api' not in self.local.__dict__:
-            self.local.api = arvados.api_from_config('v1', apiconfig=self.apiconfig)
+            self.local.api = arvados.api_from_config('v1', apiconfig=self.apiconfig,
+                                                     **self.api_params)
         return self.local.api
 
     def __getattr__(self, name):
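
The new api_params argument is forwarded to every per-thread arvados.api_from_config() call. A sketch using request_id, which arvados.api() accepts per the arv-put changes above; the id value is a placeholder:

    from arvados.safeapi import ThreadSafeApiCache

    # Each worker thread builds its own API client tagged with the same
    # request id; the KeepClient is shared across threads.
    api = ThreadSafeApiCache(api_params={'request_id': 'req-example-1'},
                             keep_params={'num_retries': 3})
    first_page = api.collections().list(limit=1).execute()
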
index 4f487af0a9dbb4065ef08a42d440800e8a547e3c..5e066f014598560ed211a215ef8866150a77bac3 100644 (file)
@@ -46,14 +46,15 @@ setup(name='arvados-python-client',
           ('share/doc/arvados-python-client', ['LICENSE-2.0.txt', 'README.rst']),
       ],
       install_requires=[
-          'ciso8601 >=1.0.0, <=1.0.4',
+          'ciso8601 >=1.0.6, <2.0.0',
           'future',
           'google-api-python-client >=1.6.2, <1.7',
           'httplib2 >=0.9.2',
           'pycurl >=7.19.5.1',
-          'ruamel.yaml >=0.13.7',
+          'ruamel.yaml >=0.13.11, <0.15',
           'setuptools',
           'ws4py <0.4',
+          'subprocess32>=3.5.1',
       ],
       test_suite='tests',
       tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
index 780968cb8b16689b5f013f96809616e1f3e93e16..c21ef95f2af3a18ea8f48352a9e2b780ea1b0e1f 100644 (file)
@@ -3,21 +3,29 @@
 # SPDX-License-Identifier: Apache-2.0
 
 daemon off;
-error_log stderr info;          # Yes, must be specified here _and_ cmdline
+error_log "{{ERRORLOG}}" info;          # Yes, must be specified here _and_ cmdline
 events {
 }
 http {
-  access_log {{ACCESSLOG}} combined;
+  log_format customlog
+    '[$time_local] $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
+    '"$http_referer" "$http_user_agent"';
+  access_log "{{ACCESSLOG}}" customlog;
+  client_body_temp_path "{{TMPDIR}}";
   upstream arv-git-http {
     server localhost:{{GITPORT}};
   }
   server {
     listen *:{{GITSSLPORT}} ssl default_server;
-    server_name _;
-    ssl_certificate {{SSLCERT}};
-    ssl_certificate_key {{SSLKEY}};
+    server_name arv-git-http;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://arv-git-http;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   upstream keepproxy {
@@ -25,11 +33,15 @@ http {
   }
   server {
     listen *:{{KEEPPROXYSSLPORT}} ssl default_server;
-    server_name _;
-    ssl_certificate {{SSLCERT}};
-    ssl_certificate_key {{SSLKEY}};
+    server_name keepproxy;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keepproxy;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   upstream keep-web {
@@ -37,25 +49,44 @@ http {
   }
   server {
     listen *:{{KEEPWEBSSLPORT}} ssl default_server;
-    server_name ~^(?<request_host>.*)$;
-    ssl_certificate {{SSLCERT}};
-    ssl_certificate_key {{SSLKEY}};
+    server_name keep-web;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keep-web;
-      proxy_set_header Host $request_host:{{KEEPWEBPORT}};
+      proxy_set_header Host $http_host;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
   server {
     listen *:{{KEEPWEBDLSSLPORT}} ssl default_server;
-    server_name ~.*;
-    ssl_certificate {{SSLCERT}};
-    ssl_certificate_key {{SSLKEY}};
+    server_name keep-web-dl ~.*;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://keep-web;
-      proxy_set_header Host download:{{KEEPWEBPORT}};
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-      proxy_redirect //download:{{KEEPWEBPORT}}/ https://$host:{{KEEPWEBDLSSLPORT}}/;
+      proxy_set_header X-Forwarded-Proto https;
+
+      # Unlike other proxy sections, here we need to override the
+      # requested Host header and use proxy_redirect because of the
+      # way the test suite orchestrates services. Keep-web's "download
+      # only" behavior relies on the Host header matching a configured
+      # value, but when run_test_server.py writes keep-web's command
+      # line, the keep-web-dl TLS port (which clients will connect to
+      # and include in their Host header) has not yet been assigned.
+      #
+      # In production, "proxy_set_header Host $http_host;
+      # proxy_redirect off;" works: keep-web's redirect URLs will
+      # match the request URL received by Nginx.
+      #
+      # Here, keep-web will issue redirects to https://download/ and
+      # Nginx will rewrite them.
+      #
+      proxy_set_header Host download;
+      proxy_redirect https://download/ https://$host:{{KEEPWEBDLSSLPORT}}/;
     }
   }
   upstream ws {
@@ -63,15 +94,33 @@ http {
   }
   server {
     listen *:{{WSSPORT}} ssl default_server;
-    server_name ~^(?<request_host>.*)$;
-    ssl_certificate {{SSLCERT}};
-    ssl_certificate_key {{SSLKEY}};
+    server_name websocket;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
     location  / {
       proxy_pass http://ws;
       proxy_set_header Upgrade $http_upgrade;
       proxy_set_header Connection "upgrade";
-      proxy_set_header Host $request_host:{{WSPORT}};
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+  upstream controller {
+    server localhost:{{CONTROLLERPORT}};
+  }
+  server {
+    listen *:{{CONTROLLERSSLPORT}} ssl default_server;
+    server_name controller;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://controller;
+      proxy_set_header Host $http_host;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
     }
   }
 }
index 567b3b3bfaacf693e7147159bff4d3aa9ad71025..102433cd4186fbf392d8f2fc56af804bdec4d890 100644 (file)
@@ -174,7 +174,7 @@ def find_available_port():
     sock.close()
     return port
 
-def _wait_until_port_listens(port, timeout=10):
+def _wait_until_port_listens(port, timeout=10, warn=True):
     """Wait for a process to start listening on the given port.
 
     If nothing listens on the port within the specified timeout (given
@@ -196,20 +196,29 @@ def _wait_until_port_listens(port, timeout=10):
         except subprocess.CalledProcessError:
             time.sleep(0.1)
             continue
-        return
-    print(
-        "WARNING: Nothing is listening on port {} (waited {} seconds).".
-        format(port, timeout),
-        file=sys.stderr)
+        return True
+    if warn:
+        print(
+            "WARNING: Nothing is listening on port {} (waited {} seconds).".
+            format(port, timeout),
+            file=sys.stderr)
+    return False
+
+def _logfilename(label):
+    """Set up a labelled log file, and return a path to write logs to.
 
-def _fifo2stderr(label):
-    """Create a fifo, and copy it to stderr, prepending label to each line.
+    Normally, the returned path is {tmpdir}/{label}.log.
 
-    Return value is the path to the new FIFO.
+    In debug mode, logs are also written to stderr, with [label]
+    prepended to each line; in that case the returned path is a FIFO.
 
     +label+ should contain only alphanumerics: it is also used as part
     of the FIFO filename.
+
     """
+    logfilename = os.path.join(TEST_TMPDIR, label+'.log')
+    if not os.environ.get('ARVADOS_DEBUG', ''):
+        return logfilename
     fifo = os.path.join(TEST_TMPDIR, label+'.fifo')
     try:
         os.remove(fifo)
@@ -217,8 +226,21 @@ def _fifo2stderr(label):
         if error.errno != errno.ENOENT:
             raise
     os.mkfifo(fifo, 0o700)
+    stdbuf = ['stdbuf', '-i0', '-oL', '-eL']
+    # open(fifo, 'r') would block waiting for someone to open the fifo
+    # for writing, so we need a separate cat process to open it for
+    # us.
+    cat = subprocess.Popen(
+        stdbuf+['cat', fifo],
+        stdin=open('/dev/null'),
+        stdout=subprocess.PIPE)
+    tee = subprocess.Popen(
+        stdbuf+['tee', '-a', logfilename],
+        stdin=cat.stdout,
+        stdout=subprocess.PIPE)
     subprocess.Popen(
-        ['stdbuf', '-i0', '-oL', '-eL', 'sed', '-e', 's/^/['+label+'] /', fifo],
+        stdbuf+['sed', '-e', 's/^/['+label+'] /'],
+        stdin=tee.stdout,
         stdout=sys.stderr)
     return fifo
 
@@ -355,8 +377,11 @@ def reset():
         'POST',
         headers={'Authorization': 'OAuth2 {}'.format(token)})
     os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
-    os.environ['ARVADOS_API_HOST'] = existing_api_host
     os.environ['ARVADOS_API_TOKEN'] = token
+    if _wait_until_port_listens(_getport('controller-ssl'), timeout=0.5, warn=False):
+        os.environ['ARVADOS_API_HOST'] = '0.0.0.0:'+str(_getport('controller-ssl'))
+    else:
+        os.environ['ARVADOS_API_HOST'] = existing_api_host
 
 def stop(force=False):
     """Stop the API server, if one is running.
@@ -377,6 +402,40 @@ def stop(force=False):
         kill_server_pid(_pidfile('api'))
         my_api_host = None
 
+def run_controller():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_controller()
+    rails_api_port = int(os.environ.get('ARVADOS_TEST_API_HOST', my_api_host).split(':')[-1])
+    port = find_available_port()
+    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
+    with open(conf, 'w') as f:
+        f.write("""
+Clusters:
+  zzzzz:
+    NodeProfiles:
+      "*":
+        "arvados-controller":
+          Listen: ":{}"
+        "arvados-api-server":
+          Listen: ":{}"
+          TLS: true
+        """.format(port, rails_api_port))
+    logf = open(_logfilename('controller'), 'a')
+    controller = subprocess.Popen(
+        ["arvados-server", "controller", "-config", conf],
+        stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    with open(_pidfile('controller'), 'w') as f:
+        f.write(str(controller.pid))
+    _wait_until_port_listens(port)
+    _setport('controller', port)
+    return port
+
+def stop_controller():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('controller'))
+
 def run_ws():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
         return
@@ -403,7 +462,7 @@ Postgres:
                    _dbconfig('database'),
                    _dbconfig('username'),
                    _dbconfig('password')))
-    logf = open(_fifo2stderr('ws'), 'w')
+    logf = open(_logfilename('ws'), 'a')
     ws = subprocess.Popen(
         ["ws", "-config", conf],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
@@ -429,7 +488,7 @@ def _start_keep(n, keep_args):
     for arg, val in keep_args.items():
         keep_cmd.append("{}={}".format(arg, val))
 
-    logf = open(_fifo2stderr('keep{}'.format(n)), 'w')
+    logf = open(_logfilename('keep{}'.format(n)), 'a')
     kp0 = subprocess.Popen(
         keep_cmd, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
 
@@ -513,7 +572,7 @@ def run_keep_proxy():
     port = find_available_port()
     env = os.environ.copy()
     env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
-    logf = open(_fifo2stderr('keepproxy'), 'w')
+    logf = open(_logfilename('keepproxy'), 'a')
     kp = subprocess.Popen(
         ['keepproxy',
          '-pid='+_pidfile('keepproxy'),
@@ -552,7 +611,7 @@ def run_arv_git_httpd():
     gitport = find_available_port()
     env = os.environ.copy()
     env.pop('ARVADOS_API_TOKEN', None)
-    logf = open(_fifo2stderr('arv-git-httpd'), 'w')
+    logf = open(_logfilename('arv-git-httpd'), 'a')
     agh = subprocess.Popen(
         ['arv-git-httpd',
          '-repo-root='+gitdir+'/test',
@@ -576,11 +635,11 @@ def run_keep_web():
     keepwebport = find_available_port()
     env = os.environ.copy()
     env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
-    logf = open(_fifo2stderr('keep-web'), 'w')
+    logf = open(_logfilename('keep-web'), 'a')
     keepweb = subprocess.Popen(
         ['keep-web',
          '-allow-anonymous',
-         '-attachment-only-host=download:'+str(keepwebport),
+         '-attachment-only-host=download',
          '-listen=:'+str(keepwebport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('keep-web'), 'w') as f:
@@ -598,6 +657,8 @@ def run_nginx():
         return
     stop_nginx()
     nginxconf = {}
+    nginxconf['CONTROLLERPORT'] = _getport('controller')
+    nginxconf['CONTROLLERSSLPORT'] = find_available_port()
     nginxconf['KEEPWEBPORT'] = _getport('keep-web')
     nginxconf['KEEPWEBDLSSLPORT'] = find_available_port()
     nginxconf['KEEPWEBSSLPORT'] = find_available_port()
@@ -609,7 +670,9 @@ def run_nginx():
     nginxconf['WSSPORT'] = _getport('wss')
     nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
     nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
-    nginxconf['ACCESSLOG'] = _fifo2stderr('nginx_access_log')
+    nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
+    nginxconf['ERRORLOG'] = _logfilename('nginx_error')
+    nginxconf['TMPDIR'] = TEST_TMPDIR
 
     conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
     conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
@@ -628,6 +691,7 @@ def run_nginx():
          '-g', 'pid '+_pidfile('nginx')+';',
          '-c', conffile],
         env=env, stdin=open('/dev/null'), stdout=sys.stderr)
+    _setport('controller-ssl', nginxconf['CONTROLLERSSLPORT'])
     _setport('keep-web-dl-ssl', nginxconf['KEEPWEBDLSSLPORT'])
     _setport('keep-web-ssl', nginxconf['KEEPWEBSSLPORT'])
     _setport('keepproxy-ssl', nginxconf['KEEPPROXYSSLPORT'])
@@ -766,6 +830,7 @@ if __name__ == "__main__":
     actions = [
         'start', 'stop',
         'start_ws', 'stop_ws',
+        'start_controller', 'stop_controller',
         'start_keep', 'stop_keep',
         'start_keep_proxy', 'stop_keep_proxy',
         'start_keep-web', 'stop_keep-web',
@@ -802,6 +867,10 @@ if __name__ == "__main__":
         run_ws()
     elif args.action == 'stop_ws':
         stop_ws()
+    elif args.action == 'start_controller':
+        run_controller()
+    elif args.action == 'stop_controller':
+        stop_controller()
     elif args.action == 'start_keep':
         run_keep(enforce_permissions=args.keep_enforce_permissions, num_servers=args.num_keep_servers)
     elif args.action == 'stop_keep':
@@ -820,6 +889,7 @@ if __name__ == "__main__":
         stop_keep_web()
     elif args.action == 'start_nginx':
         run_nginx()
+        print("export ARVADOS_API_HOST=0.0.0.0:{}".format(_getport('controller-ssl')))
     elif args.action == 'stop_nginx':
         stop_nginx()
     else:
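
A sketch of how the new controller hooks are meant to be driven, mirroring the existing actions in this helper (run from the directory containing run_test_server.py so the import resolves):

    import run_test_server

    run_test_server.run()              # Rails API first
    run_test_server.run_controller()   # then arvados-controller
    run_test_server.run_nginx()        # nginx fronts the controller-ssl port
    # ... run tests; reset() now points ARVADOS_API_HOST at controller-ssl ...
    run_test_server.stop_nginx()
    run_test_server.stop_controller()
    run_test_server.stop()
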
index 4b1f69477e5823502b2a5396a586db82a56e6ff7..93cfdc2a36c26389a3259222304e7ba1d5de7dff 100644 (file)
@@ -730,6 +730,11 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers,
                           self.call_main_with_args,
                           ['--project-uuid', self.Z_UUID, '--stream'])
 
+    def test_error_when_multiple_storage_classes_specified(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--storage-classes', 'hot,cold'])
+
     def test_error_when_excluding_absolute_path(self):
         tmpdir = self.make_tmpdir()
         self.assertRaises(SystemExit,
@@ -1061,6 +1066,18 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
                                        '--project-uuid', self.PROJECT_UUID])
         self.assertEqual(link_name, collection['name'])
 
+    def test_put_collection_with_storage_classes_specified(self):
+        collection = self.run_and_find_collection("", ['--storage-classes', 'hot'])
+
+        self.assertEqual(len(collection['storage_classes_desired']), 1)
+        self.assertEqual(collection['storage_classes_desired'][0], 'hot')
+
+    def test_put_collection_without_storage_classes_specified(self):
+        collection = self.run_and_find_collection("")
+
+        self.assertEqual(len(collection['storage_classes_desired']), 1)
+        self.assertEqual(collection['storage_classes_desired'][0], 'default')
+
     def test_exclude_filename_pattern(self):
         tmpdir = self.make_tmpdir()
         tmpsubdir = os.path.join(tmpdir, 'subdir')
index 49c00191bebe02cc8e267b397212a893a33f246a..722cc56046c99777f864833be641e81914039af5 100644 (file)
@@ -14,6 +14,8 @@ import random
 import re
 import sys
 import tempfile
+import datetime
+import ciso8601
 import time
 import unittest
 
@@ -802,6 +804,18 @@ class CollectionMethods(run_test_server.TestCaseWithServers):
         self.assertEqual(fn0, c.items()[0][0])
         self.assertEqual(fn1, c.items()[1][0])
 
+    def test_get_properties(self):
+        c = Collection()
+        self.assertEqual(c.get_properties(), {})
+        c.save_new(properties={"foo":"bar"})
+        self.assertEqual(c.get_properties(), {"foo":"bar"})
+
+    def test_get_trash_at(self):
+        c = Collection()
+        self.assertEqual(c.get_trash_at(), None)
+        c.save_new(trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+        self.assertEqual(c.get_trash_at(), ciso8601.parse_datetime('2111-01-01T11:11:11.111111000Z'))
+
 
 class CollectionOpenModes(run_test_server.TestCaseWithServers):
 
@@ -1300,17 +1314,43 @@ class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
 
     def test_create_and_save(self):
         c = self.create_count_txt()
-        c.save()
+        c.save(properties={'type' : 'Intermediate'},
+               storage_classes=['archive'],
+               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+
         self.assertRegex(
             c.manifest_text(),
             r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['archive'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Intermediate'})
+        self.assertEqual(c.api_response()["trash_at"], '2111-01-01T11:11:11.111111000Z')
+
 
     def test_create_and_save_new(self):
         c = self.create_count_txt()
-        c.save_new()
+        c.save_new(properties={'type' : 'Intermediate'},
+                   storage_classes=['archive'],
+                   trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+
         self.assertRegex(
             c.manifest_text(),
             r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['archive'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Intermediate'})
+        self.assertEqual(c.api_response()["trash_at"], '2111-01-01T11:11:11.111111000Z')
+
+    def test_create_and_save_after_committing(self):
+        c = self.create_count_txt()
+        c.save(properties={'type' : 'Intermediate'},
+               storage_classes=['hot'],
+               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+        c.save(properties={'type' : 'Output'},
+               storage_classes=['cold'],
+               trash_at=datetime.datetime(2222, 2, 2, 22, 22, 22, 222222))
+
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['cold'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Output'})
+        self.assertEqual(c.api_response()["trash_at"], '2222-02-02T22:22:22.222222000Z')
 
     def test_create_diff_apply(self):
         c1 = self.create_count_txt()
index 872c93bae25b5480de1cbf91400f716543415700..a7b79933bbc2999381fea887ac3a70e77f346b3c 100644 (file)
@@ -319,6 +319,29 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
         self.assertEqual('100::1', service.hostname)
         self.assertEqual(10, service.port)
 
+    def test_insecure_disables_tls_verify(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+
+        api_client.insecure = True
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                0)
+
+        api_client.insecure = False
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            # getopt()==None here means we didn't change the
+            # default. If we were using real pycurl instead of a mock,
+            # it would return the default value 1.
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                None)
+
     # test_*_timeout verify that KeepClient instructs pycurl to use
     # the appropriate connection and read timeouts. They don't care
     # whether pycurl actually exhibits the expected timeout behavior
@@ -1257,6 +1280,8 @@ class KeepClientAPIErrorTest(unittest.TestCase):
             def __getattr__(self, r):
                 if r == "api_token":
                     return "abc"
+                elif r == "insecure":
+                    return False
                 else:
                     raise arvados.errors.KeepReadError()
         keep_client = arvados.KeepClient(api_client=ApiMock(),
index c351189f81d5df59fbdf5a4dcfc6afd20a97a586..e39c4263177b8ed4f290c17cb744c309725c56bc 100644 (file)
@@ -7,12 +7,14 @@ if not File.exist?('/usr/bin/git') then
   exit
 end
 
+git_latest_tag = `git describe --abbrev=0`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
 git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
 git_timestamp = Time.at(git_timestamp.to_i).utc
 
 Gem::Specification.new do |s|
   s.name        = 'arvados'
-  s.version     = "0.1.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
   s.date        = git_timestamp.strftime("%Y-%m-%d")
   s.summary     = "Arvados client library"
   s.description = "Arvados client library, git commit #{git_hash}"
index 30cd8a559fb5c0d58177f59907edf7e0c716ac6f..a3ff8e78997855640e93aa479d92dd93cb36b012 100644 (file)
@@ -58,7 +58,7 @@ GEM
       i18n (~> 0)
       json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
-    arvados-cli (0.1.20171211220040)
+    arvados-cli (1.1.4.20180412190507)
       activesupport (>= 3.2.13, < 5)
       andand (~> 1.3, >= 1.3.3)
       arvados (~> 0.1, >= 0.1.20150128223554)
@@ -86,11 +86,11 @@ GEM
       execjs
     coffee-script-source (1.12.2)
     concurrent-ruby (1.0.5)
-    crass (1.0.3)
+    crass (1.0.4)
     curb (0.9.4)
-    database_cleaner (1.6.2)
+    database_cleaner (1.7.0)
     erubis (2.7.0)
-    eventmachine (1.2.5)
+    eventmachine (1.2.6)
     execjs (2.7.0)
     extlib (0.9.16)
     factory_girl (4.9.0)
@@ -130,7 +130,7 @@ GEM
     httpclient (2.8.3)
     i18n (0.9.5)
       concurrent-ruby (~> 1.0)
-    jquery-rails (4.3.1)
+    jquery-rails (4.3.3)
       rails-dom-testing (>= 1, < 3)
       railties (>= 4.2.0)
       thor (>= 0.14, < 2.0)
@@ -143,13 +143,13 @@ GEM
     logging (2.2.2)
       little-plugger (~> 1.1)
       multi_json (~> 1.10)
-    lograge (0.9.0)
+    lograge (0.10.0)
       actionpack (>= 4)
       activesupport (>= 4)
       railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    loofah (2.2.0)
+    loofah (2.2.2)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
     mail (2.7.0)
@@ -159,7 +159,7 @@ GEM
     mini_mime (1.0.0)
     mini_portile2 (2.3.0)
     minitest (5.11.3)
-    mocha (1.3.0)
+    mocha (1.5.0)
       metaclass (~> 0.0.1)
     multi_json (1.13.1)
     multi_xml (0.6.0)
@@ -188,7 +188,7 @@ GEM
       oauth2 (~> 1.1)
       omniauth (~> 1.2)
     os (0.9.6)
-    passenger (5.2.1)
+    passenger (5.3.0)
       rack
       rake (>= 0.8.1)
     pg (0.21.0)
@@ -196,7 +196,7 @@ GEM
     protected_attributes (1.1.4)
       activemodel (>= 4.0.1, < 5.0)
     public_suffix (3.0.2)
-    rack (1.6.9)
+    rack (1.6.10)
     rack-test (0.6.3)
       rack (>= 1.0)
     rails (4.2.10)
@@ -216,8 +216,8 @@ GEM
       activesupport (>= 4.2.0, < 5.0)
       nokogiri (~> 1.6)
       rails-deprecated_sanitizer (>= 1.0.1)
-    rails-html-sanitizer (1.0.3)
-      loofah (~> 2.0)
+    rails-html-sanitizer (1.0.4)
+      loofah (~> 2.2, >= 2.2.2)
     rails-observers (0.1.5)
       activemodel (>= 4.0)
     railties (4.2.10)
@@ -225,9 +225,9 @@ GEM
       activesupport (= 4.2.10)
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.3.0)
+    rake (12.3.1)
     ref (2.0.0)
-    request_store (1.4.0)
+    request_store (1.4.1)
       rack (>= 1.4)
     responders (2.4.0)
       actionpack (>= 4.2.0, < 5.3)
@@ -254,7 +254,7 @@ GEM
     simplecov-html (0.7.1)
     simplecov-rcov (0.2.3)
       simplecov (>= 0.4.1)
-    sprockets (2.12.4)
+    sprockets (2.12.5)
       hike (~> 1.2)
       multi_json (~> 1.0)
       rack (~> 1.0)
index fb75007dc6738ab984d5de19bf347fb9b672c975..6e77c12a1d6f37a88b45c4875ee43e7c912b94a9 100644 (file)
@@ -36,7 +36,19 @@ class Arvados::V1::CollectionsController < ApplicationController
   def find_object_by_uuid
     if loc = Keep::Locator.parse(params[:id])
       loc.strip_hints!
-      if c = Collection.readable_by(*@read_users).where({ portable_data_hash: loc.to_s }).limit(1).first
+
+      # It matters which Collection object we pick because we use it to get signed_manifest_text,
+      # the value of which is affected by the value of trash_at.
+      #
+      # From the Postgres docs: "By default, null values sort as if larger than any non-null
+      # value; that is, NULLS FIRST is the default for DESC order, and
+      # NULLS LAST otherwise."
+      #
+      # "trash_at desc" sorts null first, then latest to earliest, so
+      # it will select the Collection object with the longest
+      # available lifetime.
+
+      if c = Collection.readable_by(*@read_users).where({ portable_data_hash: loc.to_s }).order("trash_at desc").limit(1).first
         @object = {
           uuid: c.portable_data_hash,
           portable_data_hash: c.portable_data_hash,
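
The selection rule the comment describes can be checked with a few lines of Python over hypothetical rows: under "trash_at desc", a NULL trash_at outranks any concrete time, and later times outrank earlier ones:

    rows = [{'uuid': 'a', 'trash_at': '2018-01-01T00:00:00Z'},
            {'uuid': 'b', 'trash_at': None},
            {'uuid': 'c', 'trash_at': '2019-01-01T00:00:00Z'}]

    # (is None, timestamp) ranks NULL first, then latest to earliest.
    best = max(rows, key=lambda r: (r['trash_at'] is None, r['trash_at'] or ''))
    assert best['uuid'] == 'b'  # the longest-lived readable copy wins
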
index 6ec92b0ba66f9a59bc978844563a68d84d20f417..25cb0037a253de47a7b6b55da10e7ad8e9c758ea 100644 (file)
@@ -20,10 +20,9 @@ class Arvados::V1::ContainersController < ApplicationController
     show
   end
 
-  # Updates use row locking to resolve races between multiple
-  # dispatchers trying to lock the same container.
   def update
     @object.with_lock do
+      @object.reload
       super
     end
   end
index adac9960c41a06fff4da68da67e87a0ebf6facd6..49fc398e14bc86232ec8f791ffa0d986a376c48a 100644 (file)
@@ -33,6 +33,8 @@ class Arvados::V1::SchemaController < ApplicationController
         version: "v1",
         revision: "20131114",
         source_version: AppVersion.hash,
+        sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
+        packageVersion: AppVersion.package_version,
         generatedAt: db_current_time.iso8601,
         title: "Arvados API",
         description: "The API to interact with Arvados.",
index dc7e62f3e340741bec37e1d72bbb99c7ccf797d4..d2126ec5f7793ffe6e502d182ea3d852d8c5ceb6 100644 (file)
@@ -6,9 +6,9 @@ class Arvados::V1::UsersController < ApplicationController
   accept_attribute_as_json :prefs, Hash
 
   skip_before_filter :find_object_by_uuid, only:
-    [:activate, :current, :system, :setup]
+    [:activate, :current, :system, :setup, :merge]
   skip_before_filter :render_404_if_no_object, only:
-    [:activate, :current, :system, :setup]
+    [:activate, :current, :system, :setup, :merge]
   before_filter :admin_required, only: [:setup, :unsetup, :update_uuid]
 
   def current
@@ -125,8 +125,60 @@ class Arvados::V1::UsersController < ApplicationController
     show
   end
 
+  def merge
+    if !Thread.current[:api_client].andand.is_trusted
+      return send_error("supplied API token is not from a trusted client", status: 403)
+    elsif Thread.current[:api_client_authorization].scopes != ['all']
+      return send_error("cannot merge with a scoped token", status: 403)
+    end
+
+    new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
+    if !new_auth
+      return send_error("invalid new_user_token", status: 401)
+    end
+    if !new_auth.api_client.andand.is_trusted
+      return send_error("supplied new_user_token is not from a trusted client", status: 403)
+    elsif new_auth.scopes != ['all']
+      return send_error("supplied new_user_token has restricted scope", status: 403)
+    end
+    new_user = new_auth.user
+
+    if current_user.uuid == new_user.uuid
+      return send_error("cannot merge user to self", status: 422)
+    end
+
+    if !new_user.can?(write: params[:new_owner_uuid])
+      return send_error("cannot move objects into supplied new_owner_uuid: new user does not have write permission", status: 403)
+    end
+
+    redirect = params[:redirect_to_new_user]
+    if !redirect
+      return send_error("merge with redirect_to_new_user=false is not yet supported", status: 422)
+    end
+
+    @object = current_user
+    act_as_system_user do
+      @object.merge(new_owner_uuid: params[:new_owner_uuid], redirect_to_user_uuid: redirect && new_user.uuid)
+    end
+    show
+  end
+
   protected
 
+  def self._merge_requires_parameters
+    {
+      new_owner_uuid: {
+        type: 'string', required: true,
+      },
+      new_user_token: {
+        type: 'string', required: true,
+      },
+      redirect_to_new_user: {
+        type: 'boolean', required: false,
+      },
+    }
+  end
+
   def self._setup_requires_parameters
     {
       user: {
@@ -159,7 +211,7 @@ class Arvados::V1::UsersController < ApplicationController
     return super if @read_users.any?(&:is_admin)
     if params[:uuid] != current_user.andand.uuid
       # Non-admin index/show returns very basic information about readable users.
-      safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name"]
+      safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name", "username"]
       if @select
         @select = @select & safe_attrs
       else
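
From the Python SDK the new endpoint surfaces via the discovery document, so a merge call should look roughly like this sketch; the token and UUID values are placeholders, and per the checks above both tokens must be unscoped and issued to trusted API clients:

    import arvados

    # Authenticate as the old account (the one being merged away).
    old_api = arvados.api('v1')
    old_api.users().merge(
        new_user_token='token-of-surviving-account',    # placeholder
        new_owner_uuid='zzzzz-j7d0g-xxxxxxxxxxxxxxx',   # placeholder project
        redirect_to_new_user=True).execute()
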
index 5de85bc98bcbcb1a0051c3ecee355e82292b5a27..b8fe2948923582ad9f40f3ec00c394cd6b2473ec 100644 (file)
@@ -26,9 +26,9 @@ class UserSessionsController < ApplicationController
 
     # Only local users can create sessions, hence uuid_like_pattern
     # here.
-    user = User.where('identity_url = ? and uuid like ?',
-                      omniauth['info']['identity_url'],
-                      User.uuid_like_pattern).first
+    user = User.unscoped.where('identity_url = ? and uuid like ?',
+                               omniauth['info']['identity_url'],
+                               User.uuid_like_pattern).first
     if not user
       # Check for permission to log in to an existing User record with
       # a different identity_url
@@ -45,6 +45,7 @@ class UserSessionsController < ApplicationController
         end
       end
     end
+
     if not user
       # New user registration
       user = User.new(:email => omniauth['info']['email'],
@@ -67,6 +68,13 @@ class UserSessionsController < ApplicationController
         # First login to a pre-activated account
         user.identity_url = omniauth['info']['identity_url']
       end
+
+      while (uuid = user.redirect_to_user_uuid)
+        user = User.unscoped.where(uuid: uuid).first
+        if !user
+          raise Exception.new("identity_url #{omniauth['info']['identity_url']} redirects to nonexistent uuid #{uuid}")
+        end
+      end
     end
 
     # For the benefit of functional and integration tests:
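
The while loop above follows redirect chains of any length. With the user fixtures added later in this commit, logging in as double_redirects_to_active resolves through redirects_to_active to the active user. A console sketch of the same walk (the UUID below is the double_redirects_to_active fixture):

    u = User.unscoped.find_by_uuid('zzzzz-tpzed-oiusowoxoz0pk3p')
    u = User.unscoped.find_by_uuid(u.redirect_to_user_uuid) while u.redirect_to_user_uuid
    u.uuid  # => "zzzzz-tpzed-xurymjxw79nv3jz" (the active user)
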
index b158faa272635d1cce630faf58bea0fc307fa128..8ea9f7bd885a396541b2e1db9f6c9c55688ba870 100644 (file)
@@ -92,7 +92,7 @@ class ApiClientAuthorization < ArvadosModel
        uuid_prefix+".arvadosapi.com")
   end
 
-  def self.validate(token:, remote:)
+  def self.validate(token:, remote: nil)
     return nil if !token
     remote ||= Rails.configuration.uuid_prefix
 
@@ -161,7 +161,8 @@ class ApiClientAuthorization < ArvadosModel
           end
         end
 
-        if Rails.configuration.new_users_are_active
+        if Rails.configuration.new_users_are_active ||
+           Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
           # Update is_active to whatever it is at the remote end
           user.is_active = remote_user['is_active']
         elsif !remote_user['is_active']
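
Making the remote keyword optional lets callers validate a locally-issued token without naming a cluster; this is how the new users/merge endpoint uses it (tok stands in for a token string):

    auth = ApiClientAuthorization.validate(token: tok)  # remote defaults to Rails.configuration.uuid_prefix
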
index 8882b2c76344f4db32cdfebfd6aba69d5428b596..7ec9845bc1983c0819f4d801e5044d8e5765f00f 100644 (file)
@@ -5,6 +5,7 @@
 require 'log_reuse_info'
 require 'whitelist_update'
 require 'safe_json'
+require 'update_priority'
 
 class Container < ArvadosModel
   include ArvadosModelUpdates
@@ -37,6 +38,7 @@ class Container < ArvadosModel
   before_save :scrub_secret_mounts
   after_save :handle_completed
   after_save :propagate_priority
+  after_commit { UpdatePriority.run_update_thread }
 
   has_many :container_requests, :foreign_key => :container_uuid, :class_name => 'ContainerRequest', :primary_key => :uuid
   belongs_to :auth, :class_name => 'ApiClientAuthorization', :foreign_key => :auth_uuid, :primary_key => :uuid
@@ -315,11 +317,7 @@ class Container < ArvadosModel
     # (because state might have changed while acquiring the lock).
     check_lock_fail
     transaction do
-      begin
-        reload(lock: 'FOR UPDATE NOWAIT')
-      rescue
-        raise LockFailedError.new("cannot lock: other transaction in progress")
-      end
+      reload
       check_lock_fail
       update_attributes!(state: Locked)
     end
index bc01b33652357b26d73e1d66c7d7189fbd43c3c6..dd3ff767dd4c8f86b523add765afe2f3516fba5d 100644 (file)
@@ -28,19 +28,20 @@ class ContainerRequest < ArvadosModel
 
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :validate_runtime_constraints
-  before_validation :validate_scheduling_parameters
+  before_validation :set_default_preemptible_scheduling_parameter
   before_validation :set_container
   validates :command, :container_image, :output_path, :cwd, :presence => true
   validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
   validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+  validate :validate_scheduling_parameters
   validate :validate_state_change
   validate :check_update_whitelist
   validate :secret_mounts_key_conflict
   before_save :scrub_secret_mounts
-  after_save :update_priority
-  after_save :finalize_if_needed
   before_create :set_requesting_container_uuid
   before_destroy :set_priority_zero
+  after_save :update_priority
+  after_save :finalize_if_needed
 
   api_accessible :user, extend: :common do |t|
     t.add :command
@@ -83,10 +84,10 @@ class ContainerRequest < ArvadosModel
     Committed => [Final]
   }
 
-  AttrsPermittedAlways = [:owner_uuid, :state, :name, :description]
+  AttrsPermittedAlways = [:owner_uuid, :state, :name, :description, :properties]
   AttrsPermittedBeforeCommit = [:command, :container_count_max,
   :container_image, :cwd, :environment, :filters, :mounts,
-  :output_path, :priority, :properties, :requesting_container_uuid,
+  :output_path, :priority,
   :runtime_constraints, :state, :container_uuid, :use_existing,
   :scheduling_parameters, :secret_mounts, :output_name, :output_ttl]
 
@@ -197,6 +198,18 @@ class ContainerRequest < ArvadosModel
     end
   end
 
+  def set_default_preemptible_scheduling_parameter
+    c = get_requesting_container()
+    if self.state == Committed
+      # If preemptible instances (e.g. AWS Spot Instances) are allowed,
+      # request them for child containers by default.
+      if Rails.configuration.preemptible_instances and !c.nil? and
+         self.scheduling_parameters['preemptible'].nil?
+        self.scheduling_parameters['preemptible'] = true
+      end
+    end
+  end
+
   def validate_runtime_constraints
     case self.state
     when Committed
@@ -223,6 +236,14 @@ class ContainerRequest < ArvadosModel
             scheduling_parameters['partitions'].size)
             errors.add :scheduling_parameters, "partitions must be an array of strings"
       end
+      if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+        errors.add :scheduling_parameters, "preemptible instances are not allowed"
+      end
+      if scheduling_parameters.include? 'max_run_time' and
+         (!scheduling_parameters['max_run_time'].is_a?(Integer) ||
+          scheduling_parameters['max_run_time'] < 0)
+        errors.add :scheduling_parameters, "max_run_time must be a positive integer"
+      end
     end
   end
 
@@ -288,7 +309,6 @@ class ContainerRequest < ArvadosModel
     act_as_system_user do
       Container.
         where('uuid in (?)', [self.container_uuid_was, self.container_uuid].compact).
-        lock(true).
         map(&:update_priority!)
     end
   end
@@ -298,14 +318,18 @@ class ContainerRequest < ArvadosModel
   end
 
   def set_requesting_container_uuid
-    return !new_record? if self.requesting_container_uuid   # already set
+    c = get_requesting_container()
+    if !c.nil?
+      self.requesting_container_uuid = c.uuid
+      self.priority = c.priority>0 ? 1 : 0
+    end
+  end
 
-    token_uuid = current_api_client_authorization.andand.uuid
-    container = Container.where('auth_uuid=?', token_uuid).order('created_at desc').first
-    if container
-      self.requesting_container_uuid = container.uuid
-      self.priority = container.priority > 0 ? 1 : 0
+  def get_requesting_container
+    return self.requesting_container_uuid if !self.requesting_container_uuid.nil?
+    return if !current_api_client_authorization
+    if (c = Container.where('auth_uuid=?', current_api_client_authorization.uuid).select([:uuid, :priority]).first)
+      return c
     end
-    true
   end
 end
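
The effect of the new default: with preemptible_instances enabled, a request committed under a running container's token (i.e. a child request) gets preemptible set automatically. A sketch in the style of the unit tests later in this commit (with_container_auth and create_minimal_req! are test helpers; the container UUID is a fixture):

    Rails.configuration.preemptible_instances = true
    set_user_from_auth :active
    cr = with_container_auth(Container.find_by_uuid('zzzzz-dz642-runningcontainr')) do
      create_minimal_req!(state: ContainerRequest::Committed, priority: 1)
    end
    cr.scheduling_parameters['preemptible']  # => true
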
index fe183678c155855a19e6acd2fc4cf8089cd6749e..7a7f0a3a600643cd43afe4d0eca3a2f66ef2a2b1 100644 (file)
@@ -12,6 +12,8 @@ class Group < ArvadosModel
   include CanBeAnOwner
   include Trashable
 
+  serialize :properties, Hash
+
   after_create :invalidate_permissions_cache
   after_update :maybe_invalidate_permissions_cache
   before_create :assign_name
@@ -24,6 +26,7 @@ class Group < ArvadosModel
     t.add :delete_at
     t.add :trash_at
     t.add :is_trashed
+    t.add :properties
   end
 
   def maybe_invalidate_permissions_cache
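
With properties now serialized and exposed, groups accept arbitrary key/value metadata the same way links and container requests do. A test-style sketch (the property values are illustrative):

    post('/arvados/v1/groups', {
           group: {
             group_class: 'project',
             name: 'demo project',
             properties: {'category' => 'WGS'},
           },
         }, auth(:active))
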
index 9209411f1e30fbb36ab0d44769c2815550bac0a0..cc3a22cbf0d75f93563bfb375d1306141e958a26 100644 (file)
@@ -30,6 +30,7 @@ class User < ArvadosModel
   before_create :set_initial_username, :if => Proc.new { |user|
     user.username.nil? and user.email
   }
+  after_create :setup_on_activate
   after_create :add_system_group_permission_link
   after_create :invalidate_permissions_cache
   after_create :auto_setup_new_user, :if => Proc.new { |user|
@@ -48,6 +49,8 @@ class User < ArvadosModel
   has_many :authorized_keys, :foreign_key => :authorized_user_uuid, :primary_key => :uuid
   has_many :repositories, foreign_key: :owner_uuid, primary_key: :uuid
 
+  default_scope { where('redirect_to_user_uuid is null') }
+
   api_accessible :user, extend: :common do |t|
     t.add :email
     t.add :username
@@ -269,25 +272,85 @@ class User < ArvadosModel
       old_uuid = self.uuid
       self.uuid = new_uuid
       save!(validate: false)
+      change_all_uuid_refs(old_uuid: old_uuid, new_uuid: new_uuid)
+    end
+  end
+
+  # Move this user's (i.e., self's) owned items into new_owner_uuid.
+  # Also redirect future uses of this account to
+  # redirect_to_user_uuid, i.e., when a caller authenticates to this
+  # account in the future, the redirect_to_user_uuid account
+  # will be used instead.
+  #
+  # current_user must have admin privileges, i.e., the caller is
+  # responsible for checking permission to do this.
+  def merge(new_owner_uuid:, redirect_to_user_uuid:)
+    raise PermissionDeniedError if !current_user.andand.is_admin
+    raise "not implemented" if !redirect_to_user_uuid
+    transaction(requires_new: true) do
+      reload
+      raise "cannot merge an already merged user" if self.redirect_to_user_uuid
+
+      new_user = User.where(uuid: redirect_to_user_uuid).first
+      raise "user does not exist" if !new_user
+      raise "cannot merge to an already merged user" if new_user.redirect_to_user_uuid
+
+      # Existing API tokens are updated to authenticate to the new
+      # user.
+      ApiClientAuthorization.
+        where(user_id: id).
+        update_all(user_id: new_user.id)
+
+      # References to the old user UUID in the context of a user ID
+      # (rather than a "home project" in the project hierarchy) are
+      # updated to point to the new user.
+      [
+        [AuthorizedKey, :owner_uuid],
+        [AuthorizedKey, :authorized_user_uuid],
+        [Repository, :owner_uuid],
+        [Link, :owner_uuid],
+        [Link, :tail_uuid],
+        [Link, :head_uuid],
+      ].each do |klass, column|
+        klass.where(column => uuid).update_all(column => new_user.uuid)
+      end
+
+      # References to the merged user's "home project" are updated to
+      # point to new_owner_uuid.
       ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
-        klass.columns.each do |col|
-          if col.name.end_with?('_uuid')
-            column = col.name.to_sym
-            klass.where(column => old_uuid).update_all(column => new_uuid)
-          end
-        end
+        next if [ApiClientAuthorization,
+                 AuthorizedKey,
+                 Link,
+                 Log,
+                 Repository].include?(klass)
+        next if !klass.columns.collect(&:name).include?('owner_uuid')
+        klass.where(owner_uuid: uuid).update_all(owner_uuid: new_owner_uuid)
       end
+
+      update_attributes!(redirect_to_user_uuid: new_user.uuid)
+      invalidate_permissions_cache
     end
   end
 
   protected
 
+  def change_all_uuid_refs(old_uuid:, new_uuid:)
+    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+      klass.columns.each do |col|
+        if col.name.end_with?('_uuid')
+          column = col.name.to_sym
+          klass.where(column => old_uuid).update_all(column => new_uuid)
+        end
+      end
+    end
+  end
+
   def ensure_ownership_path_leads_to_user
     true
   end
 
   def permission_to_update
-    if username_changed?
+    if username_changed? || redirect_to_user_uuid_changed?
       current_user.andand.is_admin
     else
       # users must be able to update themselves (even if they are
@@ -298,7 +361,8 @@ class User < ArvadosModel
 
   def permission_to_create
     current_user.andand.is_admin or
-      (self == current_user and
+      (self == current_user &&
+       self.redirect_to_user_uuid.nil? &&
        self.is_active == Rails.configuration.new_users_are_active)
   end
 
@@ -351,7 +415,7 @@ class User < ArvadosModel
     end
     if self.is_active_changed?
       if self.is_active != self.is_active_was
-        logger.warn "User #{current_user.uuid} tried to change is_active from #{self.is_admin_was} to #{self.is_admin} for #{self.uuid}"
+        logger.warn "User #{current_user.uuid} tried to change is_active from #{self.is_active_was} to #{self.is_active} for #{self.uuid}"
         self.is_active = self.is_active_was
       end
     end
@@ -400,7 +464,7 @@ class User < ArvadosModel
 
     if !oid_login_perms.any?
       # create openid login permission
-      oid_login_perm = Link.create(link_class: 'permission',
+      oid_login_perm = Link.create!(link_class: 'permission',
                                    name: 'can_login',
                                    tail_uuid: self.email,
                                    head_uuid: self.uuid,
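
For admin scripts working below the controller layer, the User#merge method added above mirrors the controller call; a console sketch (old_user and new_user stand in for User records):

    act_as_system_user do
      old_user.merge(new_owner_uuid: new_user.uuid,
                     redirect_to_user_uuid: new_user.uuid)
    end
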
index a1c35f10fcf1f9e1aae9ead9bf1cda00b5f2535a..f976a83ca96bf4cffb562cd74f3ccaf27590198f 100644 (file)
@@ -117,7 +117,11 @@ common:
   ### New user & email settings
   ###
 
-  # Config parameters to automatically setup new users.
+  # Config parameters to automatically set up new users.  If enabled,
+  # these users will be able to self-activate.  Enable this if you want
+  # to run an open instance where anyone can create an account and use
+  # the system without requiring manual approval.
+  #
   # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
   # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
   auto_setup_new_users: false
@@ -125,7 +129,9 @@ common:
   auto_setup_new_users_with_repository: false
   auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
 
-  # When new_users_are_active is set to true, the user agreement check is skipped.
+  # When new_users_are_active is set to true, new users will be active
+  # immediately.  This skips the "self-activate" step which enforces
+  # user agreements.  Should only be enabled for development.
   new_users_are_active: false
 
   # The e-mail address of the user you would like to become marked as an admin
@@ -289,6 +295,11 @@ common:
   ### Crunch, DNS & compute node management
   ###
 
+  # Preemptible instance support (e.g. AWS Spot Instances)
+  # When true, child containers will be created with the preemptible
+  # scheduling parameter set.
+  preemptible_instances: false
+
   # Docker image to be used when none found in runtime_constraints of a job
   default_docker_image_for_jobs: false
 
@@ -404,6 +415,12 @@ common:
   # remote_hosts above.
   remote_hosts_via_dns: false
 
+  # List of cluster prefixes.  These are "trusted" clusters; users
+  # from the clusters listed here will be automatically set up and
+  # activated.  This is separate from the settings
+  # auto_setup_new_users and new_users_are_active.
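+  # For example, to trust a (hypothetical) cluster with prefix
+  # "zbbbb": auto_activate_users_from: ["zbbbb"]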
+  auto_activate_users_from: []
+
   ###
   ### Remaining assorted configuration options.
   ###
@@ -449,6 +466,11 @@ common:
   # "git log".
   source_version: false
 
+  # Override the automatic package version string. With the default value of
+  # false, the package version is read from package-build.version in Rails.root
+  # (included in vendor packages).
+  package_version: false
+
   # Enable asynchronous permission graph rebuild.  Must run
   # script/permission-updater.rb as a separate process.  When the permission
   # cache is invalidated, the background process will update the permission
index db9b2255c2e92cb2d5b346d12f35fbb9a43bb95a..ef4e428bff0f97fafa0f0831beb98de52a2a164d 100644 (file)
@@ -27,6 +27,16 @@ Server::Application.configure do
       end
     end
 
+    # Redact the new_user_token param in /arvados/v1/users/merge
+    # requests. Log the auth UUID instead, if the token exists.
+    if params['new_user_token'].is_a? String
+      params['new_user_token_uuid'] =
+        ApiClientAuthorization.
+          where('api_token = ?', params['new_user_token']).
+          first.andand.uuid
+      params['new_user_token'] = '[...]'
+    end
+
     params_s = SafeJSON.dump(params)
     if params_s.length > Rails.configuration.max_request_log_params_size
       payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
index ad2406ae45a7be049c8122920204ab90542b9f67..b0c09840d790db1d634139bd796691d82f2b7c8c 100644 (file)
@@ -81,6 +81,7 @@ Server::Application.routes.draw do
         post 'setup', on: :collection
         post 'unsetup', on: :member
         post 'update_uuid', on: :member
+        post 'merge', on: :collection
       end
       resources :virtual_machines do
         get 'logins', on: :member
index dfa08db1a9c82cf01e063f7963e8413bb2cccfd3..707c3dd946f377d7ada7513cf12a1ced9d896487 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 require 'migrate_yaml_to_json'
 
 class YamlToJson < ActiveRecord::Migration
index 003e5fb0929edfa490e4be82383f2ca7e0b75acb..921803a2970137c89cf8e19c8fdd329061f09c1c 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 require './db/migrate/20161213172944_full_text_search_indexes'
 
 class JsonCollectionProperties < ActiveRecord::Migration
index d90011c7c1db55ccf9d1c679245155c573fe0cca..aa42423a4dbd7ab9dc2d39ad13ca93e14dba485f 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 class AddIndexToContainers < ActiveRecord::Migration
   def up
     ActiveRecord::Base.connection.execute("CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at desc, uuid asc)")
index b93dc54fcdb4eb4eba384fd57c5477c0a1a363d4..c9e50a64b79e56a64b1d90cfaee7be2c2dbf8900 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 class FixTrashFlagFollow < ActiveRecord::Migration
   def change
     ActiveRecord::Base.connection.execute("DROP MATERIALIZED VIEW materialized_permission_view")
index ce2403e743578f272f34cf360dfb544dc6f2132c..0183ef6dc51dc50d81df2999fa8bdd01539f8c14 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 class AddGinIndexToCollectionProperties < ActiveRecord::Migration
   def up
     ActiveRecord::Base.connection.execute("CREATE INDEX collection_index_on_properties ON collections USING gin (properties);")
index c56b7dcaf730cf715e24f87a717729a243249412..a161f633d8f9a7d5d9a2422650931bbb07c10fec 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 class AddSecretMountsToContainers < ActiveRecord::Migration
   def change
     add_column :container_requests, :secret_mounts, :jsonb, default: {}
index d577cbbb3eed43473484473d6edcd9f9a55e18b4..529126b299a701617ede80a1ba75fed1b05de280 100644 (file)
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 class ChangeContainerPriorityBigint < ActiveRecord::Migration
   def change
     change_column :containers, :priority, :integer, limit: 8
diff --git a/services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb b/services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb
new file mode 100644 (file)
index 0000000..10b35a7
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRedirectToUserUuidToUsers < ActiveRecord::Migration
+  def up
+    add_column :users, :redirect_to_user_uuid, :string
+    User.reset_column_information
+    remove_index :users, name: 'users_search_index'
+    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'
+  end
+
+  def down
+    remove_index :users, name: 'users_search_index'
+    remove_column :users, :redirect_to_user_uuid
+    User.reset_column_information
+    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'
+  end
+end
diff --git a/services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb b/services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb
new file mode 100644 (file)
index 0000000..79e777e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddContainerAuthUuidIndex < ActiveRecord::Migration
+  def change
+    add_index :containers, :auth_uuid
+  end
+end
diff --git a/services/api/db/migrate/20180607175050_properties_to_jsonb.rb b/services/api/db/migrate/20180607175050_properties_to_jsonb.rb
new file mode 100644 (file)
index 0000000..988227a
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class PropertiesToJsonb < ActiveRecord::Migration
+
+  @@tables_columns = [["nodes", "properties"],
+                      ["nodes", "info"],
+                      ["container_requests", "properties"],
+                      ["links", "properties"]]
+
+  def up
+    @@tables_columns.each do |table, column|
+      # Drop the FT index before changing column type to avoid
+      # "PG::DatatypeMismatch: ERROR: COALESCE types jsonb and text
+      # cannot be matched".
+      ActiveRecord::Base.connection.execute "DROP INDEX IF EXISTS #{table}_full_text_search_idx"
+      ActiveRecord::Base.connection.execute "ALTER TABLE #{table} ALTER COLUMN #{column} TYPE jsonb USING #{column}::jsonb"
+      ActiveRecord::Base.connection.execute "CREATE INDEX #{table}_index_on_#{column} ON #{table} USING gin (#{column})"
+    end
+    FullTextSearchIndexes.new.replace_index("container_requests")
+  end
+
+  def down
+    @@tables_columns.each do |table, column|
+      ActiveRecord::Base.connection.execute "DROP INDEX IF EXISTS #{table}_index_on_#{column}"
+      ActiveRecord::Base.connection.execute "ALTER TABLE #{table} ALTER COLUMN #{column} TYPE text"
+    end
+  end
+end
diff --git a/services/api/db/migrate/20180608123145_add_properties_to_groups.rb b/services/api/db/migrate/20180608123145_add_properties_to_groups.rb
new file mode 100644 (file)
index 0000000..12c6696
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class AddPropertiesToGroups < ActiveRecord::Migration
+  def up
+    add_column :groups, :properties, :jsonb, default: {}
+    ActiveRecord::Base.connection.execute("CREATE INDEX group_index_on_properties ON groups USING gin (properties);")
+    FullTextSearchIndexes.new.replace_index('groups')
+  end
+
+  def down
+    ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS group_index_on_properties")
+    remove_column :groups, :properties
+  end
+end
index 27511145e9002f6abf8370d38872af99cf18922c..a201a05aaf83a8efe52469f349e4c84fb75927f3 100644 (file)
@@ -277,7 +277,7 @@ CREATE TABLE container_requests (
     modified_by_user_uuid character varying(255),
     name character varying(255),
     description text,
-    properties text,
+    properties jsonb,
     state character varying(255),
     requesting_container_uuid character varying(255),
     container_uuid character varying(255),
@@ -396,7 +396,8 @@ CREATE TABLE groups (
     group_class character varying(255),
     trash_at timestamp without time zone,
     is_trashed boolean DEFAULT false NOT NULL,
-    delete_at timestamp without time zone
+    delete_at timestamp without time zone,
+    properties jsonb DEFAULT '{}'::jsonb
 );
 
 
@@ -682,7 +683,7 @@ CREATE TABLE links (
     link_class character varying(255),
     name character varying(255),
     head_uuid character varying(255),
-    properties text,
+    properties jsonb,
     updated_at timestamp without time zone NOT NULL
 );
 
@@ -768,7 +769,8 @@ CREATE TABLE users (
     updated_at timestamp without time zone NOT NULL,
     default_owner_uuid character varying(255),
     is_active boolean DEFAULT false,
-    username character varying(255)
+    username character varying(255),
+    redirect_to_user_uuid character varying
 );
 
 
@@ -852,9 +854,9 @@ CREATE TABLE nodes (
     ip_address character varying(255),
     first_ping_at timestamp without time zone,
     last_ping_at timestamp without time zone,
-    info text,
+    info jsonb,
     updated_at timestamp without time zone NOT NULL,
-    properties text,
+    properties jsonb,
     job_uuid character varying(255)
 );
 
@@ -1636,7 +1638,14 @@ CREATE INDEX collections_search_index ON collections USING btree (owner_uuid, mo
 -- Name: container_requests_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
+CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
+
+
+--
+-- Name: container_requests_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX container_requests_index_on_properties ON container_requests USING gin (properties);
 
 
 --
@@ -1653,11 +1662,18 @@ CREATE INDEX container_requests_search_index ON container_requests USING btree (
 CREATE INDEX containers_search_index ON containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
 
 
+--
+-- Name: group_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX group_index_on_properties ON groups USING gin (properties);
+
+
 --
 -- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text)));
+CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))));
 
 
 --
@@ -1877,6 +1893,13 @@ CREATE INDEX index_container_requests_on_requesting_container_uuid ON container_
 CREATE UNIQUE INDEX index_container_requests_on_uuid ON container_requests USING btree (uuid);
 
 
+--
+-- Name: index_containers_on_auth_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_auth_uuid ON containers USING btree (auth_uuid);
+
+
 --
 -- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
 --
@@ -2605,6 +2628,13 @@ CREATE INDEX keep_disks_search_index ON keep_disks USING btree (uuid, owner_uuid
 CREATE INDEX keep_services_search_index ON keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
 
 
+--
+-- Name: links_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX links_index_on_properties ON links USING gin (properties);
+
+
 --
 -- Name: links_search_index; Type: INDEX; Schema: public; Owner: -
 --
@@ -2626,6 +2656,20 @@ CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON links USING btr
 CREATE INDEX logs_search_index ON logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
 
 
+--
+-- Name: nodes_index_on_info; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX nodes_index_on_info ON nodes USING gin (info);
+
+
+--
+-- Name: nodes_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX nodes_index_on_properties ON nodes USING gin (properties);
+
+
 --
 -- Name: nodes_search_index; Type: INDEX; Schema: public; Owner: -
 --
@@ -2714,7 +2758,7 @@ CREATE UNIQUE INDEX unique_schema_migrations ON schema_migrations USING btree (v
 -- Name: users_search_index; Type: INDEX; Schema: public; Owner: -
 --
 
-CREATE INDEX users_search_index ON users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username);
+CREATE INDEX users_search_index ON users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
 
 
 --
@@ -3068,3 +3112,10 @@ INSERT INTO schema_migrations (version) VALUES ('20180228220311');
 
 INSERT INTO schema_migrations (version) VALUES ('20180313180114');
 
+INSERT INTO schema_migrations (version) VALUES ('20180501182859');
+
+INSERT INTO schema_migrations (version) VALUES ('20180514135529');
+
+INSERT INTO schema_migrations (version) VALUES ('20180608123145');
+
+INSERT INTO schema_migrations (version) VALUES ('20180607175050');
index abcf40ee3666622d9b2c0209531cd733c730f783..335608b2b6611eaac1eba516219d457f549c6862 100644 (file)
@@ -15,6 +15,7 @@ class AppVersion
 
   def self.forget
     @hash = nil
+    @package_version = nil
   end
 
   # Return abbrev commit hash for current code version: "abc1234", or
@@ -53,4 +54,18 @@ class AppVersion
 
     @hash || "unknown"
   end
+
+  def self.package_version
+    if (cached = Rails.configuration.package_version || @package_version)
+      return cached
+    end
+
+    begin
+      @package_version = IO.read(Rails.root.join("package-build.version")).strip
+    rescue Errno::ENOENT
+      @package_version = "unknown"
+    end
+
+    @package_version
+  end
 end
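
A quick console check of the new accessor (assuming a source checkout with no package-build.version file):

    AppVersion.forget                # clear both cached values
    AppVersion.package_version       # => "unknown"
    Rails.configuration.package_version = '1.0.0-stable'
    AppVersion.package_version       # => "1.0.0-stable" (the config override wins)
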
index 3cabc1e3ce75842d6e187a7f99ab6a12dd510d84..73ad7606cc879ef58f7569c960196191c7fb7721 100644 (file)
@@ -297,7 +297,7 @@ class CrunchDispatch
     @fetched_commits[sha1] = ($? == 0)
   end
 
-  def tag_commit(commit_hash, tag_name)
+  def tag_commit(job, commit_hash, tag_name)
     # @git_tags[T]==V if we know commit V has been tagged T in the
     # arvados_internal repository.
     if not @git_tags[tag_name]
@@ -381,20 +381,20 @@ class CrunchDispatch
           next
         end
         ready &&= get_commit repo.server_path, job.script_version
-        ready &&= tag_commit job.script_version, job.uuid
+        ready &&= tag_commit job, job.script_version, job.uuid
       end
 
       # This should be unnecessary, because API server does it during
       # job create/update, but it's still not a bad idea to verify the
       # tag is correct before starting the job:
-      ready &&= tag_commit job.script_version, job.uuid
+      ready &&= tag_commit job, job.script_version, job.uuid
 
       # The arvados_sdk_version doesn't support use of arbitrary
       # remote URLs, so the requested version isn't necessarily copied
       # into the internal repository yet.
       if job.arvados_sdk_version
         ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
-        ready &&= tag_commit job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
+        ready &&= tag_commit job, job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
       end
 
       if not ready
diff --git a/services/api/lib/update_priority.rb b/services/api/lib/update_priority.rb
new file mode 100644 (file)
index 0000000..21cd74b
--- /dev/null
@@ -0,0 +1,57 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module UpdatePriority
+  extend CurrentApiClient
+
+  # Clean up after races.
+  #
+  # If container priority>0 but there are no committed container
+  # requests for it, reset priority to 0.
+  #
+  # If container priority=0 but there are committed container requests
+  # for it with priority>0, update priority.
+  def self.update_priority
+    if !File.owned?(Rails.root.join('tmp'))
+      Rails.logger.warn("UpdatePriority: not owner of #{Rails.root}/tmp, skipping")
+      return
+    end
+    lockfile = Rails.root.join('tmp', 'update_priority.lock')
+    File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
+      return unless f.flock(File::LOCK_NB|File::LOCK_EX)
+
+      # priority>0 but should be 0:
+      ActiveRecord::Base.connection.
+        exec_query("UPDATE containers AS c SET priority=0 WHERE state IN ('Queued', 'Locked', 'Running') AND priority>0 AND uuid NOT IN (SELECT container_uuid FROM container_requests WHERE priority>0 AND state='Committed');", 'UpdatePriority')
+
+      # priority==0 but should be >0:
+      act_as_system_user do
+        Container.
+          joins("JOIN container_requests ON container_requests.container_uuid=containers.uuid AND container_requests.state=#{Container.sanitize(ContainerRequest::Committed)} AND container_requests.priority>0").
+          where('containers.state IN (?) AND containers.priority=0 AND container_requests.uuid IS NOT NULL',
+                [Container::Queued, Container::Locked, Container::Running]).
+          map(&:update_priority!)
+      end
+    end
+  end
+
+  def self.run_update_thread
+    need = false
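+    # Rails.cache.fetch runs the block only on a cache miss, so at
+    # most one update thread is started per 5-second window.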
+    Rails.cache.fetch('UpdatePriority', expires_in: 5.seconds) do
+      need = true
+    end
+    return if !need
+
+    Thread.new do
+      Thread.current.abort_on_exception = false
+      begin
+        update_priority
+      rescue => e
+        Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
+      ensure
+        ActiveRecord::Base.connection.close
+      end
+    end
+  end
+end
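
UpdatePriority is normally driven by the after_commit hook added to Container above, but the flock guard also makes it safe to invoke directly, e.g. from a console or a periodic task:

    require 'update_priority'
    UpdatePriority.update_priority
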
index f25d4238106697002a692c552e0b300b4d90067a..17aed4b48dba66b079431007408dae49ee6442cf 100644 (file)
@@ -6,7 +6,7 @@ module WhitelistUpdate
   def check_update_whitelist permitted_fields
     attribute_names.each do |field|
       if !permitted_fields.include?(field.to_sym) && really_changed(field)
-        errors.add field, "cannot be modified in this state (#{send(field+"_was").inspect}, #{send(field).inspect})"
+        errors.add field, "cannot be modified in state '#{self.state}' (#{send(field+"_was").inspect}, #{send(field).inspect})"
       end
     end
   end
index 8d9fc53c04d2c6bc4ccaea2009d72a178c6c11c7..92bd7cf872cfeca1c53d38c5ea05d7836e929f4f 100644 (file)
@@ -183,6 +183,13 @@ inactive_uninvited:
   api_token: 62mhllc0otp78v08e3rpa3nsmf8q8ogk47f7u5z4erp5gpj9al
   expires_at: 2038-01-01 00:00:00
 
+inactive_uninvited_trustedclient:
+  uuid: zzzzz-gj3su-228z32aux8dg2s1
+  api_client: trusted_workbench
+  user: inactive_uninvited
+  api_token: 7s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
+  expires_at: 2038-01-01 00:00:00
+
 inactive_but_signed_user_agreement:
   uuid: zzzzz-gj3su-247z32aux8dg2s1
   api_client: untrusted
index 807047e53ab40d07ab28b875b57a0c90a5b22096..7ff67f82ee9f0fa1f5d1bdf0f3b45b02b3b7fae8 100644 (file)
@@ -533,7 +533,7 @@ replication_desired_2_confirmed_2:
   replication_confirmed: 2
   updated_at: 2015-02-07 00:24:52.983381227 Z
   uuid: zzzzz-4zz18-434zv1tnnf2rygp
-  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 3:6:bar\n"
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 3:3:bar\n"
   name: replication want=2 have=2
 
 storage_classes_desired_default_unconfirmed:
index 29ce4f5aea5ffb29489f38fc49e9235bfa979b00..43a4a13d4e7964b3562d6a0558b70185af5d0f39 100644 (file)
@@ -106,7 +106,7 @@ completed-older:
   name: completed
   state: Final
   priority: 1
-  created_at: 2016-01-11 11:11:11.111111111 Z
+  created_at: <%= 30.minute.ago.to_s(:db) %>
   updated_at: 2016-01-11 11:11:11.111111111 Z
   modified_at: 2016-01-11 11:11:11.111111111 Z
   modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -164,7 +164,7 @@ cr_for_requester2:
   name: requester_cr2
   state: Final
   priority: 1
-  created_at: 2016-01-11 11:11:11.111111111 Z
+  created_at: <%= 30.minute.ago.to_s(:db) %>
   updated_at: 2016-01-11 11:11:11.111111111 Z
   modified_at: 2016-01-11 11:11:11.111111111 Z
   modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
index 8fb800c5f94f8a93bdc2f3990282ea76df7bb51b..8d2586921958570d97104b3fdd8bcefb8e51112f 100644 (file)
@@ -365,3 +365,37 @@ permission_perftest:
       organization: example.com
       role: IT
     getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+redirects_to_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-1au3is3g3chtthd
+  email: redirects-to-active-user@arvados.local
+  first_name: Active2
+  last_name: User2
+  identity_url: https://redirects-to-active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: redirect_active
+  redirect_to_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+double_redirects_to_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-oiusowoxoz0pk3p
+  email: double-redirects-to-active-user@arvados.local
+  first_name: Active3
+  last_name: User3
+  identity_url: https://double-redirects-to-active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: double_redirect_active
+  redirect_to_user_uuid: zzzzz-tpzed-1au3is3g3chtthd
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
index c15060d1a9847cf33f774399b6decf7ff8f96b45..40868c87b8857ba34cd5aff2bfbf065506d50b00 100644 (file)
@@ -31,17 +31,29 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
     assert_includes discovery_doc, 'defaultTrashLifetime'
     assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
     assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
+    assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
+    assert_match(/^unknown$/, discovery_doc['packageVersion'])
     assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
     assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
     assert_equal('zzzzz', discovery_doc['uuidPrefix'])
   end
 
-  test "discovery document overrides source_version with config" do
+  test "discovery document overrides source_version & sourceVersion with config" do
     Rails.configuration.source_version = 'aaa888fff'
     get :index
     assert_response :success
     discovery_doc = JSON.parse(@response.body)
+    # The source_version key will be replaced by sourceVersion
     assert_equal 'aaa888fff', discovery_doc['source_version']
+    assert_equal 'aaa888fff', discovery_doc['sourceVersion']
+  end
+
+  test "discovery document overrides packageVersion with config" do
+    Rails.configuration.package_version = '1.0.0-stable'
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_equal '1.0.0-stable', discovery_doc['packageVersion']
   end
 
   test "empty disable_api_methods" do
index a50648617fd59aea8fb1bdb39c7066b3b4e60974..b01597c05bf0280ea6cc6fa052ba98ff70526994 100644 (file)
@@ -815,9 +815,126 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     end
   end
 
+  test "refuse to merge with redirect_to_user_uuid=false (not yet supported)" do
+    authorize_with :project_viewer_trustedclient
+    post :merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: false,
+         }
+    assert_response(422)
+  end
+
+  test "refuse to merge user into self" do
+    authorize_with(:active_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  [[:active, :project_viewer_trustedclient],
+   [:active_trustedclient, :project_viewer]].each do |src, dst|
+    test "refuse to merge with untrusted token (#{src} -> #{dst})" do
+      authorize_with(src)
+      post(:merge, {
+             new_user_token: api_client_authorizations(dst).api_token,
+             new_owner_uuid: api_client_authorizations(dst).user.uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(403)
+    end
+  end
+
+  [[:expired_trustedclient, :project_viewer_trustedclient],
+   [:project_viewer_trustedclient, :expired_trustedclient]].each do |src, dst|
+    test "refuse to merge with expired token (#{src} -> #{dst})" do
+      authorize_with(src)
+      post(:merge, {
+             new_user_token: api_client_authorizations(dst).api_token,
+             new_owner_uuid: api_client_authorizations(dst).user.uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(401)
+    end
+  end
+
+  [['src', :active_trustedclient],
+   ['dst', :project_viewer_trustedclient]].each do |which_scoped, auth|
+    test "refuse to merge with scoped #{which_scoped} token" do
+      act_as_system_user do
+        api_client_authorizations(auth).update_attributes(scopes: ["GET /", "POST /", "PUT /"])
+      end
+      authorize_with(:active_trustedclient)
+      post(:merge, {
+             new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,
+             new_owner_uuid: users(:project_viewer).uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(403)
+    end
+  end
+
+  test "refuse to merge if new_owner_uuid is not writable" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: groups(:anonymously_accessible_project).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(403)
+  end
+
+  test "refuse to merge if new_owner_uuid is empty" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: "",
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  test "refuse to merge if new_owner_uuid is not provided" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  test "refuse to update redirect_to_user_uuid directly" do
+    authorize_with(:active_trustedclient)
+    patch(:update, {
+            id: users(:active).uuid,
+            user: {
+              redirect_to_user_uuid: users(:active).uuid,
+            },
+          })
+    assert_response(403)
+  end
+
+  test "merge 'project_viewer' account into 'active' account" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(:success)
+    assert_equal(users(:project_viewer).redirect_to_user_uuid, users(:active).uuid)
+
+    auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
+    assert_not_nil(auth)
+    assert_not_nil(auth.user)
+    assert_equal(users(:active).uuid, auth.user.uuid)
+  end
 
   NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
-                         "last_name"].sort
+                         "last_name", "username"].sort
 
   def check_non_admin_index
     assert_response :success
index 6d7f4a0616e4068956c050b3db84f504b2e34ef3..c38c230b2276609c6ce21ccf581f4e710854167d 100644 (file)
@@ -85,6 +85,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     assert_response :success
     assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
     assert_equal false, json_response['is_admin']
+    assert_equal false, json_response['is_active']
     assert_equal 'foo@example.com', json_response['email']
     assert_equal 'barney', json_response['username']
 
@@ -218,4 +219,36 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     refute_includes(group_uuids, groups(:trashed_project).uuid)
     refute_includes(group_uuids, groups(:testusergroup_admins).uuid)
   end
+
+  test 'auto-activate user from trusted cluster' do
+    Rails.configuration.auto_activate_users_from = ['zbbbb']
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal false, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
+  test 'pre-activate remote user' do
+    post '/arvados/v1/users', {
+           "user" => {
+             "uuid" => "zbbbb-tpzed-000000000000000",
+             "email" => 'foo@example.com',
+             "username" => 'barney',
+             "is_active" => true
+           }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(:admin)}"}
+    assert_response :success
+
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal nil, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
 end
index 6f9cf7edcbb6bb13b561cba11cc772c1ff7be097..0497c6a7d56294ae3d0841db5acd8ef9a441d809 100644 (file)
@@ -9,7 +9,7 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
     'https://wb.example.com'
   end
 
-  def mock_auth_with(email: nil, username: nil)
+  def mock_auth_with(email: nil, username: nil, identity_url: nil)
     mock = {
       'provider' => 'josh_id',
       'uid' => 'https://edward.example.com',
@@ -22,6 +22,7 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
     }
     mock['info']['email'] = email unless email.nil?
     mock['info']['username'] = username unless username.nil?
+    mock['info']['identity_url'] = identity_url unless identity_url.nil?
     post('/auth/josh_id/callback',
          {return_to: client_url},
          {'omniauth.auth' => mock})
@@ -40,6 +41,24 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
     assert_equal 'foo', u.username
   end
 
+  test 'existing user login' do
+    mock_auth_with(identity_url: "https://active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
+  test 'user redirect_to_user_uuid' do
+    mock_auth_with(identity_url: "https://redirects-to-active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
+  test 'user double redirect_to_user_uuid' do
+    mock_auth_with(identity_url: "https://double-redirects-to-active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
   test 'create new user during omniauth callback' do
     mock_auth_with(email: 'edward@example.com')
     assert_equal(0, @response.redirect_url.index(client_url),
index 8ddab3fee1eb6963dff5c34b3f2788fa09bcef1e..28e43b84506f492b7c3f88c73a0b461eda40ea51 100644 (file)
@@ -216,4 +216,39 @@ class UsersTest < ActionDispatch::IntegrationTest
     end
     nil
   end
+
+  test 'merge active into project_viewer account' do
+    post('/arvados/v1/groups', {
+           group: {
+             group_class: 'project',
+             name: "active user's stuff",
+           },
+         }, auth(:project_viewer))
+    assert_response(:success)
+    project_uuid = json_response['uuid']
+
+    post('/arvados/v1/users/merge', {
+           new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,
+           new_owner_uuid: project_uuid,
+           redirect_to_new_user: true,
+         }, auth(:active_trustedclient))
+    assert_response(:success)
+
+    get('/arvados/v1/users/current', {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['uuid'])
+
+    get('/arvados/v1/authorized_keys/' + authorized_keys(:active).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
+    assert_equal(users(:project_viewer).uuid, json_response['authorized_user_uuid'])
+
+    get('/arvados/v1/repositories/' + repositories(:foo).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
+
+    get('/arvados/v1/groups/' + groups(:aproject).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(project_uuid, json_response['owner_uuid'])
+  end
 end
index 923083832c658627f02b6001b8f71ccfd47f6a59..d07027721f603565d3d6c66838fdd5ad666b95da 100644 (file)
@@ -99,7 +99,7 @@ class ArvadosModelTest < ActiveSupport::TestCase
                         properties: {'foo' => 'bar'}.with_indifferent_access)
     raw = ActiveRecord::Base.connection.
       select_value("select properties from links where uuid='#{link.uuid}'")
-    assert_equal '{"foo":"bar"}', raw
+    assert_equal '{"foo": "bar"}', raw
   end
 
   test "store long string" do
index cc257ccf486dcf65ed6e503022ce7df18e556454..f266c096b475ca6306c9086d22029bdc6e22cb3e 100644 (file)
@@ -261,10 +261,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
 
     c = Container.find_by_uuid cr.container_uuid
     assert_operator 0, :<, c.priority
+    lock_and_run(c)
 
-    cr2 = create_minimal_req!
-    cr2.update_attributes!(priority: 10, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"], container_count_max: 1)
-    cr2.reload
+    cr2 = with_container_auth(c) do
+      create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+    end
+    assert_not_nil cr2.requesting_container_uuid
     assert_equal users(:active).uuid, cr2.modified_by_user_uuid
 
     c2 = Container.find_by_uuid cr2.container_uuid
@@ -613,7 +615,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
 
   test "requesting_container_uuid at create is not allowed" do
     set_user_from_auth :active
-    assert_raises(ActiveRecord::RecordNotSaved) do
+    assert_raises(ActiveRecord::RecordInvalid) do
       create_minimal_req!(state: "Uncommitted", priority: 1, requesting_container_uuid: 'youcantdothat')
     end
   end
@@ -755,12 +757,109 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal ContainerRequest::Final, cr3.state
   end
 
+  [
+    [false, ActiveRecord::RecordInvalid],
+    [true, nil],
+  ].each do |preemptible_conf, expected|
+    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+      sp = {"preemptible" => true}
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      scheduling_parameters: sp,
+                      mounts: {"test" => {"kind" => "json"}}}
+      Rails.configuration.preemptible_instances = preemptible_conf
+      set_user_from_auth :active
+
+      cr = create_minimal_req!(common_attrs)
+      cr.state = ContainerRequest::Committed
+
+      if !expected.nil?
+        assert_raises(expected) do
+          cr.save!
+        end
+      else
+        cr.save!
+        assert_equal sp, cr.scheduling_parameters
+      end
+    end
+  end
+
+  [
+    'zzzzz-dz642-runningcontainr',
+    nil,
+  ].each do |requesting_c|
+    test "having preemptible instances active on the API server, a committed #{requesting_c.nil? ? 'non-':''}child CR should not ask for a preemptible instance if the parameter is already set to false" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      scheduling_parameters: {"preemptible" => false},
+                      mounts: {"test" => {"kind" => "json"}}}
+
+      Rails.configuration.preemptible_instances = true
+      set_user_from_auth :active
+
+      if requesting_c
+        cr = with_container_auth(Container.find_by_uuid requesting_c) do
+          create_minimal_req!(common_attrs)
+        end
+        assert_not_nil cr.requesting_container_uuid
+      else
+        cr = create_minimal_req!(common_attrs)
+      end
+
+      cr.state = ContainerRequest::Committed
+      cr.save!
+
+      assert_equal false, cr.scheduling_parameters['preemptible']
+    end
+  end
+
+  [
+    [true, 'zzzzz-dz642-runningcontainr', true],
+    [true, nil, nil],
+    [false, 'zzzzz-dz642-runningcontainr', nil],
+    [false, nil, nil],
+  ].each do |preemptible_conf, requesting_c, schedule_preemptible|
+    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      mounts: {"test" => {"kind" => "json"}}}
+
+      Rails.configuration.preemptible_instances = preemptible_conf
+      set_user_from_auth :active
+
+      if requesting_c
+        cr = with_container_auth(Container.find_by_uuid requesting_c) do
+          create_minimal_req!(common_attrs)
+        end
+        assert_not_nil cr.requesting_container_uuid
+      else
+        cr = create_minimal_req!(common_attrs)
+      end
+
+      cr.state = ContainerRequest::Committed
+      cr.save!
+
+      assert_equal schedule_preemptible, cr.scheduling_parameters['preemptible']
+    end
+  end
+
   [
     [{"partitions" => ["fastcpu","vfastcpu", 100]}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
     [{"partitions" => ["fastcpu","vfastcpu", 100]}, ContainerRequest::Uncommitted],
     [{"partitions" => "fastcpu"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
     [{"partitions" => "fastcpu"}, ContainerRequest::Uncommitted],
     [{"partitions" => ["fastcpu","vfastcpu"]}, ContainerRequest::Committed],
+    [{"max_run_time" => "one day"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => "one day"}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => -1}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => -1}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => 86400}, ContainerRequest::Committed],
   ].each do |sp, state, expected|
     test "create container request with scheduling_parameters #{sp} in state #{state} and verify #{expected}" do
       common_attrs = {cwd: "test",
@@ -787,6 +886,26 @@ class ContainerRequestTest < ActiveSupport::TestCase
     end
   end
 
+  test "Having preemptible_instances=true create a committed child container request and verify the scheduling parameter of its container" do
+    common_attrs = {cwd: "test",
+                    priority: 1,
+                    command: ["echo", "hello"],
+                    output_path: "test",
+                    state: ContainerRequest::Committed,
+                    mounts: {"test" => {"kind" => "json"}}}
+    set_user_from_auth :active
+    Rails.configuration.preemptible_instances = true
+
+    cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
+      create_minimal_req!(common_attrs)
+    end
+    assert_equal 'zzzzz-dz642-runningcontainr', cr.requesting_container_uuid
+    assert_equal true, cr.scheduling_parameters["preemptible"]
+
+    c = Container.find_by_uuid(cr.container_uuid)
+    assert_equal true, c.scheduling_parameters["preemptible"]
+  end
+
   [['Committed', true, {name: "foobar", priority: 123}],
    ['Committed', false, {container_count: 2}],
    ['Committed', false, {container_count: 0}],
diff --git a/services/api/test/unit/update_priority_test.rb b/services/api/test/unit/update_priority_test.rb
new file mode 100644 (file)
index 0000000..2d28d3f
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'update_priority'
+
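+# UpdatePriority.update_priority reconciles each container's priority with
+# the container requests that reference it: a container with live requests
+# should have priority > 0; a container with none should drop to 0.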
+class UpdatePriorityTest < ActiveSupport::TestCase
+  test 'priority 0 but should be >0' do
+    uuid = containers(:running).uuid
+    ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+
+    uuid = containers(:queued).uuid
+    ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+  end
+
+  test 'priority>0 but should be 0' do
+    uuid = containers(:running).uuid
+    ActiveRecord::Base.connection.exec_query('DELETE FROM container_requests WHERE container_uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+  end
+end
index 72beca6c78134dbe92bd9ce4b65d8b3e70c6d530..67c410047cfb5e62ba65be801a46bd20b721971d 100644 (file)
@@ -643,11 +643,11 @@ class UserTest < ActiveSupport::TestCase
     assert_equal(expect_username, user.username)
 
     # check user setup
-    verify_link_exists(Rails.configuration.auto_setup_new_users,
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
                        groups(:all_users).uuid, user.uuid,
                        "permission", "can_read")
     # Check for OID login link.
-    verify_link_exists(Rails.configuration.auto_setup_new_users,
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
                        user.uuid, user.email, "permission", "can_login")
     # Check for repository.
     if named_repo = (prior_repo or
index 23e4b3a8cb456aac06f644c0219148967586fe71..d1f19dd7b5702e2431471bc7ce2164f553a8cb11 100644 (file)
@@ -7,6 +7,7 @@ package main
 // Dispatcher service for Crunch that submits containers to the slurm queue.
 
 import (
+       "bytes"
        "context"
        "flag"
        "fmt"
@@ -202,7 +203,7 @@ func (disp *Dispatcher) checkSqueueForOrphans() {
        }
 }
 
-func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
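+// slurmConstraintArgs returns explicit --mem/--cpus-per-task/--tmp
+// arguments derived from the container's runtime constraints.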
+func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
        mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM+disp.ReserveExtraRAM) / float64(1048576)))
 
        var disk int64
@@ -212,29 +213,36 @@ func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error
                }
        }
        disk = int64(math.Ceil(float64(disk) / float64(1048576)))
-
-       var sbatchArgs []string
-       sbatchArgs = append(sbatchArgs, disp.SbatchArguments...)
-       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
-       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem=%d", mem))
-       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
-       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--tmp=%d", disk))
-       sbatchArgs = append(sbatchArgs, fmt.Sprintf("--nice=%d", initialNiceValue))
-       if len(container.SchedulingParameters.Partitions) > 0 {
-               sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
+       return []string{
+               fmt.Sprintf("--mem=%d", mem),
+               fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
+               fmt.Sprintf("--tmp=%d", disk),
        }
+}
+
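+// sbatchArgs assembles the sbatch command line: admin-configured
+// defaults, job name and nice value, then either explicit resource
+// constraints or a single instancetype constraint, and finally any
+// requested partitions.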
+func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
+       var args []string
+       args = append(args, disp.SbatchArguments...)
+       args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue))
 
        if disp.cluster == nil {
                // no instance types configured
+               args = append(args, disp.slurmConstraintArgs(container)...)
        } else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
                // ditto
+               args = append(args, disp.slurmConstraintArgs(container)...)
        } else if err != nil {
                return nil, err
        } else {
-               sbatchArgs = append(sbatchArgs, "--constraint=instancetype="+it.Name)
+               // use instancetype constraint instead of slurm mem/cpu/tmp specs
+               args = append(args, "--constraint=instancetype="+it.Name)
        }
 
-       return sbatchArgs, nil
+       if len(container.SchedulingParameters.Partitions) > 0 {
+               args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
+       }
+
+       return args, nil
 }
 
 func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
@@ -267,8 +275,21 @@ func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
                log.Printf("Submitting container %s to slurm", ctr.UUID)
                if err := disp.submit(ctr, disp.CrunchRunCommand); err != nil {
                        var text string
-                       if err == dispatchcloud.ErrConstraintsNotSatisfiable {
-                               text = fmt.Sprintf("cannot run container %s: %s", ctr.UUID, err)
+                       if err, ok := err.(dispatchcloud.ConstraintsNotSatisfiableError); ok {
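+                               // The constraints can never be satisfied:
+                               // cancel the container, and log the
+                               // configured instance types so the operator
+                               // can see why no type was big enough.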
+                               var logBuf bytes.Buffer
+                               fmt.Fprintf(&logBuf, "cannot run container %s: %s\n", ctr.UUID, err)
+                               if len(err.AvailableTypes) == 0 {
+                                       fmt.Fprint(&logBuf, "No instance types are configured.\n")
+                               } else {
+                                       fmt.Fprint(&logBuf, "Available instance types:\n")
+                                       for _, t := range err.AvailableTypes {
+                                               fmt.Fprintf(&logBuf,
+                                                       "Type %q: %d VCPUs, %d RAM, %d Scratch, %f Price\n",
+                                                       t.Name, t.VCPUs, t.RAM, t.Scratch, t.Price,
+                                               )
+                                       }
+                               }
+                               text = logBuf.String()
                                disp.UpdateState(ctr.UUID, dispatch.Cancelled)
                        } else {
                                text = fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
index 508a0dfc1387bafcca41b6b8d851f0960d3fc9b9..1509d7ad8b33fd24f56e80f64ad85d929b940bdb 100644 (file)
@@ -19,6 +19,7 @@ Type=notify
 ExecStart=/usr/bin/crunch-dispatch-slurm
 Restart=always
 RestartSec=1
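+# Raise the open-file limit well above the systemd default; a busy
+# dispatcher may hold many connections and log files open at once.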
+LimitNOFILE=1000000
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 499f6d5d7275cb609f48cf40c50de381210997ac..23a8a0ca01124df89575c5724a4b6cb6650527fb 100644 (file)
@@ -193,10 +193,11 @@ func (s *IntegrationSuite) TestMissingFromSqueue(c *C) {
        container := s.integrationTest(c,
                [][]string{{
                        fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
+                       fmt.Sprintf("--nice=%d", 10000),
                        fmt.Sprintf("--mem=%d", 11445),
                        fmt.Sprintf("--cpus-per-task=%d", 4),
                        fmt.Sprintf("--tmp=%d", 45777),
-                       fmt.Sprintf("--nice=%d", 10000)}},
+               }},
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
                        dispatcher.UpdateState(container.UUID, dispatch.Running)
                        time.Sleep(3 * time.Second)
@@ -208,7 +209,7 @@ func (s *IntegrationSuite) TestMissingFromSqueue(c *C) {
 func (s *IntegrationSuite) TestSbatchFail(c *C) {
        s.slurm = slurmFake{errBatch: errors.New("something terrible happened")}
        container := s.integrationTest(c,
-               [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--mem=11445", "--cpus-per-task=4", "--tmp=45777", "--nice=10000"}},
+               [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--nice=10000", "--mem=11445", "--cpus-per-task=4", "--tmp=45777"}},
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
                        dispatcher.UpdateState(container.UUID, dispatch.Running)
                        dispatcher.UpdateState(container.UUID, dispatch.Complete)
@@ -353,7 +354,7 @@ func (s *StubbedSuite) TestSbatchArgs(c *C) {
                s.disp.SbatchArguments = defaults
 
                args, err := s.disp.sbatchArgs(container)
-               c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=10000"))
+               c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--nice=10000", "--mem=239", "--cpus-per-task=2", "--tmp=0"))
                c.Check(err, IsNil)
        }
 }
@@ -366,40 +367,42 @@ func (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {
        }
 
        for _, trial := range []struct {
-               types      []arvados.InstanceType
+               types      map[string]arvados.InstanceType
                sbatchArgs []string
                err        error
        }{
                // Choose node type => use --constraint arg
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
-                               {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
-                               {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
-                               {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny":   {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                               "a1.small":  {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
+                               "a1.medium": {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
+                               "a1.large":  {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
                        },
                        sbatchArgs: []string{"--constraint=instancetype=a1.medium"},
                },
                // No node types configured => no slurm constraint
                {
                        types:      nil,
-                       sbatchArgs: nil,
+                       sbatchArgs: []string{"--mem=239", "--cpus-per-task=2", "--tmp=0"},
                },
                // No node type is big enough => error
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny": {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
                        },
-                       err: dispatchcloud.ErrConstraintsNotSatisfiable,
+                       err: dispatchcloud.ConstraintsNotSatisfiableError{},
                },
        } {
                c.Logf("%#v", trial)
                s.disp.cluster = &arvados.Cluster{InstanceTypes: trial.types}
 
                args, err := s.disp.sbatchArgs(container)
-               c.Check(err, Equals, trial.err)
+               c.Check(err == nil, Equals, trial.err == nil)
                if trial.err == nil {
-                       c.Check(args, DeepEquals, append([]string{"--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=10000"}, trial.sbatchArgs...))
+                       c.Check(args, DeepEquals, append([]string{"--job-name=123", "--nice=10000"}, trial.sbatchArgs...))
+               } else {
+                       c.Check(len(err.(dispatchcloud.ConstraintsNotSatisfiableError).AvailableTypes), Equals, len(trial.types))
                }
        }
 }
@@ -414,7 +417,8 @@ func (s *StubbedSuite) TestSbatchPartition(c *C) {
 
        args, err := s.disp.sbatchArgs(container)
        c.Check(args, DeepEquals, []string{
-               "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--nice=10000",
+               "--job-name=123", "--nice=10000",
+               "--mem=239", "--cpus-per-task=1", "--tmp=0",
                "--partition=blurb,b2",
        })
        c.Check(err, IsNil)
index ee79c6f774c1ca4cb277f1c356ebca792d790f49..742943f197580e186e7fd1f7b8084a1357f3661d 100644 (file)
@@ -157,7 +157,7 @@ func (sqc *SqueueChecker) check() {
                replacing.nice = n
                newq[uuid] = replacing
 
-               if state == "PENDING" && reason == "BadConstraints" && p == 0 && replacing.wantPriority > 0 {
+               if state == "PENDING" && ((reason == "BadConstraints" && p == 0) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
                        // When using SLURM 14.x or 15.x, our queued
                        // jobs land in this state when "scontrol
                        // reconfigure" invalidates their feature
@@ -171,7 +171,14 @@ func (sqc *SqueueChecker) check() {
                        // reappeared, so rather than second-guessing
                        // whether SLURM is ready, we just keep trying
                        // this until it works.
+                       //
+                       // "launch failed requeued held" seems to be
+                       // another manifestation of this problem,
+                       // resolved the same way.
+                       log.Printf("releasing held job %q", uuid)
                        sqc.Slurm.Release(uuid)
+               } else if p < 1<<20 && replacing.wantPriority > 0 {
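+                       // Heuristic (assumption): dispatcher-assigned nice
+                       // values keep effective priorities at or above
+                       // 1<<20, so anything lower suggests slurm clamped
+                       // or reset this job's priority.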
+                       log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
                }
        }
        sqc.queue = newq
diff --git a/services/crunch-run/copier.go b/services/crunch-run/copier.go
new file mode 100644 (file)
index 0000000..4c45f6a
--- /dev/null
@@ -0,0 +1,357 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+       "sort"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+)
+
+type printfer interface {
+       Printf(string, ...interface{})
+}
+
+var errTooManySymlinks = errors.New("too many symlinks, or symlink cycle")
+
+const limitFollowSymlinks = 10
+
+type filetodo struct {
+       src  string
+       dst  string
+       size int64
+}
+
+// copier copies data from a finished container's output path to a new
+// Arvados collection.
+//
+// Regular files (and symlinks to regular files) in hostOutputDir are
+// copied from the local filesystem.
+//
+// Symlinks to mounted collections, and any collections mounted under
+// ctrOutputDir, are copied by transforming the relevant parts of the
+// existing manifests, without moving any data around.
+//
+// Symlinks to other parts of the container's filesystem result in
+// errors.
+//
+// Use:
+//
+//     manifest, err := (&copier{...}).Copy()
+type copier struct {
+       client        *arvados.Client
+       arvClient     IArvadosClient
+       keepClient    IKeepClient
+       hostOutputDir string
+       ctrOutputDir  string
+       binds         []string
+       mounts        map[string]arvados.Mount
+       secretMounts  map[string]arvados.Mount
+       logger        printfer
+
+       dirs     []string
+       files    []filetodo
+       manifest string
+
+       manifestCache map[string]*manifest.Manifest
+}
+
+// Copy copies data as needed, and returns a new manifest.
+func (cp *copier) Copy() (string, error) {
+       err := cp.walkMount("", cp.ctrOutputDir, limitFollowSymlinks, true)
+       if err != nil {
+               return "", err
+       }
+       fs, err := (&arvados.Collection{ManifestText: cp.manifest}).FileSystem(cp.client, cp.keepClient)
+       if err != nil {
+               return "", err
+       }
+       for _, d := range cp.dirs {
+               err = fs.Mkdir(d, 0777)
+               if err != nil {
+                       return "", err
+               }
+       }
+       for _, f := range cp.files {
+               err = cp.copyFile(fs, f)
+               if err != nil {
+                       return "", err
+               }
+       }
+       return fs.MarshalManifest(".")
+}
+
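+// copyFile copies a single regular file from the local filesystem into
+// the output collection filesystem.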
+func (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) error {
+       cp.logger.Printf("copying %q (%d bytes)", f.dst, f.size)
+       dst, err := fs.OpenFile(f.dst, os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return err
+       }
+       src, err := os.Open(f.src)
+       if err != nil {
+               dst.Close()
+               return err
+       }
+       defer src.Close()
+       _, err = io.Copy(dst, src)
+       if err != nil {
+               dst.Close()
+               return err
+       }
+       return dst.Close()
+}
+
+// Append to cp.manifest, cp.files, and cp.dirs so as to copy src (an
+// absolute path in the container's filesystem) to dest (an absolute
+// path in the output collection, or "" for output root).
+//
+// src must be (or be a descendant of) a readonly "collection" mount,
+// a writable collection mounted at ctrOutputPath, or a "tmp" mount.
+//
+// If walkMountsBelow is true, include contents of any collection
+// mounted below src as well.
+func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow bool) error {
+       // srcRoot, srcMount indicate the innermost mount that
+       // contains src.
+       var srcRoot string
+       var srcMount arvados.Mount
+       for root, mnt := range cp.mounts {
+               if len(root) > len(srcRoot) && strings.HasPrefix(src+"/", root+"/") {
+                       srcRoot, srcMount = root, mnt
+               }
+       }
+       for root := range cp.secretMounts {
+               if len(root) > len(srcRoot) && strings.HasPrefix(src+"/", root+"/") {
+                       // Silently omit secrets, and symlinks to
+                       // secrets.
+                       return nil
+               }
+       }
+       if srcRoot == "" {
+               return fmt.Errorf("cannot output file %q: not in any mount", src)
+       }
+
+       // srcRelPath is the path to the file/dir we are trying to
+       // copy, relative to its mount point -- ".", "./foo.txt", ...
+       srcRelPath := filepath.Join(".", srcMount.Path, src[len(srcRoot):])
+
+       switch {
+       case srcMount.ExcludeFromOutput:
+       case srcMount.Kind == "tmp":
+               // Handle by walking the host filesystem.
+               return cp.walkHostFS(dest, src, maxSymlinks, walkMountsBelow)
+       case srcMount.Kind != "collection":
+               return fmt.Errorf("%q: unsupported mount %q in output (kind is %q)", src, srcRoot, srcMount.Kind)
+       case !srcMount.Writable:
+               mft, err := cp.getManifest(srcMount.PortableDataHash)
+               if err != nil {
+                       return err
+               }
+               cp.manifest += mft.Extract(srcRelPath, dest).Text
+       default:
+               hostRoot, err := cp.hostRoot(srcRoot)
+               if err != nil {
+                       return err
+               }
+               f, err := os.Open(filepath.Join(hostRoot, ".arvados#collection"))
+               if err != nil {
+                       return err
+               }
+               defer f.Close()
+               var coll arvados.Collection
+               err = json.NewDecoder(f).Decode(&coll)
+               if err != nil {
+                       return err
+               }
+               mft := manifest.Manifest{Text: coll.ManifestText}
+               cp.manifest += mft.Extract(srcRelPath, dest).Text
+       }
+       if walkMountsBelow {
+               return cp.walkMountsBelow(dest, src)
+       } else {
+               return nil
+       }
+}
+
+func (cp *copier) walkMountsBelow(dest, src string) error {
+       for mnt, mntinfo := range cp.mounts {
+               if !strings.HasPrefix(mnt, src+"/") {
+                       continue
+               }
+               if cp.copyRegularFiles(mntinfo) {
+                       // These got copied into the nearest parent
+                       // mount as regular files during setup, so
+                       // they get copied as regular files when we
+                       // process the parent. Output will reflect any
+                       // changes and deletions done by the
+                       // container.
+                       continue
+               }
+               // Example: we are processing dest=/foo src=/mnt1/dir1
+               // (perhaps we followed a symlink /outdir/foo ->
+               // /mnt1/dir1). Caller has already processed the
+               // collection mounted at /mnt1, but now we find that
+               // /mnt1/dir1/mnt2 is also a mount, so we need to copy
+               // src=/mnt1/dir1/mnt2 to dest=/foo/mnt2.
+               //
+               // We handle all descendants of /mnt1/dir1 in this
+               // loop instead of using recursion:
+               // /mnt1/dir1/mnt2/mnt3 is a child of both /mnt1 and
+               // /mnt1/dir1/mnt2, but we only want to walk it
+               // once. (This simplification is safe because mounted
+               // collections cannot contain symlinks.)
+               err := cp.walkMount(dest+mnt[len(src):], mnt, 0, false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+// Add entries to cp.dirs and cp.files so as to copy src (an absolute
+// path in the container's filesystem which corresponds to a real file
+// or directory in cp.hostOutputDir) to dest (an absolute path in the
+// output collection, or "" for output root).
+//
+// Always follow symlinks.
+//
+// If includeMounts is true, include mounts at and below src.
+// Otherwise, skip them.
+func (cp *copier) walkHostFS(dest, src string, maxSymlinks int, includeMounts bool) error {
+       if includeMounts {
+               err := cp.walkMountsBelow(dest, src)
+               if err != nil {
+                       return err
+               }
+       }
+
+       hostsrc := cp.hostOutputDir + src[len(cp.ctrOutputDir):]
+
+       // If src is a symlink, walk its target.
+       fi, err := os.Lstat(hostsrc)
+       if err != nil {
+               return fmt.Errorf("lstat %q: %s", src, err)
+       }
+       if fi.Mode()&os.ModeSymlink != 0 {
+               if maxSymlinks < 0 {
+                       return errTooManySymlinks
+               }
+               target, err := os.Readlink(hostsrc)
+               if err != nil {
+                       return fmt.Errorf("readlink %q: %s", src, err)
+               }
+               if !strings.HasPrefix(target, "/") {
+                       target = filepath.Join(filepath.Dir(src), target)
+               }
+               return cp.walkMount(dest, target, maxSymlinks-1, true)
+       }
+
+       // If src is a regular directory, append it to cp.dirs and
+       // walk each of its children. (If there are no children,
+       // create an empty file "dest/.keep".)
+       if fi.Mode().IsDir() {
+               if dest != "" {
+                       cp.dirs = append(cp.dirs, dest)
+               }
+               dir, err := os.Open(hostsrc)
+               if err != nil {
+                       return fmt.Errorf("open %q: %s", src, err)
+               }
+               names, err := dir.Readdirnames(-1)
+               dir.Close()
+               if err != nil {
+                       return fmt.Errorf("readdirnames %q: %s", src, err)
+               }
+               if len(names) == 0 {
+                       if dest != "" {
+                               cp.files = append(cp.files, filetodo{
+                                       src: os.DevNull,
+                                       dst: dest + "/.keep",
+                               })
+                       }
+                       return nil
+               }
+               sort.Strings(names)
+               for _, name := range names {
+                       dest, src := dest+"/"+name, src+"/"+name
+                       if _, isSecret := cp.secretMounts[src]; isSecret {
+                               continue
+                       }
+                       if mntinfo, isMount := cp.mounts[src]; isMount && !cp.copyRegularFiles(mntinfo) {
+                               // If a regular file/dir somehow
+                               // exists at a path that's also a
+                               // mount target, ignore the file --
+                               // the mount has already been included
+                               // with walkMountsBelow().
+                               //
+                               // (...except mount types that are
+                               // handled as regular files.)
+                               continue
+                       }
+                       err = cp.walkHostFS(dest, src, maxSymlinks, false)
+                       if err != nil {
+                               return err
+                       }
+               }
+               return nil
+       }
+
+       // If src is a regular file, append it to cp.files.
+       if fi.Mode().IsRegular() {
+               cp.files = append(cp.files, filetodo{
+                       src:  hostsrc,
+                       dst:  dest,
+                       size: fi.Size(),
+               })
+               return nil
+       }
+
+       return fmt.Errorf("Unsupported file type (mode %o) in output dir: %q", fi.Mode(), src)
+}
+
+// Return the host path that was mounted at the given path in the
+// container.
+func (cp *copier) hostRoot(ctrRoot string) (string, error) {
+       if ctrRoot == cp.ctrOutputDir {
+               return cp.hostOutputDir, nil
+       }
+       for _, bind := range cp.binds {
+               tokens := strings.Split(bind, ":")
+               if len(tokens) >= 2 && tokens[1] == ctrRoot {
+                       return tokens[0], nil
+               }
+       }
+       return "", fmt.Errorf("not bind-mounted: %q", ctrRoot)
+}
+
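+// copyRegularFiles reports whether a mount of the given kind is
+// materialized as regular files during container setup, and therefore
+// must be copied file-by-file rather than by manifest reference.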
+func (cp *copier) copyRegularFiles(m arvados.Mount) bool {
+       return m.Kind == "text" || m.Kind == "json" || (m.Kind == "collection" && m.Writable)
+}
+
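+// getManifest fetches, and caches, the manifest of the collection with
+// the given portable data hash.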
+func (cp *copier) getManifest(pdh string) (*manifest.Manifest, error) {
+       if mft, ok := cp.manifestCache[pdh]; ok {
+               return mft, nil
+       }
+       var coll arvados.Collection
+       err := cp.arvClient.Get("collections", pdh, nil, &coll)
+       if err != nil {
+               return nil, fmt.Errorf("error retrieving collection record for %q: %s", pdh, err)
+       }
+       mft := &manifest.Manifest{Text: coll.ManifestText}
+       if cp.manifestCache == nil {
+               cp.manifestCache = map[string]*manifest.Manifest{pdh: mft}
+       } else {
+               cp.manifestCache[pdh] = mft
+       }
+       return mft, nil
+}
diff --git a/services/crunch-run/copier_test.go b/services/crunch-run/copier_test.go
new file mode 100644 (file)
index 0000000..a2b5608
--- /dev/null
@@ -0,0 +1,222 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io"
+       "io/ioutil"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&copierSuite{})
+
+type copierSuite struct {
+       cp copier
+}
+
+func (s *copierSuite) SetUpTest(c *check.C) {
+       tmpdir, err := ioutil.TempDir("", "crunch-run.test.")
+       c.Assert(err, check.IsNil)
+       api, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       s.cp = copier{
+               client:        arvados.NewClientFromEnv(),
+               arvClient:     api,
+               hostOutputDir: tmpdir,
+               ctrOutputDir:  "/ctr/outdir",
+               mounts: map[string]arvados.Mount{
+                       "/ctr/outdir": {Kind: "tmp"},
+               },
+               secretMounts: map[string]arvados.Mount{
+                       "/secret_text": {Kind: "text", Content: "xyzzy"},
+               },
+       }
+}
+
+func (s *copierSuite) TearDownTest(c *check.C) {
+       os.RemoveAll(s.cp.hostOutputDir)
+}
+
+func (s *copierSuite) TestEmptyOutput(c *check.C) {
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string(nil))
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestRegularFilesAndDirs(c *check.C) {
+       err := os.MkdirAll(s.cp.hostOutputDir+"/dir1/dir2/dir3", 0755)
+       c.Assert(err, check.IsNil)
+       f, err := os.OpenFile(s.cp.hostOutputDir+"/dir1/foo", os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, "foo")
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{"/dir1", "/dir1/dir2", "/dir1/dir2/dir3"})
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {src: os.DevNull, dst: "/dir1/dir2/dir3/.keep"},
+               {src: s.cp.hostOutputDir + "/dir1/foo", dst: "/dir1/foo", size: 3},
+       })
+}
+
+func (s *copierSuite) TestSymlinkCycle(c *check.C) {
+       c.Assert(os.Mkdir(s.cp.hostOutputDir+"/dir1", 0755), check.IsNil)
+       c.Assert(os.Mkdir(s.cp.hostOutputDir+"/dir2", 0755), check.IsNil)
+       c.Assert(os.Symlink("../dir2", s.cp.hostOutputDir+"/dir1/l_dir2"), check.IsNil)
+       c.Assert(os.Symlink("../dir1", s.cp.hostOutputDir+"/dir2/l_dir1"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*cycle.*`)
+}
+
+func (s *copierSuite) TestSymlinkTargetMissing(c *check.C) {
+       c.Assert(os.Symlink("./missing", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*/ctr/outdir/missing.*`)
+}
+
+func (s *copierSuite) TestSymlinkTargetNotMounted(c *check.C) {
+       c.Assert(os.Symlink("../boop", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*/ctr/boop.*`)
+}
+
+func (s *copierSuite) TestSymlinkToSecret(c *check.C) {
+       c.Assert(os.Symlink("/secret_text", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(len(s.cp.dirs), check.Equals, 0)
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestSecretInOutputDir(c *check.C) {
+       s.cp.secretMounts["/ctr/outdir/secret_text"] = s.cp.secretMounts["/secret_text"]
+       s.writeFileInOutputDir(c, "secret_text", "xyzzy")
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(len(s.cp.dirs), check.Equals, 0)
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
+       // simulate mounted read-only collection
+       s.cp.mounts["/mnt"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+       }
+
+       // simulate mounted writable collection
+       bindtmp, err := ioutil.TempDir("", "crunch-run.test.")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(bindtmp)
+       f, err := os.OpenFile(bindtmp+"/.arvados#collection", os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, `{"manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"}`)
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+       s.cp.mounts["/mnt-w"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+               Writable:         true,
+       }
+       s.cp.binds = append(s.cp.binds, bindtmp+":/mnt-w")
+
+       c.Assert(os.Symlink("../../mnt", s.cp.hostOutputDir+"/l_dir"), check.IsNil)
+       c.Assert(os.Symlink("/mnt/foo", s.cp.hostOutputDir+"/l_file"), check.IsNil)
+       c.Assert(os.Symlink("/mnt-w/bar", s.cp.hostOutputDir+"/l_file_w"), check.IsNil)
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.manifest, check.Matches, `(?ms)\./l_dir acbd\S+ 0:3:foo\n\. acbd\S+ 0:3:l_file\n\. 37b5\S+ 0:3:l_file_w\n`)
+}
+
+func (s *copierSuite) TestSymlink(c *check.C) {
+       hostfile := s.cp.hostOutputDir + "/dir1/file"
+
+       err := os.MkdirAll(s.cp.hostOutputDir+"/dir1/dir2/dir3", 0755)
+       c.Assert(err, check.IsNil)
+       s.writeFileInOutputDir(c, "dir1/file", "file")
+       for _, err := range []error{
+               os.Symlink(s.cp.ctrOutputDir+"/dir1/file", s.cp.hostOutputDir+"/l_abs_file"),
+               os.Symlink(s.cp.ctrOutputDir+"/dir1/dir2", s.cp.hostOutputDir+"/l_abs_dir2"),
+               os.Symlink("../../dir1/file", s.cp.hostOutputDir+"/dir1/dir2/l_rel_file"),
+               os.Symlink("dir1/file", s.cp.hostOutputDir+"/l_rel_file"),
+               os.MkdirAll(s.cp.hostOutputDir+"/morelinks", 0755),
+               os.Symlink("../dir1/dir2", s.cp.hostOutputDir+"/morelinks/l_rel_dir2"),
+               os.Symlink("dir1/dir2/dir3", s.cp.hostOutputDir+"/l_rel_dir3"),
+               // rel. symlink -> rel. symlink -> regular file
+               os.Symlink("../dir1/dir2/l_rel_file", s.cp.hostOutputDir+"/morelinks/l_rel_l_rel_file"),
+       } {
+               c.Assert(err, check.IsNil)
+       }
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{
+               "/dir1", "/dir1/dir2", "/dir1/dir2/dir3",
+               "/l_abs_dir2", "/l_abs_dir2/dir3",
+               "/l_rel_dir3",
+               "/morelinks", "/morelinks/l_rel_dir2", "/morelinks/l_rel_dir2/dir3",
+       })
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {dst: "/dir1/dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/dir1/dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/dir1/file", src: hostfile, size: 4},
+               {dst: "/l_abs_dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/l_abs_dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/l_abs_file", src: hostfile, size: 4},
+               {dst: "/l_rel_dir3/.keep", src: os.DevNull},
+               {dst: "/l_rel_file", src: hostfile, size: 4},
+               {dst: "/morelinks/l_rel_dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/morelinks/l_rel_dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/morelinks/l_rel_l_rel_file", src: hostfile, size: 4},
+       })
+}
+
+func (s *copierSuite) TestUnsupportedOutputMount(c *check.C) {
+       s.cp.mounts["/ctr/outdir"] = arvados.Mount{Kind: "waz"}
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.NotNil)
+}
+
+func (s *copierSuite) TestUnsupportedMountKindBelow(c *check.C) {
+       s.cp.mounts["/ctr/outdir/dirk"] = arvados.Mount{Kind: "waz"}
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.NotNil)
+}
+
+func (s *copierSuite) TestWritableMountBelow(c *check.C) {
+       s.cp.mounts["/ctr/outdir/mount"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+               Writable:         true,
+       }
+       c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/mount", 0755), check.IsNil)
+       s.writeFileInOutputDir(c, "file", "file")
+       s.writeFileInOutputDir(c, "mount/foo", "foo")
+
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{"/mount"})
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {src: s.cp.hostOutputDir + "/file", dst: "/file", size: 4},
+               {src: s.cp.hostOutputDir + "/mount/foo", dst: "/mount/foo", size: 3},
+       })
+}
+
+func (s *copierSuite) writeFileInOutputDir(c *check.C, path, data string) {
+       f, err := os.OpenFile(s.cp.hostOutputDir+"/"+path, os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, data)
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+}
index 53815cbe1c8222d4e6c9614ce889d649224af7e1..098c53f8a6a587816703ad6997ceb51eec7f0232 100644 (file)
@@ -32,6 +32,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "github.com/shirou/gopsutil/process"
        "golang.org/x/net/context"
 
        dockertypes "github.com/docker/docker/api/types"
@@ -57,13 +58,14 @@ var ErrCancelled = errors.New("Cancelled")
 
 // IKeepClient is the minimal Keep API methods used by crunch-run.
 type IKeepClient interface {
-       PutHB(hash string, buf []byte) (string, int, error)
+       PutB(buf []byte) (string, int, error)
+       ReadAt(locator string, p []byte, off int) (int, error)
        ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
        ClearBlockCache()
 }
 
 // NewLogWriter is a factory function to create a new log writer.
-type NewLogWriter func(name string) io.WriteCloser
+type NewLogWriter func(name string) (io.WriteCloser, error)
 
 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
 
@@ -82,10 +84,15 @@ type ThinDockerClient interface {
        ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
 }
 
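+// PsProcess is the minimal subset of gopsutil's process API used to
+// inspect the host's process list.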
+type PsProcess interface {
+       CmdlineSlice() ([]string, error)
+}
+
 // ContainerRunner is the main stateful struct used for a single execution of a
 // container.
 type ContainerRunner struct {
        Docker    ThinDockerClient
+       client    *arvados.Client
        ArvClient IArvadosClient
        Kc        IKeepClient
        arvados.Container
@@ -99,7 +106,7 @@ type ContainerRunner struct {
        CrunchLog     *ThrottledLogger
        Stdout        io.WriteCloser
        Stderr        io.WriteCloser
-       LogCollection *CollectionWriter
+       LogCollection arvados.CollectionFileSystem
        LogsPDH       *string
        RunArvMount
        MkTempDir
@@ -116,6 +123,8 @@ type ContainerRunner struct {
        finalState    string
        parentTemp    string
 
+       ListProcesses func() ([]PsProcess, error)
+
        statLogger       io.WriteCloser
        statReporter     *crunchstat.Reporter
        hoststatLogger   io.WriteCloser
@@ -139,9 +148,10 @@ type ContainerRunner struct {
        cStateLock sync.Mutex
        cCancelled bool // StopContainer() invoked
 
-       enableNetwork string // one of "default" or "always"
-       networkMode   string // passed through to HostConfig.NetworkMode
-       arvMountLog   *ThrottledLogger
+       enableNetwork   string // one of "default" or "always"
+       networkMode     string // passed through to HostConfig.NetworkMode
+       arvMountLog     *ThrottledLogger
+       checkContainerd time.Duration
 }
 
 // setupSignals sets up signal handling to gracefully terminate the underlying
@@ -180,26 +190,31 @@ func (runner *ContainerRunner) stop(sig os.Signal) {
 var errorBlacklist = []string{
        "(?ms).*[Cc]annot connect to the Docker daemon.*",
        "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+       "(?ms).*grpc: the connection is unavailable.*",
 }
 var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
 
+func (runner *ContainerRunner) runBrokenNodeHook() {
+       if *brokenNodeHook == "" {
+               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+       } else {
+               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+               // run killme script
+               c := exec.Command(*brokenNodeHook)
+               c.Stdout = runner.CrunchLog
+               c.Stderr = runner.CrunchLog
+               err := c.Run()
+               if err != nil {
+                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+               }
+       }
+}
+
 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
        for _, d := range errorBlacklist {
                if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
                        runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
-                       if *brokenNodeHook == "" {
-                               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
-                       } else {
-                               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
-                               // run killme script
-                               c := exec.Command(*brokenNodeHook)
-                               c.Stdout = runner.CrunchLog
-                               c.Stderr = runner.CrunchLog
-                               err := c.Run()
-                               if err != nil {
-                                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
-                               }
-                       }
+                       runner.runBrokenNodeHook()
                        return true
                }
        }
@@ -275,7 +290,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }
        c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
 
-       runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
+       w, err := runner.NewLogWriter("arv-mount")
+       if err != nil {
+               return nil, err
+       }
+       runner.arvMountLog = NewThrottledLogger(w)
        c.Stdout = runner.arvMountLog
        c.Stderr = runner.arvMountLog
 
@@ -696,18 +715,27 @@ func (runner *ContainerRunner) stopHoststat() error {
        return nil
 }
 
-func (runner *ContainerRunner) startHoststat() {
-       runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+func (runner *ContainerRunner) startHoststat() error {
+       w, err := runner.NewLogWriter("hoststat")
+       if err != nil {
+               return err
+       }
+       runner.hoststatLogger = NewThrottledLogger(w)
        runner.hoststatReporter = &crunchstat.Reporter{
                Logger:     log.New(runner.hoststatLogger, "", 0),
                CgroupRoot: runner.cgroupRoot,
                PollPeriod: runner.statInterval,
        }
        runner.hoststatReporter.Start()
+       return nil
 }
 
-func (runner *ContainerRunner) startCrunchstat() {
-       runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
+func (runner *ContainerRunner) startCrunchstat() error {
+       w, err := runner.NewLogWriter("crunchstat")
+       if err != nil {
+               return err
+       }
+       runner.statLogger = NewThrottledLogger(w)
        runner.statReporter = &crunchstat.Reporter{
                CID:          runner.ContainerID,
                Logger:       log.New(runner.statLogger, "", 0),
@@ -716,6 +744,7 @@ func (runner *ContainerRunner) startCrunchstat() {
                PollPeriod:   runner.statInterval,
        }
        runner.statReporter.Start()
+       return nil
 }
 
 type infoCommand struct {
@@ -729,7 +758,10 @@ type infoCommand struct {
 // might differ from what's described in the node record (see
 // LogNodeRecord).
 func (runner *ContainerRunner) LogHostInfo() (err error) {
-       w := runner.NewLogWriter("node-info")
+       w, err := runner.NewLogWriter("node-info")
+       if err != nil {
+               return
+       }
 
        commands := []infoCommand{
                {
@@ -802,11 +834,15 @@ func (runner *ContainerRunner) LogNodeRecord() error {
 }
 
 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+       writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return false, err
+       }
        w := &ArvLogWriter{
                ArvClient:     runner.ArvClient,
                UUID:          runner.Container.UUID,
                loggingStream: label,
-               writeCloser:   runner.LogCollection.Open(label + ".json"),
+               writeCloser:   writer,
        }
 
        reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
@@ -893,8 +929,10 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        return err
                }
                runner.Stdout = stdoutFile
+       } else if w, err := runner.NewLogWriter("stdout"); err != nil {
+               return err
        } else {
-               runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
+               runner.Stdout = NewThrottledLogger(w)
        }
 
        if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
@@ -903,8 +941,10 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        return err
                }
                runner.Stderr = stderrFile
+       } else if w, err := runner.NewLogWriter("stderr"); err != nil {
+               return err
        } else {
-               runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
+               runner.Stderr = NewThrottledLogger(w)
        }
 
        if stdinRdr != nil {
@@ -974,6 +1014,10 @@ func (runner *ContainerRunner) CreateContainer() error {
        runner.ContainerConfig.Volumes = runner.Volumes
 
        maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
+       if maxRAM < 4*1024*1024 {
+               // Docker daemon won't let you set a limit less than 4 MiB
+               maxRAM = 4 * 1024 * 1024
+       }
        runner.HostConfig = dockercontainer.HostConfig{
                Binds: runner.Binds,
                LogConfig: dockercontainer.LogConfig{
@@ -1044,13 +1088,60 @@ func (runner *ContainerRunner) StartContainer() error {
        return nil
 }
 
+// CheckContainerd checks if "containerd" is present in the process list.
+func (runner *ContainerRunner) CheckContainerd() error {
+       if runner.checkContainerd == 0 {
+               return nil
+       }
+       p, _ := runner.ListProcesses()
+       for _, i := range p {
+               e, _ := i.CmdlineSlice()
+               if len(e) > 0 {
+                       if strings.Index(e[0], "containerd") > -1 {
+                               return nil
+                       }
+               }
+       }
+
+       // Not found
+       runner.runBrokenNodeHook()
+       runner.stop(nil)
+       return fmt.Errorf("'containerd' not found in process list.")
+}
+
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
 func (runner *ContainerRunner) WaitFinish() error {
+       var runTimeExceeded <-chan time.Time
        runner.CrunchLog.Print("Waiting for container to finish")
 
        waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
        arvMountExit := runner.ArvMountExit
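+       // SchedulingParameters.MaxRunTime is expressed in seconds; zero
+       // means no limit.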
+       if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+               runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
+       }
+
+       containerdGone := make(chan error)
+       defer close(containerdGone)
+       if runner.checkContainerd > 0 {
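+               // Poll the host process list every checkContainerd
+               // interval; if containerd disappears, report it on
+               // containerdGone so the select loop below can fail the
+               // container.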
+               go func() {
+                       ticker := time.NewTicker(time.Duration(runner.checkContainerd))
+                       defer ticker.Stop()
+                       for {
+                               select {
+                               case <-ticker.C:
+                                       if ck := runner.CheckContainerd(); ck != nil {
+                                               containerdGone <- ck
+                                               return
+                                       }
+                               case <-containerdGone:
+                                       // Channel closed, quit goroutine
+                                       return
+                               }
+                       }
+               }()
+       }
+
        for {
                select {
                case waitBody := <-waitOk:
@@ -1071,185 +1162,20 @@ func (runner *ContainerRunner) WaitFinish() error {
                        // arvMountExit will always be ready now that
                        // it's closed, but that doesn't interest us.
                        arvMountExit = nil
-               }
-       }
-}
 
-var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
-
-func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
-       // Follow symlinks if necessary
-       info = startinfo
-       tgt = path
-       readlinktgt = ""
-       nextlink := path
-       for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
-               if followed >= limitFollowSymlinks {
-                       // Got stuck in a loop or just a pathological number of links, give up.
-                       err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
-                       return
-               }
-
-               readlinktgt, err = os.Readlink(nextlink)
-               if err != nil {
-                       return
-               }
-
-               tgt = readlinktgt
-               if !strings.HasPrefix(tgt, "/") {
-                       // Relative symlink, resolve it to host path
-                       tgt = filepath.Join(filepath.Dir(path), tgt)
-               }
-               if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                       // Absolute symlink to container output path, adjust it to host output path.
-                       tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
-               }
-               if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                       // After dereferencing, symlink target must either be
-                       // within output directory, or must point to a
-                       // collection mount.
-                       err = ErrNotInOutputDir
-                       return
-               }
-
-               info, err = os.Lstat(tgt)
-               if err != nil {
-                       // tgt
-                       err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
-                               path[len(runner.HostOutputDir):], readlinktgt, err)
-                       return
-               }
-
-               nextlink = tgt
-       }
-
-       return
-}
-
-var limitFollowSymlinks = 10
-
-// UploadFile uploads files within the output directory, with special handling
-// for symlinks. If the symlink leads to a keep mount, copy the manifest text
-// from the keep mount into the output manifestText.  Ensure that whether
-// symlinks are relative or absolute, every symlink target (even targets that
-// are symlinks themselves) must point to a path in either the output directory
-// or a collection mount.
-//
-// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
-func (runner *ContainerRunner) UploadOutputFile(
-       path string,
-       info os.FileInfo,
-       infoerr error,
-       binds []string,
-       walkUpload *WalkUpload,
-       relocateFrom string,
-       relocateTo string,
-       followed int) (manifestText string, err error) {
-
-       if infoerr != nil {
-               return "", infoerr
-       }
-
-       if info.Mode().IsDir() {
-               // if empty, need to create a .keep file
-               dir, direrr := os.Open(path)
-               if direrr != nil {
-                       return "", direrr
-               }
-               defer dir.Close()
-               names, eof := dir.Readdirnames(1)
-               if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
-                       containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
-                       for _, bind := range binds {
-                               mnt := runner.Container.Mounts[bind]
-                               // Check if there is a bind for this
-                               // directory, in which case assume we don't need .keep
-                               if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
-                                       return
-                               }
-                       }
-                       outputSuffix := path[len(runner.HostOutputDir)+1:]
-                       return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
-               }
-               return
-       }
-
-       if followed >= limitFollowSymlinks {
-               // Got stuck in a loop or just a pathological number of
-               // directory links, give up.
-               err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
-               return
-       }
-
-       // "path" is the actual path we are visiting
-       // "tgt" is the target of "path" (a non-symlink) after following symlinks
-       // "relocated" is the path in the output manifest where the file should be placed,
-       // but has HostOutputDir as a prefix.
-
-       // The destination path in the output manifest may need to be
-       // logically relocated to some other path in order to appear
-       // in the correct location as a result of following a symlink.
-       // Remove the relocateFrom prefix and replace it with
-       // relocateTo.
-       relocated := relocateTo + path[len(relocateFrom):]
-
-       tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
-       if derefErr != nil && derefErr != ErrNotInOutputDir {
-               return "", derefErr
-       }
+               case <-runTimeExceeded:
+                       runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+                       runner.stop(nil)
+                       runTimeExceeded = nil
 
-       // go through mounts and try reverse map to collection reference
-       for _, bind := range binds {
-               mnt := runner.Container.Mounts[bind]
-               if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
-                       // get path relative to bind
-                       targetSuffix := tgt[len(bind):]
-
-                       // Copy mount and adjust the path to add path relative to the bind
-                       adjustedMount := mnt
-                       adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
-                       // Terminates in this keep mount, so add the
-                       // manifest text at appropriate location.
-                       outputSuffix := relocated[len(runner.HostOutputDir):]
-                       manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
-                       return
+               case err := <-containerdGone:
+                       return err
                }
        }
-
-       // If target is not a collection mount, it must be located within the
-       // output directory, otherwise it is an error.
-       if derefErr == ErrNotInOutputDir {
-               err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
-                       path[len(runner.HostOutputDir):], readlinktgt)
-               return
-       }
-
-       if info.Mode().IsRegular() {
-               return "", walkUpload.UploadFile(relocated, tgt)
-       }
-
-       if info.Mode().IsDir() {
-               // Symlink leads to directory.  Walk() doesn't follow
-               // directory symlinks, so we walk the target directory
-               // instead.  Within the walk, file paths are relocated
-               // so they appear under the original symlink path.
-               err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
-                       var m string
-                       m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
-                               binds, walkUpload, tgt, relocated, followed+1)
-                       if walkerr == nil {
-                               manifestText = manifestText + m
-                       }
-                       return walkerr
-               })
-               return
-       }
-
-       return
 }
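
Note: the new runTimeExceeded case follows the standard Go watchdog pattern: a timer channel is selected alongside the container's exit channels, then set to nil after firing so the branch can never be taken twice (a receive on a nil channel blocks forever). A minimal standalone sketch of the pattern, not crunch-run's actual code (done and maxRunTime are illustrative):

        func waitWithDeadline(done <-chan error, maxRunTime time.Duration) error {
                var deadline <-chan time.Time
                if maxRunTime > 0 {
                        deadline = time.After(maxRunTime) // fires once after maxRunTime
                }
                for {
                        select {
                        case err := <-done:
                                return err
                        case <-deadline:
                                log.Print("maximum run time exceeded")
                                deadline = nil // nil channel blocks forever; keep waiting for done
                        }
                }
        }
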
 
-// HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
+// CaptureOutput saves data from the container's output directory if
+// needed, and updates the container output accordingly.
 func (runner *ContainerRunner) CaptureOutput() error {
        if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
                // Output may have been set directly by the container, so
@@ -1266,163 +1192,36 @@ func (runner *ContainerRunner) CaptureOutput() error {
                }
        }
 
-       if runner.HostOutputDir == "" {
-               return nil
-       }
-
-       _, err := os.Stat(runner.HostOutputDir)
-       if err != nil {
-               return fmt.Errorf("While checking host output path: %v", err)
-       }
-
-       // Pre-populate output from the configured mount points
-       var binds []string
-       for bind, mnt := range runner.Container.Mounts {
-               if mnt.Kind == "collection" {
-                       binds = append(binds, bind)
-               }
-       }
-       sort.Strings(binds)
-
-       // Delete secret mounts so they don't get saved to the output collection.
-       for bind := range runner.SecretMounts {
-               if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-                       err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
-                       if err != nil {
-                               return fmt.Errorf("Unable to remove secret mount: %v", err)
-                       }
-               }
-       }
-
-       var manifestText string
-
-       collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
-       _, err = os.Stat(collectionMetafile)
+       txt, err := (&copier{
+               client:        runner.client,
+               arvClient:     runner.ArvClient,
+               keepClient:    runner.Kc,
+               hostOutputDir: runner.HostOutputDir,
+               ctrOutputDir:  runner.Container.OutputPath,
+               binds:         runner.Binds,
+               mounts:        runner.Container.Mounts,
+               secretMounts:  runner.SecretMounts,
+               logger:        runner.CrunchLog,
+       }).Copy()
        if err != nil {
-               // Regular directory
-
-               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
-               walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
-
-               var m string
-               err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
-                       m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
-                       if err == nil {
-                               manifestText = manifestText + m
-                       }
-                       return err
-               })
-
-               cw.EndUpload(walkUpload)
-
-               if err != nil {
-                       return fmt.Errorf("While uploading output files: %v", err)
-               }
-
-               m, err = cw.ManifestText()
-               manifestText = manifestText + m
-               if err != nil {
-                       return fmt.Errorf("While uploading output files: %v", err)
-               }
-       } else {
-               // FUSE mount directory
-               file, openerr := os.Open(collectionMetafile)
-               if openerr != nil {
-                       return fmt.Errorf("While opening FUSE metafile: %v", err)
-               }
-               defer file.Close()
-
-               var rec arvados.Collection
-               err = json.NewDecoder(file).Decode(&rec)
-               if err != nil {
-                       return fmt.Errorf("While reading FUSE metafile: %v", err)
-               }
-               manifestText = rec.ManifestText
-       }
-
-       for _, bind := range binds {
-               mnt := runner.Container.Mounts[bind]
-
-               bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
-
-               if bindSuffix == bind || len(bindSuffix) <= 0 {
-                       // either does not start with OutputPath or is OutputPath itself
-                       continue
-               }
-
-               if mnt.ExcludeFromOutput == true || mnt.Writable {
-                       continue
-               }
-
-               // append to manifest_text
-               m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
-               if err != nil {
-                       return err
-               }
-
-               manifestText = manifestText + m
-       }
-
-       // Save output
-       var response arvados.Collection
-       manifest := manifest.Manifest{Text: manifestText}
-       manifestText = manifest.Extract(".", ".").Text
-       err = runner.ArvClient.Create("collections",
-               arvadosclient.Dict{
-                       "ensure_unique_name": true,
-                       "collection": arvadosclient.Dict{
-                               "is_trashed":    true,
-                               "name":          "output for " + runner.Container.UUID,
-                               "manifest_text": manifestText}},
-               &response)
+               return err
+       }
+       var resp arvados.Collection
+       err = runner.ArvClient.Create("collections", arvadosclient.Dict{
+               "ensure_unique_name": true,
+               "collection": arvadosclient.Dict{
+                       "is_trashed":    true,
+                       "name":          "output for " + runner.Container.UUID,
+                       "manifest_text": txt,
+               },
+       }, &resp)
        if err != nil {
-               return fmt.Errorf("While creating output collection: %v", err)
+               return fmt.Errorf("error creating output collection: %v", err)
        }
-       runner.OutputPDH = &response.PortableDataHash
+       runner.OutputPDH = &resp.PortableDataHash
        return nil
 }
 
-var outputCollections = make(map[string]arvados.Collection)
-
-// Fetch the collection for the mnt.PortableDataHash
-// Return the manifest_text fragment corresponding to the specified mnt.Path
-//  after making any required updates.
-//  Ex:
-//    If mnt.Path is not specified,
-//      return the entire manifest_text after replacing any "." with bindSuffix
-//    If mnt.Path corresponds to one stream,
-//      return the manifest_text for that stream after replacing that stream name with bindSuffix
-//    Otherwise, check if a filename in any one stream is being sought. Return the manifest_text
-//      for that stream after replacing stream name with bindSuffix minus the last word
-//      and the file name with last word of the bindSuffix
-//  Allowed path examples:
-//    "path":"/"
-//    "path":"/subdir1"
-//    "path":"/subdir1/subdir2"
-//    "path":"/subdir/filename" etc
-func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
-       collection := outputCollections[mnt.PortableDataHash]
-       if collection.PortableDataHash == "" {
-               err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
-               if err != nil {
-                       return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
-               }
-               outputCollections[mnt.PortableDataHash] = collection
-       }
-
-       if collection.ManifestText == "" {
-               runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
-               return "", nil
-       }
-
-       mft := manifest.Manifest{Text: collection.ManifestText}
-       extracted := mft.Extract(mnt.Path, bindSuffix)
-       if extracted.Err != nil {
-               return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
-       }
-       return extracted.Text, nil
-}
-
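
Note: the deleted helper above was essentially a wrapper around manifest.Extract, which is still used elsewhere in this commit. A hedged fragment showing the underlying call (coll and both paths are illustrative):

        // Pull the stream under mnt.Path out of a collection manifest and
        // re-root it at the bind's location in the output.
        mft := manifest.Manifest{Text: coll.ManifestText}
        extracted := mft.Extract("/subdir1", "/tmp/foo")
        if extracted.Err != nil {
                return "", extracted.Err
        }
        return extracted.Text, nil
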
 func (runner *ContainerRunner) CleanupDirs() {
        if runner.ArvMount != nil {
                var delay int64 = 8
@@ -1495,8 +1294,12 @@ func (runner *ContainerRunner) CommitLogs() error {
                // point, but re-open crunch log with ArvClient in case there are any
                // other further errors (such as failing to write the log to Keep!)
                // while shutting down
-               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
-                       UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
+                       ArvClient:     runner.ArvClient,
+                       UUID:          runner.Container.UUID,
+                       loggingStream: "crunch-run",
+                       writeCloser:   nil,
+               })
                runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
        }()
 
@@ -1509,7 +1312,7 @@ func (runner *ContainerRunner) CommitLogs() error {
                return nil
        }
 
-       mt, err := runner.LogCollection.ManifestText()
+       mt, err := runner.LogCollection.MarshalManifest(".")
        if err != nil {
                return fmt.Errorf("While creating log manifest: %v", err)
        }
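
Note: ManifestText() belonged to the CollectionWriter deleted below (services/crunch-run/upload.go); its replacement is the arvados collection-filesystem API. A condensed sketch of that API as it is used in this commit (error handling elided; client and kc are whatever the runner was constructed with):

        fs, _ := (&arvados.Collection{}).FileSystem(client, kc) // in-memory collection
        f, _ := fs.OpenFile("crunch-run.txt", os.O_CREATE|os.O_WRONLY, 0666)
        f.Write([]byte("log line\n"))
        f.Close()
        mt, _ := fs.MarshalManifest(".") // flush blocks and render the manifest text
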
@@ -1584,12 +1387,17 @@ func (runner *ContainerRunner) IsCancelled() bool {
 }
 
 // NewArvLogWriter creates an ArvLogWriter
-func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
+func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
+       writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return nil, err
+       }
        return &ArvLogWriter{
                ArvClient:     runner.ArvClient,
                UUID:          runner.Container.UUID,
                loggingStream: name,
-               writeCloser:   runner.LogCollection.Open(name + ".txt")}
+               writeCloser:   writer,
+       }, nil
 }
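
Note: because NewLogWriter now returns (io.WriteCloser, error), every call site gains a check; the pattern used by the updated callers is:

        w, err := runner.NewLogWriter("stdout")
        if err != nil {
                return err
        }
        stdout := NewThrottledLogger(w)
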
 
 // Run the full container lifecycle.
@@ -1658,7 +1466,16 @@ func (runner *ContainerRunner) Run() (err error) {
                return
        }
        runner.setupSignals()
-       runner.startHoststat()
+       err = runner.startHoststat()
+       if err != nil {
+               return
+       }
+
+       // Sanity check that containerd is running.
+       err = runner.CheckContainerd()
+       if err != nil {
+               return
+       }
 
        // check for and/or load image
        err = runner.LoadImage()
@@ -1707,7 +1524,10 @@ func (runner *ContainerRunner) Run() (err error) {
        }
        runner.finalState = "Cancelled"
 
-       runner.startCrunchstat()
+       err = runner.startCrunchstat()
+       if err != nil {
+               return
+       }
 
        err = runner.StartContainer()
        if err != nil {
@@ -1766,15 +1586,27 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
 }
 
 // NewContainerRunner creates a new container runner.
-func NewContainerRunner(api IArvadosClient,
-       kc IKeepClient,
-       docker ThinDockerClient,
-       containerUUID string) *ContainerRunner {
-
-       cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
+func NewContainerRunner(client *arvados.Client, api IArvadosClient, kc IKeepClient, docker ThinDockerClient, containerUUID string) (*ContainerRunner, error) {
+       cr := &ContainerRunner{
+               client:    client,
+               ArvClient: api,
+               Kc:        kc,
+               Docker:    docker,
+       }
        cr.NewLogWriter = cr.NewArvLogWriter
        cr.RunArvMount = cr.ArvMountCmd
        cr.MkTempDir = ioutil.TempDir
+       cr.ListProcesses = func() ([]PsProcess, error) {
+               pr, err := process.Processes()
+               if err != nil {
+                       return nil, err
+               }
+               ps := make([]PsProcess, len(pr))
+               for i, j := range pr {
+                       ps[i] = j
+               }
+               return ps, nil
+       }
        cr.MkArvClient = func(token string) (IArvadosClient, error) {
                cl, err := arvadosclient.MakeArvadosClient()
                if err != nil {
@@ -1783,14 +1615,22 @@ func NewContainerRunner(api IArvadosClient,
                cl.ApiToken = token
                return cl, nil
        }
-       cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
+       var err error
+       cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.client, cr.Kc)
+       if err != nil {
+               return nil, err
+       }
        cr.Container.UUID = containerUUID
-       cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
+       w, err := cr.NewLogWriter("crunch-run")
+       if err != nil {
+               return nil, err
+       }
+       cr.CrunchLog = NewThrottledLogger(w)
        cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
 
        loadLogThrottleParams(api)
 
-       return cr
+       return cr, nil
 }
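
Note: PsProcess is the small interface the ListProcesses hook returns so tests can substitute fakes for gopsutil's real process list. Its definition is outside this hunk; inferred from the fakes in crunchrun_test.go, it is assumed to be:

        type PsProcess interface {
                CmdlineSlice() ([]string, error)
        }

        // gopsutil's *process.Process satisfies this, so the assignment
        // ps[i] = j above is an implicit interface conversion; tests supply
        // e.g. FakeProcess{[]string{"docker-containerd"}} instead.
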
 
 func main() {
@@ -1809,6 +1649,7 @@ func main() {
        `)
        memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
        getVersion := flag.Bool("version", false, "Print version information and exit.")
+       checkContainerd := flag.Duration("check-containerd", 60*time.Second, "Periodic check if (docker-)containerd is running (use 0s to disable).")
        flag.Parse()
 
        // Print version information if requested
@@ -1842,7 +1683,10 @@ func main() {
        // minimum version we want to support.
        docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
 
-       cr := NewContainerRunner(api, kc, docker, containerId)
+       cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
+       if err != nil {
+               log.Fatal(err)
+       }
        if dockererr != nil {
                cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
                cr.checkBrokenNode(dockererr)
@@ -1861,6 +1705,7 @@ func main() {
        cr.expectCgroupParent = *cgroupParent
        cr.enableNetwork = *enableNetwork
        cr.networkMode = *networkMode
+       cr.checkContainerd = *checkContainerd
        if *cgroupParentSubsystem != "" {
                p := findCgroup(*cgroupParentSubsystem)
                cr.setCgroupParent = p
@@ -1872,15 +1717,15 @@ func main() {
        if *memprofile != "" {
                f, err := os.Create(*memprofile)
                if err != nil {
-                       log.Printf("could not create memory profile: ", err)
+                       log.Printf("could not create memory profile: %s", err)
                }
                runtime.GC() // get up-to-date statistics
                if err := pprof.WriteHeapProfile(f); err != nil {
-                       log.Printf("could not write memory profile: ", err)
+                       log.Printf("could not write memory profile: %s", err)
                }
                closeerr := f.Close()
                if closeerr != nil {
-                       log.Printf("closing memprofile file: ", err)
+                       log.Printf("closing memprofile file: %s", closeerr)
                }
        }
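
Note: with the flag wired up, the containerd check interval can be tuned or disabled per invocation. Assuming the usual crunch-run invocation shape (the UUID is illustrative):

        crunch-run -check-containerd=10s zzzzz-dz642-xxxxxxxxxxxxxxx
        crunch-run -check-containerd=0s zzzzz-dz642-xxxxxxxxxxxxxxx   # disable the check
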
 
index ba9195999d3e7a3f867b3ece3eecea03a600c0e7..8d8e0400003a94dae160ee65a69ccd92f723c823 100644 (file)
@@ -13,7 +13,6 @@ import (
        "fmt"
        "io"
        "io/ioutil"
-       "log"
        "net"
        "os"
        "os/exec"
@@ -46,10 +45,12 @@ func TestCrunchExec(t *testing.T) {
 var _ = Suite(&TestSuite{})
 
 type TestSuite struct {
+       client *arvados.Client
        docker *TestDockerClient
 }
 
 func (s *TestSuite) SetUpTest(c *C) {
+       s.client = arvados.NewClientFromEnv()
        s.docker = NewTestDockerClient()
 }
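
Note: arvados.NewClientFromEnv builds the client from the standard environment variables (ARVADOS_API_HOST, ARVADOS_API_TOKEN, plus ARVADOS_API_HOST_INSECURE), so a suite only needs something like (illustrative values):

        os.Setenv("ARVADOS_API_HOST", "zzzzz.example.com")
        os.Setenv("ARVADOS_API_TOKEN", "xxxxxxxx")
        client := arvados.NewClientFromEnv()
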
 
@@ -356,12 +357,16 @@ call:
        return nil
 }
 
-func (client *KeepTestClient) PutHB(hash string, buf []byte) (string, int, error) {
+func (client *KeepTestClient) PutB(buf []byte) (string, int, error) {
        client.Content = buf
-       return fmt.Sprintf("%s+%d", hash, len(buf)), len(buf), nil
+       return fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf)), len(buf), nil
 }
 
-func (*KeepTestClient) ClearBlockCache() {
+func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
+       return 0, errors.New("not implemented")
+}
+
+func (client *KeepTestClient) ClearBlockCache() {
 }
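
Note: the fake PutB now returns the same unsigned Keep locator format, md5hex+size, that a real keepstore would compute for the block:

        buf := []byte("foo")
        loc := fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf))
        // loc == "acbd18db4cc2f85cedef654fccc4a4d8+3"
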
 
 func (client *KeepTestClient) Close() {
@@ -397,6 +402,10 @@ func (fw FileWrapper) Write([]byte) (int, error) {
        return 0, errors.New("not implemented")
 }
 
+func (fw FileWrapper) Sync() error {
+       return errors.New("not implemented")
+}
+
 func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
        if filename == hwImageId+".tar" {
                rdr := ioutil.NopCloser(&bytes.Buffer{})
@@ -413,9 +422,10 @@ func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename s
 func (s *TestSuite) TestLoadImage(c *C) {
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
 
-       _, err := cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+       _, err = cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
        c.Check(err, IsNil)
 
        _, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
@@ -480,26 +490,24 @@ func (ArvErrorTestClient) Discovery(key string) (interface{}, error) {
        return discoveryMap[key], nil
 }
 
-type KeepErrorTestClient struct{}
-
-func (KeepErrorTestClient) PutHB(hash string, buf []byte) (string, int, error) {
-       return "", 0, errors.New("KeepError")
+type KeepErrorTestClient struct {
+       KeepTestClient
 }
 
-func (KeepErrorTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
+func (*KeepErrorTestClient) ManifestFileReader(manifest.Manifest, string) (arvados.File, error) {
        return nil, errors.New("KeepError")
 }
 
-func (KeepErrorTestClient) ClearBlockCache() {
+func (*KeepErrorTestClient) PutB(buf []byte) (string, int, error) {
+       return "", 0, errors.New("KeepError")
 }
 
-type KeepReadErrorTestClient struct{}
-
-func (KeepReadErrorTestClient) PutHB(hash string, buf []byte) (string, int, error) {
-       return "", 0, nil
+type KeepReadErrorTestClient struct {
+       KeepTestClient
 }
 
-func (KeepReadErrorTestClient) ClearBlockCache() {
+func (*KeepReadErrorTestClient) ReadAt(string, []byte, int) (int, error) {
+       return 0, errors.New("KeepError")
 }
 
 type ErrorReader struct {
@@ -522,37 +530,42 @@ func (s *TestSuite) TestLoadImageArvError(c *C) {
        // (1) Arvados error
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.Container.ContainerImage = hwPDH
 
-       err := cr.LoadImage()
+       err = cr.LoadImage()
        c.Check(err.Error(), Equals, "While getting container image collection: ArvError")
 }
 
 func (s *TestSuite) TestLoadImageKeepError(c *C) {
        // (2) Keep error
-       cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, &KeepErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.Container.ContainerImage = hwPDH
 
-       err := cr.LoadImage()
+       err = cr.LoadImage()
+       c.Assert(err, NotNil)
        c.Check(err.Error(), Equals, "While creating ManifestFileReader for container image: KeepError")
 }
 
 func (s *TestSuite) TestLoadImageCollectionError(c *C) {
        // (3) Collection doesn't contain image
-       cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, &KeepReadErrorTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.Container.ContainerImage = otherPDH
 
-       err := cr.LoadImage()
+       err = cr.LoadImage()
        c.Check(err.Error(), Equals, "First file in the container image collection does not end in .tar")
 }
 
 func (s *TestSuite) TestLoadImageKeepReadError(c *C) {
        // (4) Keep read error while loading the container image
-       cr := NewContainerRunner(&ArvTestClient{}, KeepReadErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, &KeepReadErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.Container.ContainerImage = hwPDH
 
-       err := cr.LoadImage()
+       err = cr.LoadImage()
        c.Check(err, NotNil)
 }
 
@@ -569,14 +582,14 @@ type TestLogs struct {
        Stderr ClosableBuffer
 }
 
-func (tl *TestLogs) NewTestLoggingWriter(logstr string) io.WriteCloser {
+func (tl *TestLogs) NewTestLoggingWriter(logstr string) (io.WriteCloser, error) {
        if logstr == "stdout" {
-               return &tl.Stdout
+               return &tl.Stdout, nil
        }
        if logstr == "stderr" {
-               return &tl.Stderr
+               return &tl.Stderr, nil
        }
-       return nil
+       return nil, errors.New("unexpected log stream name: " + logstr)
 }
 
 func dockerLog(fd byte, msg string) []byte {
@@ -595,13 +608,14 @@ func (s *TestSuite) TestRunContainer(c *C) {
        }
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
 
        var logs TestLogs
        cr.NewLogWriter = logs.NewTestLoggingWriter
        cr.Container.ContainerImage = hwPDH
        cr.Container.Command = []string{"./hw"}
-       err := cr.LoadImage()
+       err = cr.LoadImage()
        c.Check(err, IsNil)
 
        err = cr.CreateContainer()
@@ -621,14 +635,15 @@ func (s *TestSuite) TestCommitLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
        cr.CrunchLog.Print("Hello world!")
        cr.CrunchLog.Print("Goodbye")
        cr.finalState = "Complete"
 
-       err := cr.CommitLogs()
+       err = cr.CommitLogs()
        c.Check(err, IsNil)
 
        c.Check(api.Calls, Equals, 2)
@@ -642,9 +657,10 @@ func (s *TestSuite) TestUpdateContainerRunning(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
 
-       err := cr.UpdateContainerRunning()
+       err = cr.UpdateContainerRunning()
        c.Check(err, IsNil)
 
        c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
@@ -654,7 +670,8 @@ func (s *TestSuite) TestUpdateContainerComplete(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
 
        cr.LogsPDH = new(string)
        *cr.LogsPDH = "d3a229d2fe3690c2c3e75a71a153c6a3+60"
@@ -663,7 +680,7 @@ func (s *TestSuite) TestUpdateContainerComplete(c *C) {
        *cr.ExitCode = 42
        cr.finalState = "Complete"
 
-       err := cr.UpdateContainerFinal()
+       err = cr.UpdateContainerFinal()
        c.Check(err, IsNil)
 
        c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
@@ -675,11 +692,12 @@ func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.cCancelled = true
        cr.finalState = "Cancelled"
 
-       err := cr.UpdateContainerFinal()
+       err = cr.UpdateContainerFinal()
        c.Check(err, IsNil)
 
        c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], IsNil)
@@ -700,7 +718,7 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
        err = json.Unmarshal([]byte(record), &sm)
        c.Check(err, IsNil)
        secretMounts, err := json.Marshal(sm)
-       log.Printf("%q %q", sm, secretMounts)
+       c.Logf("%s %q", sm, secretMounts)
        c.Check(err, IsNil)
 
        s.docker.exitCode = exitCode
@@ -711,7 +729,8 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
        s.docker.api = api
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.statInterval = 100 * time.Millisecond
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
@@ -774,7 +793,7 @@ func (s *TestSuite) TestFullRunHello(c *C) {
     "mounts": {"/tmp": {"kind": "tmp"} },
     "output_path": "/tmp",
     "priority": 1,
-    "runtime_constraints": {}
+       "runtime_constraints": {}
 }`, nil, 0, func(t *TestDockerClient) {
                t.logWriter.Write(dockerLog(1, "hello world\n"))
                t.logWriter.Close()
@@ -786,6 +805,26 @@ func (s *TestSuite) TestFullRunHello(c *C) {
 
 }
 
+func (s *TestSuite) TestRunTimeExceeded(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["sleep", "3"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "scheduling_parameters": {"max_run_time": 1}
+}`, nil, 0, func(t *TestDockerClient) {
+               time.Sleep(3 * time.Second)
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*maximum run time exceeded.*")
+}
+
 func (s *TestSuite) TestCrunchstat(c *C) {
        api, _, _ := s.fullRunHelper(c, `{
                "command": ["sleep", "1"],
@@ -989,7 +1028,8 @@ func (s *TestSuite) testStopContainer(c *C, setup func(cr *ContainerRunner)) {
        api := &ArvTestClient{Container: rec}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }
        cr.MkArvClient = func(token string) (IArvadosClient, error) {
                return &ArvTestClient{}, nil
@@ -1060,7 +1100,8 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
 
@@ -1470,7 +1511,8 @@ func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDoc
        api = &ArvTestClient{Container: rec}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
        cr.MkArvClient = func(token string) (IArvadosClient, error) {
@@ -1512,8 +1554,8 @@ func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
 }
 
 func (s *TestSuite) TestFullRunWithAPI(c *C) {
+       defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST"))
        os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
-       defer os.Unsetenv("ARVADOS_API_HOST")
        api, _, _ := s.fullRunHelper(c, `{
     "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
     "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
@@ -1535,8 +1577,8 @@ func (s *TestSuite) TestFullRunWithAPI(c *C) {
 }
 
 func (s *TestSuite) TestFullRunSetOutput(c *C) {
+       defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST"))
        os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
-       defer os.Unsetenv("ARVADOS_API_HOST")
        api, _, _ := s.fullRunHelper(c, `{
     "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
     "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
@@ -1634,7 +1676,7 @@ func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
                                manifest := collection["manifest_text"].(string)
 
                                c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
-./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 9:18:bar 9:18:sub1file2
+./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:bar 36:18:sub1file2
 ./foo/baz 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 9:18:sub2file2
 ./foo/sub1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
 ./foo/sub1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
@@ -1652,7 +1694,7 @@ func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(
                "environment": {"FROBIZ": "bilbo"},
                "mounts": {
         "/tmp": {"kind": "tmp"},
-        "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt"},
+        "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367", "path": "/subdir1/file2_in_subdir1.txt"},
         "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
     },
                "output_path": "/tmp",
@@ -1685,52 +1727,6 @@ func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(
        }
 }
 
-func (s *TestSuite) TestOutputSymlinkToInput(c *C) {
-       helperRecord := `{
-               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
-               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
-               "cwd": "/bin",
-               "environment": {"FROBIZ": "bilbo"},
-               "mounts": {
-        "/tmp": {"kind": "tmp"},
-        "/keep/foo/sub1file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path": "/subdir1/file2_in_subdir1.txt"},
-        "/keep/foo2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367"}
-    },
-               "output_path": "/tmp",
-               "priority": 1,
-               "runtime_constraints": {}
-       }`
-
-       extraMounts := []string{
-               "a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
-       }
-
-       api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
-               os.Symlink("/keep/foo/sub1file2", t.realTemp+"/tmp2/baz")
-               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/tmp2/baz2")
-               os.Symlink("/keep/foo2/subdir1", t.realTemp+"/tmp2/baz3")
-               os.Mkdir(t.realTemp+"/tmp2/baz4", 0700)
-               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/tmp2/baz4/baz5")
-               t.logWriter.Close()
-       })
-
-       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
-       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
-       for _, v := range api.Content {
-               if v["collection"] != nil {
-                       collection := v["collection"].(arvadosclient.Dict)
-                       if strings.Index(collection["name"].(string), "output") == 0 {
-                               manifest := collection["manifest_text"].(string)
-                               c.Check(manifest, Equals, `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:baz 9:18:baz2
-./baz3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
-./baz3/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
-./baz4 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:baz5
-`)
-                       }
-               }
-       }
-}
-
 func (s *TestSuite) TestOutputError(c *C) {
        helperRecord := `{
                "command": ["/bin/sh", "-c", "echo $FROBIZ"],
@@ -1755,59 +1751,6 @@ func (s *TestSuite) TestOutputError(c *C) {
        c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
 }
 
-func (s *TestSuite) TestOutputSymlinkToOutput(c *C) {
-       helperRecord := `{
-               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
-               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
-               "cwd": "/bin",
-               "environment": {"FROBIZ": "bilbo"},
-               "mounts": {
-        "/tmp": {"kind": "tmp"}
-    },
-               "output_path": "/tmp",
-               "priority": 1,
-               "runtime_constraints": {}
-       }`
-
-       extraMounts := []string{}
-
-       api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
-               rf, _ := os.Create(t.realTemp + "/tmp2/realfile")
-               rf.Write([]byte("foo"))
-               rf.Close()
-
-               os.Mkdir(t.realTemp+"/tmp2/realdir", 0700)
-               rf, _ = os.Create(t.realTemp + "/tmp2/realdir/subfile")
-               rf.Write([]byte("bar"))
-               rf.Close()
-
-               os.Symlink("/tmp/realfile", t.realTemp+"/tmp2/file1")
-               os.Symlink("realfile", t.realTemp+"/tmp2/file2")
-               os.Symlink("/tmp/file1", t.realTemp+"/tmp2/file3")
-               os.Symlink("file2", t.realTemp+"/tmp2/file4")
-               os.Symlink("realdir", t.realTemp+"/tmp2/dir1")
-               os.Symlink("/tmp/realdir", t.realTemp+"/tmp2/dir2")
-               t.logWriter.Close()
-       })
-
-       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
-       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
-       for _, v := range api.Content {
-               if v["collection"] != nil {
-                       collection := v["collection"].(arvadosclient.Dict)
-                       if strings.Index(collection["name"].(string), "output") == 0 {
-                               manifest := collection["manifest_text"].(string)
-                               c.Check(manifest, Equals,
-                                       `. 7a2c86e102dcc231bd232aad99686dfa+15 0:3:file1 3:3:file2 6:3:file3 9:3:file4 12:3:realfile
-./dir1 37b51d194a7513e45b56f6524f2d51f2+3 0:3:subfile
-./dir2 37b51d194a7513e45b56f6524f2d51f2+3 0:3:subfile
-./realdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:subfile
-`)
-                       }
-               }
-       }
-}
-
 func (s *TestSuite) TestStdinCollectionMountPoint(c *C) {
        helperRecord := `{
                "command": ["/bin/sh", "-c", "echo $FROBIZ"],
@@ -1911,7 +1854,8 @@ func (s *TestSuite) TestStderrMount(c *C) {
 func (s *TestSuite) TestNumberRoundTrip(c *C) {
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.fetchContainerRecord()
 
        jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content)
@@ -1920,59 +1864,6 @@ func (s *TestSuite) TestNumberRoundTrip(c *C) {
        c.Check(string(jsondata), Equals, `{"number":123456789123456789}`)
 }
 
-func (s *TestSuite) TestEvalSymlinks(c *C) {
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
-
-       realTemp, err := ioutil.TempDir("", "crunchrun_test-")
-       c.Assert(err, IsNil)
-       defer os.RemoveAll(realTemp)
-
-       cr.HostOutputDir = realTemp
-
-       // Absolute path outside output dir
-       os.Symlink("/etc/passwd", realTemp+"/p1")
-
-       // Relative outside output dir
-       os.Symlink("../zip", realTemp+"/p2")
-
-       // Circular references
-       os.Symlink("p4", realTemp+"/p3")
-       os.Symlink("p5", realTemp+"/p4")
-       os.Symlink("p3", realTemp+"/p5")
-
-       // Target doesn't exist
-       os.Symlink("p99", realTemp+"/p6")
-
-       for _, v := range []string{"p1", "p2", "p3", "p4", "p5"} {
-               info, err := os.Lstat(realTemp + "/" + v)
-               c.Assert(err, IsNil)
-               _, _, _, err = cr.derefOutputSymlink(realTemp+"/"+v, info)
-               c.Assert(err, NotNil)
-       }
-}
-
-func (s *TestSuite) TestEvalSymlinkDir(c *C) {
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
-
-       realTemp, err := ioutil.TempDir("", "crunchrun_test-")
-       c.Assert(err, IsNil)
-       defer os.RemoveAll(realTemp)
-
-       cr.HostOutputDir = realTemp
-
-       // Absolute path outside output dir
-       os.Symlink(".", realTemp+"/loop")
-
-       v := "loop"
-       info, err := os.Lstat(realTemp + "/" + v)
-       _, err = cr.UploadOutputFile(realTemp+"/"+v, info, err, []string{}, nil, "", "", 0)
-       c.Assert(err, NotNil)
-}
-
 func (s *TestSuite) TestFullBrokenDocker1(c *C) {
        tf, err := ioutil.TempFile("", "brokenNodeHook-")
        c.Assert(err, IsNil)
@@ -2176,3 +2067,49 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
        c.Check(api.CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil)
        c.Check(api.CalledWith("collection.manifest_text", ""), NotNil)
 }
+
+type FakeProcess struct {
+       cmdLine []string
+}
+
+func (fp FakeProcess) CmdlineSlice() ([]string, error) {
+       return fp.cmdLine, nil
+}
+
+func (s *TestSuite) helpCheckContainerd(c *C, lp func() ([]PsProcess, error)) error {
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{callraw: true}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.checkContainerd = 100 * time.Millisecond
+       cr.ListProcesses = lp
+
+       s.docker.fn = func(t *TestDockerClient) {
+               time.Sleep(1 * time.Second)
+               t.logWriter.Close()
+       }
+
+       err = cr.CreateContainer()
+       c.Check(err, IsNil)
+
+       err = cr.StartContainer()
+       c.Check(err, IsNil)
+
+       err = cr.WaitFinish()
+       return err
+}
+
+func (s *TestSuite) TestCheckContainerdPresent(c *C) {
+       err := s.helpCheckContainerd(c, func() ([]PsProcess, error) {
+               return []PsProcess{FakeProcess{[]string{"docker-containerd"}}}, nil
+       })
+       c.Check(err, IsNil)
+}
+
+func (s *TestSuite) TestCheckContainerdMissing(c *C) {
+       err := s.helpCheckContainerd(c, func() ([]PsProcess, error) {
+               return []PsProcess{FakeProcess{[]string{"abc"}}}, nil
+       })
+       c.Check(err, ErrorMatches, `'containerd' not found in process list.`)
+}
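
Note: CheckContainerd's body is outside this hunk; judging from these tests it scans the ListProcesses output for a containerd command line. A hedged sketch of that kind of check, not the actual implementation:

        func containerdRunning(lp func() ([]PsProcess, error)) (bool, error) {
                procs, err := lp()
                if err != nil {
                        return false, err
                }
                for _, p := range procs {
                        cmd, err := p.CmdlineSlice()
                        if err != nil {
                                continue
                        }
                        if len(cmd) > 0 && strings.Contains(cmd[0], "containerd") {
                                return true, nil
                        }
                }
                return false, nil
        }
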
index 2ecc8726f5e54d91518b3f20c08eb8f1dec41852..c312a532e44f43d63fa65b1d6ff6e7af9028a924 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
 package main
 
 import (
index abac2bbecc6e997886283f11012d50a44a9beab9..13a171ae8416729cf67fd940a2170d871abc5bd1 100644 (file)
@@ -10,11 +10,14 @@ import (
        "testing"
        "time"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        . "gopkg.in/check.v1"
 )
 
-type LoggingTestSuite struct{}
+type LoggingTestSuite struct {
+       client *arvados.Client
+}
 
 type TestTimestamper struct {
        count int
@@ -32,11 +35,16 @@ func (this *TestTimestamper) Timestamp(t time.Time) string {
 // Gocheck boilerplate
 var _ = Suite(&LoggingTestSuite{})
 
+func (s *LoggingTestSuite) SetUpTest(c *C) {
+       s.client = arvados.NewClientFromEnv()
+}
+
 func (s *LoggingTestSuite) TestWriteLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
        cr.CrunchLog.Print("Hello world!")
@@ -45,7 +53,7 @@ func (s *LoggingTestSuite) TestWriteLogs(c *C) {
 
        c.Check(api.Calls, Equals, 1)
 
-       mt, err := cr.LogCollection.ManifestText()
+       mt, err := cr.LogCollection.MarshalManifest(".")
        c.Check(err, IsNil)
        c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
 
@@ -64,7 +72,8 @@ func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
        cr.CrunchLog.Immediate = nil
 
@@ -74,10 +83,10 @@ func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
        cr.CrunchLog.Print("Goodbye")
        cr.CrunchLog.Close()
 
-       c.Check(api.Calls > 1, Equals, true)
+       c.Check(api.Calls > 0, Equals, true)
        c.Check(api.Calls < 2000000, Equals, true)
 
-       mt, err := cr.LogCollection.ManifestText()
+       mt, err := cr.LogCollection.MarshalManifest(".")
        c.Check(err, IsNil)
        c.Check(mt, Equals, ". 9c2c05d1fae6aaa8af85113ba725716d+67108864 80b821383a07266c2a66a4566835e26e+21780065 0:88888929:crunch-run.txt\n")
 }
@@ -86,10 +95,13 @@ func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        ts := &TestTimestamper{}
        cr.CrunchLog.Timestamper = ts.Timestamp
-       stdout := NewThrottledLogger(cr.NewLogWriter("stdout"))
+       w, err := cr.NewLogWriter("stdout")
+       c.Assert(err, IsNil)
+       stdout := NewThrottledLogger(w)
        stdout.Timestamper = ts.Timestamp
 
        cr.CrunchLog.Print("Hello world!")
@@ -112,26 +124,24 @@ func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
 2015-12-29T15:51:45.000000004Z Blurb
 `)
 
-       mt, err := cr.LogCollection.ManifestText()
+       mt, err := cr.LogCollection.MarshalManifest(".")
        c.Check(err, IsNil)
-       c.Check(mt, Equals, ""+
-               ". 408672f5b5325f7d20edfbf899faee42+83 0:83:crunch-run.txt\n"+
-               ". c556a293010069fa79a6790a931531d5+80 0:80:stdout.txt\n")
+       c.Check(mt, Equals, ". 48f9023dc683a850b1c9b482b14c4b97+163 0:83:crunch-run.txt 83:80:stdout.txt\n")
 }
 
 func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytes(c *C) {
-       testWriteLogsWithRateLimit(c, "crunchLogThrottleBytes", 50, 65536, "Exceeded rate 50 bytes per 60 seconds")
+       s.testWriteLogsWithRateLimit(c, "crunchLogThrottleBytes", 50, 65536, "Exceeded rate 50 bytes per 60 seconds")
 }
 
 func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleLines(c *C) {
-       testWriteLogsWithRateLimit(c, "crunchLogThrottleLines", 1, 1024, "Exceeded rate 1 lines per 60 seconds")
+       s.testWriteLogsWithRateLimit(c, "crunchLogThrottleLines", 1, 1024, "Exceeded rate 1 lines per 60 seconds")
 }
 
 func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytesPerEvent(c *C) {
-       testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 50, 67108864, "Exceeded log limit 50 bytes (crunch_limit_log_bytes_per_job)")
+       s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 50, 67108864, "Exceeded log limit 50 bytes (crunch_limit_log_bytes_per_job)")
 }
 
-func testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, throttleDefault int, expected string) {
+func (s *LoggingTestSuite) testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, throttleDefault int, expected string) {
        discoveryMap[throttleParam] = float64(throttleValue)
        defer func() {
                discoveryMap[throttleParam] = float64(throttleDefault)
@@ -140,7 +150,8 @@ func testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, t
        api := &ArvTestClient{}
        kc := &KeepTestClient{}
        defer kc.Close()
-       cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
        cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
 
        cr.CrunchLog.Print("Hello world!")
@@ -149,7 +160,7 @@ func testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, t
 
        c.Check(api.Calls, Equals, 1)
 
-       mt, err := cr.LogCollection.ManifestText()
+       mt, err := cr.LogCollection.MarshalManifest(".")
        c.Check(err, IsNil)
        c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
 
diff --git a/services/crunch-run/upload.go b/services/crunch-run/upload.go
deleted file mode 100644 (file)
index ddad8bf..0000000
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-// Originally based on sdk/go/crunchrunner/upload.go
-//
-// Unlike the original, which iterates over a directory tree and uploads each
-// file sequentially, this version supports opening and writing multiple files
-// in a collection simultaneously.
-//
-// Eventually this should move into the Arvados Go SDK for a more comprehensive
-// implementation of Collections.
-
-import (
-       "bytes"
-       "crypto/md5"
-       "errors"
-       "fmt"
-       "io"
-       "log"
-       "os"
-       "path/filepath"
-       "strings"
-       "sync"
-
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       "git.curoverse.com/arvados.git/sdk/go/manifest"
-)
-
-// Block is a data block in a manifest stream
-type Block struct {
-       data   []byte
-       offset int64
-}
-
-// CollectionFileWriter is a Writer that permits writing to a file in a Keep Collection.
-type CollectionFileWriter struct {
-       IKeepClient
-       *manifest.ManifestStream
-       offset uint64
-       length uint64
-       *Block
-       uploader chan *Block
-       finish   chan []error
-       fn       string
-}
-
-// Write to a file in a keep collection
-func (m *CollectionFileWriter) Write(p []byte) (int, error) {
-       n, err := m.ReadFrom(bytes.NewReader(p))
-       return int(n), err
-}
-
-// ReadFrom a Reader and write to the Keep collection file.
-func (m *CollectionFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
-       var total int64
-       var count int
-
-       for err == nil {
-               if m.Block == nil {
-                       m.Block = &Block{make([]byte, keepclient.BLOCKSIZE), 0}
-               }
-               count, err = r.Read(m.Block.data[m.Block.offset:])
-               total += int64(count)
-               m.Block.offset += int64(count)
-               if m.Block.offset == keepclient.BLOCKSIZE {
-                       m.uploader <- m.Block
-                       m.Block = nil
-               }
-       }
-
-       m.length += uint64(total)
-
-       if err == io.EOF {
-               return total, nil
-       }
-       return total, err
-}
-
-// Close stops writing a file and adds it to the parent manifest.
-func (m *CollectionFileWriter) Close() error {
-       m.ManifestStream.FileStreamSegments = append(m.ManifestStream.FileStreamSegments,
-               manifest.FileStreamSegment{m.offset, m.length, m.fn})
-       return nil
-}
-
-func (m *CollectionFileWriter) NewFile(fn string) {
-       m.offset += m.length
-       m.length = 0
-       m.fn = fn
-}
-
-func (m *CollectionFileWriter) goUpload(workers chan struct{}) {
-       var mtx sync.Mutex
-       var wg sync.WaitGroup
-
-       var errors []error
-       uploader := m.uploader
-       finish := m.finish
-       for block := range uploader {
-               mtx.Lock()
-               m.ManifestStream.Blocks = append(m.ManifestStream.Blocks, "")
-               blockIndex := len(m.ManifestStream.Blocks) - 1
-               mtx.Unlock()
-
-               workers <- struct{}{} // wait for an available worker slot
-               wg.Add(1)
-
-               go func(block *Block, blockIndex int) {
-                       hash := fmt.Sprintf("%x", md5.Sum(block.data[0:block.offset]))
-                       signedHash, _, err := m.IKeepClient.PutHB(hash, block.data[0:block.offset])
-                       <-workers
-
-                       mtx.Lock()
-                       if err != nil {
-                               errors = append(errors, err)
-                       } else {
-                               m.ManifestStream.Blocks[blockIndex] = signedHash
-                       }
-                       mtx.Unlock()
-
-                       wg.Done()
-               }(block, blockIndex)
-       }
-       wg.Wait()
-
-       finish <- errors
-}
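
Note: goUpload used the classic bounded-concurrency idiom that the new copier/collection-filesystem code subsumes: a buffered channel as a semaphore plus a WaitGroup. In generic form (Job, jobs, maxWorkers and process are placeholders):

        sem := make(chan struct{}, maxWorkers) // at most maxWorkers goroutines in flight
        var wg sync.WaitGroup
        for _, job := range jobs {
                sem <- struct{}{} // acquire a slot
                wg.Add(1)
                go func(j Job) {
                        defer wg.Done()
                        defer func() { <-sem }() // release the slot
                        process(j)
                }(job)
        }
        wg.Wait()
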
-
-// CollectionWriter implements creating new Keep collections by opening files
-// and writing to them.
-type CollectionWriter struct {
-       MaxWriters int
-       IKeepClient
-       Streams []*CollectionFileWriter
-       workers chan struct{}
-       mtx     sync.Mutex
-}
-
-// Open a new file for writing in the Keep collection.
-func (m *CollectionWriter) Open(path string) io.WriteCloser {
-       var dir string
-       var fn string
-
-       i := strings.Index(path, "/")
-       if i > -1 {
-               dir = "./" + path[0:i]
-               fn = path[i+1:]
-       } else {
-               dir = "."
-               fn = path
-       }
-
-       fw := &CollectionFileWriter{
-               m.IKeepClient,
-               &manifest.ManifestStream{StreamName: dir},
-               0,
-               0,
-               nil,
-               make(chan *Block),
-               make(chan []error),
-               fn}
-
-       m.mtx.Lock()
-       defer m.mtx.Unlock()
-       if m.workers == nil {
-               if m.MaxWriters < 1 {
-                       m.MaxWriters = 2
-               }
-               m.workers = make(chan struct{}, m.MaxWriters)
-       }
-
-       go fw.goUpload(m.workers)
-
-       m.Streams = append(m.Streams, fw)
-
-       return fw
-}
-
-// Finish writing the collection and wait for all blocks to complete uploading.
-func (m *CollectionWriter) Finish() error {
-       var errstring string
-       m.mtx.Lock()
-       defer m.mtx.Unlock()
-
-       for _, stream := range m.Streams {
-               if stream.uploader == nil {
-                       continue
-               }
-               if stream.Block != nil {
-                       stream.uploader <- stream.Block
-                       stream.Block = nil
-               }
-               close(stream.uploader)
-               stream.uploader = nil
-
-               errors := <-stream.finish
-               close(stream.finish)
-               stream.finish = nil
-
-               for _, r := range errors {
-                       errstring = fmt.Sprintf("%v%v\n", errstring, r.Error())
-               }
-       }
-       if errstring != "" {
-               return errors.New(errstring)
-       }
-       return nil
-}
-
-// ManifestText returns the manifest text of the collection.  Calls Finish()
-// first to ensure that all blocks are written and that signed locators are
-// available.
-func (m *CollectionWriter) ManifestText() (mt string, err error) {
-       err = m.Finish()
-       if err != nil {
-               return "", err
-       }
-
-       var buf bytes.Buffer
-
-       m.mtx.Lock()
-       defer m.mtx.Unlock()
-       for _, v := range m.Streams {
-               if len(v.FileStreamSegments) == 0 {
-                       continue
-               }
-               k := v.StreamName
-               if k == "." {
-                       buf.WriteString(".")
-               } else {
-                       k = strings.Replace(k, " ", "\\040", -1)
-                       k = strings.Replace(k, "\n", "", -1)
-                       buf.WriteString("./" + k)
-               }
-               if len(v.Blocks) > 0 {
-                       for _, b := range v.Blocks {
-                               buf.WriteString(" ")
-                               buf.WriteString(b)
-                       }
-               } else {
-                       buf.WriteString(" d41d8cd98f00b204e9800998ecf8427e+0")
-               }
-               for _, f := range v.FileStreamSegments {
-                       buf.WriteString(" ")
-                       name := strings.Replace(f.Name, " ", "\\040", -1)
-                       name = strings.Replace(name, "\n", "", -1)
-                       buf.WriteString(fmt.Sprintf("%v:%v:%v", f.SegPos, f.SegLen, name))
-               }
-               buf.WriteString("\n")
-       }
-       return buf.String(), nil
-}
-
-type WalkUpload struct {
-       MaxWriters  int
-       kc          IKeepClient
-       stripPrefix string
-       streamMap   map[string]*CollectionFileWriter
-       status      *log.Logger
-       workers     chan struct{}
-       mtx         sync.Mutex
-}
-
-func (m *WalkUpload) UploadFile(path string, sourcePath string) error {
-       var dir string
-       basename := filepath.Base(path)
-       if len(path) > (len(m.stripPrefix) + len(basename) + 1) {
-               dir = path[len(m.stripPrefix)+1 : (len(path) - len(basename) - 1)]
-       }
-       if dir == "" {
-               dir = "."
-       }
-
-       fn := path[(len(path) - len(basename)):]
-
-       info, err := os.Stat(sourcePath)
-       if err != nil {
-               return err
-       }
-       file, err := os.Open(sourcePath)
-       if err != nil {
-               return err
-       }
-       defer file.Close()
-
-       if m.streamMap[dir] == nil {
-               m.streamMap[dir] = &CollectionFileWriter{
-                       m.kc,
-                       &manifest.ManifestStream{StreamName: dir},
-                       0,
-                       0,
-                       nil,
-                       make(chan *Block),
-                       make(chan []error),
-                       ""}
-
-               m.mtx.Lock()
-               if m.workers == nil {
-                       if m.MaxWriters < 1 {
-                               m.MaxWriters = 2
-                       }
-                       m.workers = make(chan struct{}, m.MaxWriters)
-               }
-               m.mtx.Unlock()
-
-               go m.streamMap[dir].goUpload(m.workers)
-       }
-
-       fileWriter := m.streamMap[dir]
-
-       // Reset the CollectionFileWriter for a new file
-       fileWriter.NewFile(fn)
-
-       m.status.Printf("Uploading %v/%v (%v bytes)", dir, fn, info.Size())
-
-       _, err = io.Copy(fileWriter, file)
-       if err != nil {
-               m.status.Printf("Uh oh")
-               return err
-       }
-
-       // Commits the current file.  Legal to call this repeatedly.
-       fileWriter.Close()
-
-       return nil
-}
-
-func (cw *CollectionWriter) BeginUpload(root string, status *log.Logger) *WalkUpload {
-       streamMap := make(map[string]*CollectionFileWriter)
-       return &WalkUpload{0, cw.IKeepClient, root, streamMap, status, nil, sync.Mutex{}}
-}
-
-func (cw *CollectionWriter) EndUpload(wu *WalkUpload) {
-       cw.mtx.Lock()
-       for _, st := range wu.streamMap {
-               cw.Streams = append(cw.Streams, st)
-       }
-       cw.mtx.Unlock()
-}
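
The goUpload method above bounds concurrent block PUTs with a buffered channel used as a counting semaphore: a send reserves a worker slot before the upload goroutine starts, and a receive releases it when the PUT returns. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not part of the Arvados SDK):

    package main

    import (
        "fmt"
        "sync"
    )

    // uploadBlocks uploads each block with at most maxWorkers concurrent
    // calls to put, using a buffered channel as a counting semaphore.
    func uploadBlocks(blocks [][]byte, maxWorkers int, put func([]byte) error) []error {
        sem := make(chan struct{}, maxWorkers)
        var mtx sync.Mutex
        var wg sync.WaitGroup
        var errs []error
        for _, block := range blocks {
            sem <- struct{}{} // blocks until a worker slot is free
            wg.Add(1)
            go func(block []byte) {
                defer wg.Done()
                err := put(block)
                <-sem // release the slot
                if err != nil {
                    mtx.Lock()
                    errs = append(errs, err)
                    mtx.Unlock()
                }
            }(block)
        }
        wg.Wait()
        return errs
    }

    func main() {
        blocks := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
        errs := uploadBlocks(blocks, 2, func(b []byte) error {
            fmt.Printf("put %d bytes\n", len(b))
            return nil
        })
        fmt.Println("errors:", errs)
    }
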
diff --git a/services/crunch-run/upload_test.go b/services/crunch-run/upload_test.go
deleted file mode 100644 (file)
index 24333c3..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
-       "io/ioutil"
-       "log"
-       "os"
-       "path/filepath"
-       "sync"
-       "syscall"
-
-       . "gopkg.in/check.v1"
-)
-
-type UploadTestSuite struct{}
-
-// Gocheck boilerplate
-var _ = Suite(&UploadTestSuite{})
-
-func writeTree(cw *CollectionWriter, root string, status *log.Logger) (mt string, err error) {
-       walkUpload := cw.BeginUpload(root, status)
-
-       err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
-               info, _ = os.Stat(path)
-               if info.Mode().IsRegular() {
-                       return walkUpload.UploadFile(path, path)
-               }
-               return nil
-       })
-
-       cw.EndUpload(walkUpload)
-       if err != nil {
-               return "", err
-       }
-       mt, err = cw.ManifestText()
-       return
-}
-
-func (s *TestSuite) TestSimpleUpload(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-       c.Check(err, IsNil)
-       c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
-}
-
-func (s *TestSuite) TestUploadThreeFiles(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       for _, err := range []error{
-               ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600),
-               ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600),
-               os.Symlink("./file2.txt", tmpdir+"/file3.txt"),
-               syscall.Mkfifo(tmpdir+"/ignore.fifo", 0600),
-       } {
-               c.Assert(err, IsNil)
-       }
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, IsNil)
-       c.Check(str, Equals, ". aa65a413921163458c52fea478d5d3ee+9 0:3:file1.txt 3:3:file2.txt 6:3:file3.txt\n")
-}
-
-func (s *TestSuite) TestSimpleUploadSubdir(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       os.Mkdir(tmpdir+"/subdir", 0700)
-
-       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
-       ioutil.WriteFile(tmpdir+"/subdir/file2.txt", []byte("bar"), 0600)
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, IsNil)
-
-       // streams can get added in either order because of scheduling
-       // of goroutines.
-       if str != `. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt
-./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:file2.txt
-` && str != `./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:file2.txt
-. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt
-` {
-               c.Error("Did not get expected manifest text")
-       }
-}
-
-func (s *TestSuite) TestSimpleUploadLarge(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       file, _ := os.Create(tmpdir + "/" + "file1.txt")
-       data := make([]byte, 1024*1024-1)
-       for i := range data {
-               data[i] = byte(i % 10)
-       }
-       for i := 0; i < 65; i++ {
-               file.Write(data)
-       }
-       file.Close()
-
-       ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, IsNil)
-       c.Check(str, Equals, ". 00ecf01e0d93385115c9f8bed757425d+67108864 485cd630387b6b1846fe429f261ea05f+1048514 0:68157375:file1.txt 68157375:3:file2.txt\n")
-}
-
-func (s *TestSuite) TestUploadEmptySubdir(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       os.Mkdir(tmpdir+"/subdir", 0700)
-
-       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, IsNil)
-       c.Check(str, Equals, `. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt
-`)
-}
-
-func (s *TestSuite) TestUploadEmptyFile(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte(""), 0600)
-
-       kc := &KeepTestClient{}
-       defer kc.Close()
-       cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, IsNil)
-       c.Check(str, Equals, `. d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1.txt
-`)
-}
-
-func (s *TestSuite) TestUploadError(c *C) {
-       tmpdir, _ := ioutil.TempDir("", "")
-       defer func() {
-               os.RemoveAll(tmpdir)
-       }()
-
-       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
-
-       cw := CollectionWriter{0, &KeepErrorTestClient{}, nil, nil, sync.Mutex{}}
-       str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
-
-       c.Check(err, NotNil)
-       c.Check(str, Equals, "")
-}
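
The expected strings in these tests follow the Keep manifest format: a stream name, one or more block locators of the form <md5hex>+<size>, then file segments written as position:length:filename. A short sketch showing how the single-block line from TestSimpleUpload can be reproduced (assuming the whole file fits in one block):

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    func main() {
        data := []byte("foo")
        // Block locator: md5 of the block data, "+", block size in bytes.
        locator := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
        // Manifest line: stream name, locator(s), then offset:length:filename.
        line := fmt.Sprintf(". %s 0:%d:file1.txt\n", locator, len(data))
        fmt.Print(line) // ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt"
    }
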
index 34fd594be2dd6ce2793eebe1ac884c92aa8fdd58..769771e7beb52cafd5fa11033921973de501e54c 100644 (file)
@@ -609,12 +609,13 @@ class MagicDirectory(Directory):
     README_TEXT = """
 This directory provides access to Arvados collections as subdirectories listed
 by uuid (in the form 'zzzzz-4zz18-1234567890abcde') or portable data hash (in
-the form '1234567890abcdef0123456789abcdef+123').
+the form '1234567890abcdef0123456789abcdef+123'), and Arvados projects by uuid
+(in the form 'zzzzz-j7d0g-1234567890abcde').
 
 Note that this directory will appear empty until you attempt to access a
-specific collection subdirectory (such as trying to 'cd' into it), at which
-point the collection will actually be looked up on the server and the directory
-will appear if it exists.
+specific collection or project subdirectory (such as trying to 'cd' into it),
+at which point the collection or project will actually be looked up on the server
+and the directory will appear if it exists.
 
 """.lstrip()
 
@@ -645,8 +646,17 @@ will appear if it exists.
 
         try:
             e = None
-            e = self.inodes.add_entry(CollectionDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries, k))
+
+            if group_uuid_pattern.match(k):
+                project = self.api.groups().list(
+                    filters=[['group_class', '=', 'project'], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+                if project[u'items_available'] == 0:
+                    return False
+                e = self.inodes.add_entry(ProjectDirectory(
+                    self.inode, self.inodes, self.api, self.num_retries, project[u'items'][0]))
+            else:
+                e = self.inodes.add_entry(CollectionDirectory(
+                        self.inode, self.inodes, self.api, self.num_retries, k))
 
             if e.update():
                 if k not in self._entries:
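
This hunk dispatches MagicDirectory lookups on the shape of the name: anything matching the project (group) uuid pattern is resolved via a groups query, and everything else falls through to the collection lookup. A rough Go sketch of that classification step; the regexes below only approximate the Arvados identifier patterns (the canonical ones live in arvados.util):

    package main

    import (
        "fmt"
        "regexp"
    )

    var (
        // Approximations of the Arvados identifier patterns.
        groupUUID      = regexp.MustCompile(`^[a-z0-9]{5}-j7d0g-[a-z0-9]{15}$`)
        collectionUUID = regexp.MustCompile(`^[a-z0-9]{5}-4zz18-[a-z0-9]{15}$`)
        portableHash   = regexp.MustCompile(`^[0-9a-f]{32}\+\d+$`)
    )

    func classify(k string) string {
        switch {
        case groupUUID.MatchString(k):
            return "project"
        case collectionUUID.MatchString(k), portableHash.MatchString(k):
            return "collection"
        default:
            return "unknown"
        }
    }

    func main() {
        for _, k := range []string{
            "zzzzz-j7d0g-1234567890abcde",
            "zzzzz-4zz18-1234567890abcde",
            "1234567890abcdef0123456789abcdef+123",
        } {
            fmt.Println(k, "->", classify(k))
        }
    }
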
@@ -829,7 +839,7 @@ class ProjectDirectory(Directory):
             self.inodes.add_entry(self.project_object_file)
 
         if not self._full_listing:
-            return
+            return True
 
         def samefn(a, i):
             if isinstance(a, CollectionDirectory) or isinstance(a, ProjectDirectory):
@@ -865,6 +875,7 @@ class ProjectDirectory(Directory):
                        self.namefn,
                        samefn,
                        self.createDirectory)
+            return True
         finally:
             self._updating_lock.release()
 
index 9fd25863ed597822c41b54b40c7371e57ed24275..5a1aa809146db0f4b5a89e32390877963302e9e6 100644 (file)
@@ -39,9 +39,10 @@ setup(name='arvados_fuse',
       ],
       install_requires=[
         'arvados-python-client >= 0.1.20151118035730',
-        'llfuse>=1.2',
+        # llfuse 1.3.4 fails to install via pip
+        'llfuse >=1.2, <1.3.4',
         'python-daemon',
-        'ciso8601',
+        'ciso8601 >=1.0.6, <2.0.0',
         'setuptools'
         ],
       test_suite='tests',
index 7ee20f024d4f6c09f54235e33f802907994cf1a9..fb282d1aaa76a91cd58b9a5a15a28e7b263a1c4c 100644 (file)
@@ -121,6 +121,10 @@ class FuseMagicTest(MountTestBase):
     def setUp(self, api=None):
         super(FuseMagicTest, self).setUp(api=api)
 
+        self.test_project = run_test_server.fixture('groups')['aproject']['uuid']
+        self.non_project_group = run_test_server.fixture('groups')['public']['uuid']
+        self.collection_in_test_project = run_test_server.fixture('collections')['foo_collection_in_aproject']['name']
+
         cw = arvados.CollectionWriter()
 
         cw.start_new_file('thing1.txt')
@@ -139,15 +143,26 @@ class FuseMagicTest(MountTestBase):
         self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or
                              arvados.util.uuid_pattern.match(fn)
                              for fn in mount_ls),
-                         "new FUSE MagicDirectory lists Collection")
+                         "new FUSE MagicDirectory has no collections or projects")
         self.assertDirContents(self.testcollection, ['thing1.txt'])
         self.assertDirContents(os.path.join('by_id', self.testcollection),
                                ['thing1.txt'])
+        self.assertIn(self.collection_in_test_project,
+                      llfuse.listdir(os.path.join(self.mounttmp, self.test_project)))
+        self.assertIn(self.collection_in_test_project,
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.test_project)))
+
         mount_ls = llfuse.listdir(self.mounttmp)
         self.assertIn('README', mount_ls)
         self.assertIn(self.testcollection, mount_ls)
         self.assertIn(self.testcollection,
                       llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
+        self.assertIn(self.test_project, mount_ls)
+        self.assertIn(self.test_project, 
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
+
+        with self.assertRaises(OSError):
+            llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.non_project_group))
 
         files = {}
         files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
index 496fb884d433a5eea350d27818d5e72629e9242f..1d2ec47a6af41d39907d62105fcb898e462ff82a 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
 package main
 
 import (
@@ -37,7 +41,7 @@ func main() {
        if err != nil {
                log.Fatal(err)
        }
-       nodeCfg, err := clusterCfg.GetThisSystemNode()
+       nodeCfg, err := clusterCfg.GetNodeProfile("")
        if err != nil {
                log.Fatal(err)
        }
index 32f36e02980be3420ff04d0e2d4c91a0c591c676..d86234a936cc96702f3a79d12c10d04548c0faa2 100644 (file)
@@ -5,6 +5,8 @@
 package main
 
 import (
+       "bytes"
+       "crypto/md5"
        "fmt"
        "log"
        "math"
@@ -48,10 +50,14 @@ type Balancer struct {
        Dumper             *log.Logger
        MinMtime           int64
 
-       collScanned  int
-       serviceRoots map[string]string
-       errors       []error
-       mutex        sync.Mutex
+       classes       []string
+       mounts        int
+       mountsByClass map[string]map[*KeepMount]bool
+       collScanned   int
+       serviceRoots  map[string]string
+       errors        []error
+       stats         balancerStats
+       mutex         sync.Mutex
 }
 
 // Run performs a balance operation using the given config and
@@ -82,12 +88,14 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions R
        if err != nil {
                return
        }
+
        for _, srv := range bal.KeepServices {
                err = srv.discoverMounts(&config.Client)
                if err != nil {
                        return
                }
        }
+       bal.cleanupMounts()
 
        if err = bal.CheckSanityEarly(&config.Client); err != nil {
                return
@@ -162,6 +170,38 @@ func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) e
        })
 }
 
+func (bal *Balancer) cleanupMounts() {
+       rwdev := map[string]*KeepService{}
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       if !mnt.ReadOnly && mnt.DeviceID != "" {
+                               rwdev[mnt.DeviceID] = srv
+                       }
+               }
+       }
+       // Drop the readonly mounts whose device is mounted RW
+       // elsewhere.
+       for _, srv := range bal.KeepServices {
+               var dedup []*KeepMount
+               for _, mnt := range srv.mounts {
+                       if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
+                               bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
+                       } else {
+                               dedup = append(dedup, mnt)
+                       }
+               }
+               srv.mounts = dedup
+       }
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       if mnt.Replication <= 0 {
+                               log.Printf("%s: mount %s reports replication=%d, using replication=1", srv, mnt.UUID, mnt.Replication)
+                               mnt.Replication = 1
+                       }
+               }
+       }
+}
+
 // CheckSanityEarly checks for configuration and runtime errors that
 // can be detected before GetCurrentState() and ComputeChangeSets()
 // are called.
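
cleanupMounts handles volumes that are reachable through more than one keepstore server: if some server has a device mounted read-write, read-only mounts of the same device elsewhere are dropped from consideration, and mounts reporting replication <= 0 are normalized to 1. A reduced sketch of the same filtering, using a standalone mount type rather than the keep-balance structs:

    package main

    import "fmt"

    type mount struct {
        DeviceID    string
        ReadOnly    bool
        Replication int
    }

    // cleanup drops read-only mounts whose device is writable elsewhere
    // and clamps nonsensical replication values to 1.
    func cleanup(mounts []mount) []mount {
        writable := map[string]bool{}
        for _, m := range mounts {
            if !m.ReadOnly && m.DeviceID != "" {
                writable[m.DeviceID] = true
            }
        }
        var kept []mount
        for _, m := range mounts {
            if m.ReadOnly && writable[m.DeviceID] {
                continue // same device is mounted read-write elsewhere
            }
            if m.Replication <= 0 {
                m.Replication = 1
            }
            kept = append(kept, m)
        }
        return kept
    }

    func main() {
        fmt.Println(cleanup([]mount{
            {DeviceID: "dev1", ReadOnly: false, Replication: 1},
            {DeviceID: "dev1", ReadOnly: true, Replication: 1}, // dropped
            {DeviceID: "dev2", ReadOnly: true, Replication: 0}, // kept, repl -> 1
        }))
    }
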
@@ -242,32 +282,54 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
        errs := make(chan error, 2+len(bal.KeepServices))
        wg := sync.WaitGroup{}
 
-       // Start one goroutine for each KeepService: retrieve the
-       // index, and add the returned blocks to BlockStateMap.
+       // When a device is mounted more than once, we will get its
+       // index only once, and call AddReplicas on all of the mounts.
+       // equivMount keys are the mounts that will be indexed, and
+       // each value is a list of mounts to apply the received index
+       // to.
+       equivMount := map[*KeepMount][]*KeepMount{}
+       // deviceMount maps each device ID to the one mount that will
+       // be indexed for that device.
+       deviceMount := map[string]*KeepMount{}
        for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       equiv := deviceMount[mnt.DeviceID]
+                       if equiv == nil {
+                               equiv = mnt
+                               if mnt.DeviceID != "" {
+                                       deviceMount[mnt.DeviceID] = equiv
+                               }
+                       }
+                       equivMount[equiv] = append(equivMount[equiv], mnt)
+               }
+       }
+
+       // Start one goroutine for each (non-redundant) mount:
+       // retrieve the index, and add the returned blocks to
+       // BlockStateMap.
+       for _, mounts := range equivMount {
                wg.Add(1)
-               go func(srv *KeepService) {
+               go func(mounts []*KeepMount) {
                        defer wg.Done()
-                       bal.logf("%s: retrieve indexes", srv)
-                       for _, mount := range srv.mounts {
-                               bal.logf("%s: retrieve index", mount)
-                               idx, err := srv.IndexMount(c, mount.UUID, "")
-                               if err != nil {
-                                       errs <- fmt.Errorf("%s: retrieve index: %v", mount, err)
-                                       return
-                               }
-                               if len(errs) > 0 {
-                                       // Some other goroutine encountered an
-                                       // error -- any further effort here
-                                       // will be wasted.
-                                       return
-                               }
+                       bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
+                       idx, err := mounts[0].KeepService.IndexMount(c, mounts[0].UUID, "")
+                       if err != nil {
+                               errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err)
+                               return
+                       }
+                       if len(errs) > 0 {
+                               // Some other goroutine encountered an
+                               // error -- any further effort here
+                               // will be wasted.
+                               return
+                       }
+                       for _, mount := range mounts {
                                bal.logf("%s: add %d replicas to map", mount, len(idx))
                                bal.BlockStateMap.AddReplicas(mount, idx)
-                               bal.logf("%s: done", mount)
+                               bal.logf("%s: added %d replicas", mount, len(idx))
                        }
-                       bal.logf("%s: done", srv)
-               }(srv)
+                       bal.logf("mount %s: index done", mounts[0])
+               }(mounts)
        }
 
        // collQ buffers incoming collections so we can start fetching
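
With the equivMount/deviceMount tables above, each physical device is indexed once and the resulting block list is applied to every mount backed by that device. A small sketch of just the grouping step (the types are illustrative):

    package main

    import "fmt"

    type mnt struct{ UUID, DeviceID string }

    // groupByDevice returns one representative mount per device, each
    // mapped to all mounts sharing its backing device. Mounts with no
    // DeviceID form their own group.
    func groupByDevice(mounts []*mnt) map[*mnt][]*mnt {
        groups := map[*mnt][]*mnt{}
        byDevice := map[string]*mnt{}
        for _, m := range mounts {
            rep := byDevice[m.DeviceID]
            if rep == nil {
                rep = m
                if m.DeviceID != "" {
                    byDevice[m.DeviceID] = rep
                }
            }
            groups[rep] = append(groups[rep], m)
        }
        return groups
    }

    func main() {
        a := &mnt{"mount-a", "dev1"}
        b := &mnt{"mount-b", "dev1"} // same device as a, different server
        c := &mnt{"mount-c", ""}
        for rep, members := range groupByDevice([]*mnt{a, b, c}) {
            fmt.Println(rep.UUID, "indexes for", len(members), "mount(s)")
        }
    }
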
@@ -338,7 +400,7 @@ func (bal *Balancer) addCollection(coll arvados.Collection) error {
                repl = *coll.ReplicationDesired
        }
        debugf("%v: %d block x%d", coll.UUID, len(blkids), repl)
-       bal.BlockStateMap.IncreaseDesired(repl, blkids)
+       bal.BlockStateMap.IncreaseDesired(coll.StorageClassesDesired, repl, blkids)
        return nil
 }
 
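
addCollection now records desired replication per storage class instead of a single number. Assuming IncreaseDesired keeps, for each class, the maximum replication any collection has requested, with "default" standing in when a collection names no classes, the bookkeeping looks roughly like this:

    package main

    import "fmt"

    type blockState struct {
        Desired map[string]int // storage class -> desired replication (assumed shape)
    }

    // increaseDesired raises the desired replication for each class,
    // never lowering a level another collection already requested.
    func (bs *blockState) increaseDesired(classes []string, n int) {
        if bs.Desired == nil {
            bs.Desired = map[string]int{}
        }
        if len(classes) == 0 {
            classes = []string{"default"}
        }
        for _, c := range classes {
            if bs.Desired[c] < n {
                bs.Desired[c] = n
            }
        }
    }

    func main() {
        var bs blockState
        bs.increaseDesired(nil, 2)                 // plain collection
        bs.increaseDesired([]string{"archive"}, 1) // archive-class collection
        fmt.Println(bs.Desired)                    // map[archive:1 default:2]
    }
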
@@ -352,39 +414,75 @@ func (bal *Balancer) ComputeChangeSets() {
        // This just calls balanceBlock() once for each block, using a
        // pool of worker goroutines.
        defer timeMe(bal.Logger, "ComputeChangeSets")()
-       bal.setupServiceRoots()
+       bal.setupLookupTables()
 
        type balanceTask struct {
                blkid arvados.SizedDigest
                blk   *BlockState
        }
-       nWorkers := 1 + runtime.NumCPU()
-       todo := make(chan balanceTask, nWorkers)
-       var wg sync.WaitGroup
-       for i := 0; i < nWorkers; i++ {
-               wg.Add(1)
-               go func() {
-                       for work := range todo {
-                               bal.balanceBlock(work.blkid, work.blk)
+       workers := runtime.GOMAXPROCS(-1)
+       todo := make(chan balanceTask, workers)
+       go func() {
+               bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
+                       todo <- balanceTask{
+                               blkid: blkid,
+                               blk:   blk,
                        }
-                       wg.Done()
-               }()
-       }
-       bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
-               todo <- balanceTask{
-                       blkid: blkid,
-                       blk:   blk,
+               })
+               close(todo)
+       }()
+       results := make(chan balanceResult, workers)
+       go func() {
+               var wg sync.WaitGroup
+               for i := 0; i < workers; i++ {
+                       wg.Add(1)
+                       go func() {
+                               for work := range todo {
+                                       results <- bal.balanceBlock(work.blkid, work.blk)
+                               }
+                               wg.Done()
+                       }()
                }
-       })
-       close(todo)
-       wg.Wait()
+               wg.Wait()
+               close(results)
+       }()
+       bal.collectStatistics(results)
 }
 
-func (bal *Balancer) setupServiceRoots() {
+func (bal *Balancer) setupLookupTables() {
        bal.serviceRoots = make(map[string]string)
+       bal.classes = []string{"default"}
+       bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
+       bal.mounts = 0
        for _, srv := range bal.KeepServices {
                bal.serviceRoots[srv.UUID] = srv.UUID
+               for _, mnt := range srv.mounts {
+                       bal.mounts++
+
+                       // All mounts on a read-only service are
+                       // effectively read-only.
+                       mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+
+                       if len(mnt.StorageClasses) == 0 {
+                               bal.mountsByClass["default"][mnt] = true
+                               continue
+                       }
+                       for _, class := range mnt.StorageClasses {
+                               if mbc := bal.mountsByClass[class]; mbc == nil {
+                                       bal.classes = append(bal.classes, class)
+                                       bal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}
+                               } else {
+                                       mbc[mnt] = true
+                               }
+                       }
+               }
        }
+       // Consider classes in lexicographic order to avoid flapping
+       // between balancing runs.  The outcome of the "prefer a mount
+       // we're already planning to use for a different storage
+       // class" case in balanceBlock depends on the order classes
+       // are considered.
+       sort.Strings(bal.classes)
 }
 
 const (
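
setupLookupTables groups mounts by storage class, files mounts with no declared classes under "default", and sorts the class names so successive balancing runs consider them in a stable order. A stripped-down sketch of that grouping (the mount type is illustrative):

    package main

    import (
        "fmt"
        "sort"
    )

    type mount struct {
        UUID           string
        StorageClasses []string
    }

    func classesOf(mounts []*mount) (classes []string, byClass map[string]map[*mount]bool) {
        byClass = map[string]map[*mount]bool{"default": {}}
        classes = []string{"default"}
        for _, m := range mounts {
            if len(m.StorageClasses) == 0 {
                byClass["default"][m] = true
                continue
            }
            for _, c := range m.StorageClasses {
                if byClass[c] == nil {
                    classes = append(classes, c)
                    byClass[c] = map[*mount]bool{}
                }
                byClass[c][m] = true
            }
        }
        sort.Strings(classes) // stable order across balancing runs
        return
    }

    func main() {
        classes, _ := classesOf([]*mount{
            {UUID: "m1"},
            {UUID: "m2", StorageClasses: []string{"archive"}},
        })
        fmt.Println(classes) // [archive default]
    }
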
@@ -401,129 +499,276 @@ var changeName = map[int]string{
        changeNone:  "none",
 }
 
+type balanceResult struct {
+       blk        *BlockState
+       blkid      arvados.SizedDigest
+       have       int
+       want       int
+       classState map[string]balancedBlockState
+}
+
 // balanceBlock compares current state to desired state for a single
 // block, and makes the appropriate ChangeSet calls.
-func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
+func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
        debugf("balanceBlock: %v %+v", blkid, blk)
 
-       // A slot is somewhere a replica could potentially be trashed
-       // from, pulled from, or pulled to. Each KeepService gets
-       // either one empty slot, or one or more non-empty slots.
        type slot struct {
-               srv  *KeepService // never nil
-               repl *Replica     // nil if none found
+               mnt  *KeepMount // never nil
+               repl *Replica   // replica already stored here (or nil)
+               want bool       // we should pull/leave a replica here
        }
 
-       // First, we build an ordered list of all slots worth
-       // considering (including all slots where replicas have been
-       // found, as well as all of the optimal slots for this block).
-       // Then, when we consider each slot in that order, we will
-       // have all of the information we need to make a decision
-       // about that slot.
+       // Build a list of all slots (one per mounted volume).
+       slots := make([]slot, 0, bal.mounts)
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       var repl *Replica
+                       for r := range blk.Replicas {
+                               if blk.Replicas[r].KeepMount == mnt {
+                                       repl = &blk.Replicas[r]
+                               }
+                       }
+                       // Initial value of "want" is "have, and can't
+                       // delete". These untrashable replicas get
+                       // prioritized when sorting slots: otherwise,
+                       // non-optimal readonly copies would cause us
+                       // to overreplicate.
+                       slots = append(slots, slot{
+                               mnt:  mnt,
+                               repl: repl,
+                               want: repl != nil && (mnt.ReadOnly || repl.Mtime >= bal.MinMtime),
+                       })
+               }
+       }
 
        uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
-       rendezvousOrder := make(map[*KeepService]int, len(uuids))
-       slots := make([]slot, len(uuids))
+       srvRendezvous := make(map[*KeepService]int, len(uuids))
        for i, uuid := range uuids {
                srv := bal.KeepServices[uuid]
-               rendezvousOrder[srv] = i
-               slots[i].srv = srv
-       }
-
-       // Sort readonly replicas ahead of trashable ones. This way,
-       // if a single service has excessive replicas, the ones we
-       // encounter last (and therefore choose to delete) will be on
-       // the writable volumes, where possible.
-       //
-       // TODO: within the trashable set, prefer the oldest replica
-       // that doesn't have a timestamp collision with others.
-       sort.Slice(blk.Replicas, func(i, j int) bool {
-               mnt := blk.Replicas[i].KeepMount
-               return mnt.ReadOnly || mnt.KeepService.ReadOnly
-       })
+               srvRendezvous[srv] = i
+       }
+
+       // Below we set underreplicated=true if we find any storage
+       // class that's currently underreplicated -- in that case we
+       // won't want to trash any replicas.
+       underreplicated := false
+
+       classState := make(map[string]balancedBlockState, len(bal.classes))
+       unsafeToDelete := make(map[int64]bool, len(slots))
+       for _, class := range bal.classes {
+               desired := blk.Desired[class]
+
+               countedDev := map[string]bool{}
+               have := 0
+               for _, slot := range slots {
+                       if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
+                               have += slot.mnt.Replication
+                               if slot.mnt.DeviceID != "" {
+                                       countedDev[slot.mnt.DeviceID] = true
+                               }
+                       }
+               }
+               classState[class] = balancedBlockState{
+                       desired: desired,
+                       surplus: have - desired,
+               }
+
+               if desired == 0 {
+                       continue
+               }
+
+               // Sort the slots by desirability.
+               sort.Slice(slots, func(i, j int) bool {
+                       si, sj := slots[i], slots[j]
+                       if classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {
+                               // Prefer a mount that satisfies the
+                               // desired class.
+                               return bal.mountsByClass[class][si.mnt]
+                       } else if wanti, wantj := si.want, sj.want; wanti != wantj {
+                               // Prefer a mount that will have a
+                               // replica no matter what we do here
+                               // -- either because it already has an
+                               // untrashable replica, or because we
+                               // already need it to satisfy a
+                               // different storage class.
+                               return slots[i].want
+                       } else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
+                               // Prefer a better rendezvous
+                               // position.
+                               return orderi < orderj
+                       } else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {
+                               // Prefer a mount that already has a
+                               // replica.
+                               return repli
+                       } else {
+                               // If pull/trash turns out to be
+                               // needed, distribute the
+                               // new/remaining replicas uniformly
+                               // across qualifying mounts on a given
+                               // server.
+                               return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
+                       }
+               })
+
+               // Servers/mounts/devices (with or without existing
+               // replicas) that are part of the best achievable
+               // layout for this storage class.
+               wantSrv := map[*KeepService]bool{}
+               wantMnt := map[*KeepMount]bool{}
+               wantDev := map[string]bool{}
+               // Positions (with existing replicas) that have been
+               // protected (via unsafeToDelete) to ensure we don't
+               // reduce replication below desired level when
+               // trashing replicas that aren't optimal positions for
+               // any storage class.
+               protMnt := map[*KeepMount]bool{}
+               // Replication planned so far (corresponds to wantMnt).
+               replWant := 0
+               // Protected replication (corresponds to protMnt).
+               replProt := 0
+
+               // trySlot tries using a slot to meet requirements,
+               // and returns true if all requirements are met.
+               trySlot := func(i int) bool {
+                       slot := slots[i]
+                       if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
+                               // Already allocated a replica to this
+                               // backend device, possibly on a
+                               // different server.
+                               return false
+                       }
+                       if replProt < desired && slot.repl != nil && !protMnt[slot.mnt] {
+                               unsafeToDelete[slot.repl.Mtime] = true
+                               protMnt[slot.mnt] = true
+                               replProt += slot.mnt.Replication
+                       }
+                       if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+                               slots[i].want = true
+                               wantSrv[slot.mnt.KeepService] = true
+                               wantMnt[slot.mnt] = true
+                               if slot.mnt.DeviceID != "" {
+                                       wantDev[slot.mnt.DeviceID] = true
+                               }
+                               replWant += slot.mnt.Replication
+                       }
+                       return replProt >= desired && replWant >= desired
+               }
+
+               // First try to achieve desired replication without
+               // using the same server twice.
+               done := false
+               for i := 0; i < len(slots) && !done; i++ {
+                       if !wantSrv[slots[i].mnt.KeepService] {
+                               done = trySlot(i)
+                       }
+               }
+
+               // If that didn't suffice, do another pass without the
+               // "distinct services" restriction. (Achieving the
+               // desired volume replication on fewer than the
+               // desired number of services is better than
+               // underreplicating.)
+               for i := 0; i < len(slots) && !done; i++ {
+                       done = trySlot(i)
+               }
+
+               if !underreplicated {
+                       safe := 0
+                       for _, slot := range slots {
+                               if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
+                                       continue
+                               }
+                               if safe += slot.mnt.Replication; safe >= desired {
+                                       break
+                               }
+                       }
+                       underreplicated = safe < desired
+               }
+
+               // set the unachievable flag if there aren't enough
+               // slots offering the relevant storage class. (This is
+               // as easy as checking slots[desired] because we
+               // already sorted the qualifying slots to the front.)
+               if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
+                       cs := classState[class]
+                       cs.unachievable = true
+                       classState[class] = cs
+               }
+
+               // Avoid deleting wanted replicas from devices that
+               // are mounted on multiple servers -- even if they
+               // haven't already been added to unsafeToDelete
+               // because the servers report different Mtimes.
+               for _, slot := range slots {
+                       if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
+                               unsafeToDelete[slot.repl.Mtime] = true
+                       }
+               }
+       }
+
+       // TODO: If multiple replicas are trashable, prefer the oldest
+       // replica that doesn't have a timestamp collision with
+       // others.
+
+       countedDev := map[string]bool{}
+       var have, want int
+       for _, slot := range slots {
+               if countedDev[slot.mnt.DeviceID] {
+                       continue
+               }
+               if slot.want {
+                       want += slot.mnt.Replication
+               }
+               if slot.repl != nil {
+                       have += slot.mnt.Replication
+               }
+               if slot.mnt.DeviceID != "" {
+                       countedDev[slot.mnt.DeviceID] = true
+               }
+       }
 
-       // Assign existing replicas to slots.
-       for ri := range blk.Replicas {
-               repl := &blk.Replicas[ri]
-               srv := repl.KeepService
-               slotIdx := rendezvousOrder[srv]
-               if slots[slotIdx].repl != nil {
-                       // Additional replicas on a single server are
-                       // considered non-optimal. Within this
-                       // category, we don't try to optimize layout:
-                       // we just say the optimal order is the order
-                       // we encounter them.
-                       slotIdx = len(slots)
-                       slots = append(slots, slot{srv: srv})
-               }
-               slots[slotIdx].repl = repl
-       }
-
-       // number of replicas already found in positions better than
-       // the position we're contemplating now.
-       reportedBestRepl := 0
-       // To be safe we assume two replicas with the same Mtime are
-       // in fact the same replica being reported more than
-       // once. len(uniqueBestRepl) is the number of distinct
-       // replicas in the best rendezvous positions we've considered
-       // so far.
-       uniqueBestRepl := make(map[int64]bool, len(bal.serviceRoots))
-       // pulls is the number of Pull changes we have already
-       // requested. (For purposes of deciding whether to Pull to
-       // rendezvous position N, we should assume all pulls we have
-       // requested on rendezvous positions M<N will be successful.)
-       pulls := 0
        var changes []string
        for _, slot := range slots {
-               change := changeNone
-               srv, repl := slot.srv, slot.repl
                // TODO: request a Touch if Mtime is duplicated.
-               if repl != nil {
-                       // This service has a replica. We should
-                       // delete it if [1] we already have enough
-                       // distinct replicas in better rendezvous
-                       // positions and [2] this replica's Mtime is
-                       // distinct from all of the better replicas'
-                       // Mtimes.
-                       if !srv.ReadOnly &&
-                               !repl.KeepMount.ReadOnly &&
-                               repl.Mtime < bal.MinMtime &&
-                               len(uniqueBestRepl) >= blk.Desired &&
-                               !uniqueBestRepl[repl.Mtime] {
-                               srv.AddTrash(Trash{
-                                       SizedDigest: blkid,
-                                       Mtime:       repl.Mtime,
-                               })
-                               change = changeTrash
-                       } else {
-                               change = changeStay
-                       }
-                       uniqueBestRepl[repl.Mtime] = true
-                       reportedBestRepl++
-               } else if pulls+reportedBestRepl < blk.Desired &&
-                       len(blk.Replicas) > 0 &&
-                       !srv.ReadOnly {
-                       // This service doesn't have a replica. We
-                       // should pull one to this server if we don't
-                       // already have enough (existing+requested)
-                       // replicas in better rendezvous positions.
-                       srv.AddPull(Pull{
+               var change int
+               switch {
+               case !underreplicated && slot.repl != nil && !slot.want && !unsafeToDelete[slot.repl.Mtime]:
+                       slot.mnt.KeepService.AddTrash(Trash{
+                               SizedDigest: blkid,
+                               Mtime:       slot.repl.Mtime,
+                               From:        slot.mnt,
+                       })
+                       change = changeTrash
+               case len(blk.Replicas) == 0:
+                       change = changeNone
+               case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+                       slot.mnt.KeepService.AddPull(Pull{
                                SizedDigest: blkid,
-                               Source:      blk.Replicas[0].KeepService,
+                               From:        blk.Replicas[0].KeepMount.KeepService,
+                               To:          slot.mnt,
                        })
-                       pulls++
                        change = changePull
+               default:
+                       change = changeStay
                }
                if bal.Dumper != nil {
                        var mtime int64
-                       if repl != nil {
-                               mtime = repl.Mtime
+                       if slot.repl != nil {
+                               mtime = slot.repl.Mtime
                        }
-                       changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], mtime))
+                       srv := slot.mnt.KeepService
+                       changes = append(changes, fmt.Sprintf("%s:%d/%s=%s,%d", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))
                }
        }
        if bal.Dumper != nil {
-               bal.Dumper.Printf("%s have=%d want=%d %s", blkid, len(blk.Replicas), blk.Desired, strings.Join(changes, " "))
+               bal.Dumper.Printf("%s have=%d want=%v %s", blkid, have, want, strings.Join(changes, " "))
+       }
+       return balanceResult{
+               blk:        blk,
+               blkid:      blkid,
+               have:       have,
+               want:       want,
+               classState: classState,
        }
 }
 
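
The slot sort inside balanceBlock ranks candidate mounts by a chain of tie-breakers: satisfies the storage class, already guaranteed a replica, better server rendezvous rank, already holds a replica, and finally the per-device rendezvous hash. A compact sketch of such a tiered comparator (the final device-level tie-breaker is omitted, and the slot type is simplified):

    package main

    import (
        "fmt"
        "sort"
    )

    type slot struct {
        classMatch bool // mount satisfies the storage class being balanced
        want       bool // a replica will be kept here regardless
        rendezvous int  // server's rendezvous rank for this block
        hasReplica bool
    }

    // orderSlots sorts candidate slots using the same tiers as
    // balanceBlock's sort.Slice comparator.
    func orderSlots(slots []slot) {
        sort.Slice(slots, func(i, j int) bool {
            si, sj := slots[i], slots[j]
            if si.classMatch != sj.classMatch {
                return si.classMatch
            }
            if si.want != sj.want {
                return si.want
            }
            if si.rendezvous != sj.rendezvous {
                return si.rendezvous < sj.rendezvous
            }
            return si.hasReplica && !sj.hasReplica
        })
    }

    func main() {
        slots := []slot{
            {classMatch: true, rendezvous: 3},
            {classMatch: true, rendezvous: 1, hasReplica: true},
            {classMatch: false, rendezvous: 0},
        }
        orderSlots(slots)
        fmt.Printf("%+v\n", slots)
    }
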
@@ -538,29 +783,77 @@ func (bb blocksNBytes) String() string {
 }
 
 type balancerStats struct {
-       lost, overrep, unref, garbage, underrep, justright blocksNBytes
-       desired, current                                   blocksNBytes
-       pulls, trashes                                     int
-       replHistogram                                      []int
+       lost          blocksNBytes
+       overrep       blocksNBytes
+       unref         blocksNBytes
+       garbage       blocksNBytes
+       underrep      blocksNBytes
+       unachievable  blocksNBytes
+       justright     blocksNBytes
+       desired       blocksNBytes
+       current       blocksNBytes
+       pulls         int
+       trashes       int
+       replHistogram []int
+       classStats    map[string]replicationStats
+}
+
+type replicationStats struct {
+       desired      blocksNBytes
+       surplus      blocksNBytes
+       short        blocksNBytes
+       unachievable blocksNBytes
 }
 
-func (bal *Balancer) getStatistics() (s balancerStats) {
+type balancedBlockState struct {
+       desired      int
+       surplus      int
+       unachievable bool
+}
+
+func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
+       var s balancerStats
        s.replHistogram = make([]int, 2)
-       bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
-               surplus := len(blk.Replicas) - blk.Desired
-               bytes := blkid.Size()
+       s.classStats = make(map[string]replicationStats, len(bal.classes))
+       for result := range results {
+               surplus := result.have - result.want
+               bytes := result.blkid.Size()
+
+               for class, state := range result.classState {
+                       cs := s.classStats[class]
+                       if state.unachievable {
+                               cs.unachievable.blocks++
+                               cs.unachievable.bytes += bytes
+                       }
+                       if state.desired > 0 {
+                               cs.desired.replicas += state.desired
+                               cs.desired.blocks++
+                               cs.desired.bytes += bytes * int64(state.desired)
+                       }
+                       if state.surplus > 0 {
+                               cs.surplus.replicas += state.surplus
+                               cs.surplus.blocks++
+                               cs.surplus.bytes += bytes * int64(state.surplus)
+                       } else if state.surplus < 0 {
+                               cs.short.replicas += -state.surplus
+                               cs.short.blocks++
+                               cs.short.bytes += bytes * int64(-state.surplus)
+                       }
+                       s.classStats[class] = cs
+               }
+
                switch {
-               case len(blk.Replicas) == 0 && blk.Desired > 0:
+               case result.have == 0 && result.want > 0:
                        s.lost.replicas -= surplus
                        s.lost.blocks++
                        s.lost.bytes += bytes * int64(-surplus)
-               case len(blk.Replicas) < blk.Desired:
+               case surplus < 0:
                        s.underrep.replicas -= surplus
                        s.underrep.blocks++
                        s.underrep.bytes += bytes * int64(-surplus)
-               case len(blk.Replicas) > 0 && blk.Desired == 0:
+               case surplus > 0 && result.want == 0:
                        counter := &s.garbage
-                       for _, r := range blk.Replicas {
+                       for _, r := range result.blk.Replicas {
                                if r.Mtime >= bal.MinMtime {
                                        counter = &s.unref
                                        break
@@ -569,67 +862,74 @@ func (bal *Balancer) getStatistics() (s balancerStats) {
                        counter.replicas += surplus
                        counter.blocks++
                        counter.bytes += bytes * int64(surplus)
-               case len(blk.Replicas) > blk.Desired:
+               case surplus > 0:
                        s.overrep.replicas += surplus
                        s.overrep.blocks++
-                       s.overrep.bytes += bytes * int64(len(blk.Replicas)-blk.Desired)
+                       s.overrep.bytes += bytes * int64(result.have-result.want)
                default:
-                       s.justright.replicas += blk.Desired
+                       s.justright.replicas += result.want
                        s.justright.blocks++
-                       s.justright.bytes += bytes * int64(blk.Desired)
+                       s.justright.bytes += bytes * int64(result.want)
                }
 
-               if blk.Desired > 0 {
-                       s.desired.replicas += blk.Desired
+               if result.want > 0 {
+                       s.desired.replicas += result.want
                        s.desired.blocks++
-                       s.desired.bytes += bytes * int64(blk.Desired)
+                       s.desired.bytes += bytes * int64(result.want)
                }
-               if len(blk.Replicas) > 0 {
-                       s.current.replicas += len(blk.Replicas)
+               if result.have > 0 {
+                       s.current.replicas += result.have
                        s.current.blocks++
-                       s.current.bytes += bytes * int64(len(blk.Replicas))
+                       s.current.bytes += bytes * int64(result.have)
                }
 
-               for len(s.replHistogram) <= len(blk.Replicas) {
+               for len(s.replHistogram) <= result.have {
                        s.replHistogram = append(s.replHistogram, 0)
                }
-               s.replHistogram[len(blk.Replicas)]++
-       })
+               s.replHistogram[result.have]++
+       }
        for _, srv := range bal.KeepServices {
                s.pulls += len(srv.ChangeSet.Pulls)
                s.trashes += len(srv.ChangeSet.Trashes)
        }
-       return
+       bal.stats = s
 }
 
 // PrintStatistics writes statistics about the computed changes to
 // bal.Logger. It should not be called until ComputeChangeSets has
 // finished.
 func (bal *Balancer) PrintStatistics() {
-       s := bal.getStatistics()
        bal.logf("===")
-       bal.logf("%s lost (0=have<want)", s.lost)
-       bal.logf("%s underreplicated (0<have<want)", s.underrep)
-       bal.logf("%s just right (have=want)", s.justright)
-       bal.logf("%s overreplicated (have>want>0)", s.overrep)
-       bal.logf("%s unreferenced (have>want=0, new)", s.unref)
-       bal.logf("%s garbage (have>want=0, old)", s.garbage)
+       bal.logf("%s lost (0=have<want)", bal.stats.lost)
+       bal.logf("%s underreplicated (0<have<want)", bal.stats.underrep)
+       bal.logf("%s just right (have=want)", bal.stats.justright)
+       bal.logf("%s overreplicated (have>want>0)", bal.stats.overrep)
+       bal.logf("%s unreferenced (have>want=0, new)", bal.stats.unref)
+       bal.logf("%s garbage (have>want=0, old)", bal.stats.garbage)
+       for _, class := range bal.classes {
+               cs := bal.stats.classStats[class]
+               bal.logf("===")
+               bal.logf("storage class %q: %s desired", class, cs.desired)
+               bal.logf("storage class %q: %s short", class, cs.short)
+               bal.logf("storage class %q: %s surplus", class, cs.surplus)
+               bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
+       }
        bal.logf("===")
-       bal.logf("%s total commitment (excluding unreferenced)", s.desired)
-       bal.logf("%s total usage", s.current)
+       bal.logf("%s total commitment (excluding unreferenced)", bal.stats.desired)
+       bal.logf("%s total usage", bal.stats.current)
        bal.logf("===")
        for _, srv := range bal.KeepServices {
                bal.logf("%s: %v\n", srv, srv.ChangeSet)
        }
        bal.logf("===")
-       bal.printHistogram(s, 60)
+       bal.printHistogram(60)
        bal.logf("===")
 }
 
-func (bal *Balancer) printHistogram(s balancerStats, hashColumns int) {
+func (bal *Balancer) printHistogram(hashColumns int) {
        bal.logf("Replication level distribution (counting N replicas on a single server as N):")
        maxCount := 0
-       for _, count := range s.replHistogram {
+       for _, count := range bal.stats.replHistogram {
                if maxCount < count {
                        maxCount = count
                }
@@ -637,7 +937,7 @@ func (bal *Balancer) printHistogram(s balancerStats, hashColumns int) {
        hashes := strings.Repeat("#", hashColumns)
        countWidth := 1 + int(math.Log10(float64(maxCount+1)))
        scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
-       for repl, count := range s.replHistogram {
+       for repl, count := range bal.stats.replHistogram {
                nHashes := int(scaleCount * math.Log10(float64(count+1)))
                bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
        }
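
printHistogram scales bar lengths logarithmically so the widest row never exceeds the column budget: a count's bar is proportional to log10(count+1), normalized against log10(maxCount+1). A tiny sketch of that scaling:

    package main

    import (
        "fmt"
        "math"
        "strings"
    )

    // logBar scales counts logarithmically so no histogram row exceeds
    // width columns, mirroring printHistogram above.
    func logBar(count, maxCount, width int) string {
        scale := 10 * float64(width) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
        n := int(scale * math.Log10(float64(count+1)))
        return strings.Repeat("#", n)
    }

    func main() {
        for _, c := range []int{0, 9, 99, 999} {
            fmt.Printf("%4d %s\n", c, logBar(c, 999, 60))
        }
    }
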
@@ -661,8 +961,11 @@ func (bal *Balancer) CheckSanityLate() error {
 
        anyDesired := false
        bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
-               if blk.Desired > 0 {
-                       anyDesired = true
+               for _, desired := range blk.Desired {
+                       if desired > 0 {
+                               anyDesired = true
+                               break
+                       }
                }
        })
        if !anyDesired {
@@ -729,3 +1032,11 @@ func (bal *Balancer) logf(f string, args ...interface{}) {
                bal.Logger.Printf(f, args...)
        }
 }
+
+// rendezvousLess is a sort comparator that puts server identifiers i
+// and j in rendezvous order for the given block. It is less efficient
+// than sorting on precomputed rendezvous hashes, but it is also
+// rarely used.
+func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
+       a := md5.Sum([]byte(string(blkid[:32]) + i))
+       b := md5.Sum([]byte(string(blkid[:32]) + j))
+       return bytes.Compare(a[:], b[:]) < 0
+}
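
As a standalone illustration of how this comparator yields a per-block probe order (the block locator and server UUIDs below are made up, and SizedDigest is simplified to a plain string):

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"sort"
)

// Same comparison as rendezvousLess above, with the blkid parameter
// reduced to a plain string for this sketch.
func rendezvousLess(i, j string, blkid string) bool {
	a := md5.Sum([]byte(blkid[:32] + i))
	b := md5.Sum([]byte(blkid[:32] + j))
	return bytes.Compare(a[:], b[:]) < 0
}

func main() {
	blkid := "acbd18db4cc2f85cedef654fccc4a4d8+3" // hypothetical locator
	srvs := []string{
		"zzzzz-bi6l4-000000000000001",
		"zzzzz-bi6l4-000000000000002",
		"zzzzz-bi6l4-000000000000003",
	}
	sort.Slice(srvs, func(x, y int) bool { return rendezvousLess(srvs[x], srvs[y], blkid) })
	fmt.Println(srvs) // servers in rendezvous (probe) order for this block
}
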
index 08cfcce5849e4bb98440f40a8e241fefce74b352..28776abc47c600ce8540949d8b6fdd7ed63708ff 100644 (file)
@@ -413,10 +413,9 @@ func (s *runSuite) TestDryRun(c *check.C) {
        }
        c.Check(trashReqs.Count(), check.Equals, 0)
        c.Check(pullReqs.Count(), check.Equals, 0)
-       stats := bal.getStatistics()
-       c.Check(stats.pulls, check.Not(check.Equals), 0)
-       c.Check(stats.underrep.replicas, check.Not(check.Equals), 0)
-       c.Check(stats.overrep.replicas, check.Not(check.Equals), 0)
+       c.Check(bal.stats.pulls, check.Not(check.Equals), 0)
+       c.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)
+       c.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)
 }
 
 func (s *runSuite) TestCommit(c *check.C) {
@@ -438,12 +437,11 @@ func (s *runSuite) TestCommit(c *check.C) {
        c.Check(err, check.IsNil)
        c.Check(trashReqs.Count(), check.Equals, 8)
        c.Check(pullReqs.Count(), check.Equals, 4)
-       stats := bal.getStatistics()
        // "foo" block is overreplicated by 2
-       c.Check(stats.trashes, check.Equals, 2)
+       c.Check(bal.stats.trashes, check.Equals, 2)
        // "bar" block is underreplicated by 1, and its only copy is
        // in a poor rendezvous position
-       c.Check(stats.pulls, check.Equals, 2)
+       c.Check(bal.stats.pulls, check.Equals, 2)
 }
 
 func (s *runSuite) TestRunForever(c *check.C) {
index 167e8741dba3ed25d1f7ae8c51a89bebf277f3d9..2e664bedfb19fe8054d39083e6ee4f5cf6e477c6 100644 (file)
@@ -41,11 +41,16 @@ type slots []int
 
 type tester struct {
        known       int
-       desired     int
+       desired     map[string]int
        current     slots
        timestamps  []int64
        shouldPull  slots
        shouldTrash slots
+
+       shouldPullMounts  []string
+       shouldTrashMounts []string
+
+       expectResult balanceResult
 }
 
 func (bal *balancerSuite) SetUpSuite(c *check.C) {
@@ -76,17 +81,23 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                                UUID: fmt.Sprintf("zzzzz-bi6l4-%015x", i),
                        },
                }
-               srv.mounts = []*KeepMount{{KeepMount: arvados.KeepMount{UUID: fmt.Sprintf("mount-%015x", i)}, KeepService: srv}}
+               srv.mounts = []*KeepMount{{
+                       KeepMount: arvados.KeepMount{
+                               UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
+                       },
+                       KeepService: srv,
+               }}
                bal.srvs[i] = srv
                bal.KeepServices[srv.UUID] = srv
        }
 
        bal.MinMtime = time.Now().UnixNano() - bal.signatureTTL*1e9
+       bal.cleanupMounts()
 }
 
 func (bal *balancerSuite) TestPerfect(c *check.C) {
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{0, 1},
                shouldPull:  nil,
                shouldTrash: nil})
@@ -94,21 +105,21 @@ func (bal *balancerSuite) TestPerfect(c *check.C) {
 
 func (bal *balancerSuite) TestDecreaseRepl(c *check.C) {
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{0, 2, 1},
                shouldTrash: slots{2}})
 }
 
 func (bal *balancerSuite) TestDecreaseReplToZero(c *check.C) {
        bal.try(c, tester{
-               desired:     0,
+               desired:     map[string]int{"default": 0},
                current:     slots{0, 1, 3},
                shouldTrash: slots{0, 1, 3}})
 }
 
 func (bal *balancerSuite) TestIncreaseRepl(c *check.C) {
        bal.try(c, tester{
-               desired:    4,
+               desired:    map[string]int{"default": 4},
                current:    slots{0, 1},
                shouldPull: slots{2, 3}})
 }
@@ -116,77 +127,83 @@ func (bal *balancerSuite) TestIncreaseRepl(c *check.C) {
 func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
        bal.srvList(0, slots{3})[0].ReadOnly = true
        bal.try(c, tester{
-               desired:    4,
+               desired:    map[string]int{"default": 4},
                current:    slots{0, 1},
                shouldPull: slots{2, 4}})
 }
 
 func (bal *balancerSuite) TestFixUnbalanced(c *check.C) {
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{2, 0},
                shouldPull: slots{1}})
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{2, 7},
                shouldPull: slots{0, 1}})
        // if only one of the pulls succeeds, we'll see this next:
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{2, 1, 7},
                shouldPull:  slots{0},
                shouldTrash: slots{7}})
        // if both pulls succeed, we'll see this next:
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{2, 0, 1, 7},
                shouldTrash: slots{2, 7}})
 
        // unbalanced + excessive replication => pull + trash
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{2, 5, 7},
                shouldPull:  slots{0, 1},
                shouldTrash: slots{7}})
 }
 
 func (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {
+       for _, srv := range bal.srvs {
+               for i := 0; i < 3; i++ {
+                       m := *(srv.mounts[0])
+                       srv.mounts = append(srv.mounts, &m)
+               }
+       }
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{0, 0},
                shouldPull: slots{1}})
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{2, 2},
                shouldPull: slots{0, 1}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{0, 0, 1},
                shouldTrash: slots{0}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{1, 1, 0},
                shouldTrash: slots{1}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{1, 0, 1, 0, 2},
                shouldTrash: slots{0, 1, 2}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{1, 1, 1, 0, 2},
                shouldTrash: slots{1, 1, 2}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{1, 1, 2},
                shouldPull:  slots{0},
                shouldTrash: slots{1}})
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{1, 1, 0},
                timestamps:  []int64{12345678, 12345678, 12345679},
                shouldTrash: nil})
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{1, 1},
                shouldPull: slots{0}})
 }
@@ -195,7 +212,7 @@ func (bal *balancerSuite) TestIncreaseReplTimestampCollision(c *check.C) {
        // For purposes of increasing replication, we assume identical
        // replicas are distinct.
        bal.try(c, tester{
-               desired:    4,
+               desired:    map[string]int{"default": 4},
                current:    slots{0, 1},
                timestamps: []int64{12345678, 12345678},
                shouldPull: slots{2, 3}})
@@ -205,11 +222,11 @@ func (bal *balancerSuite) TestDecreaseReplTimestampCollision(c *check.C) {
        // For purposes of decreasing replication, we assume identical
        // replicas are NOT distinct.
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{0, 1, 2},
                timestamps: []int64{12345678, 12345678, 12345678}})
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{0, 1, 2},
                timestamps: []int64{12345678, 10000000, 10000000}})
 }
@@ -219,35 +236,342 @@ func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
        newTime := bal.MinMtime + 3600
        // The excess replica is too new to delete.
        bal.try(c, tester{
-               desired:    2,
+               desired:    map[string]int{"default": 2},
                current:    slots{0, 1, 2},
                timestamps: []int64{oldTime, newTime, newTime + 1}})
        // The best replicas are too new to delete, but the excess
        // replica is old enough.
        bal.try(c, tester{
-               desired:     2,
+               desired:     map[string]int{"default": 2},
                current:     slots{0, 1, 2},
                timestamps:  []int64{newTime, newTime + 1, oldTime},
                shouldTrash: slots{2}})
 }
 
+func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
+       bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+       bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+       c.Check(len(bal.srvs[3].mounts), check.Equals, 1)
+       bal.cleanupMounts()
+       c.Check(len(bal.srvs[3].mounts), check.Equals, 0)
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{2}})
+}
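
One plausible reading of the rule this test exercises, written as a standalone sketch (the dedup semantics here are inferred from the fixture, not copied from cleanupMounts itself): a read-only mount is dropped when the same device ID is mounted read-write elsewhere.

package main

import "fmt"

type mount struct {
	UUID     string
	DeviceID string
	ReadOnly bool
}

// cleanup drops read-only mounts whose device is also mounted
// read-write on some other server (assumed rule; an empty DeviceID
// means the device is unknown and is left alone).
func cleanup(mounts []mount) []mount {
	rwdev := map[string]bool{}
	for _, m := range mounts {
		if !m.ReadOnly {
			rwdev[m.DeviceID] = true
		}
	}
	var keep []mount
	for _, m := range mounts {
		if m.ReadOnly && m.DeviceID != "" && rwdev[m.DeviceID] {
			continue
		}
		keep = append(keep, m)
	}
	return keep
}

func main() {
	fmt.Println(cleanup([]mount{
		{UUID: "m3", DeviceID: "abcdef", ReadOnly: true},
		{UUID: "m14", DeviceID: "abcdef"},
	})) // m3 is dropped; m14 remains
}
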
+
+func (bal *balancerSuite) TestVolumeReplication(c *check.C) {
+       bal.srvs[0].mounts[0].KeepMount.Replication = 2  // srv 0
+       bal.srvs[14].mounts[0].KeepMount.Replication = 2 // srv e
+       bal.cleanupMounts()
+       // block 0 rendezvous is 3,e,a -- so slot 1 has repl=2
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{0}})
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1},
+               shouldPull: nil})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 2},
+               shouldTrash: slots{2}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 3},
+               current:     slots{0, 2, 3, 4},
+               shouldPull:  slots{1},
+               shouldTrash: slots{4},
+               expectResult: balanceResult{
+                       have: 4,
+                       want: 3,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      3,
+                               surplus:      1,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 3},
+               current:     slots{0, 1, 2, 3, 4},
+               shouldTrash: slots{2, 3, 4}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 4},
+               current:     slots{0, 1, 2, 3, 4},
+               shouldTrash: slots{3, 4},
+               expectResult: balanceResult{
+                       have: 6,
+                       want: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      4,
+                               surplus:      2,
+                               unachievable: false}}}})
+       // block 1 rendezvous is 0,9,7 -- so slot 0 has repl=2
+       bal.try(c, tester{
+               known:   1,
+               desired: map[string]int{"default": 2},
+               current: slots{0},
+               expectResult: balanceResult{
+                       have: 2,
+                       want: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      0,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 3},
+               current:    slots{0},
+               shouldPull: slots{1}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{0},
+               shouldPull: slots{1, 2}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{2},
+               shouldPull: slots{0, 1}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{7},
+               shouldPull: slots{0, 1, 2},
+               expectResult: balanceResult{
+                       have: 1,
+                       want: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      4,
+                               surplus:      -3,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:       1,
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 2, 3, 4},
+               shouldPull:  slots{0},
+               shouldTrash: slots{3, 4}})
+       bal.try(c, tester{
+               known:       1,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 2},
+               shouldTrash: slots{1, 2},
+               expectResult: balanceResult{
+                       have: 4,
+                       want: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      2,
+                               unachievable: false}}}})
+}
+
+func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
+       bal.srvs[0].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[9].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+       // block 0 belongs on servers 3 and e, which have different
+       // device IDs.
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{0}})
+       // block 1 belongs on servers 0 and 9, which both report
+       // having a replica, but the replicas are on the same device
+       // ID -- so we should pull to the third position (7).
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1},
+               shouldPull: slots{2}})
+       // block 1 can be pulled to the doubly-mounted device, but the
+       // pull should only be done on the first of the two servers.
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2},
+               current:    slots{2},
+               shouldPull: slots{0}})
+       // block 0 has one replica on a single device mounted on two
+       // servers (e,9 at positions 1,9). Trashing the replica on 9
+       // would lose the block.
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1, 9},
+               shouldPull: slots{0},
+               expectResult: balanceResult{
+                       have: 1,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      -1,
+                               unachievable: false}}}})
+       // block 0 is overreplicated, but the second and third
+       // replicas are the same replica according to DeviceID
+       // (despite different Mtimes). Don't trash the third replica.
+       bal.try(c, tester{
+               known:   0,
+               desired: map[string]int{"default": 2},
+               current: slots{0, 1, 9},
+               expectResult: balanceResult{
+                       have: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      0,
+                               unachievable: false}}}})
+       // block 0 is overreplicated; the third and fifth replicas are
+       // extra, but the fourth is another view of the second and
+       // shouldn't be trashed.
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 5, 9, 12},
+               shouldTrash: slots{5, 12},
+               expectResult: balanceResult{
+                       have: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      2,
+                               unachievable: false}}}})
+}
+
+func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
+       // For known blocks 0/1/2/3, server 9 is slot 9/1/14/0 in
+       // probe order. For these tests we give it two mounts, one
+       // with classes=[special], one with
+       // classes=[special,special2].
+       bal.srvs[9].mounts = []*KeepMount{{
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special"},
+                       UUID:           "zzzzz-mount-special00000009",
+                       DeviceID:       "9-special",
+               },
+               KeepService: bal.srvs[9],
+       }, {
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special", "special2"},
+                       UUID:           "zzzzz-mount-special20000009",
+                       DeviceID:       "9-special-and-special2",
+               },
+               KeepService: bal.srvs[9],
+       }}
+       // For known blocks 0/1/2/3, server 13 (d) is slot 5/3/11/1 in
+       // probe order. We give it two mounts, one with
+       // classes=[special2], one with classes=[default].
+       bal.srvs[13].mounts = []*KeepMount{{
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special2"},
+                       UUID:           "zzzzz-mount-special2000000d",
+                       DeviceID:       "13-special2",
+               },
+               KeepService: bal.srvs[13],
+       }, {
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"default"},
+                       UUID:           "zzzzz-mount-00000000000000d",
+                       DeviceID:       "13-default",
+               },
+               KeepService: bal.srvs[13],
+       }}
+       // Pull to slot 9 because that's the only server with the
+       // desired class "special".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"default": 2, "special": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{9},
+               shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+       // If some storage classes are not satisfied, don't trash any
+       // excess replicas. (E.g., if someone desires repl=1 on
+       // class=durable, and we have two copies on class=volatile, we
+       // should wait for the pull to succeed before trashing anything.)
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{9},
+               shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+       // Once storage classes are satisfied, trash excess replicas
+       // that appear earlier in probe order but aren't needed to
+       // satisfy the desired classes.
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"special": 1},
+               current:     slots{0, 1, 9},
+               shouldTrash: slots{0, 1}})
+       // Pull to slot 5, the best server with class "special2".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special2": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{5},
+               shouldPullMounts: []string{"zzzzz-mount-special2000000d"}})
+       // Pull to slots 5 and 9 to get replication 2 in the desired
+       // class "special2".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special2": 2},
+               current:          slots{0, 1},
+               shouldPull:       slots{5, 9},
+               shouldPullMounts: []string{"zzzzz-mount-special20000009", "zzzzz-mount-special2000000d"}})
+       // Slot 0 has a replica in "default", slot 1 has a replica
+       // in "special"; we need another replica in "default", i.e.,
+       // on slot 2.
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2, "special": 1},
+               current:    slots{0, 1},
+               shouldPull: slots{2}})
+       // Pull to the best probe position, slot 0 (despite its wrong
+       // storage class), when it's impossible to achieve the desired
+       // replication in the desired class (only slots 1 and 3 have
+       // "special2").
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"special2": 3},
+               current:    slots{3},
+               shouldPull: slots{0, 1}})
+       // Trash excess replica.
+       bal.try(c, tester{
+               known:       3,
+               desired:     map[string]int{"special": 1},
+               current:     slots{0, 1},
+               shouldTrash: slots{1}})
+       // Leave one copy on slot 1 because slot 0 (server 9) only
+       // gives us repl=1.
+       bal.try(c, tester{
+               known:   3,
+               desired: map[string]int{"special": 2},
+               current: slots{0, 1}})
+}
+
 // Clear all servers' changesets, balance a single block, and verify
 // the appropriate changes for that block have been added to the
 // changesets.
 func (bal *balancerSuite) try(c *check.C, t tester) {
-       bal.setupServiceRoots()
+       bal.setupLookupTables()
        blk := &BlockState{
+               Replicas: bal.replList(t.known, t.current),
                Desired:  t.desired,
-               Replicas: bal.replList(t.known, t.current)}
+       }
        for i, t := range t.timestamps {
                blk.Replicas[i].Mtime = t
        }
        for _, srv := range bal.srvs {
                srv.ChangeSet = &ChangeSet{}
        }
-       bal.balanceBlock(knownBlkid(t.known), blk)
+       result := bal.balanceBlock(knownBlkid(t.known), blk)
 
        var didPull, didTrash slots
+       var didPullMounts, didTrashMounts []string
        for i, srv := range bal.srvs {
                var slot int
                for probeOrder, srvNum := range bal.knownRendezvous[t.known] {
@@ -257,10 +581,12 @@ func (bal *balancerSuite) try(c *check.C, t tester) {
                }
                for _, pull := range srv.Pulls {
                        didPull = append(didPull, slot)
+                       didPullMounts = append(didPullMounts, pull.To.UUID)
                        c.Check(pull.SizedDigest, check.Equals, knownBlkid(t.known))
                }
                for _, trash := range srv.Trashes {
                        didTrash = append(didTrash, slot)
+                       didTrashMounts = append(didTrashMounts, trash.From.UUID)
                        c.Check(trash.SizedDigest, check.Equals, knownBlkid(t.known))
                }
        }
@@ -270,6 +596,23 @@ func (bal *balancerSuite) try(c *check.C, t tester) {
        }
        c.Check(didPull, check.DeepEquals, t.shouldPull)
        c.Check(didTrash, check.DeepEquals, t.shouldTrash)
+       if t.shouldPullMounts != nil {
+               sort.Strings(didPullMounts)
+               c.Check(didPullMounts, check.DeepEquals, t.shouldPullMounts)
+       }
+       if t.shouldTrashMounts != nil {
+               sort.Strings(didTrashMounts)
+               c.Check(didTrashMounts, check.DeepEquals, t.shouldTrashMounts)
+       }
+       if t.expectResult.have > 0 {
+               c.Check(result.have, check.Equals, t.expectResult.have)
+       }
+       if t.expectResult.want > 0 {
+               c.Check(result.want, check.Equals, t.expectResult.want)
+       }
+       if t.expectResult.classState != nil {
+               c.Check(result.classState, check.DeepEquals, t.expectResult.classState)
+       }
 }
 
 // srvList returns the KeepServices, sorted in rendezvous order and
@@ -286,9 +629,14 @@ func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepSe
 // replList is like srvList but returns an "existing replicas" slice,
 // suitable for a BlockState test fixture.
 func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
+       nextMnt := map[*KeepService]int{}
        mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
        for _, srv := range bal.srvList(knownBlockID, order) {
-               repls = append(repls, Replica{srv.mounts[0], mtime})
+               // round-robin repls onto each srv's mounts
+               n := nextMnt[srv]
+               nextMnt[srv] = (n + 1) % len(srv.mounts)
+
+               repls = append(repls, Replica{srv.mounts[n], mtime})
                mtime++
        }
        return
index 958cdb596b61155c7138aeba05782b4eeffec7a5..22e89c019ab9fa5a5fb833bf84bbc63df7a4e93b 100644 (file)
@@ -18,21 +18,39 @@ type Replica struct {
        Mtime int64
 }
 
-// BlockState indicates the number of desired replicas (according to
-// the collections we know about) and the replicas actually stored
-// (according to the keepstore indexes we know about).
+// BlockState indicates the desired replication per storage class
+// (according to the collections we know about) and the
+// replicas actually stored (according to the keepstore indexes we
+// know about).
 type BlockState struct {
        Replicas []Replica
-       Desired  int
+       Desired  map[string]int
+       // TODO: Support combinations of classes ("private + durable")
+       // by replacing the map[string]int with a map[*[]string]int
+       // here, where the map keys come from a pool of semantically
+       // distinct class combinations.
+       //
+       // TODO: Use a pool of semantically distinct Desired maps to
+       // conserve memory (typically there are far more BlockState
+       // objects in memory than distinct Desired profiles).
 }
 
+var defaultClasses = []string{"default"}
+
 func (bs *BlockState) addReplica(r Replica) {
        bs.Replicas = append(bs.Replicas, r)
 }
 
-func (bs *BlockState) increaseDesired(n int) {
-       if bs.Desired < n {
-               bs.Desired = n
+func (bs *BlockState) increaseDesired(classes []string, n int) {
+       if len(classes) == 0 {
+               classes = defaultClasses
+       }
+       for _, class := range classes {
+               if bs.Desired == nil {
+                       bs.Desired = map[string]int{class: n}
+               } else if d, ok := bs.Desired[class]; !ok || d < n {
+                       bs.Desired[class] = n
+               }
        }
 }
 
@@ -88,12 +106,12 @@ func (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceI
 }
 
 // IncreaseDesired updates the map to indicate the desired replication
-// for the given blocks is at least n.
-func (bsm *BlockStateMap) IncreaseDesired(n int, blocks []arvados.SizedDigest) {
+// for the given blocks in the given storage classes is at least n.
+func (bsm *BlockStateMap) IncreaseDesired(classes []string, n int, blocks []arvados.SizedDigest) {
        bsm.mutex.Lock()
        defer bsm.mutex.Unlock()
 
        for _, blkid := range blocks {
-               bsm.get(blkid).increaseDesired(n)
+               bsm.get(blkid).increaseDesired(classes, n)
        }
 }
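
A minimal sketch of the per-class "maximum wins" semantics that increaseDesired implements, with the method rewritten as a standalone function (the types here are local to the sketch, not the package's own):

package main

import "fmt"

func increaseDesired(desired map[string]int, classes []string, n int) map[string]int {
	if len(classes) == 0 {
		classes = []string{"default"}
	}
	if desired == nil {
		desired = map[string]int{}
	}
	for _, class := range classes {
		// Only raise, never lower, the desired count per class.
		if d, ok := desired[class]; !ok || d < n {
			desired[class] = n
		}
	}
	return desired
}

func main() {
	var d map[string]int
	// One collection wants 2 copies with no classes given ("default")...
	d = increaseDesired(d, nil, 2)
	// ...another wants 1 copy each in "default" and "special":
	d = increaseDesired(d, []string{"default", "special"}, 1)
	fmt.Println(d) // map[default:2 special:1] -- per-class maximum wins
}
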
index f88cf8ea9fdb6fd68be5cb5c5cbc1186434147bf..5437f761937747d199eba3ebd9a0696d2c2c0583 100644 (file)
@@ -16,25 +16,30 @@ import (
 // store it locally.
 type Pull struct {
        arvados.SizedDigest
-       Source *KeepService
+       From *KeepService
+       To   *KeepMount
 }
 
 // MarshalJSON formats a pull request the way keepstore wants to see
 // it.
 func (p Pull) MarshalJSON() ([]byte, error) {
        type KeepstorePullRequest struct {
-               Locator string   `json:"locator"`
-               Servers []string `json:"servers"`
+               Locator   string   `json:"locator"`
+               Servers   []string `json:"servers"`
+               MountUUID string   `json:"mount_uuid"`
        }
        return json.Marshal(KeepstorePullRequest{
-               Locator: string(p.SizedDigest[:32]),
-               Servers: []string{p.Source.URLBase()}})
+               Locator:   string(p.SizedDigest[:32]),
+               Servers:   []string{p.From.URLBase()},
+               MountUUID: p.To.KeepMount.UUID,
+       })
 }
 
 // Trash is a request to delete a block.
 type Trash struct {
        arvados.SizedDigest
        Mtime int64
+       From  *KeepMount
 }
 
 // MarshalJSON formats a trash request the way keepstore wants to see
@@ -43,10 +48,13 @@ func (t Trash) MarshalJSON() ([]byte, error) {
        type KeepstoreTrashRequest struct {
                Locator    string `json:"locator"`
                BlockMtime int64  `json:"block_mtime"`
+               MountUUID  string `json:"mount_uuid"`
        }
        return json.Marshal(KeepstoreTrashRequest{
                Locator:    string(t.SizedDigest[:32]),
-               BlockMtime: t.Mtime})
+               BlockMtime: t.Mtime,
+               MountUUID:  t.From.KeepMount.UUID,
+       })
 }
 
 // ChangeSet is a set of change requests that will be sent to a
index 5eb850d6a99aa3f736a2968a8bbb9db72aaaa20c..6421a4d5dade60aab269690bc0f3ed2833685cde 100644 (file)
@@ -17,6 +17,9 @@ var _ = check.Suite(&changeSetSuite{})
 type changeSetSuite struct{}
 
 func (s *changeSetSuite) TestJSONFormat(c *check.C) {
+       mnt := &KeepMount{
+               KeepMount: arvados.KeepMount{
+                       UUID: "zzzzz-mount-abcdefghijklmno"}}
        srv := &KeepService{
                KeepService: arvados.KeepService{
                        UUID:           "zzzzz-bi6l4-000000000000001",
@@ -27,13 +30,15 @@ func (s *changeSetSuite) TestJSONFormat(c *check.C) {
 
        buf, err := json.Marshal([]Pull{{
                SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
-               Source:      srv}})
+               To:          mnt,
+               From:        srv}})
        c.Check(err, check.IsNil)
-       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"]}]`)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
 
        buf, err = json.Marshal([]Trash{{
                SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
+               From:        mnt,
                Mtime:       123456789}})
        c.Check(err, check.IsNil)
-       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":123456789}]`)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
 }
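
On the receiving side, a decoder for the pull-list format shown above could look like this sketch (the struct name is illustrative and the JSON is copied from the test expectation; this is not keepstore's actual implementation):

package main

import (
	"encoding/json"
	"fmt"
)

// pullRequest mirrors the JSON fields emitted by Pull.MarshalJSON.
type pullRequest struct {
	Locator   string   `json:"locator"`
	Servers   []string `json:"servers"`
	MountUUID string   `json:"mount_uuid"`
}

func main() {
	body := `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`
	var pulls []pullRequest
	if err := json.Unmarshal([]byte(body), &pulls); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", pulls[0]) // which block, from where, onto which mount
}
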
index 947033564df01e479d05682617fc041417e5d54f..90235cbf3188d91bc274412ddd5522dc639fa812 100644 (file)
@@ -9,6 +9,7 @@ import (
        "flag"
        "fmt"
        "log"
+       "net/http"
        "os"
        "os/signal"
        "syscall"
@@ -45,6 +46,9 @@ type Config struct {
        // more memory, but can reduce store-and-forward latency when
        // fetching pages)
        CollectionBuffers int
+
+       // Timeout for outgoing http request/response cycle.
+       RequestTimeout arvados.Duration
 }
 
 // RunOptions controls runtime behavior. The flags/options that belong
@@ -107,6 +111,14 @@ func main() {
                log.Fatal(config.DumpAndExit(cfg))
        }
 
+       to := time.Duration(cfg.RequestTimeout)
+       if to == 0 {
+               to = 30 * time.Minute
+       }
+       arvados.DefaultSecureClient.Timeout = to
+       arvados.InsecureHTTPClient.Timeout = to
+       http.DefaultClient.Timeout = to
+
        log.Printf("keep-balance %s started", version)
 
        if *debugFlag {
index 0f4effe6f4e9b7c4e2590cfeb48ef5cd729ec5cd..4c7d5067182fe89783e104c56063fdaf86545c1b 100644 (file)
@@ -19,7 +19,8 @@ KeepServiceTypes:
     - disk
 RunPeriod: 600s
 CollectionBatchSize: 100000
-CollectionBuffers: 1000`)
+CollectionBuffers: 1000
+RequestTimeout: 30m`)
 
 func usage() {
        fmt.Fprintf(os.Stderr, `
@@ -86,6 +87,11 @@ Tuning resource usage:
     while the current page is still being processed. If this is zero
     or omitted, pages are processed serially.
 
+    RequestTimeout is the maximum time keep-balance will spend on a
+    single HTTP request (getting a page of collections, getting the
+    block index from a keepstore server, or sending a trash or pull
+    list to a keepstore server). Defaults to 30 minutes.
+
 Limitations:
 
     keep-balance does not attempt to discover whether committed pull
index 9ee99903c8d1e537d487a67d1c77d848fc93c807..59e8de3bc9f884dec899e22072c3afe684aceb1a 100644 (file)
@@ -99,7 +99,7 @@ func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvad
        }
        var updated arvados.Collection
        defer c.pdhs.Remove(coll.UUID)
-       err := client.RequestAndDecode(&updated, "PATCH", "/arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
+       err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
        if err == nil {
                c.collections.Add(client.AuthToken+"\000"+coll.PortableDataHash, &cachedCollection{
                        expire:     time.Now().Add(time.Duration(c.TTL)),
index eb323674b9013daa80b7ee7bc1d472dcdd21cf01..0e2f17c35b85df02b98df4d3e29a974d18deb17d 100644 (file)
@@ -6,11 +6,13 @@ package main
 
 import (
        "bytes"
+       "fmt"
        "io"
        "io/ioutil"
        "net/url"
        "os"
        "os/exec"
+       "path/filepath"
        "strings"
        "time"
 
@@ -19,34 +21,66 @@ import (
        check "gopkg.in/check.v1"
 )
 
-func (s *IntegrationSuite) TestWebdavWithCadaver(c *check.C) {
+func (s *IntegrationSuite) TestCadaverHTTPAuth(c *check.C) {
+       s.testCadaver(c, arvadostest.ActiveToken, func(newCollection arvados.Collection) (string, string, string) {
+               r := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/"
+               w := "/c=" + newCollection.UUID + "/"
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/"
+               return r, w, pdh
+       }, nil)
+}
+
+func (s *IntegrationSuite) TestCadaverPathAuth(c *check.C) {
+       s.testCadaver(c, "", func(newCollection arvados.Collection) (string, string, string) {
+               r := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken + "/"
+               w := "/c=" + newCollection.UUID + "/t=" + arvadostest.ActiveToken + "/"
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/t=" + arvadostest.ActiveToken + "/"
+               return r, w, pdh
+       }, nil)
+}
+
+func (s *IntegrationSuite) TestCadaverUserProject(c *check.C) {
+       rpath := "/users/active/foo_file_in_dir/"
+       s.testCadaver(c, arvadostest.ActiveToken, func(newCollection arvados.Collection) (string, string, string) {
+               wpath := "/users/active/" + newCollection.Name
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/"
+               return rpath, wpath, pdh
+       }, func(path string) bool {
+               // Skip tests that rely on writes, because /users/
+               // tree is read-only.
+               return !strings.HasPrefix(path, rpath) || strings.HasPrefix(path, rpath+"_/")
+       })
+}
+
+func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc func(arvados.Collection) (string, string, string), skip func(string) bool) {
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
+
        testdata := []byte("the human tragedy consists in the necessity of living with the consequences of actions performed under the pressure of compulsions we do not understand")
 
-       localfile, err := ioutil.TempFile("", "localfile")
+       tempdir, err := ioutil.TempDir("", "keep-web-test-")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(tempdir)
+
+       localfile, err := ioutil.TempFile(tempdir, "localfile")
        c.Assert(err, check.IsNil)
-       defer os.Remove(localfile.Name())
        localfile.Write(testdata)
 
-       emptyfile, err := ioutil.TempFile("", "emptyfile")
+       emptyfile, err := ioutil.TempFile(tempdir, "emptyfile")
        c.Assert(err, check.IsNil)
-       defer os.Remove(emptyfile.Name())
 
-       checkfile, err := ioutil.TempFile("", "checkfile")
+       checkfile, err := ioutil.TempFile(tempdir, "checkfile")
        c.Assert(err, check.IsNil)
-       defer os.Remove(checkfile.Name())
 
        var newCollection arvados.Collection
        arv := arvados.NewClientFromEnv()
        arv.AuthToken = arvadostest.ActiveToken
-       err = arv.RequestAndDecode(&newCollection, "POST", "/arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
+       err = arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
        c.Assert(err, check.IsNil)
-       writePath := "/c=" + newCollection.UUID + "/t=" + arv.AuthToken + "/"
 
-       pdhPath := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/t=" + arv.AuthToken + "/"
+       readPath, writePath, pdhPath := pathFunc(newCollection)
 
        matchToday := time.Now().Format("Jan +2")
 
-       readPath := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken + "/"
        type testcase struct {
                path  string
                cmd   string
@@ -211,22 +245,15 @@ func (s *IntegrationSuite) TestWebdavWithCadaver(c *check.C) {
                },
        } {
                c.Logf("%s %+v", "http://"+s.testServer.Addr, trial)
+               if skip != nil && skip(trial.path) {
+                       c.Log("(skip)")
+                       continue
+               }
 
                os.Remove(checkfile.Name())
 
-               cmd := exec.Command("cadaver", "http://"+s.testServer.Addr+trial.path)
-               cmd.Stdin = bytes.NewBufferString(trial.cmd)
-               stdout, err := cmd.StdoutPipe()
-               c.Assert(err, check.Equals, nil)
-               cmd.Stderr = cmd.Stdout
-               go cmd.Start()
-
-               var buf bytes.Buffer
-               _, err = io.Copy(&buf, stdout)
-               c.Check(err, check.Equals, nil)
-               err = cmd.Wait()
-               c.Check(err, check.Equals, nil)
-               c.Check(buf.String(), check.Matches, trial.match)
+               stdout := s.runCadaver(c, password, trial.path, trial.cmd)
+               c.Check(stdout, check.Matches, trial.match)
 
                if trial.data == nil {
                        continue
@@ -239,3 +266,75 @@ func (s *IntegrationSuite) TestWebdavWithCadaver(c *check.C) {
                c.Check(err, check.IsNil)
        }
 }
+
+func (s *IntegrationSuite) TestCadaverByID(c *check.C) {
+       for _, path := range []string{"/by_id", "/by_id/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*collection is empty.*`)
+       }
+       for _, path := range []string{
+               "/by_id/" + arvadostest.FooPdh,
+               "/by_id/" + arvadostest.FooPdh + "/",
+               "/by_id/" + arvadostest.FooCollection,
+               "/by_id/" + arvadostest.FooCollection + "/",
+       } {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*\s+foo\s+3 .*`)
+       }
+}
+
+func (s *IntegrationSuite) TestCadaverUsersDir(c *check.C) {
+       for _, path := range []string{"/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+by_id\s+0 .*`)
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+users\s+0 .*`)
+       }
+       for _, path := range []string{"/users", "/users/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+active.*`)
+       }
+       for _, path := range []string{"/users/active", "/users/active/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+A Project\s+0 .*`)
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+bar_file\s+0 .*`)
+       }
+       for _, path := range []string{"/users/admin", "/users/doesnotexist", "/users/doesnotexist/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*404 Not Found.*`)
+       }
+}
+
+func (s *IntegrationSuite) runCadaver(c *check.C, password, path, stdin string) string {
+       tempdir, err := ioutil.TempDir("", "keep-web-test-")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(tempdir)
+
+       cmd := exec.Command("cadaver", "http://"+s.testServer.Addr+path)
+       if password != "" {
+               // cadaver won't try username/password authentication
+               // unless the server responds 401 to an
+               // unauthenticated request, which it only does in
+               // AttachmentOnlyHost, TrustAllContent, and
+               // per-collection vhost cases.
+               s.testServer.Config.AttachmentOnlyHost = s.testServer.Addr
+
+               cmd.Env = append(os.Environ(), "HOME="+tempdir)
+               f, err := os.OpenFile(filepath.Join(tempdir, ".netrc"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+               c.Assert(err, check.IsNil)
+               _, err = fmt.Fprintf(f, "default login none password %s\n", password)
+               c.Assert(err, check.IsNil)
+               c.Assert(f.Close(), check.IsNil)
+       }
+       cmd.Stdin = bytes.NewBufferString(stdin)
+       stdout, err := cmd.StdoutPipe()
+       c.Assert(err, check.Equals, nil)
+       cmd.Stderr = cmd.Stdout
+       go cmd.Start()
+
+       var buf bytes.Buffer
+       _, err = io.Copy(&buf, stdout)
+       c.Check(err, check.Equals, nil)
+       err = cmd.Wait()
+       c.Check(err, check.Equals, nil)
+       return buf.String()
+}
index b7da3b0e5ad2df7642319f16a97015bb3e45de63..89cd26ac49a8b76fcf0053633ca26917477c9478 100644 (file)
 //   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
 //   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
 //   http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
+//
+// The following URLs are read-only, but otherwise interchangeable
+// with the above:
+//
 //   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
 //   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
+//   http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
+//   http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
+//
+// If the collection is named "MyCollection" and located in a project
+// called "MyProject" which is in the home project of a user with
+// username is "bob", the following read-only URL is also available
+// when authenticating as bob:
+//
+//   http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
 //
 // An additional form is supported specifically to make it more
 // convenient to maintain support for existing Workbench download
 //
 //   http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
 //
+// Collections can also be accessed (read-only) via "/by_id/X" where X
+// is a UUID or portable data hash.
+//
 // Authorization mechanisms
 //
 // A token can be provided in an Authorization header:
 //
 // Indexes
 //
-// Currently, keep-web does not generate HTML index listings, nor does
-// it serve a default file like "index.html" when a directory is
-// requested. These features are likely to be added in future
-// versions. Until then, keep-web responds with 404 if a directory
-// name (or any path ending with "/") is requested.
+// Keep-web returns a generic HTML index listing when a directory is
+// requested with the GET method. It does not serve a default file
+// like "index.html". Directory listings are also returned for WebDAV
+// PROPFIND requests.
 //
 // Compatibility
 //
index 19a2040b4a5735551c0f7bf8a610c1fb109399b9..7d17be6e7cfe8c59305b452c8d788bca5748acdc 100644 (file)
@@ -10,10 +10,10 @@ import (
        "html"
        "html/template"
        "io"
-       "log"
        "net/http"
        "net/url"
        "os"
+       "path/filepath"
        "sort"
        "strconv"
        "strings"
@@ -25,6 +25,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/health"
        "git.curoverse.com/arvados.git/sdk/go/httpserver"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       log "github.com/Sirupsen/logrus"
        "golang.org/x/net/webdav"
 )
 
@@ -112,12 +113,12 @@ type updateOnSuccess struct {
 }
 
 func (uos *updateOnSuccess) Write(p []byte) (int, error) {
-       if uos.err != nil {
-               return 0, uos.err
-       }
        if !uos.sentHeader {
                uos.WriteHeader(http.StatusOK)
        }
+       if uos.err != nil {
+               return 0, uos.err
+       }
        return uos.ResponseWriter.Write(p)
 }
 
@@ -163,6 +164,12 @@ var (
                "HEAD": true,
                "POST": true,
        }
+       // top-level dirs to serve with siteFS
+       siteFSDir = map[string]bool{
+               "":      true, // root directory
+               "by_id": true,
+               "users": true,
+       }
 )
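
A standalone sketch of how this table routes a request path: the first path segment decides whether the request is served through the site-wide filesystem or falls through to the collection-specific handlers (paths below are illustrative):

package main

import (
	"fmt"
	"strings"
)

var siteFSDir = map[string]bool{
	"":      true, // root directory
	"by_id": true,
	"users": true,
}

func main() {
	for _, path := range []string{
		"/",
		"/users/active/foo",
		"/c=zzzzz-4zz18-znfnqtbbv4spc3w/bar.txt",
	} {
		pathParts := strings.Split(path[1:], "/")
		fmt.Println(path, "-> siteFS:", siteFSDir[pathParts[0]])
	}
}
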
 
 // ServeHTTP implements http.Handler.
@@ -176,6 +183,9 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
                remoteAddr = xff + "," + remoteAddr
        }
+       if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
+               r.URL.Scheme = xfp
+       }
 
        w := httpserver.WrapResponseWriter(wOrig)
        defer func() {
@@ -184,13 +194,12 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                } else if w.WroteStatus() == 0 {
                        w.WriteHeader(statusCode)
                } else if w.WroteStatus() != statusCode {
-                       httpserver.Log(r.RemoteAddr, "WARNING",
+                       log.WithField("RequestID", r.Header.Get("X-Request-Id")).Warn(
                                fmt.Sprintf("Our status changed from %d to %d after we sent headers", w.WroteStatus(), statusCode))
                }
                if statusText == "" {
                        statusText = http.StatusText(statusCode)
                }
-               httpserver.Log(remoteAddr, statusCode, statusText, w.WroteBodyBytes(), r.Method, r.Host, r.URL.Path, r.URL.RawQuery)
        }()
 
        if strings.HasPrefix(r.URL.Path, "/_health/") && r.Method == "GET" {
@@ -226,21 +235,15 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                w.Header().Set("Access-Control-Expose-Headers", "Content-Range")
        }
 
-       arv := h.clientPool.Get()
-       if arv == nil {
-               statusCode, statusText = http.StatusInternalServerError, "Pool failed: "+h.clientPool.Err().Error()
-               return
-       }
-       defer h.clientPool.Put(arv)
-
        pathParts := strings.Split(r.URL.Path[1:], "/")
 
        var stripParts int
-       var targetID string
+       var collectionID string
        var tokens []string
        var reqTokens []string
        var pathToken bool
        var attachment bool
+       var useSiteFS bool
        credentialsOK := h.Config.TrustAllContent
 
        if r.Host != "" && r.Host == h.Config.AttachmentOnlyHost {
@@ -250,36 +253,43 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                attachment = true
        }
 
-       if targetID = parseCollectionIDFromDNSName(r.Host); targetID != "" {
+       if collectionID = parseCollectionIDFromDNSName(r.Host); collectionID != "" {
                // http://ID.collections.example/PATH...
                credentialsOK = true
        } else if r.URL.Path == "/status.json" {
                h.serveStatus(w, r)
                return
+       } else if siteFSDir[pathParts[0]] {
+               useSiteFS = true
        } else if len(pathParts) >= 1 && strings.HasPrefix(pathParts[0], "c=") {
                // /c=ID[/PATH...]
-               targetID = parseCollectionIDFromURL(pathParts[0][2:])
+               collectionID = parseCollectionIDFromURL(pathParts[0][2:])
                stripParts = 1
        } else if len(pathParts) >= 2 && pathParts[0] == "collections" {
                if len(pathParts) >= 4 && pathParts[1] == "download" {
                        // /collections/download/ID/TOKEN/PATH...
-                       targetID = parseCollectionIDFromURL(pathParts[2])
+                       collectionID = parseCollectionIDFromURL(pathParts[2])
                        tokens = []string{pathParts[3]}
                        stripParts = 4
                        pathToken = true
                } else {
                        // /collections/ID/PATH...
-                       targetID = parseCollectionIDFromURL(pathParts[1])
+                       collectionID = parseCollectionIDFromURL(pathParts[1])
                        tokens = h.Config.AnonymousTokens
                        stripParts = 2
                }
        }
 
-       if targetID == "" {
+       if collectionID == "" && !useSiteFS {
                statusCode = http.StatusNotFound
                return
        }
 
+       forceReload := false
+       if cc := r.Header.Get("Cache-Control"); strings.Contains(cc, "no-cache") || strings.Contains(cc, "must-revalidate") {
+               forceReload = true
+       }
+
        formToken := r.FormValue("api_token")
        if formToken != "" && r.Header.Get("Origin") != "" && attachment && r.URL.Query().Get("api_token") == "" {
                // The client provided an explicit token in the POST
@@ -306,6 +316,14 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                return
        }
 
+       if useSiteFS {
+               if tokens == nil {
+                       tokens = auth.NewCredentialsFromHTTPRequest(r).Tokens
+               }
+               h.serveSiteFS(w, r, tokens, credentialsOK, attachment)
+               return
+       }
+
        targetPath := pathParts[stripParts:]
        if tokens == nil && len(targetPath) > 0 && strings.HasPrefix(targetPath[0], "t=") {
                // http://ID.example/t=TOKEN/PATH...
@@ -338,16 +356,18 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                stripParts++
        }
 
-       forceReload := false
-       if cc := r.Header.Get("Cache-Control"); strings.Contains(cc, "no-cache") || strings.Contains(cc, "must-revalidate") {
-               forceReload = true
+       arv := h.clientPool.Get()
+       if arv == nil {
+               statusCode, statusText = http.StatusInternalServerError, "Pool failed: "+h.clientPool.Err().Error()
+               return
        }
+       defer h.clientPool.Put(arv)
 
        var collection *arvados.Collection
        tokenResult := make(map[string]int)
        for _, arv.ApiToken = range tokens {
                var err error
-               collection, err = h.Config.Cache.Get(arv, targetID, forceReload)
+               collection, err = h.Config.Cache.Get(arv, collectionID, forceReload)
                if err == nil {
                        // Success
                        break
@@ -402,6 +422,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                statusCode, statusText = http.StatusInternalServerError, err.Error()
                return
        }
+       kc.RequestID = r.Header.Get("X-Request-Id")
 
        var basename string
        if len(targetPath) > 0 {
@@ -409,19 +430,21 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        }
        applyContentDispositionHdr(w, r, basename, attachment)
 
-       client := &arvados.Client{
+       client := (&arvados.Client{
                APIHost:   arv.ApiServer,
                AuthToken: arv.ApiToken,
                Insecure:  arv.ApiInsecure,
-       }
+       }).WithRequestID(r.Header.Get("X-Request-Id"))
+
        fs, err := collection.FileSystem(client, kc)
        if err != nil {
                statusCode, statusText = http.StatusInternalServerError, err.Error()
                return
        }
 
-       targetIsPDH := arvadosclient.PDHMatch(targetID)
-       if targetIsPDH && writeMethod[r.Method] {
+       writefs, writeOK := fs.(arvados.CollectionFileSystem)
+       targetIsPDH := arvadosclient.PDHMatch(collectionID)
+       if (targetIsPDH || !writeOK) && writeMethod[r.Method] {
                statusCode, statusText = http.StatusMethodNotAllowed, errReadOnly.Error()
                return
        }
@@ -435,7 +458,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                        w = &updateOnSuccess{
                                ResponseWriter: w,
                                update: func() error {
-                                       return h.Config.Cache.Update(client, *collection, fs)
+                                       return h.Config.Cache.Update(client, *collection, writefs)
                                }}
                }
                h := webdav.Handler{
@@ -473,7 +496,7 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                // "dirname/fnm".
                h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
        } else if stat.IsDir() {
-               h.serveDirectory(w, r, collection.Name, fs, openPath, stripParts)
+               h.serveDirectory(w, r, collection.Name, fs, openPath, true)
        } else {
                http.ServeContent(w, r, basename, stat.ModTime(), f)
                if r.Header.Get("Range") == "" && int64(w.WroteBodyBytes()) != stat.Size() {
@@ -489,10 +512,78 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        }
 }
 
+func (h *handler) serveSiteFS(w http.ResponseWriter, r *http.Request, tokens []string, credentialsOK, attachment bool) {
+       if len(tokens) == 0 {
+               w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
+               http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+               return
+       }
+       if writeMethod[r.Method] {
+               http.Error(w, errReadOnly.Error(), http.StatusMethodNotAllowed)
+               return
+       }
+       arv := h.clientPool.Get()
+       if arv == nil {
+               http.Error(w, "Pool failed: "+h.clientPool.Err().Error(), http.StatusInternalServerError)
+               return
+       }
+       defer h.clientPool.Put(arv)
+       arv.ApiToken = tokens[0]
+
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       kc.RequestID = r.Header.Get("X-Request-Id")
+       client := (&arvados.Client{
+               APIHost:   arv.ApiServer,
+               AuthToken: arv.ApiToken,
+               Insecure:  arv.ApiInsecure,
+       }).WithRequestID(r.Header.Get("X-Request-Id"))
+       fs := client.SiteFileSystem(kc)
+       f, err := fs.Open(r.URL.Path)
+       if os.IsNotExist(err) {
+               http.Error(w, err.Error(), http.StatusNotFound)
+               return
+       } else if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       defer f.Close()
+       if fi, err := f.Stat(); err == nil && fi.IsDir() && r.Method == "GET" {
+               if !strings.HasSuffix(r.URL.Path, "/") {
+                       h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
+               } else {
+                       h.serveDirectory(w, r, fi.Name(), fs, r.URL.Path, false)
+               }
+               return
+       }
+       if r.Method == "GET" {
+               _, basename := filepath.Split(r.URL.Path)
+               applyContentDispositionHdr(w, r, basename, attachment)
+       }
+       wh := webdav.Handler{
+               Prefix: "/",
+               FileSystem: &webdavFS{
+                       collfs:        fs,
+                       writing:       writeMethod[r.Method],
+                       alwaysReadEOF: r.Method == "PROPFIND",
+               },
+               LockSystem: h.webdavLS,
+               Logger: func(_ *http.Request, err error) {
+                       if err != nil {
+                               log.Printf("error from webdav handler: %q", err)
+                       }
+               },
+       }
+       wh.ServeHTTP(w, r)
+}
+
 var dirListingTemplate = `<!DOCTYPE HTML>
 <HTML><HEAD>
   <META name="robots" content="NOINDEX">
-  <TITLE>{{ .Collection.Name }}</TITLE>
+  <TITLE>{{ .CollectionName }}</TITLE>
   <STYLE type="text/css">
     body {
       margin: 1.5em;
@@ -516,19 +607,26 @@ var dirListingTemplate = `<!DOCTYPE HTML>
   </STYLE>
 </HEAD>
 <BODY>
+
 <H1>{{ .CollectionName }}</H1>
 
 <P>This collection of data files is being shared with you through
 Arvados.  You can download individual files listed below.  To download
-the entire collection with wget, try:</P>
+the entire directory tree with wget, try:</P>
 
-<PRE>$ wget --mirror --no-parent --no-host --cut-dirs={{ .StripParts }} https://{{ .Request.Host }}{{ .Request.URL }}</PRE>
+<PRE>$ wget --mirror --no-parent --no-host --cut-dirs={{ .StripParts }} https://{{ .Request.Host }}{{ .Request.URL.Path }}</PRE>
 
 <H2>File Listing</H2>
 
 {{if .Files}}
 <UL>
-{{range .Files}}  <LI>{{.Size | printf "%15d  " | nbsp}}<A href="{{.Name}}">{{.Name}}</A></LI>{{end}}
+{{range .Files}}
+{{if .IsDir }}
+  <LI>{{" " | printf "%15s  " | nbsp}}<A href="{{print "./" .Name}}/">{{.Name}}/</A></LI>
+{{else}}
+  <LI>{{.Size | printf "%15d  " | nbsp}}<A href="{{print "./" .Name}}">{{.Name}}</A></LI>
+{{end}}
+{{end}}
 </UL>
 {{else}}
 <P>(No files; this collection is empty.)</P>
@@ -548,11 +646,12 @@ the entire collection with wget, try:</P>
 `
 
 type fileListEnt struct {
-       Name string
-       Size int64
+       Name  string
+       Size  int64
+       IsDir bool
 }
 
-func (h *handler) serveDirectory(w http.ResponseWriter, r *http.Request, collectionName string, fs http.FileSystem, base string, stripParts int) {
+func (h *handler) serveDirectory(w http.ResponseWriter, r *http.Request, collectionName string, fs http.FileSystem, base string, recurse bool) {
        var files []fileListEnt
        var walk func(string) error
        if !strings.HasSuffix(base, "/") {
@@ -572,15 +671,16 @@ func (h *handler) serveDirectory(w http.ResponseWriter, r *http.Request, collect
                        return err
                }
                for _, ent := range ents {
-                       if ent.IsDir() {
+                       if recurse && ent.IsDir() {
                                err = walk(path + ent.Name() + "/")
                                if err != nil {
                                        return err
                                }
                        } else {
                                files = append(files, fileListEnt{
-                                       Name: path + ent.Name(),
-                                       Size: ent.Size(),
+                                       Name:  path + ent.Name(),
+                                       Size:  ent.Size(),
+                                       IsDir: ent.IsDir(),
                                })
                        }
                }
@@ -609,7 +709,7 @@ func (h *handler) serveDirectory(w http.ResponseWriter, r *http.Request, collect
                "CollectionName": collectionName,
                "Files":          files,
                "Request":        r,
-               "StripParts":     stripParts,
+               "StripParts":     strings.Count(strings.TrimRight(r.URL.Path, "/"), "/"),
        })
 }
 
@@ -676,6 +776,7 @@ func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, loc
                u = newu
        }
        redir := (&url.URL{
+               Scheme:   r.URL.Scheme,
                Host:     r.Host,
                Path:     u.Path,
                RawQuery: redirQuery.Encode(),
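
Note: the handler now stamps the caller's X-Request-Id onto both clients it builds, so the API call and the Keep traffic for one inbound request share a single ID in the logs. A minimal sketch of that wiring, using only SDK calls visible in this diff (clientForRequest itself is a hypothetical helper, not part of the commit):

package main

import (
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)

// clientForRequest copies the inbound request ID onto both the API
// client and the Keep client, mirroring what ServeHTTP does inline.
func clientForRequest(arv *arvadosclient.ArvadosClient, kc *keepclient.KeepClient, r *http.Request) *arvados.Client {
	reqID := r.Header.Get("X-Request-Id")
	kc.RequestID = reqID // Keep requests carry the ID
	return (&arvados.Client{
		APIHost:   arv.ApiServer,
		AuthToken: arv.ApiToken,
		Insecure:  arv.ApiInsecure,
	}).WithRequestID(reqID) // API requests carry the same ID
}
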
index 21e47c8dc7c3e0a64f8d320e24b5cd9041fe7117..206bf6f4381fd98d4e7c4244e787c040de558aad 100644 (file)
@@ -12,10 +12,12 @@ import (
        "net/http"
        "net/http/httptest"
        "net/url"
+       "os"
        "path/filepath"
        "regexp"
        "strings"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/auth"
        check "gopkg.in/check.v1"
@@ -333,7 +335,20 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenRequestAttachment(c *check
                http.StatusOK,
                "foo",
        )
-       c.Check(strings.Split(resp.Header().Get("Content-Disposition"), ";")[0], check.Equals, "attachment")
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenSiteFS(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+       resp := s.testVhostRedirectTokenToCookie(c, "GET",
+               "download.example.com/by_id/"+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
 }
 
 func (s *IntegrationSuite) TestVhostRedirectQueryTokenTrustAllContent(c *check.C) {
@@ -417,6 +432,38 @@ func (s *IntegrationSuite) TestAnonymousTokenError(c *check.C) {
        )
 }
 
+func (s *IntegrationSuite) TestSpecialCharsInPath(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+
+       client := s.testServer.Config.Client
+       client.AuthToken = arvadostest.ActiveToken
+       fs, err := (&arvados.Collection{}).FileSystem(&client, nil)
+       c.Assert(err, check.IsNil)
+       f, err := fs.OpenFile("https:\\\"odd' path chars", os.O_CREATE, 0777)
+       c.Assert(err, check.IsNil)
+       f.Close()
+       mtxt, err := fs.MarshalManifest(".")
+       c.Assert(err, check.IsNil)
+       coll := arvados.Collection{ManifestText: mtxt}
+       err = client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", client.UpdateBody(coll), nil)
+       c.Assert(err, check.IsNil)
+
+       u, _ := url.Parse("http://download.example.com/c=" + coll.UUID + "/")
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header: http.Header{
+                       "Authorization": {"Bearer " + client.AuthToken},
+               },
+       }
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*href="./https:%5c%22odd%27%20path%20chars"\S+https:\\&#34;odd&#39; path chars.*`)
+}
+
 // XHRs can't follow redirect-with-cookie so they rely on method=POST
 // and disposition=attachment (telling us it's acceptable to respond
 // with content instead of a redirect) and an Origin header that gets
@@ -466,7 +513,7 @@ func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, ho
        if resp.Code != http.StatusSeeOther {
                return resp
        }
-       c.Check(resp.Body.String(), check.Matches, `.*href="//`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
+       c.Check(resp.Body.String(), check.Matches, `.*href="http://`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
        cookies := (&http.Response{Header: resp.Header()}).Cookies()
 
        u, _ = u.Parse(resp.Header().Get("Location"))
@@ -493,10 +540,11 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                "Authorization": {"OAuth2 " + arvadostest.ActiveToken},
        }
        for _, trial := range []struct {
-               uri     string
-               header  http.Header
-               expect  []string
-               cutDirs int
+               uri      string
+               header   http.Header
+               expect   []string
+               redirect string
+               cutDirs  int
        }{
                {
                        uri:     strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + ".example.com/",
@@ -508,7 +556,7 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        uri:     strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + ".example.com/dir1/",
                        header:  authHeader,
                        expect:  []string{"foo", "bar"},
-                       cutDirs: 0,
+                       cutDirs: 1,
                },
                {
                        uri:     "download.example.com/collections/" + arvadostest.FooAndBarFilesInDirUUID + "/",
@@ -516,6 +564,50 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        expect:  []string{"dir1/foo", "dir1/bar"},
                        cutDirs: 2,
                },
+               {
+                       uri:     "download.example.com/users/active/foo_file_in_dir/",
+                       header:  authHeader,
+                       expect:  []string{"dir1/"},
+                       cutDirs: 3,
+               },
+               {
+                       uri:     "download.example.com/users/active/foo_file_in_dir/dir1/",
+                       header:  authHeader,
+                       expect:  []string{"bar"},
+                       cutDirs: 4,
+               },
+               {
+                       uri:     "download.example.com/",
+                       header:  authHeader,
+                       expect:  []string{"users/"},
+                       cutDirs: 0,
+               },
+               {
+                       uri:      "download.example.com/users",
+                       header:   authHeader,
+                       redirect: "/users/",
+                       expect:   []string{"active/"},
+                       cutDirs:  1,
+               },
+               {
+                       uri:     "download.example.com/users/",
+                       header:  authHeader,
+                       expect:  []string{"active/"},
+                       cutDirs: 1,
+               },
+               {
+                       uri:      "download.example.com/users/active",
+                       header:   authHeader,
+                       redirect: "/users/active/",
+                       expect:   []string{"foo_file_in_dir/"},
+                       cutDirs:  2,
+               },
+               {
+                       uri:     "download.example.com/users/active/",
+                       header:  authHeader,
+                       expect:  []string{"foo_file_in_dir/"},
+                       cutDirs: 2,
+               },
                {
                        uri:     "collections.example.com/collections/download/" + arvadostest.FooAndBarFilesInDirUUID + "/" + arvadostest.ActiveToken + "/",
                        header:  nil,
@@ -541,22 +633,24 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        cutDirs: 1,
                },
                {
-                       uri:     "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1/",
-                       header:  authHeader,
-                       expect:  []string{"foo", "bar"},
-                       cutDirs: 1,
+                       uri:      "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1",
+                       header:   authHeader,
+                       redirect: "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1/",
+                       expect:   []string{"foo", "bar"},
+                       cutDirs:  2,
                },
                {
                        uri:     "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/_/dir1/",
                        header:  authHeader,
                        expect:  []string{"foo", "bar"},
-                       cutDirs: 2,
+                       cutDirs: 3,
                },
                {
-                       uri:     arvadostest.FooAndBarFilesInDirUUID + ".example.com/dir1?api_token=" + arvadostest.ActiveToken,
-                       header:  authHeader,
-                       expect:  []string{"foo", "bar"},
-                       cutDirs: 0,
+                       uri:      arvadostest.FooAndBarFilesInDirUUID + ".example.com/dir1?api_token=" + arvadostest.ActiveToken,
+                       header:   authHeader,
+                       redirect: "/dir1/",
+                       expect:   []string{"foo", "bar"},
+                       cutDirs:  1,
                },
                {
                        uri:    "collections.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/theperthcountyconspiracydoesnotexist/",
@@ -572,7 +666,7 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        Host:       u.Host,
                        URL:        u,
                        RequestURI: u.RequestURI(),
-                       Header:     trial.header,
+                       Header:     copyHeader(trial.header),
                }
                s.testServer.Handler.ServeHTTP(resp, req)
                var cookies []*http.Cookie
@@ -583,7 +677,7 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                                Host:       u.Host,
                                URL:        u,
                                RequestURI: u.RequestURI(),
-                               Header:     trial.header,
+                               Header:     copyHeader(trial.header),
                        }
                        cookies = append(cookies, (&http.Response{Header: resp.Header()}).Cookies()...)
                        for _, c := range cookies {
@@ -592,12 +686,15 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        resp = httptest.NewRecorder()
                        s.testServer.Handler.ServeHTTP(resp, req)
                }
+               if trial.redirect != "" {
+                       c.Check(req.URL.Path, check.Equals, trial.redirect)
+               }
                if trial.expect == nil {
                        c.Check(resp.Code, check.Equals, http.StatusNotFound)
                } else {
                        c.Check(resp.Code, check.Equals, http.StatusOK)
                        for _, e := range trial.expect {
-                               c.Check(resp.Body.String(), check.Matches, `(?ms).*href="`+e+`".*`)
+                               c.Check(resp.Body.String(), check.Matches, `(?ms).*href="./`+e+`".*`)
                        }
                        c.Check(resp.Body.String(), check.Matches, `(?ms).*--cut-dirs=`+fmt.Sprintf("%d", trial.cutDirs)+` .*`)
                }
@@ -608,7 +705,7 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        Host:       u.Host,
                        URL:        u,
                        RequestURI: u.RequestURI(),
-                       Header:     trial.header,
+                       Header:     copyHeader(trial.header),
                        Body:       ioutil.NopCloser(&bytes.Buffer{}),
                }
                resp = httptest.NewRecorder()
@@ -624,7 +721,7 @@ func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
                        Host:       u.Host,
                        URL:        u,
                        RequestURI: u.RequestURI(),
-                       Header:     trial.header,
+                       Header:     copyHeader(trial.header),
                        Body:       ioutil.NopCloser(&bytes.Buffer{}),
                }
                resp = httptest.NewRecorder()
@@ -660,3 +757,11 @@ func (s *IntegrationSuite) TestHealthCheckPing(c *check.C) {
        c.Check(resp.Code, check.Equals, http.StatusOK)
        c.Check(resp.Body.String(), check.Matches, `{"health":"OK"}\n`)
 }
+
+func copyHeader(h http.Header) http.Header {
+       hc := http.Header{}
+       for k, v := range h {
+               hc[k] = append([]string(nil), v...)
+       }
+       return hc
+}
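
Note: copyHeader is needed because http.Header is a map; if the same header map were shared between the original request and the post-redirect retry, mutations made while serving one request would leak into the next. A self-contained illustration of the aliasing problem and the deep copy that fixes it:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{"Authorization": {"Bearer tok"}}

	alias := h // both names refer to the same underlying map
	alias.Set("Cookie", "x=1")
	fmt.Println(h.Get("Cookie")) // "x=1" -- the original was mutated too

	// Deep-copying the value slices, as copyHeader does, breaks the link:
	hc := http.Header{}
	for k, v := range h {
		hc[k] = append([]string(nil), v...)
	}
	hc.Set("Cookie", "y=2")
	fmt.Println(h.Get("Cookie")) // still "x=1"
}
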
index 724af27c7e0e746b44218f5269d23b71228e6655..d09fce706c4a50033649b32df152afa30ab85dc6 100644 (file)
@@ -7,12 +7,12 @@ package main
 import (
        "flag"
        "fmt"
-       "log"
        "os"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/config"
+       log "github.com/Sirupsen/logrus"
        "github.com/coreos/go-systemd/daemon"
 )
 
@@ -65,6 +65,10 @@ func init() {
        if os.Getenv("ARVADOS_API_TOKEN") == "" {
                os.Setenv("ARVADOS_API_TOKEN", "xxx")
        }
+
+       log.SetFormatter(&log.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       })
 }
 
 func main() {
index 0edcf31708b6a0d1e9688536d8523a24827c0a29..e51376c3bc35cc10a92bf5a6f7c646a18bea3476 100644 (file)
@@ -14,7 +14,7 @@ type server struct {
 }
 
 func (srv *server) Start() error {
-       srv.Handler = &handler{Config: srv.Config}
+       srv.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(nil, &handler{Config: srv.Config}))
        srv.Addr = srv.Config.Listen
        return srv.Server.Start()
 }
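
Note: Start() now wraps the application handler in two middlewares, with AddRequestIDs outermost so a request already carries an ID by the time LogRequests records it. The same decorator pattern with a stand-in middleware (addRequestIDs below is illustrative, not the httpserver package's implementation):

package main

import (
	"fmt"
	"math/rand"
	"net/http"
)

// addRequestIDs fills in X-Request-Id when the client didn't supply
// one, then delegates to the wrapped handler.
func addRequestIDs(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Request-Id") == "" {
			r.Header.Set("X-Request-Id", fmt.Sprintf("req-%016x", rand.Int63()))
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, r.Header.Get("X-Request-Id"))
	})
	// The outermost wrapper runs first, so the ID exists before any
	// inner middleware (logging, in the real server) sees the request.
	http.ListenAndServe(":8080", addRequestIDs(app))
}
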
index 02f03d04afd2af68abe4e18f9d816696fbcffdf6..ee585ad5b212af1f12f2bad3f162f8c1c11f3a2f 100644 (file)
@@ -59,7 +59,6 @@ func (s *IntegrationSuite) TestNoToken(c *check.C) {
 func (s *IntegrationSuite) Test404(c *check.C) {
        for _, uri := range []string{
                // Routing errors (always 404 regardless of what's stored in Keep)
-               "/",
                "/foo",
                "/download",
                "/collections",
index 432c6af6d89847068cfbc154a413ade33c5585dc..5b23c9c5fa9f10bffec55d48e6950fd0ac76d639 100644 (file)
@@ -36,7 +36,7 @@ var (
 // existence automatically so sequences like "mkcol foo; put foo/bar"
 // work as expected.
 type webdavFS struct {
-       collfs  arvados.CollectionFileSystem
+       collfs  arvados.FileSystem
        writing bool
        // webdav PROPFIND reads the first few bytes of each file
        // whose filename extension isn't recognized, which is
@@ -47,6 +47,9 @@ type webdavFS struct {
 }
 
 func (fs *webdavFS) makeparents(name string) {
+       if !fs.writing {
+               return
+       }
        dir, _ := path.Split(name)
        if dir == "" || dir == "/" {
                return
@@ -66,7 +69,7 @@ func (fs *webdavFS) Mkdir(ctx context.Context, name string, perm os.FileMode) er
 }
 
 func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (f webdav.File, err error) {
-       writing := flag&(os.O_WRONLY|os.O_RDWR) != 0
+       writing := flag&(os.O_WRONLY|os.O_RDWR|os.O_TRUNC) != 0
        if writing {
                fs.makeparents(name)
        }
@@ -75,8 +78,13 @@ func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os
                // webdav module returns 404 on all OpenFile errors,
                // but returns 405 Method Not Allowed if OpenFile()
                // succeeds but Write() or Close() fails. We'd rather
-               // have 405.
-               f = writeFailer{File: f, err: errReadOnly}
+               // have 405. writeFailer ensures Close() fails if the
+               // file is opened for writing *or* Write() is called.
+               var err error
+               if writing {
+                       err = errReadOnly
+               }
+               f = writeFailer{File: f, err: err}
        }
        if fs.alwaysReadEOF {
                f = readEOF{File: f}
@@ -109,10 +117,15 @@ type writeFailer struct {
 }
 
 func (wf writeFailer) Write([]byte) (int, error) {
+       wf.err = errReadOnly
        return 0, wf.err
 }
 
 func (wf writeFailer) Close() error {
+       err := wf.File.Close()
+       if err != nil {
+               wf.err = err
+       }
        return wf.err
 }
 
index 52db776a4319bf34846c23dfed0a8f8b63fe757b..473171e1f5c41c0dd371844e4432c46d588e29ab 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
 package main
 
 import "golang.org/x/net/webdav"
index 0b17d977564a71c3feaedeb436024bce5c34ac07..b6c8bd66aa40f026051ba8b4885ce562fb580721 100644 (file)
@@ -182,7 +182,7 @@ func main() {
 
        // Start serving requests.
        router = MakeRESTRouter(!cfg.DisableGet, !cfg.DisablePut, kc, time.Duration(cfg.Timeout), cfg.ManagementToken)
-       http.Serve(listener, httpserver.AddRequestIDs(httpserver.LogRequests(router)))
+       http.Serve(listener, httpserver.AddRequestIDs(httpserver.LogRequests(nil, router)))
 
        log.Println("shutting down")
 }
@@ -257,6 +257,7 @@ func CheckAuthorizationHeader(kc *keepclient.KeepClient, cache *ApiTokenCache, r
        var err error
        arv := *kc.Arvados
        arv.ApiToken = tok
+       arv.RequestID = req.Header.Get("X-Request-Id")
        if op == "read" {
                err = arv.Call("HEAD", "keep_services", "", "accessible", nil, nil)
        } else {
@@ -273,6 +274,14 @@ func CheckAuthorizationHeader(kc *keepclient.KeepClient, cache *ApiTokenCache, r
        return true, tok
 }
 
+// We need to make a private copy of the default http transport early
+// in initialization, then make copies of our private copy later. It
+// won't be safe to copy http.DefaultTransport itself later, because
+// its private mutexes might have already been used. (Without this,
+// the test suite sometimes panics "concurrent map writes" in
+// net/http.(*Transport).removeIdleConnLocked().)
+var defaultTransport = *(http.DefaultTransport.(*http.Transport))
+
 type proxyHandler struct {
        http.Handler
        *keepclient.KeepClient
@@ -286,7 +295,7 @@ type proxyHandler struct {
 func MakeRESTRouter(enable_get bool, enable_put bool, kc *keepclient.KeepClient, timeout time.Duration, mgmtToken string) http.Handler {
        rest := mux.NewRouter()
 
-       transport := *(http.DefaultTransport.(*http.Transport))
+       transport := defaultTransport
        transport.DialContext = (&net.Dialer{
                Timeout:   keepclient.DefaultConnectTimeout,
                KeepAlive: keepclient.DefaultKeepAlive,
@@ -478,6 +487,15 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
 
        locatorIn := mux.Vars(req)["locator"]
 
+       // Check if the client specified storage classes
+       if req.Header.Get("X-Keep-Storage-Classes") != "" {
+               var scl []string
+               for _, sc := range strings.Split(req.Header.Get("X-Keep-Storage-Classes"), ",") {
+                       scl = append(scl, strings.Trim(sc, " "))
+               }
+               kc.StorageClasses = scl
+       }
+
        _, err = fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &expectLength)
        if err != nil || expectLength < 0 {
                err = LengthRequiredError
@@ -621,13 +639,13 @@ func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
 
 func (h *proxyHandler) makeKeepClient(req *http.Request) *keepclient.KeepClient {
        kc := *h.KeepClient
+       kc.RequestID = req.Header.Get("X-Request-Id")
        kc.HTTPClient = &proxyClient{
                client: &http.Client{
                        Timeout:   h.timeout,
                        Transport: h.transport,
                },
-               proto:     req.Proto,
-               requestID: req.Header.Get("X-Request-Id"),
+               proto: req.Proto,
        }
        return &kc
 }
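
Note: the defaultTransport comment above encodes an ordering constraint: http.DefaultTransport must be copied by value once, at package init, before any request has touched its internal mutexes; per-handler transports are then copied from that private snapshot. Reduced to a sketch:

package main

import "net/http"

// Snapshot taken at init time, while copying is still safe.
var defaultTransport = *(http.DefaultTransport.(*http.Transport))

// newTransport gives each handler its own copy of the snapshot, so
// per-handler tweaks (dialer, timeouts) never share mutable state
// with the live http.DefaultTransport.
func newTransport() *http.Transport {
	t := defaultTransport
	return &t
}
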
index 65e22e3b3ed3d761530089a33d2cd712dc9550b4..e87fa4afd0db660c16af8a7ec78e68027620c531 100644 (file)
@@ -162,6 +162,33 @@ func (s *ServerRequiredSuite) TestLoopDetection(c *C) {
        c.Check(err, ErrorMatches, `.*loop detected.*`)
 }
 
+func (s *ServerRequiredSuite) TestStorageClassesHeader(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       // Set up fake keepstore to record request headers
+       var hdr http.Header
+       ts := httptest.NewServer(http.HandlerFunc(
+               func(w http.ResponseWriter, r *http.Request) {
+                       hdr = r.Header
+                       http.Error(w, "Error", http.StatusInternalServerError)
+               }))
+       defer ts.Close()
+
+       // Point keepproxy router's keepclient to the fake keepstore
+       sr := map[string]string{
+               TestProxyUUID: ts.URL,
+       }
+       router.(*proxyHandler).KeepClient.SetServiceRoots(sr, sr, sr)
+
+       // Set up client to ask for storage classes to keepproxy
+       kc.StorageClasses = []string{"secure"}
+       content := []byte("Very important data")
+       _, _, err := kc.PutB(content)
+       c.Check(err, NotNil)
+       c.Check(hdr.Get("X-Keep-Storage-Classes"), Equals, "secure")
+}
+
 func (s *ServerRequiredSuite) TestDesiredReplicas(c *C) {
        kc := runProxy(c, nil, false)
        defer closeListener()
@@ -587,30 +614,29 @@ func (s *ServerRequiredSuite) TestPutAskGetInvalidToken(c *C) {
 }
 
 func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
-       arv, err := arvadosclient.MakeArvadosClient()
-       c.Assert(err, Equals, nil)
+       kc := runProxy(c, nil, false)
+       defer closeListener()
 
-       // keepclient with no such keep server
-       kc := keepclient.New(arv)
+       // Point keepproxy to a non-existent keepstore
        locals := map[string]string{
                TestProxyUUID: "http://localhost:12345",
        }
-       kc.SetServiceRoots(locals, nil, nil)
+       router.(*proxyHandler).KeepClient.SetServiceRoots(locals, nil, nil)
 
-       // Ask should result in temporary connection refused error
+       // Ask should result in temporary bad gateway error
        hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
-       _, _, err = kc.Ask(hash)
+       _, _, err := kc.Ask(hash)
        c.Check(err, NotNil)
        errNotFound, _ := err.(*keepclient.ErrNotFound)
        c.Check(errNotFound.Temporary(), Equals, true)
-       c.Assert(err, ErrorMatches, ".*connection refused.*")
+       c.Assert(err, ErrorMatches, ".*HTTP 502.*")
 
-       // Get should result in temporary connection refused error
+       // Get should result in temporary bad gateway error
        _, _, _, err = kc.Get(hash)
        c.Check(err, NotNil)
        errNotFound, _ = err.(*keepclient.ErrNotFound)
        c.Check(errNotFound.Temporary(), Equals, true)
-       c.Assert(err, ErrorMatches, ".*connection refused.*")
+       c.Assert(err, ErrorMatches, ".*HTTP 502.*")
 }
 
 func (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {
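
Note: on the wire, the storage-class request is a single header. A hedged sketch of the PUT a client effectively issues when keepclient.StorageClasses is set (host, token, and class names are placeholders; per the parsing code above, spaces around each class are trimmed server-side):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// "foo" hashes to acbd18db4cc2f85cedef654fccc4a4d8, length 3.
	req, err := http.NewRequest("PUT",
		"https://keepproxy.example/acbd18db4cc2f85cedef654fccc4a4d8+3",
		bytes.NewReader([]byte("foo")))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "OAuth2 your-token-here")
	req.Header.Set("X-Keep-Storage-Classes", "secure, archival")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
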
index 3fa2671df58331bb52c16651ded3924e9390ee90..0faf4aea0e3c35354e30dc33f1e7005d491ab4d5 100644 (file)
@@ -13,13 +13,11 @@ import (
 var viaAlias = "keepproxy"
 
 type proxyClient struct {
-       client    keepclient.HTTPClient
-       proto     string
-       requestID string
+       client keepclient.HTTPClient
+       proto  string
 }
 
 func (pc *proxyClient) Do(req *http.Request) (*http.Response, error) {
        req.Header.Add("Via", pc.proto+" "+viaAlias)
-       req.Header.Add("X-Request-Id", pc.requestID)
        return pc.client.Do(req)
 }
index 828a1f1b7a485b4353f32ace30de5c8cf9a40192..5da2055b7736d117f6a7015a8486a948ee80a4d7 100644 (file)
@@ -18,6 +18,7 @@ import (
        "strconv"
        "strings"
        "sync"
+       "sync/atomic"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -620,49 +621,67 @@ func (v *AzureBlobVolume) isKeepBlock(s string) bool {
 // and deletes them from the volume.
 func (v *AzureBlobVolume) EmptyTrash() {
        var bytesDeleted, bytesInTrash int64
-       var blocksDeleted, blocksInTrash int
-       params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
+       var blocksDeleted, blocksInTrash int64
 
-       for {
-               resp, err := v.container.ListBlobs(params)
+       doBlob := func(b storage.Blob) {
+               // Check whether the block is flagged as trash
+               if b.Metadata["expires_at"] == "" {
+                       return
+               }
+
+               atomic.AddInt64(&blocksInTrash, 1)
+               atomic.AddInt64(&bytesInTrash, b.Properties.ContentLength)
+
+               expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
                if err != nil {
-                       log.Printf("EmptyTrash: ListBlobs: %v", err)
-                       break
+                       log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
+                       return
                }
-               for _, b := range resp.Blobs {
-                       // Check if the block is expired
-                       if b.Metadata["expires_at"] == "" {
-                               continue
-                       }
 
-                       blocksInTrash++
-                       bytesInTrash += b.Properties.ContentLength
+               if expiresAt > time.Now().Unix() {
+                       return
+               }
 
-                       expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
-                       if err != nil {
-                               log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
-                               continue
-                       }
+               err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
+                       IfMatch: b.Properties.Etag,
+               })
+               if err != nil {
+                       log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
+                       return
+               }
+               atomic.AddInt64(&blocksDeleted, 1)
+               atomic.AddInt64(&bytesDeleted, b.Properties.ContentLength)
+       }
 
-                       if expiresAt > time.Now().Unix() {
-                               continue
+       var wg sync.WaitGroup
+       todo := make(chan storage.Blob, theConfig.EmptyTrashWorkers)
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for b := range todo {
+                               doBlob(b)
                        }
+               }()
+       }
 
-                       err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
-                               IfMatch: b.Properties.Etag,
-                       })
-                       if err != nil {
-                               log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
-                               continue
-                       }
-                       blocksDeleted++
-                       bytesDeleted += b.Properties.ContentLength
+       params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
+       for {
+               resp, err := v.container.ListBlobs(params)
+               if err != nil {
+                       log.Printf("EmptyTrash: ListBlobs: %v", err)
+                       break
+               }
+               for _, b := range resp.Blobs {
+                       todo <- b
                }
                if resp.NextMarker == "" {
                        break
                }
                params.Marker = resp.NextMarker
        }
+       close(todo)
+       wg.Wait()
 
        log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
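
Note: the rewritten EmptyTrash is a bounded worker pool: the blob lister feeds a channel while EmptyTrashWorkers goroutines drain it, and sync/atomic counters replace plain ints that would otherwise race between workers. The skeleton of the pattern, with a stand-in workload:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var processed int64
	workers := 4 // stand-in for theConfig.EmptyTrashWorkers

	var wg sync.WaitGroup
	todo := make(chan int, workers)
	// "i < 1 || i < workers" guarantees at least one worker even
	// when the configured value is zero.
	for i := 0; i < 1 || i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range todo {
				atomic.AddInt64(&processed, int64(item)) // no mutex needed
			}
		}()
	}

	for i := 0; i < 100; i++ { // producer: the blob/key/file lister
		todo <- i
	}
	close(todo) // lets the workers' range loops terminate
	wg.Wait()   // all workers done; the counter is now stable
	fmt.Println(processed)
}
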
index 60a7911768f009ef6209292d6c1e04b6cccbe6e7..1cb6dc380d0a24a002072b1a7465ef640882dd6c 100644 (file)
@@ -536,9 +536,13 @@ func TestAzureBlobVolumeCreateBlobRace(t *testing.T) {
        azureWriteRaceInterval = time.Second
        azureWriteRacePollTime = time.Millisecond
 
-       allDone := make(chan struct{})
+       var wg sync.WaitGroup
+
        v.azHandler.race = make(chan chan struct{})
+
+       wg.Add(1)
        go func() {
+               defer wg.Done()
                err := v.Put(context.Background(), TestHash, TestBlock)
                if err != nil {
                        t.Error(err)
@@ -547,21 +551,22 @@ func TestAzureBlobVolumeCreateBlobRace(t *testing.T) {
        continuePut := make(chan struct{})
        // Wait for the stub's Put to create the empty blob
        v.azHandler.race <- continuePut
+       wg.Add(1)
        go func() {
+               defer wg.Done()
                buf := make([]byte, len(TestBlock))
                _, err := v.Get(context.Background(), TestHash, buf)
                if err != nil {
                        t.Error(err)
                }
-               close(allDone)
        }()
        // Wait for the stub's Get to get the empty blob
        close(v.azHandler.race)
        // Allow stub's Put to continue, so the real data is ready
        // when the volume's Get retries
        <-continuePut
-       // Wait for volume's Get to return the real data
-       <-allDone
+       // Wait for Get() and Put() to finish
+       wg.Wait()
 }
 
 func TestAzureBlobVolumeCreateBlobRaceDeadline(t *testing.T) {
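
Note: the old allDone channel only signaled the Get goroutine, so the test could return while Put was still running; a single WaitGroup now covers both. The pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"put", "get"} {
		name := name // capture the loop variable for the goroutine
		wg.Add(1)    // count before starting, never inside the goroutine
		go func() {
			defer wg.Done() // decrements even if the body exits early
			fmt.Println(name, "finished")
		}()
	}
	wg.Wait() // returns only after *both* goroutines are done
}
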
index 17d6acdb68cca7463b9a7e9e49b9e1d9f3510229..3db20e29ce64eaca4d8c5ddd58389595d2f9ce16 100644 (file)
@@ -40,6 +40,11 @@ type Config struct {
        EnableDelete        bool
        TrashLifetime       arvados.Duration
        TrashCheckInterval  arvados.Duration
+       PullWorkers         int
+       TrashWorkers        int
+       EmptyTrashWorkers   int
+       TLSCertificateFile  string
+       TLSKeyFile          string
 
        Volumes VolumeList
 
@@ -47,7 +52,8 @@ type Config struct {
        systemAuthToken string
        debugLogf       func(string, ...interface{})
 
-       ManagementToken string
+       ManagementToken string `doc: The secret key that must be provided by monitoring services
+wishing to access the health check endpoint (/_health).`
 
        metrics
 }
index 8b37b906eb7ee11f79813c9749c6de2d3af48623..fb327a386b0f33fdae30f1e0d3e4f880c8d0bfa1 100644 (file)
@@ -92,7 +92,7 @@ func MakeRESTRouter() http.Handler {
 
        mux := http.NewServeMux()
        mux.Handle("/", theConfig.metrics.Instrument(
-               httpserver.AddRequestIDs(httpserver.LogRequests(rtr.limiter))))
+               httpserver.AddRequestIDs(httpserver.LogRequests(nil, rtr.limiter))))
        mux.HandleFunc("/metrics.json", theConfig.metrics.exportJSON)
        mux.Handle("/metrics", theConfig.metrics.exportProm)
 
@@ -547,7 +547,7 @@ func PullHandler(resp http.ResponseWriter, req *http.Request) {
        pullq.ReplaceQueue(plist)
 }
 
-// TrashRequest consists of a block locator and it's Mtime
+// TrashRequest consists of a block locator and its Mtime
 type TrashRequest struct {
        Locator    string `json:"locator"`
        BlockMtime int64  `json:"block_mtime"`
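
Note: given those json tags, a trash list body is a JSON array of locator/mtime pairs. A small decoding sketch (the locator and mtime values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// TrashRequest mirrors the struct in the diff above.
type TrashRequest struct {
	Locator    string `json:"locator"`
	BlockMtime int64  `json:"block_mtime"`
}

func main() {
	body := []byte(`[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":1531837589}]`)
	var trash []TrashRequest
	if err := json.Unmarshal(body, &trash); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", trash[0])
}
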
index 03eef7e76b0b897ed2cb70b95f22989b76436123..79e3017d55a8f7e108ee8d6b2d7effc3950a7260 100644 (file)
@@ -8,7 +8,6 @@ import (
        "flag"
        "fmt"
        "net"
-       "net/http"
        "os"
        "os/signal"
        "syscall"
@@ -165,19 +164,23 @@ func main() {
                log.Fatal(err)
        }
 
-       // Initialize Pull queue and worker
+       // Initialize keepclient for pull workers
        keepClient := &keepclient.KeepClient{
                Arvados:       &arvadosclient.ArvadosClient{},
                Want_replicas: 1,
        }
 
-       // Initialize the pullq and worker
+       // Initialize the pullq and workers
        pullq = NewWorkQueue()
-       go RunPullWorker(pullq, keepClient)
+       for i := 0; i < 1 || i < theConfig.PullWorkers; i++ {
+               go RunPullWorker(pullq, keepClient)
+       }
 
-       // Initialize the trashq and worker
+       // Initialize the trashq and workers
        trashq = NewWorkQueue()
-       go RunTrashWorker(trashq)
+       for i := 0; i < 1 || i < theConfig.TrashWorkers; i++ {
+               go RunTrashWorker(trashq)
+       }
 
        // Start emptyTrash goroutine
        doneEmptyingTrash := make(chan bool)
@@ -199,7 +202,8 @@ func main() {
                log.Printf("Error notifying init daemon: %v", err)
        }
        log.Println("listening at", listener.Addr())
-       srv := &http.Server{Handler: router}
+       srv := &server{}
+       srv.Handler = router
        srv.Serve(listener)
 }
 
index a60b2fc27e321f553c9784691702282ecb39a6e4..bdab58927bdc243605b8cf1d7e95b34d2f610272 100644 (file)
@@ -18,6 +18,7 @@ import (
        "regexp"
        "strings"
        "sync"
+       "sync/atomic"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -428,7 +429,7 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
        case <-ctx.Done():
                theConfig.debugLogf("%s: taking PutReader's input away: %s", v, ctx.Err())
                // Our pipe might be stuck in Write(), waiting for
-               // io.Copy() to read. If so, un-stick it. This means
+               // PutReader() to read. If so, un-stick it. This means
                // PutReader will get corrupt data, but that's OK: the
                // size and MD5 won't match, so the write will fail.
                go io.Copy(ioutil.Discard, bufr)
@@ -437,6 +438,8 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
                theConfig.debugLogf("%s: abandoning PutReader goroutine", v)
                return ctx.Err()
        case <-ready:
+               // Unblock pipe in case PutReader did not consume it.
+               io.Copy(ioutil.Discard, bufr)
                return v.translateError(err)
        }
 }
@@ -764,26 +767,21 @@ func (v *S3Volume) translateError(err error) error {
 func (v *S3Volume) EmptyTrash() {
        var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
 
-       // Use a merge sort to find matching sets of trash/X and recent/X.
-       trashL := s3Lister{
-               Bucket:   v.bucket.Bucket,
-               Prefix:   "trash/",
-               PageSize: v.IndexPageSize,
-       }
        // Define "ready to delete" as "...when EmptyTrash started".
        startT := time.Now()
-       for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+
+       emptyOneKey := func(trash *s3.Key) {
                loc := trash.Key[6:]
                if !v.isKeepBlock(loc) {
-                       continue
+                       return
                }
-               bytesInTrash += trash.Size
-               blocksInTrash++
+               atomic.AddInt64(&bytesInTrash, trash.Size)
+               atomic.AddInt64(&blocksInTrash, 1)
 
                trashT, err := time.Parse(time.RFC3339, trash.LastModified)
                if err != nil {
                        log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
-                       continue
+                       return
                }
                recent, err := v.bucket.Head("recent/"+loc, nil)
                if err != nil && os.IsNotExist(v.translateError(err)) {
@@ -792,15 +790,15 @@ func (v *S3Volume) EmptyTrash() {
                        if err != nil {
                                log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
                        }
-                       continue
+                       return
                } else if err != nil {
                        log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
-                       continue
+                       return
                }
                recentT, err := v.lastModified(recent)
                if err != nil {
                        log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
-                       continue
+                       return
                }
                if trashT.Sub(recentT) < theConfig.BlobSignatureTTL.Duration() {
                        if age := startT.Sub(recentT); age >= theConfig.BlobSignatureTTL.Duration()-time.Duration(v.RaceWindow) {
@@ -815,39 +813,67 @@ func (v *S3Volume) EmptyTrash() {
                                log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
                                v.fixRace(loc)
                                v.Touch(loc)
-                               continue
+                               return
                        }
                        _, err := v.bucket.Head(loc, nil)
                        if os.IsNotExist(err) {
                                log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
                                v.fixRace(loc)
-                               continue
+                               return
                        } else if err != nil {
                                log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
-                               continue
+                               return
                        }
                }
                if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
-                       continue
+                       return
                }
                err = v.bucket.Del(trash.Key)
                if err != nil {
                        log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
-                       continue
+                       return
                }
-               bytesDeleted += trash.Size
-               blocksDeleted++
+               atomic.AddInt64(&bytesDeleted, trash.Size)
+               atomic.AddInt64(&blocksDeleted, 1)
 
                _, err = v.bucket.Head(loc, nil)
-               if os.IsNotExist(err) {
-                       err = v.bucket.Del("recent/" + loc)
-                       if err != nil {
-                               log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
-                       }
-               } else if err != nil {
-                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
+               if err == nil {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q succeeded immediately after deleting %q", v, loc, loc)
+                       return
+               }
+               if !os.IsNotExist(v.translateError(err)) {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+                       return
+               }
+               err = v.bucket.Del("recent/" + loc)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
                }
        }
+
+       var wg sync.WaitGroup
+       todo := make(chan *s3.Key, theConfig.EmptyTrashWorkers)
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for key := range todo {
+                               emptyOneKey(key)
+                       }
+               }()
+       }
+
+       trashL := s3Lister{
+               Bucket:   v.bucket.Bucket,
+               Prefix:   "trash/",
+               PageSize: v.IndexPageSize,
+       }
+       for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+               todo <- trash
+       }
+       close(todo)
+       wg.Wait()
+
        if err := trashL.Error(); err != nil {
                log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
        }
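
Note: extracting the old loop body into emptyOneKey is what makes the pool possible: every continue becomes a return, after which the closure can run unchanged on any worker goroutine. In miniature:

package main

import "fmt"

func main() {
	items := []int{1, -2, 3}

	// Before: body inline; skipping an item means "continue".
	for _, n := range items {
		if n < 0 {
			continue
		}
		fmt.Println("serial:", n)
	}

	// After: body extracted; "continue" becomes "return", and the
	// closure can be handed to worker goroutines unchanged.
	doOne := func(n int) {
		if n < 0 {
			return
		}
		fmt.Println("pooled:", n)
	}
	for _, n := range items {
		doOne(n)
	}
}
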
diff --git a/services/keepstore/server.go b/services/keepstore/server.go
new file mode 100644 (file)
index 0000000..3f67277
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/tls"
+       "net"
+       "net/http"
+       "os"
+       "os/signal"
+       "syscall"
+)
+
+type server struct {
+       http.Server
+
+       // channel (size=1) with the current keypair
+       currentCert chan *tls.Certificate
+}
+
+func (srv *server) Serve(l net.Listener) error {
+       if theConfig.TLSCertificateFile == "" && theConfig.TLSKeyFile == "" {
+               return srv.Server.Serve(l)
+       }
+       // https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/
+       srv.TLSConfig = &tls.Config{
+               GetCertificate:           srv.getCertificate,
+               PreferServerCipherSuites: true,
+               CurvePreferences: []tls.CurveID{
+                       tls.CurveP256,
+                       tls.X25519,
+               },
+               MinVersion: tls.VersionTLS12,
+               CipherSuites: []uint16{
+                       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+                       tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+                       tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+                       tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+                       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+                       tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+               },
+       }
+       srv.currentCert = make(chan *tls.Certificate, 1)
+       go srv.refreshCertificate(theConfig.TLSCertificateFile, theConfig.TLSKeyFile)
+       return srv.Server.ServeTLS(l, "", "")
+}
+
+func (srv *server) refreshCertificate(certfile, keyfile string) {
+       cert, err := tls.LoadX509KeyPair(certfile, keyfile)
+       if err != nil {
+               log.WithError(err).Fatal("error loading X509 key pair")
+       }
+       srv.currentCert <- &cert
+
+       reload := make(chan os.Signal, 1)
+       signal.Notify(reload, syscall.SIGHUP)
+       for range reload {
+               cert, err := tls.LoadX509KeyPair(certfile, keyfile)
+               if err != nil {
+                       log.WithError(err).Warn("error loading X509 key pair")
+                       continue
+               }
+               // Throw away old cert and start using new one
+               <-srv.currentCert
+               srv.currentCert <- &cert
+       }
+}
+
+func (srv *server) getCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+       if srv.currentCert == nil {
+               panic("srv.currentCert not initialized")
+       }
+       cert := <-srv.currentCert
+       srv.currentCert <- cert
+       return cert, nil
+}
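
Note: currentCert is a one-slot channel used as an atomic box: getCertificate receives the certificate and immediately sends it back, while the SIGHUP loop swaps the contents by draining and then refilling, so readers never observe an empty box. The pattern by itself:

package main

import "fmt"

func main() {
	box := make(chan string, 1) // size-1 channel as an atomic box
	box <- "cert-v1"            // initial value

	// Reader: take the value out, put it straight back.
	get := func() string {
		v := <-box
		box <- v
		return v
	}
	fmt.Println(get()) // cert-v1

	// Writer (the SIGHUP handler): drain the old value, then refill.
	<-box
	box <- "cert-v2"
	fmt.Println(get()) // cert-v2
}
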
diff --git a/services/keepstore/server_test.go b/services/keepstore/server_test.go
new file mode 100644 (file)
index 0000000..84adf36
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/tls"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "testing"
+)
+
+func TestTLS(t *testing.T) {
+       defer func() {
+               theConfig.TLSKeyFile = ""
+               theConfig.TLSCertificateFile = ""
+       }()
+       theConfig.TLSKeyFile = "../api/tmp/self-signed.key"
+       theConfig.TLSCertificateFile = "../api/tmp/self-signed.pem"
+       srv := &server{}
+       srv.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               w.Write([]byte("OK"))
+       })
+       l, err := net.Listen("tcp", ":")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer l.Close()
+       go srv.Serve(l)
+       defer srv.Shutdown(context.Background())
+       c := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
+       resp, err := c.Get("https://" + l.Addr().String() + "/")
+       if err != nil {
+               t.Fatal(err)
+       }
+       body, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               t.Error(err)
+       }
+       if !bytes.Equal(body, []byte("OK")) {
+               t.Errorf("expected OK, got %q", body)
+       }
+}
index 5f6fd90a1cda96bd41f0039d9b5be491c39a050b..8e83f6ce5f02c33182f40f89d42c0d7e3d4b59b7 100644 (file)
@@ -118,6 +118,36 @@ TrashCheckInterval:
     How often to check for (and delete) trashed blocks whose
     TrashLifetime has expired.
 
+TrashWorkers:
+
+    Maximum number of concurrent trash operations. Default is 1, i.e.,
+    trash lists are processed serially.
+
+EmptyTrashWorkers:
+
+    Maximum number of concurrent block deletion operations (per
+    volume) when emptying trash. Default is 1.
+
+PullWorkers:
+
+    Maximum number of concurrent pull operations. Default is 1, i.e.,
+    pull lists are processed serially.
+
+TLSCertificateFile:
+
+    Path to server certificate file in X509 format. Enables TLS mode.
+
+    Example: /var/lib/acme/live/keep0.example.com/fullchain
+
+TLSKeyFile:
+
+    Path to server key file in X509 format. Enables TLS mode.
+
+    The key pair is read from disk during startup, and whenever SIGHUP
+    is received.
+
+    Example: /var/lib/acme/live/keep0.example.com/privkey
+
 Volumes:
 
     List of storage volumes. If omitted or empty, the default is to
index 5a04ffd944c17ab51de93a41fd1d6994fff1ecbe..23d675359244942097072d88e1bd98daf9d46c6c 100644 (file)
@@ -18,6 +18,7 @@ import (
        "strconv"
        "strings"
        "sync"
+       "sync/atomic"
        "syscall"
        "time"
 )
@@ -725,39 +726,61 @@ var unixTrashLocRegexp = regexp.MustCompile(`/([0-9a-f]{32})\.trash\.(\d+)$`)
 // and deletes those with deadline < now.
 func (v *UnixVolume) EmptyTrash() {
        var bytesDeleted, bytesInTrash int64
-       var blocksDeleted, blocksInTrash int
+       var blocksDeleted, blocksInTrash int64
 
-       err := filepath.Walk(v.Root, func(path string, info os.FileInfo, err error) error {
-               if err != nil {
-                       log.Printf("EmptyTrash: filepath.Walk: %v: %v", path, err)
-                       return nil
-               }
+       doFile := func(path string, info os.FileInfo) {
                if info.Mode().IsDir() {
-                       return nil
+                       return
                }
                matches := unixTrashLocRegexp.FindStringSubmatch(path)
                if len(matches) != 3 {
-                       return nil
+                       return
                }
                deadline, err := strconv.ParseInt(matches[2], 10, 64)
                if err != nil {
                        log.Printf("EmptyTrash: %v: ParseInt(%v): %v", path, matches[2], err)
-                       return nil
+                       return
                }
-               bytesInTrash += info.Size()
-               blocksInTrash++
+               atomic.AddInt64(&bytesInTrash, info.Size())
+               atomic.AddInt64(&blocksInTrash, 1)
                if deadline > time.Now().Unix() {
-                       return nil
+                       return
                }
                err = v.os.Remove(path)
                if err != nil {
                        log.Printf("EmptyTrash: Remove %v: %v", path, err)
+                       return
+               }
+               atomic.AddInt64(&bytesDeleted, info.Size())
+               atomic.AddInt64(&blocksDeleted, 1)
+       }
+
+       type dirent struct {
+               path string
+               info os.FileInfo
+       }
+       var wg sync.WaitGroup
+       todo := make(chan dirent, theConfig.EmptyTrashWorkers)
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for e := range todo {
+                               doFile(e.path, e.info)
+                       }
+               }()
+       }
+
+       err := filepath.Walk(v.Root, func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       log.Printf("EmptyTrash: filepath.Walk: %v: %v", path, err)
                        return nil
                }
-               bytesDeleted += info.Size()
-               blocksDeleted++
+               todo <- dirent{path, info}
                return nil
        })
+       close(todo)
+       wg.Wait()
 
        if err != nil {
                log.Printf("EmptyTrash error for %v: %v", v.String(), err)
index 28ae431669b05cd21521e3211f68af1c58e5d35d..4fb31a742e91ea76372f6fa8986748dde7414d21 100644 (file)
@@ -7,12 +7,14 @@ if not File.exists?('/usr/bin/git') then
   exit
 end
 
+git_latest_tag = `git describe --abbrev=0`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
 git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
 git_timestamp = Time.at(git_timestamp.to_i).utc
 
 Gem::Specification.new do |s|
   s.name        = 'arvados-login-sync'
-  s.version     = "0.1.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
   s.date        = git_timestamp.strftime("%Y-%m-%d")
   s.summary     = "Set up local login accounts for Arvados users"
   s.description = "Creates and updates local login accounts for Arvados users. Built from git commit #{git_hash}"
index 475ab98196fe5983048579829efea2c842255c1f..eb680043e4b50bf3f44bbd28a97bd551a39c12de 100755 (executable)
@@ -57,18 +57,19 @@ begin
 
   pwnam = Hash.new()
   logins.reject! do |l|
-    return false if pwnam[l[:username]]
-    begin
-      pwnam[l[:username]] = Etc.getpwnam(l[:username])
-    rescue
-      if skip_missing_users
-        STDERR.puts "Account #{l[:username]} not found. Skipping"
-        true
-      end
-    else
-      if pwnam[l[:username]].uid < uid_min
-        STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
-        true
+    if not pwnam[l[:username]]
+      begin
+        pwnam[l[:username]] = Etc.getpwnam(l[:username])
+      rescue
+        if skip_missing_users
+          STDERR.puts "Account #{l[:username]} not found. Skipping"
+          true
+        end
+      else
+        if pwnam[l[:username]].uid < uid_min
+          STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
+          true
+        end
       end
     end
   end
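Worth noting for reviewers: the old block used `return`, which in Ruby returns from the enclosing scope (raising LocalJumpError at script level) rather than from the block, so the duplicate-username shortcut never worked as intended. A rough Python sketch of the rules the rewritten block implements; the function name is illustrative:

    import pwd

    def filter_logins(logins, uid_min, skip_missing_users):
        pwnam = {}
        kept = []
        for login in logins:
            name = login['username']
            if name not in pwnam:            # look each username up only once
                try:
                    pwnam[name] = pwd.getpwnam(name)
                except KeyError:
                    if skip_missing_users:
                        print("Account %s not found. Skipping" % name)
                        continue
                else:
                    if pwnam[name].pw_uid < uid_min:
                        print("Account %s uid %d < uid_min %d. Skipping"
                              % (name, pwnam[name].pw_uid, uid_min))
                        continue
            kept.append(login)
        return kept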
index f51509cb5b9976d5c762d15351d2d5bdb4b5e7c3..17942c2cffa784993b4338dc64711e56f5e17028 100644 (file)
@@ -15,21 +15,24 @@ class TestAddUser < Minitest::Test
     File.open(@tmpdir+'/succeed', 'w') do |f| end
     invoke_sync binstubs: ['new_user']
     spied = File.read(@tmpdir+'/spy')
-    assert_match %r{useradd -m -c active -s /bin/bash -G fuse active}, spied
+    assert_match %r{useradd -m -c active -s /bin/bash -G (fuse)? active}, spied
     # BUG(TC): This assertion succeeds only if docker and fuse groups
     # exist on the host, but is insensitive to the admin group (groups
     # are quietly ignored by login-sync if they don't exist on the
     # current host).
-    assert_match %r{useradd -m -c adminroot -s /bin/bash -G docker(,admin)?,fuse adminroot}, spied
+    assert_match %r{useradd -m -c adminroot -s /bin/bash -G (docker)?(,admin)?(,fuse)? adminroot}, spied
   end
 
   def test_useradd_success
     # binstub_new_user/useradd will succeed.
     File.open(@tmpdir+'/succeed', 'w') do |f|
       f.puts 'useradd -m -c active -s /bin/bash -G fuse active'
+      f.puts 'useradd -m -c active -s /bin/bash -G  active'
       # Accept either form; see note about groups in test_useradd_error.
       f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,fuse adminroot'
       f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin,fuse adminroot'
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker adminroot'
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin adminroot'
     end
     $stderr.puts "*** Expect crash after getpwnam() fails:"
     invoke_sync binstubs: ['new_user']
index 3c04118abe2ec2bb3f5fee4c1a74e078d61119ed..b124c66540aab804db3ad555b048c9ef5097e8d0 100644 (file)
@@ -33,7 +33,7 @@ def arvados_timestamp(timestr):
         subsecs = float(subsec_match.group(1))
         timestr = timestr[:subsec_match.start()] + 'Z'
     return calendar.timegm(time.strptime(timestr + 'UTC',
-                                         ARVADOS_TIMEFMT + '%Z'))
+                                         ARVADOS_TIMEFMT + '%Z')) + subsecs
 
 def timestamp_fresh(timestamp, fresh_time):
     return (time.time() - timestamp) < fresh_time
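The fix above restores the fractional seconds that strptime cannot parse: the sub-second part is split off before parsing and added back onto the integer epoch value. A self-contained sketch of the resulting behavior (regex details are illustrative; the module's actual pattern is defined elsewhere):

    import calendar
    import re
    import time

    ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'  # format used by this module

    def arvados_timestamp(timestr):
        # strptime has no portable sub-second support, so split the
        # fraction off first and add it back at the end.
        subsecs = 0.0
        m = re.search(r'(\.\d+)Z$', timestr)
        if m:
            subsecs = float(m.group(1))
            timestr = timestr[:m.start()] + 'Z'
        return calendar.timegm(time.strptime(timestr + 'UTC',
                                             ARVADOS_TIMEFMT + '%Z')) + subsecs

    # Values match the unit test added later in this commit:
    # arvados_timestamp('2018-05-30T19:56:18.999371Z') == 1527710178.999371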
index 9106ea67ccc8ffac7813d64baa5ebc537548fa21..b4fec5096d5a8e2767169fce3910f45136ddaee3 100644 (file)
@@ -130,7 +130,7 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
     @RetryMixin._retry()
     def create_cloud_node(self):
         self._logger.info("Sending create_node request for node size %s.",
-                          self.cloud_size.name)
+                          self.cloud_size.id)
         try:
             self.cloud_node = self._cloud.create_node(self.cloud_size,
                                                       self.arvados_node)
@@ -432,6 +432,11 @@ class ComputeNodeMonitorActor(config.actor_class):
         reason for the decision.
         """
 
+        # If this node's size is invalid (because it has a stale arvados_node_size
+        # tag), return True so that it's properly shut down.
+        if self.cloud_node.size.id == 'invalid':
+            return (True, "node's size tag '%s' not recognizable" % (self.cloud_node.extra['arvados_node_size'],))
+
         # Collect states and then consult state transition table whether we
         # should shut down.  Possible states are:
         # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down']
index 1cf8f4e41d776e5861c41816aff34cf2d98604db..5b7785afd93744b2565a6b467f56e9e2617bb25a 100644 (file)
@@ -5,7 +5,7 @@
 
 from __future__ import absolute_import, print_function
 
-import subprocess
+import subprocess32 as subprocess
 import time
 
 from . import ComputeNodeMonitorActor
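This import swap recurs throughout the commit. subprocess32 is the backport of the Python 3 subprocess module to Python 2 on POSIX; it fixes fork-related races under threads (presumably the motivation, given node manager's heavily threaded actors) and adds the timeout= keyword, e.g.:

    import subprocess32 as subprocess

    try:
        out = subprocess.check_output(['sinfo', '--noheader'], timeout=10)
    except subprocess.TimeoutExpired:
        out = ''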
index 22ffa24079b6d32ea05a6d7daa9dc3aab777f867..48d19f592bbdb0b87d905bac377c849000b59ef1 100644 (file)
@@ -176,7 +176,7 @@ class BaseComputeNodeDriver(RetryMixin):
         try:
             kwargs = self.create_kwargs.copy()
             kwargs.update(self.arvados_create_kwargs(size, arvados_node))
-            kwargs['size'] = size
+            kwargs['size'] = size.real
             return self.real.create_node(**kwargs)
         except CLOUD_ERRORS as create_error:
             # Workaround for bug #6702: sometimes the create node request
index aa8f3c769557a029f380f59609495ca422a7f7af..719124d4000f724a271077d9f1614c50c6788f8d 100644 (file)
@@ -46,6 +46,8 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
 
     def arvados_create_kwargs(self, size, arvados_node):
         tags = {
+            # Set up a tag indicating the Arvados-assigned Cloud Size id.
+            'arvados_node_size': size.id,
             'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
             'arv-ping-url': self._make_ping_url(arvados_node)
         }
@@ -83,11 +85,12 @@ echo %s > /var/tmp/arv-node-data/meta-data/instance-type
         # Do our own filtering based on tag.
         nodes = [node for node in
                 super(ComputeNodeDriver, self).list_nodes(ex_fetch_nic=False, ex_fetch_power_state=False)
-                if node.extra["tags"].get("arvados-class") == self.tags["arvados-class"]]
+                if node.extra.get("tags", {}).get("arvados-class") == self.tags["arvados-class"]]
         for n in nodes:
             # Need to populate Node.size
             if not n.size:
                 n.size = self.sizes()[n.extra["properties"]["hardwareProfile"]["vmSize"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size')
         return nodes
 
     def broken(self, cloud_node):
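The pattern is the same across all three cloud drivers touched by this commit: stamp the node with the Arvados size id at create time, then surface it under extra['arvados_node_size'] when listing. A condensed sketch (the helper name is illustrative):

    def annotate_node_size(node):
        # Copy the tag (if any) to a uniform location. Nodes created before
        # this commit carry no tag, so this stores None, and the node later
        # maps to the InvalidCloudSize placeholder in jobqueue.py.
        node.extra['arvados_node_size'] = node.extra.get('tags', {}).get('arvados_node_size')
        return node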
index 07ed90dfa517cc656f75886ff2dedde19761566f..56812d258a92212b02a53d9775534d8b23b50b69 100644 (file)
@@ -91,18 +91,27 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
                     "VolumeSize": volsize,
                     "VolumeType": "gp2"
                 }}]
+        if size.preemptible:
+            # Request a Spot instance for this node
+            kw['ex_spot_market'] = True
         return kw
 
     def sync_node(self, cloud_node, arvados_node):
         self.real.ex_create_tags(cloud_node,
                                  {'Name': arvados_node_fqdn(arvados_node)})
 
+    def create_node(self, size, arvados_node):
+        # Set up a tag indicating the Arvados-assigned Cloud Size id.
+        self.create_kwargs['ex_metadata'].update({'arvados_node_size': size.id})
+        return super(ComputeNodeDriver, self).create_node(size, arvados_node)
+
     def list_nodes(self):
         # Need to populate Node.size
         nodes = super(ComputeNodeDriver, self).list_nodes()
         for n in nodes:
             if not n.size:
                 n.size = self.sizes()[n.extra["instance_type"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size')
         return nodes
 
     @classmethod
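With `preemptible = true` in a size's config section, the EC2 driver simply passes libcloud's ex_spot_market flag through to create_node. A minimal sketch of the kwarg plumbing, assuming a size object carrying the configured preemptible attribute:

    def create_kwargs_for(size, base_kwargs):
        kw = dict(base_kwargs)
        if getattr(size, 'preemptible', False):
            kw['ex_spot_market'] = True   # libcloud EC2: bid on the spot market
        return kw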
index f1238db40321601abe42dee33824983de34d1044..11025f7840bc00fe6c188ad6b0f9e9bea1795cba 100644 (file)
@@ -101,25 +101,27 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
                   'ex_disks_gce_struct': disks,
                   }
         result['ex_metadata'].update({
-                'arv-ping-url': self._make_ping_url(arvados_node),
-                'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
-                'hostname': arvados_node_fqdn(arvados_node),
-                })
+            'arvados_node_size': size.id,
+            'arv-ping-url': self._make_ping_url(arvados_node),
+            'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
+            'hostname': arvados_node_fqdn(arvados_node),
+        })
         return result
 
-
     def list_nodes(self):
         # The GCE libcloud driver only supports filtering node lists by zone.
         # Do our own filtering based on tag list.
         nodelist = [node for node in
                     super(ComputeNodeDriver, self).list_nodes()
                     if self.node_tags.issubset(node.extra.get('tags', []))]
-        # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
-        # It's supposed to be the actual size object.  Check that it's not,
-        # and monkeypatch the results when that's the case.
-        if nodelist and not hasattr(nodelist[0].size, 'id'):
-            for node in nodelist:
+        for node in nodelist:
+            # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
+            # It's supposed to be the actual size object.  Check that it's not,
+            # and monkeypatch the results when that's the case.
+            if not hasattr(node.size, 'id'):
                 node.size = self.sizes()[node.size]
+            # Get arvados-assigned cloud size id
+            node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size')
         return nodelist
 
     @classmethod
index e47f9fcb1d036b78f94af0af25e8c37dc17b5ad0..8c6757e51c451c253e5f16f57570d85bb52f3d7a 100644 (file)
@@ -17,6 +17,7 @@ from apiclient import errors as apierror
 
 from .baseactor import BaseNodeManagerActor
 
+from functools import partial
 from libcloud.common.types import LibcloudError
 from libcloud.common.exceptions import BaseHTTPError
 
@@ -69,12 +70,23 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
                 if not self.has_option(sec_name, opt_name):
                     self.set(sec_name, opt_name, value)
 
-    def get_section(self, section, transformer=None):
+    def get_section(self, section, transformers={}, default_transformer=None):
+        transformer_map = {
+            str: self.get,
+            int: self.getint,
+            bool: self.getboolean,
+            float: self.getfloat,
+        }
         result = self._dict()
         for key, value in self.items(section):
+            transformer = None
+            if transformers.get(key) in transformer_map:
+                transformer = partial(transformer_map[transformers[key]], section)
+            elif default_transformer in transformer_map:
+                transformer = partial(transformer_map[default_transformer], section)
             if transformer is not None:
                 try:
-                    value = transformer(value)
+                    value = transformer(key)
                 except (TypeError, ValueError):
                     pass
             result[key] = value
@@ -128,31 +140,41 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
                                         self.get_section('Cloud Create'),
                                         driver_class=driver_class)
 
-    def node_sizes(self, all_sizes):
+    def node_sizes(self):
         """Finds all acceptable NodeSizes for our installation.
 
         Returns a list of (NodeSize, kwargs) pairs for each NodeSize object
         returned by libcloud that matches a size listed in our config file.
         """
-
+        all_sizes = self.new_cloud_client().list_sizes()
         size_kwargs = {}
+        section_types = {
+            'instance_type': str,
+            'price': float,
+            'preemptible': bool,
+        }
         for sec_name in self.sections():
             sec_words = sec_name.split(None, 2)
             if sec_words[0] != 'Size':
                 continue
-            size_spec = self.get_section(sec_name, int)
-            if 'price' in size_spec:
-                size_spec['price'] = float(size_spec['price'])
+            size_spec = self.get_section(sec_name, section_types, int)
+            if 'preemptible' not in size_spec:
+                size_spec['preemptible'] = False
+            if 'instance_type' not in size_spec:
+                # Assume instance type is Size name if missing
+                size_spec['instance_type'] = sec_words[1]
+            size_spec['id'] = sec_words[1]
             size_kwargs[sec_words[1]] = size_spec
         # EC2 node sizes are identified by id. GCE sizes are identified by name.
         matching_sizes = []
         for size in all_sizes:
-            if size.id in size_kwargs:
-                matching_sizes.append((size, size_kwargs[size.id]))
-            elif size.name in size_kwargs:
-                matching_sizes.append((size, size_kwargs[size.name]))
+            matching_sizes += [
+                (size, size_kwargs[s]) for s in size_kwargs
+                if size_kwargs[s]['instance_type'] == size.id
+                or size_kwargs[s]['instance_type'] == size.name
+            ]
         return matching_sizes
 
     def shutdown_windows(self):
-        return [int(n)
+        return [float(n)
                 for n in self.get('Cloud', 'shutdown_windows').split(',')]
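get_section now accepts a per-key transformer map plus a default, and node_sizes uses it so that price parses as float and preemptible as bool while remaining keys default to int. A hypothetical usage against a config like the [Size m4.large.spot] example later in this commit:

    # cfg is a NodeManagerConfig instance.
    size_spec = cfg.get_section(
        'Size m4.large.spot',
        transformers={'instance_type': str, 'price': float, 'preemptible': bool},
        default_transformer=int)
    # -> {'instance_type': 'm4.large', 'preemptible': True,
    #     'cores': 2, 'price': 0.126, 'scratch': 100}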
index 0d6fdfca9a6c057c18ee6d4ea63e4cdade539a9b..6e85b85ab2c8277aae88d27ff0ad96917226af98 100644 (file)
@@ -318,7 +318,7 @@ class NodeManagerDaemonActor(actor_class):
         busy_count = counts["busy"]
         wishlist_count = self._size_wishlist(size)
 
-        self._logger.info("%s: wishlist %i, up %i (booting %i, unpaired %i, idle %i, busy %i), down %i, shutdown %i", size.name,
+        self._logger.info("%s: wishlist %i, up %i (booting %i, unpaired %i, idle %i, busy %i), down %i, shutdown %i", size.id,
                           wishlist_count,
                           up_count,
                           counts["booting"],
@@ -338,7 +338,7 @@ class NodeManagerDaemonActor(actor_class):
             can_boot = int((self.max_total_price - total_price) / size.price)
             if can_boot == 0:
                 self._logger.info("Not booting %s (price %s) because with it would exceed max_total_price of %s (current total_price is %s)",
-                                  size.name, size.price, self.max_total_price, total_price)
+                                  size.id, size.price, self.max_total_price, total_price)
             return can_boot
         else:
             return wanted
@@ -392,7 +392,7 @@ class NodeManagerDaemonActor(actor_class):
             return None
         arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
         self._logger.info("Want %i more %s nodes.  Booting a node.",
-                          nodes_wanted, cloud_size.name)
+                          nodes_wanted, cloud_size.id)
         new_setup = self._node_setup.start(
             timer_actor=self._timer,
             arvados_client=self._new_arvados(),
index 90b32290b76932fa93dbb1ff0854aeb2219eaf4c..1020b4a80ced597911b886c40789dea39f1d5598 100644 (file)
@@ -7,7 +7,7 @@ from __future__ import absolute_import, print_function
 
 import logging
 import re
-import subprocess
+import subprocess32 as subprocess
 
 import arvados.util
 
@@ -24,6 +24,26 @@ class ServerCalculator(object):
     that would best satisfy the jobs, choosing the cheapest size that
     satisfies each job, and ignoring jobs that can't be satisfied.
     """
+    class InvalidCloudSize(object):
+        """
+        Dummy CloudSizeWrapper-like class, to be used when a cloud node doesn't
+        have a recognizable arvados_node_size tag.
+        """
+        def __init__(self):
+            self.id = 'invalid'
+            self.name = 'invalid'
+            self.ram = 0
+            self.disk = 0
+            self.scratch = 0
+            self.cores = 0
+            self.bandwidth = 0
+            self.price = 9999999
+            self.preemptible = False
+            self.extra = {}
+
+        def meets_constraints(self, **kwargs):
+            return False
+
 
     class CloudSizeWrapper(object):
         def __init__(self, real_size, node_mem_scaling, **kwargs):
@@ -38,7 +58,9 @@ class ServerCalculator(object):
                 self.disk = 0
             self.scratch = self.disk * 1000
             self.ram = int(self.ram * node_mem_scaling)
+            self.preemptible = False
             for name, override in kwargs.iteritems():
+                if name == 'instance_type': continue
                 if not hasattr(self, name):
                     raise ValueError("unrecognized size field '%s'" % (name,))
                 setattr(self, name, override)
@@ -80,10 +102,12 @@ class ServerCalculator(object):
         wants = {'cores': want_value('min_cores_per_node'),
                  'ram': want_value('min_ram_mb_per_node'),
                  'scratch': want_value('min_scratch_mb_per_node')}
+        # EC2 node sizes are identified by id. GCE sizes are identified by name.
         for size in self.cloud_sizes:
             if (size.meets_constraints(**wants) and
-                (specified_size is None or size.id == specified_size)):
-                    return size
+                (specified_size is None or
+                    size.id == specified_size or size.name == specified_size)):
+                        return size
         return None
 
     def servers_for_queue(self, queue):
@@ -101,7 +125,7 @@ class ServerCalculator(object):
                     "Job's min_nodes constraint is greater than the configured "
                     "max_nodes (%d)" % self.max_nodes)
             elif (want_count*cloud_size.price <= self.max_price):
-                servers.extend([cloud_size.real] * want_count)
+                servers.extend([cloud_size] * want_count)
             else:
                 unsatisfiable_jobs[job['uuid']] = (
                     "Job's price (%d) is above system's max_price "
@@ -115,7 +139,7 @@ class ServerCalculator(object):
         for s in self.cloud_sizes:
             if s.id == sizeid:
                 return s
-        return None
+        return self.InvalidCloudSize()
 
 
 class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
@@ -224,5 +248,5 @@ class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
                                    job_uuid,
                                    error)
         self._logger.debug("Calculated wishlist: %s",
-                           ', '.join(s.name for s in server_list) or "(empty)")
+                           ', '.join(s.id for s in server_list) or "(empty)")
         return super(JobQueueMonitorActor, self)._got_response(server_list)
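Note that find_size no longer returns None for an unknown size id: it returns the InvalidCloudSize placeholder, whose price of 9999999 and meets_constraints() == False keep it out of the wishlist, while the dispatch change earlier in this commit recognizes id == 'invalid' and suggests shutdown. A sketch of the resulting flow:

    def classify(node, calculator):
        size = calculator.find_size(node.extra.get('arvados_node_size'))
        if size.id == 'invalid':
            # Stale or missing tag: shut the node down instead of crashing.
            return (True, "node's size tag %r not recognizable"
                          % node.extra.get('arvados_node_size'))
        return (False, "size %s ok" % size.id)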
index 888abf5a768d51cb34fe85b30ed9d1252b7dea4c..f65e0806ec56df96f81c5bed87f657b15355fdec 100644 (file)
@@ -71,7 +71,7 @@ def setup_logging(path, level, **sublevels):
     return root_logger
 
 def build_server_calculator(config):
-    cloud_size_list = config.node_sizes(config.new_cloud_client().list_sizes())
+    cloud_size_list = config.node_sizes()
     if not cloud_size_list:
         abort("No valid node sizes configured")
     return ServerCalculator(cloud_size_list,
@@ -80,7 +80,7 @@ def build_server_calculator(config):
                             config.getfloat('Daemon', 'node_mem_scaling'))
 
 def launch_pollers(config, server_calculator):
-    poll_time = config.getint('Daemon', 'poll_time')
+    poll_time = config.getfloat('Daemon', 'poll_time')
     max_poll_time = config.getint('Daemon', 'max_poll_time')
 
     timer = TimedCallBackActor.start(poll_time / 10.0).tell_proxy()
index 4b9d5b60fb0ce5131d865f4b3d97b0652afb88c8..0abb3b3a379cbbbec7e619fdcca081ec98a340ea 100644 (file)
@@ -5,7 +5,7 @@
 
 from __future__ import absolute_import, print_function
 
-import subprocess
+import subprocess32 as subprocess
 
 from . import clientactor
 from . import config
@@ -80,8 +80,8 @@ class CloudNodeListMonitorActor(clientactor.RemotePollLoopActor):
     def _send_request(self):
         nodes = self._client.list_nodes()
         for n in nodes:
-            # Replace with libcloud NodeSize object with compatible
+            # Replace the libcloud NodeSize object with a compatible
             # CloudSizeWrapper object which merges the size info reported from
             # the cloud with size information from the configuration file.
-            n.size = self._calculator.find_size(n.size.id)
+            n.size = self._calculator.find_size(n.extra['arvados_node_size'])
         return nodes
index 5d033081213c5faa72dc33c656dd9c3167f140da..2a592f9ee7499924d5a02c83ed2b4931f0a1e6bf 100644 (file)
@@ -43,13 +43,16 @@ class FakeDriver(NodeDriver):
         global all_nodes, create_calls
         create_calls += 1
         nodeid = "node%i" % create_calls
+        if ex_tags is None:
+            ex_tags = {}
+        ex_tags.update({'arvados_node_size': size.id})
         n = Node(nodeid, nodeid, NodeState.RUNNING, [], [], self, size=size, extra={"tags": ex_tags})
         all_nodes.append(n)
         if ex_customdata:
             ping_url = re.search(r"echo '(.*)' > /var/tmp/arv-node-data/arv-ping-url", ex_customdata).groups(1)[0]
         if ex_userdata:
             ping_url = ex_userdata
-        if ex_metadata:
+        elif ex_metadata:
             ping_url = ex_metadata["arv-ping-url"]
         ping_url += "&instance_id=" + nodeid
         ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
@@ -130,10 +133,10 @@ class RetryDriver(FakeDriver):
         create_calls += 1
         if create_calls < 2:
             raise RateLimitReachedError(429, "Rate limit exceeded",
-                                        headers={'retry-after': '12'})
+                                        headers={'retry-after': '2'})
         elif create_calls < 3:
             raise BaseHTTPError(429, "Rate limit exceeded",
-                                {'retry-after': '2'})
+                                {'retry-after': '1'})
         else:
             return super(RetryDriver, self).create_node(name=name,
                     size=size,
@@ -161,7 +164,12 @@ class FakeAwsDriver(FakeDriver):
                                                       auth=auth,
                                                       ex_metadata=ex_metadata,
                                                       ex_userdata=ex_userdata)
-        n.extra = {"launch_time": time.strftime(ARVADOS_TIMEFMT, time.gmtime())[:-1]}
+        n.extra = {
+            "launch_time": time.strftime(ARVADOS_TIMEFMT, time.gmtime())[:-1],
+            "tags" : {
+                "arvados_node_size": size.id
+            }
+        }
         return n
 
     def list_sizes(self, **kwargs):
@@ -187,7 +195,8 @@ class FakeGceDriver(FakeDriver):
                                                    ex_metadata=ex_metadata)
         n.extra = {
             "metadata": {
-                "items": [{"key": k, "value": v} for k,v in ex_metadata.iteritems()]
+                "items": [{"key": k, "value": v} for k,v in ex_metadata.iteritems()],
+                "arvados_node_size": size.id
             },
             "zone": "fake"
         }
index a1fa2dc32c21a6676f740ba8321fa4175c143733..117f9b224bff2f4ca567b64e09de81b4ed34c692 100644 (file)
@@ -169,12 +169,24 @@ security_groups = idstring1, idstring2
 # You may also want to define the amount of scratch space (expressed
 # in GB) for Crunch jobs.  You can also override Amazon's provided
 # data fields (such as price per hour) by setting them here.
+#
+# Additionally, you can request a preemptible instance (an AWS spot
+# instance) by setting the boolean "preemptible" flag in a Size section.
+# To offer both spot and on-demand versions of the same size, give the
+# spot Size section a distinct name and set "instance_type" to the real
+# EC2 instance type inside it, as in the example below.
 
 [Size m4.large]
 cores = 2
 price = 0.126
 scratch = 100
 
+[Size m4.large.spot]
+instance_type = m4.large
+preemptible = true
+cores = 2
+price = 0.126
+scratch = 100
+
 [Size m4.xlarge]
 cores = 4
 price = 0.252
index 3b8502c0535ef14777af2e211162d7774714c27d..1e41f3dad2fd32cfa3f42c461f2b21362796cb8e 100644 (file)
@@ -35,19 +35,24 @@ setup(name='arvados-node-manager',
           ('share/doc/arvados-node-manager', ['agpl-3.0.txt', 'README.rst']),
       ],
       install_requires=[
-          'apache-libcloud>=2.3',
+          'apache-libcloud>=2.3.1.dev1',
           'arvados-python-client>=0.1.20170731145219',
           'future',
           'pykka',
           'python-daemon',
-          'setuptools'
+          'setuptools',
+          'subprocess32>=3.5.1',
+      ],
+      dependency_links=[
+          "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.3.1.dev1.zip"
       ],
       test_suite='tests',
       tests_require=[
           'requests',
           'pbr<1.7.0',
           'mock>=1.0',
-          'apache-libcloud>=2.3',
+          'apache-libcloud>=2.3.1.dev1',
+          'subprocess32>=3.5.1',
       ],
       zip_safe=False
       )
index 01f053c3701c16885bd6930d4ab5c4590ba923cb..a11a6d807ef9348d9a17deac9e0c2092ed929f46 100644 (file)
@@ -38,16 +38,16 @@ max_nodes = 8
 max_total_price = 0
 
 # Poll Azure nodes and Arvados for new information every N seconds.
-poll_time = 5
+poll_time = 0.5
 
 # Polls have exponential backoff when services fail to respond.
 # This is the longest time to wait between polls.
-max_poll_time = 300
+max_poll_time = 1
 
 # If Node Manager can't successfully poll a service for this long,
 # it will never start or stop compute nodes, on the assumption that its
 # information is too outdated.
-poll_stale_after = 600
+poll_stale_after = 1
 
 # If Node Manager boots a cloud node, and it does not pair with an Arvados
 # node before this long, assume that there was a cloud bootstrap failure and
@@ -115,7 +115,7 @@ driver_class = {driver_class}
 # Azure bills by the minute, so it makes sense to aggressively shut down idle
 # nodes.  Specify at least two windows.  You can add as many as you need beyond
 # that.
-shutdown_windows = 1, 999999
+shutdown_windows = 0.05, 999999
 
 [Cloud Credentials]
 # Use "azure account list" with the azure CLI to get these values.
index 744d7f849bec3793c0dcd4db77624e013b7e4f25..2bb7d0ea0b64354a5cb5500a52f0ff05a1bcf304 100644 (file)
@@ -38,16 +38,16 @@ max_nodes = 8
 max_total_price = 0
 
 # Poll Azure nodes and Arvados for new information every N seconds.
-poll_time = 5
+poll_time = 0.5
 
 # Polls have exponential backoff when services fail to respond.
 # This is the longest time to wait between polls.
-max_poll_time = 300
+max_poll_time = 1
 
 # If Node Manager can't successfully poll a service for this long,
 # it will never start or stop compute nodes, on the assumption that its
 # information is too outdated.
-poll_stale_after = 600
+poll_stale_after = 1
 
 # If Node Manager boots a cloud node, and it does not pair with an Arvados
 # node before this long, assume that there was a cloud bootstrap failure and
@@ -115,7 +115,7 @@ driver_class = {driver_class}
 # Azure bills by the minute, so it makes sense to aggressively shut down idle
 # nodes.  Specify at least two windows.  You can add as many as you need beyond
 # that.
-shutdown_windows = 1, 999999
+shutdown_windows = 0.05, 999999
 
 [Cloud Credentials]
 
index 1c39ccf668519baa1e4a91330573844c4a5b28e6..11131efbc3fa845e17c822ecf2a22366c61b0d12 100644 (file)
@@ -38,16 +38,16 @@ max_nodes = 8
 max_total_price = 0
 
 # Poll Azure nodes and Arvados for new information every N seconds.
-poll_time = 5
+poll_time = 0.5
 
 # Polls have exponential backoff when services fail to respond.
 # This is the longest time to wait between polls.
-max_poll_time = 300
+max_poll_time = 1
 
 # If Node Manager can't successfully poll a service for this long,
 # it will never start or stop compute nodes, on the assumption that its
 # information is too outdated.
-poll_stale_after = 600
+poll_stale_after = 1
 
 # If Node Manager boots a cloud node, and it does not pair with an Arvados
 # node before this long, assume that there was a cloud bootstrap failure and
@@ -115,7 +115,7 @@ driver_class = {driver_class}
 # Azure bills by the minute, so it makes sense to aggressively shut down idle
 # nodes.  Specify at least two windows.  You can add as many as you need beyond
 # that.
-shutdown_windows = 1, 999999
+shutdown_windows = 0.05, 999999
 
 [Cloud Credentials]
 key = 00000000-0000-0000-0000-000000000000
index 508e626639cb2857e989e9f342fbaf8be2da8aba..69a29019e78cb1ab7de2a8fdc41de85c8abc645a 100755 (executable)
@@ -12,7 +12,7 @@ events or behaviors for each test.
 
 """
 
-import subprocess
+import subprocess32 as subprocess
 import os
 import sys
 import re
@@ -21,6 +21,7 @@ import logging
 import stat
 import tempfile
 import shutil
+import errno
 from functools import partial
 import arvados
 import StringIO
@@ -105,18 +106,6 @@ def node_paired(g):
 
     return 0
 
-def remaining_jobs(g):
-    update_script(os.path.join(fake_slurm, "sinfo"), "#!/bin/sh\n" +
-                  "\n".join("echo '%s|alloc|(null)'" % (v) for k,v in compute_nodes.items()))
-
-    for k,v in all_jobs.items():
-        all_jobs[k] = "Running"
-
-    set_squeue(g)
-
-    return 0
-
-
 def node_busy(g):
     update_script(os.path.join(fake_slurm, "sinfo"), "#!/bin/sh\n" +
                   "\n".join("echo '%s|idle|(null)'" % (v) for k,v in compute_nodes.items()))
@@ -124,7 +113,8 @@ def node_busy(g):
 
 def node_shutdown(g):
     global compute_nodes
-    del compute_nodes[g.group(1)]
+    if g.group(1) in compute_nodes:
+        del compute_nodes[g.group(1)]
     return 0
 
 def jobs_req(g):
@@ -186,8 +176,8 @@ def run_test(name, actions, checks, driver_class, jobs, provider):
                                       driver_class=driver_class,
                                       ssh_key=os.path.join(fake_slurm, "id_rsa.pub")))
 
-    # Tests must complete in less than 3 minutes.
-    timeout = time.time() + 180
+    # Tests must complete in less than 30 seconds.
+    timeout = time.time() + 30
     terminated = False
 
     # Now start node manager
@@ -215,7 +205,7 @@ def run_test(name, actions, checks, driver_class, jobs, provider):
                     if code != 0:
                         detail.error("Check failed")
                         if not terminated:
-                            p.terminate()
+                            p.kill()
                             terminated = True
 
             if terminated:
@@ -225,7 +215,7 @@ def run_test(name, actions, checks, driver_class, jobs, provider):
                 detail.error("Exceeded timeout with actions remaining: %s", actions)
                 code += 1
                 if not terminated:
-                    p.terminate()
+                    p.kill()
                     terminated = True
 
             k, v = actions[0]
@@ -236,11 +226,11 @@ def run_test(name, actions, checks, driver_class, jobs, provider):
                 code += v(g)
                 if code != 0:
                     detail.error("Action failed")
-                    p.terminate()
+                    p.kill()
                     terminated = True
 
             if not actions:
-                p.terminate()
+                p.kill()
                 terminated = True
     except KeyboardInterrupt:
         p.kill()
@@ -256,7 +246,18 @@ def run_test(name, actions, checks, driver_class, jobs, provider):
         logger.info("%s passed", name)
     else:
         if isinstance(detail_content, StringIO.StringIO):
-            sys.stderr.write(detail_content.getvalue())
+            detail_content.seek(0)
+            chunk = detail_content.read(4096)
+            while chunk:
+                try:
+                    sys.stderr.write(chunk)
+                    chunk = detail_content.read(4096)
+                except IOError as e:
+                    if e.errno == errno.EAGAIN:
+                        # try again (probably pipe buffer full)
+                        pass
+                    else:
+                        raise
         logger.info("%s failed", name)
 
     return code
@@ -321,7 +322,6 @@ def main():
             ],
             # Checks (things that shouldn't happen)
             {
-                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
                 r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 4),
                 r".*Setting node quota.*": fail,
             },
@@ -341,13 +341,12 @@ def main():
                 (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
                 (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
                 (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
-                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", remaining_jobs),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
                 (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
                 (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown)
             ],
             # Checks (things that shouldn't happen)
             {
-                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
                 r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 2),
                 r".*Sending create_node request.*": partial(expect_count, 5)
             },
@@ -367,7 +366,7 @@ def main():
                 (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
                 (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
                 (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
-                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", remaining_jobs),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
                 (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
                 (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
                 (r".*sending request", jobs_req),
@@ -384,7 +383,6 @@ def main():
             ],
             # Checks (things that shouldn't happen)
             {
-                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
                 r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 6),
                 r".*Sending create_node request.*": partial(expect_count, 9)
             },
@@ -421,8 +419,8 @@ def main():
             # Actions (pattern -> action)
             [
                 (r".*Daemon started", set_squeue),
-                (r".*Rate limit exceeded - scheduling retry in 12 seconds", noop),
                 (r".*Rate limit exceeded - scheduling retry in 2 seconds", noop),
+                (r".*Rate limit exceeded - scheduling retry in 1 seconds", noop),
                 (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", noop),
             ],
             # Checks (things that shouldn't happen)
@@ -444,7 +442,6 @@ def main():
             ],
             # Checks (things that shouldn't happen)
             {
-                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
                 r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 1),
                 r".*Setting node quota.*": fail,
             },
@@ -465,7 +462,6 @@ def main():
             ],
             # Checks (things that shouldn't happen)
             {
-                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
                 r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 1),
                 r".*Setting node quota.*": fail,
             },
index 3f11ff6c2b22d02a47b8f8e9a6bfe246479d10ed..898112bdd8a8c8df86bf80e0a4af1aac3c592ec8 100644 (file)
@@ -37,3 +37,9 @@ class ShutdownTimerTestCase(unittest.TestCase):
         time_mock.return_value += 200
         self.assertEqual(961, timer.next_opening())
         self.assertFalse(timer.window_open())
+
+
+class ArvadosTimestamp(unittest.TestCase):
+    def test_arvados_timestamp(self):
+        self.assertEqual(1527710178, cnode.arvados_timestamp('2018-05-30T19:56:18Z'))
+        self.assertEqual(1527710178.999371, cnode.arvados_timestamp('2018-05-30T19:56:18.999371Z'))
index 5775aa659a31391f13a5071929d9f5562ba3969d..778c9aeaf5ffdbbcecaf90ac8072ace7210ce4a5 100644 (file)
@@ -426,6 +426,15 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.assertEquals(self.node_actor.shutdown_eligible().get(self.TIMEOUT),
                           (False, "node state is ('unpaired', 'open', 'boot wait', 'idle exceeded')"))
 
+    def test_shutdown_when_invalid_cloud_node_size(self):
+        self.make_mocks(1)
+        self.cloud_mock.size.id = 'invalid'
+        self.cloud_mock.extra['arvados_node_size'] = 'stale.type'
+        self.make_actor()
+        self.shutdowns._set_state(True, 600)
+        self.assertEquals((True, "node's size tag 'stale.type' not recognizable"),
+                          self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
     def test_shutdown_without_arvados_node(self):
         self.make_actor(start_time=0)
         self.shutdowns._set_state(True, 600)
index b61db5cba1c57918c622d1ee815461ba6fe6de77..02d8fb62e0b8b624131974c49e4869dda0c06299 100644 (file)
@@ -5,7 +5,7 @@
 
 from __future__ import absolute_import, print_function
 
-import subprocess
+import subprocess32 as subprocess
 import time
 import unittest
 
@@ -18,7 +18,7 @@ from .test_computenode_dispatch import \
     ComputeNodeSetupActorTestCase, \
     ComputeNodeUpdateActorTestCase
 
-@mock.patch('subprocess.check_output')
+@mock.patch('subprocess32.check_output')
 class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
                                             unittest.TestCase):
     ACTOR_CLASS = slurm_dispatch.ComputeNodeShutdownActor
@@ -117,7 +117,7 @@ class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
         super(SLURMComputeNodeShutdownActorTestCase,
               self).test_uncancellable_shutdown()
 
-@mock.patch('subprocess.check_output')
+@mock.patch('subprocess32.check_output')
 class SLURMComputeNodeUpdateActorTestCase(ComputeNodeUpdateActorTestCase):
     ACTOR_CLASS = slurm_dispatch.ComputeNodeUpdateActor
 
@@ -131,7 +131,7 @@ class SLURMComputeNodeUpdateActorTestCase(ComputeNodeUpdateActorTestCase):
 class SLURMComputeNodeSetupActorTestCase(ComputeNodeSetupActorTestCase):
     ACTOR_CLASS = slurm_dispatch.ComputeNodeSetupActor
 
-    @mock.patch('subprocess.check_output')
+    @mock.patch('subprocess32.check_output')
     def test_update_node_features(self, check_output):
         # `scontrol update` happens only if the Arvados node record
         # has a hostname. ComputeNodeSetupActorTestCase.make_mocks
@@ -141,3 +141,15 @@ class SLURMComputeNodeSetupActorTestCase(ComputeNodeSetupActorTestCase):
         self.make_actor()
         self.wait_for_assignment(self.setup_actor, 'cloud_node')
         check_output.assert_called_with(['scontrol', 'update', 'NodeName=compute99', 'Weight=1000', 'Features=instancetype=z1.test'])
+
+    @mock.patch('subprocess32.check_output')
+    def test_failed_arvados_calls_retried(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_failed_arvados_calls_retried()
+
+    @mock.patch('subprocess32.check_output')
+    def test_subscribe(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_subscribe()
+
+    @mock.patch('subprocess32.check_output')
+    def test_creation_with_arvados_node(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_creation_with_arvados_node()
index 128a29e28d24ba4d5f3f8aae1bc535c9c60af043..4bf4c39efbc45ea069ea91ca3c0e94108d9b248b 100644 (file)
@@ -80,7 +80,7 @@ class ComputeNodeDriverTestCase(unittest.TestCase):
         for an_error, is_cloud_error in errors:
             self.driver_mock().create_node.side_effect = an_error
             with self.assertRaises(an_error):
-                driver.create_node('1', 'id_1')
+                driver.create_node(testutil.MockSize(1), 'id_1')
             if is_cloud_error:
                 error_count += 1
             self.assertEqual(error_count, status.tracker.get('create_node_errors'))
index ce96a8040d83a9a091d2d3331bec1d4275d4a974..ea7a033f0b5f3934f55e15a7f3e15aaf4f279246 100644 (file)
@@ -44,14 +44,25 @@ class AzureComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase
         self.assertIn('ping_secret=ssshh',
                       create_method.call_args[1].get('ex_tags', {}).get('arv-ping-url', ""))
 
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        arv_node["hostname"] = None
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_tags', {'tags': 'missing'}).items()
+        )
+
     def test_name_from_new_arvados_node(self):
         arv_node = testutil.arvados_node_mock(hostname=None)
         driver = self.new_driver()
         self.assertEqual('compute-000000000000063-zzzzz',
                          driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['name'])
 
-
-
     def check_node_tagged(self, cloud_node, expected_tags):
         tag_mock = self.driver_mock().ex_create_tags
         self.assertTrue(tag_mock.called)
@@ -91,6 +102,14 @@ echo z1.test > /var/tmp/arv-node-data/meta-data/instance-type
 """,
                          driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['ex_customdata'])
 
+    def test_list_nodes_ignores_nodes_without_tags(self):
+        driver = self.new_driver(create_kwargs={"tag_arvados-class": "dynamic-compute"})
+        # Mock cloud node without tags
+        nodelist = [testutil.cloud_node_mock(1)]
+        self.driver_mock().list_nodes.return_value = nodelist
+        n = driver.list_nodes()
+        self.assertEqual([], n)
+
     def test_create_raises_but_actually_succeeded(self):
         arv_node = testutil.arvados_node_mock(1, hostname=None)
         driver = self.new_driver(create_kwargs={"tag_arvados-class": "dynamic-compute"})
index 297eac0ef3baade9abcca3691009628f6c3647c2..520c0dc0ccb43a78434924ecdc180be5037804b5 100644 (file)
@@ -56,9 +56,32 @@ class EC2ComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
         driver.create_node(testutil.MockSize(1), arv_node)
         create_method = self.driver_mock().create_node
         self.assertTrue(create_method.called)
+        self.assertIn(
+            ('test', 'testvalue'),
+            create_method.call_args[1].get('ex_metadata', {'arg': 'missing'}).items()
+        )
+
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_metadata', {'arg': 'missing'}).items()
+        )
+
+    def test_create_preemptible_instance(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1, preemptible=True), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
         self.assertEqual(
-            {'test':'testvalue'},
-            create_method.call_args[1].get('ex_metadata', {'arg': 'missing'})
+            True,
+            create_method.call_args[1].get('ex_spot_market', 'arg missing')
         )
 
     def test_hostname_from_arvados_node(self):
index f0942e93785571f8ae4e3cdb7f0c78eb173ee7b6..1446cd2fdae559171af4c93535e591f22840290d 100644 (file)
@@ -51,6 +51,17 @@ class GCEComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
         metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
         self.assertIn('ping_secret=ssshh', metadata.get('arv-ping-url'))
 
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_metadata', {'metadata':'missing'}).items()
+        )
+
     def test_create_raises_but_actually_succeeded(self):
         arv_node = testutil.arvados_node_mock(1, hostname=None)
         driver = self.new_driver()
index 921281bc517bd7b6bc41935193c0b8562395a6da..8002b3b921fb14c313260ac48032b81dc2e261f0 100644 (file)
@@ -29,6 +29,12 @@ creds = dummy_creds
 cores = 1
 price = 0.8
 
+[Size 1.preemptible]
+instance_type = 1
+preemptible = true
+cores = 1
+price = 0.8
+
 [Logging]
 file = /dev/null
 level = DEBUG
@@ -53,13 +59,25 @@ testlogger = INFO
 
     def test_list_sizes(self):
         config = self.load_config()
-        client = config.new_cloud_client()
-        sizes = config.node_sizes(client.list_sizes())
-        self.assertEqual(1, len(sizes))
+        sizes = config.node_sizes()
+        self.assertEqual(2, len(sizes))
         size, kwargs = sizes[0]
         self.assertEqual('Small', size.name)
         self.assertEqual(1, kwargs['cores'])
         self.assertEqual(0.8, kwargs['price'])
+        # preemptible is False by default
+        self.assertEqual(False, kwargs['preemptible'])
+        # instance_type == arvados node size id by default
+        self.assertEqual(kwargs['id'], kwargs['instance_type'])
+        # Now retrieve the preemptible version
+        size, kwargs = sizes[1]
+        self.assertEqual('Small', size.name)
+        self.assertEqual('1.preemptible', kwargs['id'])
+        self.assertEqual(1, kwargs['cores'])
+        self.assertEqual(0.8, kwargs['price'])
+        self.assertEqual(True, kwargs['preemptible'])
+        self.assertEqual('1', kwargs['instance_type'])
+
 
     def test_default_node_mem_scaling(self):
         config = self.load_config()
index 8050e6981411d69f127617e0cb2b44681470341d..d09cbf72359610ac08afa428e39f024d3086835c 100644 (file)
@@ -17,11 +17,24 @@ from arvnodeman.jobqueue import ServerCalculator
 from arvnodeman.computenode.dispatch import ComputeNodeMonitorActor
 from . import testutil
 from . import test_status
+from . import pykka_timeout
 import logging
 
 class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                                      unittest.TestCase):
 
+    def assertwait(self, f, timeout=pykka_timeout*2):
+        deadline = time.time() + timeout
+        while True:
+            try:
+                return f()
+            except AssertionError:
+                if time.time() > deadline:
+                    raise
+                pass
+            time.sleep(.1)
+            self.daemon.ping().get(self.TIMEOUT)
+
     def busywait(self, f):
         for n in xrange(200):
             ok = f()
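The new assertwait helper retries an assertion-style callable until it passes or the deadline expires, pinging the daemon actor between attempts so its queued messages get processed. Unlike busywait, a timeout re-raises the original AssertionError, so the failure output says what was actually wrong. Typical use, as in the updated tests below:

    self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))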
@@ -146,8 +159,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.assertIn('node_quota', status.tracker._latest)
 
     def check_monitors_arvados_nodes(self, *arv_nodes):
-        self.busywait(lambda: len(arv_nodes) == len(self.monitored_arvados_nodes()))
-        self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes())
+        self.assertwait(lambda: self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes()))
 
     def test_node_pairing(self):
         cloud_node = testutil.cloud_node_mock(1)
@@ -257,7 +269,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                          arvados_nodes=[testutil.arvados_node_mock(1),
                                         testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
                          want_sizes=[size])
-        self.busywait(lambda: 2 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
         for mon_ref in self.monitor_list():
             self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
         self.assertEqual(1, self.node_shutdown.start.call_count)
@@ -269,7 +281,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                          arvados_nodes=[testutil.arvados_node_mock(1),
                                         testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
                          want_sizes=[size])
-        self.busywait(lambda: 2 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
         get_cloud_node = mock.MagicMock(name="get_cloud_node")
         get_cloud_node.get.return_value = cloud_nodes[1]
         mock_node_monitor = mock.MagicMock()
@@ -278,7 +290,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
 
         self.daemon.cloud_nodes.get()[cloud_nodes[1].id].shutdown_actor = mock_shutdown.proxy()
 
-        self.busywait(lambda: 2 == self.alive_monitor_count())
+        self.assertwait(lambda: self.assertEqual(2, self.alive_monitor_count()))
         for mon_ref in self.monitor_list():
             self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
         self.busywait(lambda: 1 == self.node_shutdown.start.call_count)
@@ -298,8 +310,8 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         arv_node = testutil.arvados_node_mock(2, job_uuid=True)
         self.make_daemon([testutil.cloud_node_mock(2, size=size)], [arv_node],
                          [size], avail_sizes=[(size, {"cores":1})])
-        self.busywait(lambda: 1 == self.paired_monitor_count())
-        self.busywait(lambda: self.node_setup.start.called)
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        self.assertwait(lambda: self.assertEqual(1, self.node_setup.start.called))
 
     def test_boot_new_node_below_min_nodes(self):
         min_size = testutil.MockSize(1)
@@ -543,7 +555,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         arv_node = testutil.arvados_node_mock(1)
         size = testutil.MockSize(1)
         self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], want_sizes=[size])
-        self.busywait(lambda: 1 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
         monitor = self.monitor_list()[0].proxy()
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
@@ -553,7 +565,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         cloud_node = testutil.cloud_node_mock(1)
         arv_node = testutil.arvados_node_mock(1)
         self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], min_nodes=1)
-        self.busywait(lambda: 1 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
         monitor = self.monitor_list()[0].proxy()
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.stop_proxy(self.daemon)
@@ -572,7 +584,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
                      testutil.arvados_node_mock(4, job_uuid=None)]
         self.make_daemon(cloud_nodes, arv_nodes, [size])
-        self.busywait(lambda: 2 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
         for mon_ref in self.monitor_list():
             monitor = mon_ref.proxy()
             if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
@@ -591,13 +603,13 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.last_shutdown.success.get.return_value = False
         self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
-        self.busywait(lambda: 1 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
 
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.last_shutdown.success.get.return_value = True
         self.last_shutdown.stop.side_effect = lambda: monitor.stop()
         self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
-        self.busywait(lambda: 0 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(0, self.paired_monitor_count()))
 
     def test_nodes_shutting_down_replaced_below_max_nodes(self):
         size = testutil.MockSize(6)
@@ -616,7 +628,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         cloud_node = testutil.cloud_node_mock(7)
         self.make_daemon([cloud_node], [testutil.arvados_node_mock(7)],
                          max_nodes=1)
-        self.busywait(lambda: 1 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
         monitor = self.monitor_list()[0].proxy()
         self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
         self.assertTrue(self.node_shutdown.start.called)
@@ -630,7 +642,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         arv_nodes = [testutil.arvados_node_mock(n, size=size) for n in [8, 9]]
         self.make_daemon(cloud_nodes, arv_nodes, [size],
                          avail_sizes=[(size, {"cores":1})])
-        self.busywait(lambda: 2 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
         for mon_ref in self.monitor_list():
             self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
         self.assertEqual(1, self.node_shutdown.start.call_count)
@@ -671,7 +683,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         cloud_nodes = [testutil.cloud_node_mock(1, size=size)]
         arv_nodes = [testutil.arvados_node_mock(1, job_uuid=None)]
         self.make_daemon(cloud_nodes, arv_nodes, [size])
-        self.busywait(lambda: 1 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
         for mon_ref in self.monitor_list():
             monitor = mon_ref.proxy()
             if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
@@ -770,7 +782,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                                         testutil.arvados_node_mock(3)],
                          want_sizes=[small, small, big],
                          avail_sizes=avail_sizes)
-        self.busywait(lambda: 3 == self.paired_monitor_count())
+        self.assertwait(lambda: self.assertEqual(3, self.paired_monitor_count()))
         self.daemon.update_server_wishlist([small, big, big]).get(self.TIMEOUT)
 
         self.assertEqual(0, self.node_shutdown.start.call_count)
index 2d1a17eaecd82a7cea0be0103d27fc2e8f07c4c5..8bf3ea87412200a595d70d02c34796f75a2a8543 100644 (file)
@@ -48,9 +48,12 @@ class ActorUnhandledExceptionTest(testutil.ActorTestMixin, unittest.TestCase):
     def test_nonfatal_error(self):
         status.tracker.update({'actor_exceptions': 0})
         kill_mock = mock.Mock('os.kill')
-        act = BogusActor.start(OSError(errno.ENOENT, ""), killfunc=kill_mock).tell_proxy()
+        bgact = BogusActor.start(OSError(errno.ENOENT, ""), killfunc=kill_mock)
+        act_thread = bgact.proxy().get_thread().get()
+        act = bgact.tell_proxy()
         act.doStuff()
         act.actor_ref.stop(block=True)
+        act_thread.join()
         self.assertFalse(kill_mock.called)
         self.assertEqual(1, status.tracker.get('actor_exceptions'))
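
This change closes a teardown race: actor_ref.stop(block=True) returns once the stop message is handled, but the actor's thread may still be running the failure-accounting code that increments 'actor_exceptions'. Fetching the actor's thread up front and join()ing it after stop guarantees the bookkeeping has finished before the assertions run. A sketch of the accessor the test assumes (hypothetical; in a pykka-style actor, proxy calls execute on the actor's own thread):

    import threading
    import pykka

    class BogusActor(pykka.ThreadingActor):
        def get_thread(self):
            # Runs on the actor's thread when called through a proxy, so the
            # caller receives exactly the thread it needs to join().
            return threading.current_thread()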
 
index 8c10f1b426e4bf71b036e17208f4056c27323327..de83b68fed81b5daa313cda732477be311302ee2 100644 (file)
@@ -154,8 +154,8 @@ class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         super(JobQueueMonitorActorTestCase, self).build_monitor(*args, **kwargs)
         self.client.jobs().queue().execute.side_effect = side_effect
 
-    @mock.patch("subprocess.check_call")
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_call")
+    @mock.patch("subprocess32.check_output")
     def test_unsatisfiable_jobs(self, mock_squeue, mock_scancel):
         job_uuid = 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'
         container_uuid = 'yyyyy-dz642-yyyyyyyyyyyyyyy'
@@ -169,7 +169,7 @@ class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         self.client.jobs().cancel.assert_called_with(uuid=job_uuid)
         mock_scancel.assert_called_with(['scancel', '--name='+container_uuid])
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_subscribers_get_server_lists(self, mock_squeue):
         mock_squeue.return_value = ""
 
@@ -179,7 +179,7 @@ class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         self.subscriber.assert_called_with([testutil.MockSize(1),
                                             testutil.MockSize(2)])
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_squeue_server_list(self, mock_squeue):
         mock_squeue.return_value = """1|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzy|(null)|1234567890
 2|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzz|(null)|1234567890
@@ -193,7 +193,7 @@ class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         self.subscriber.assert_called_with([testutil.MockSize(1),
                                             testutil.MockSize(2)])
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_squeue_server_list_suffix(self, mock_squeue):
         mock_squeue.return_value = """1|1024M|0|(ReqNodeNotAvail, UnavailableNodes:compute123)|zzzzz-dz642-zzzzzzzzzzzzzzy|(null)|1234567890
 1|2G|0|(ReqNodeNotAvail)|zzzzz-dz642-zzzzzzzzzzzzzzz|(null)|1234567890
@@ -207,7 +207,7 @@ class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         self.subscriber.assert_called_with([testutil.MockSize(1),
                                             testutil.MockSize(2)])
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_squeue_server_list_instancetype_constraint(self, mock_squeue):
         mock_squeue.return_value = """1|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzy|instancetype=z2.test|1234567890\n"""
         super(JobQueueMonitorActorTestCase, self).build_monitor(jobqueue.ServerCalculator(
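
These hunks retarget every mock from subprocess to subprocess32, the Python 2 backport of the modern subprocess module that the code under test now uses for its squeue/scancel calls. mock.patch swaps out an attribute on the module it names, so the target string has to track what production code actually imports, or the test stubs an object the code never touches. A minimal illustration of the rule, as a TestCase snippet:

    import mock

    # The target names the module the production code imports; patching
    # "subprocess.check_output" here would leave the real subprocess32
    # call live underneath the test.
    @mock.patch("subprocess32.check_output")
    def test_squeue_empty(self, mock_squeue):
        mock_squeue.return_value = ""   # squeue reports an empty queue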
index 5becd0c2241386e34b6dfef8e57a29b025335a67..df31a12267c6ab3447272ea66414af5f408fba2b 100644 (file)
@@ -21,7 +21,7 @@ class ArvadosNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
             *args, **kwargs)
         self.client.nodes().list().execute.side_effect = side_effect
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_uuid_is_subscription_key(self, sinfo_mock):
         sinfo_mock.return_value = ""
         node = testutil.arvados_node_mock()
@@ -40,7 +40,7 @@ class ArvadosNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
         self.subscriber.assert_called_with(node)
         self.assertEqual("down", node["crunch_worker_state"])
 
-    @mock.patch("subprocess.check_output")
+    @mock.patch("subprocess32.check_output")
     def test_update_from_sinfo(self, sinfo_mock):
         sinfo_mock.return_value = """compute1|idle|instancetype=a1.test
 compute2|alloc|(null)
@@ -84,6 +84,7 @@ class CloudNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
             self.public_ips = []
             self.size = testutil.MockSize(1)
             self.state = 0
+            self.extra = {'arvados_node_size': self.size.id}
 
 
     def build_monitor(self, side_effect, *args, **kwargs):
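
The mock cloud node used by CloudNodeListMonitorActorTestCase now carries extra={'arvados_node_size': <size id>}, mirroring what appears to be a driver convention of tagging each created node with the instance size it was requested at, for clouds whose list APIs do not round-trip a usable size object. A consumer-side sketch (names hypothetical):

    # Recover the size recorded by the driver, falling back to whatever the
    # cloud reported if the tag is absent.
    size_id = node.extra.get('arvados_node_size')
    size = sizes_by_id.get(size_id, node.size)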
index 555144c4d05d2bc562d9bc2357fa93421f64b35f..ee475efe7e756e6bf717f657db7eb3cf1542525e 100644 (file)
@@ -78,7 +78,7 @@ class MockShutdownTimer(object):
 
 
 class MockSize(object):
-    def __init__(self, factor):
+    def __init__(self, factor, preemptible=False):
         self.id = 'z{}.test'.format(factor)
         self.name = 'test size '+self.id
         self.ram = 128 * factor
@@ -87,6 +87,8 @@ class MockSize(object):
         self.bandwidth = 16 * factor
         self.price = float(factor)
         self.extra = {}
+        self.real = self
+        self.preemptible = preemptible
 
     def __eq__(self, other):
         return self.id == other.id
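
MockSize picks up two attributes: preemptible, so tests can model spot-style instance types, and real, which points back at the mock itself; production sizes are presumably wrappers whose .real attribute unwraps to the underlying cloud size, and a self-referential mock lets one object play both roles. Expected behavior, per the constructor above:

    size = testutil.MockSize(3, preemptible=True)
    assert size.real is size        # the mock is its own underlying size
    assert size.price == 3.0        # every numeric field scales with the factor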
index a5971502f65354b8786b676888970d2fecd27ce7..a26c396e2e1eb48d35407ab3cc561892c3dd0980 100755 (executable)
@@ -345,7 +345,7 @@ case "$subcmd" in
         ;;
 
     sh*)
-        exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM GEM_HOME=/var/lib/gems /bin/bash
+        exec docker exec -ti -e LINES=$(tput lines) -e COLUMNS=$(tput cols) -e TERM=$TERM -e GEM_HOME=/var/lib/gems $ARVBOX_CONTAINER /bin/bash
         ;;
 
     pipe)
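
The rewritten sh* case passes the client terminal's geometry into the container explicitly: docker exec allocates a TTY with -ti but does not reliably propagate window size, so exporting LINES and COLUMNS from tput keeps full-screen programs inside the shell from wrapping at the wrong column. Moving TERM and GEM_HOME to -e flags also drops the /usr/bin/env indirection.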
index 0cc36ebd2b92cb5ac2c3f35ec02d757adf11d4a9..1ac0e76c373cd3240175a5c3c81c00aeb44b138e 100644 (file)
@@ -39,7 +39,7 @@ ENV GEM_HOME /var/lib/gems
 ENV GEM_PATH /var/lib/gems
 ENV PATH $PATH:/var/lib/gems/bin
 
-ENV GOVERSION 1.8.3
+ENV GOVERSION 1.10.1
 
 # Install golang binary
 RUN curl -f http://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \
index fea9cf6e8beeda2dcec9f15f88d759303286f173..35a8b156dec7cb650015b9e22155a16a18177615 100755 (executable)
@@ -1,4 +1,8 @@
 #!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 # system one time tasks
 
 PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
index 6b092eae678f0b8f825331b9140842d3bb7a8639..5812f3d8b0cea307b793016156b8fa73b3909224 100755 (executable)
@@ -1,4 +1,7 @@
 #!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 PATH=/usr/local/bin:/usr/local/sbin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/X11R6/bin
 
index 525b96bbb667564ecf7f2eef66d37009796a7406..242c035f66dcbb6d865d52bc421ec91881a093fb 100755 (executable)
@@ -1,4 +1,8 @@
 #!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 exec 2>&1
 
 PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
index 02bb2ea91884d110c8ad1add1cf7cab87ce30304..d4d2190b152d284c55bc5b4bdd988787be2d7f69 100755 (executable)
@@ -1,4 +1,8 @@
 #!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 exec 2>&1
 
 PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
index 0e937908e8116f5984c367e4cee25fb718b3f833..da6db3653cd9b48b3bea1570aa5006e173c6b319 100755 (executable)
@@ -28,7 +28,7 @@ ln -sf /usr/src/arvados/sdk/cli/binstubs/arv /usr/local/bin/arv
 # multiple packages, because it will blindly install the latest version of each
 # dependency requested by each package, even if a compatible package version is
 # already installed.
-pip_install pip
+pip_install pip==9.0.3
 
 pip_install wheel
 
index e8f0861be49350eddc232bd8826a4681af3a424f..98dda673d5a3ab70d65ab1d3989b49f539959b69 100644 (file)
@@ -46,8 +46,9 @@ class LiveLogReader(object):
     EOF = None
 
     def __init__(self, job_uuid):
-        logger.debug('load stderr events for job %s', job_uuid)
         self.job_uuid = job_uuid
+        self.event_types = (['stderr'] if '-8i9sb-' in job_uuid else ['crunchstat', 'arv-mount'])
+        logger.debug('load %s events for job %s', self.event_types, self.job_uuid)
 
     def __str__(self):
         return self.job_uuid
@@ -57,7 +58,7 @@ class LiveLogReader(object):
         last_id = 0
         filters = [
             ['object_uuid', '=', self.job_uuid],
-            ['event_type', '=', 'stderr']]
+            ['event_type', 'in', self.event_types]]
         try:
             while True:
                 page = arvados.api().logs().index(
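
LiveLogReader now chooses which log event types to page through from the UUID itself, and the filter operator changes from '=' to 'in' to accept a list. Arvados UUIDs encode the object type in their middle segment, and '-8i9sb-' is the job infix (matching the job UUIDs used elsewhere in this commit's tests), so jobs read their runtime 'stderr' stream while anything else is treated as a container whose resource logs arrive as 'crunchstat' and 'arv-mount' events. The dispatch, restated standalone (helper name hypothetical):

    def infer_event_types(uuid):
        # '-8i9sb-' marks a Job UUID; jobs log runtime output as 'stderr'.
        # Other UUIDs are assumed here to be containers, whose usage data
        # arrives as 'crunchstat' and 'arv-mount' log events.
        return ['stderr'] if '-8i9sb-' in uuid else ['crunchstat', 'arv-mount']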
index 78c8d4278f32b2050ff006d168cf61213804645a..562ee839e0832c9b6d394c00f62c9a524f89d30a 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (c) 2009 Dan Vanderkam. All rights reserved.
+//
+// SPDX-License-Identifier: MIT
+
 /**
  * Synchronize zooming and/or selections between a set of dygraphs.
  *
@@ -31,7 +35,6 @@
  * You may also set `range: false` if you wish to only sync the x-axis.
  * The `range` option has no effect unless `zoom` is true (the default).
  *
- * SPDX-License-Identifier: MIT
  * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js
  * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389
  *
index a4f750b4c4d0445567ad20da7ac9408eb12a692d..f18d4e464cdf34ed86c0be1a4631aadc598179df 100644 (file)
                        "revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba",
                        "revisionTime": "2017-12-05T20:32:29Z"
                },
+               {
+                       "checksumSHA1": "st4vb0GmDeoKbsfxdpNZ2MPl76M=",
+                       "path": "github.com/StackExchange/wmi",
+                       "revision": "cdffdb33acae0e14efff2628f9bae377b597840e",
+                       "revisionTime": "2018-04-12T20:51:11Z"
+               },
                {
                        "checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
                        "path": "github.com/beorn7/perks/quantile",
                        "revision": "0ca9ea5df5451ffdf184b4428c902747c2c11cd7",
                        "revisionTime": "2017-03-27T23:54:44Z"
                },
+               {
+                       "checksumSHA1": "Kqv7bA4oJG0nPwQvGWDwGGaKONo=",
+                       "path": "github.com/go-ole/go-ole",
+                       "revision": "7a0fa49edf48165190530c675167e2f319a05268",
+                       "revisionTime": "2018-06-25T08:58:08Z"
+               },
+               {
+                       "checksumSHA1": "PArleDBtadu2qO4hJwHR8a3IOTA=",
+                       "path": "github.com/go-ole/go-ole/oleutil",
+                       "revision": "7a0fa49edf48165190530c675167e2f319a05268",
+                       "revisionTime": "2018-06-25T08:58:08Z"
+               },
                {
                        "checksumSHA1": "wn2shNJMwRZpvuvkf1s7h0wvqHI=",
                        "path": "github.com/gogo/protobuf/proto",
                        "revision": "1744e2970ca51c86172c8190fadad617561ed6e7",
                        "revisionTime": "2017-11-10T11:01:46Z"
                },
+               {
+                       "checksumSHA1": "q14d3C3xvWevU3dSv4P5K0+OSD0=",
+                       "path": "github.com/shirou/gopsutil/cpu",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "LZ9GloiGLTISmQ4dalK2XspH6Wo=",
+                       "path": "github.com/shirou/gopsutil/host",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "cyoqI0gryzjxGTkaAfyUqMiuUR0=",
+                       "path": "github.com/shirou/gopsutil/internal/common",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "vEQLjAO5T5K9zXblEMYdoaBZzj0=",
+                       "path": "github.com/shirou/gopsutil/mem",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "KMWFRa0DVpabo9d8euB4RYjUBQE=",
+                       "path": "github.com/shirou/gopsutil/net",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "fbO7c1gv1kSvWKOb/+5HUWFkBaA=",
+                       "path": "github.com/shirou/gopsutil/process",
+                       "revision": "63728fcf6b24475ecfea044e22242447666c2f52",
+                       "revisionTime": "2018-07-05T13:28:12Z"
+               },
+               {
+                       "checksumSHA1": "Nve7SpDmjsv6+rhkXAkfg/UQx94=",
+                       "path": "github.com/shirou/w32",
+                       "revision": "bb4de0191aa41b5507caa14b0650cdbddcd9280b",
+                       "revisionTime": "2016-09-30T03:27:40Z"
+               },
                {
                        "checksumSHA1": "8QeSG127zQqbA+YfkO1WkKx/iUI=",
                        "path": "github.com/src-d/gcfg",